/* memcontrol.c - Memory Controller
 *
 * Copyright IBM Corporation, 2007
 * Author Balbir Singh <balbir@linux.vnet.ibm.com>
 *
 * Copyright 2007 OpenVZ SWsoft Inc
 * Author: Pavel Emelianov <xemul@openvz.org>
 *
 * Memory thresholds
 * Copyright (C) 2009 Nokia Corporation
 * Author: Kirill A. Shutemov
 *
 * Kernel Memory Controller
 * Copyright (C) 2012 Parallels Inc. and Google Inc.
 * Authors: Glauber Costa and Suleiman Souhlal
 *
 * Native page reclaim
 * Charge lifetime sanitation
 * Lockless page tracking & accounting
 * Unified hierarchy hierarchical accounting & limits
 * Copyright (C) 2015 Red Hat, Inc., Johannes Weiner
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 */
#include <linux/page_counter.h>
#include <linux/memcontrol.h>
#include <linux/cgroup.h>
#include <linux/mm.h>
#include <linux/sched/mm.h>
#include <linux/shmem_fs.h>
#include <linux/hugetlb.h>
#include <linux/pagemap.h>
#include <linux/smp.h>
#include <linux/page-flags.h>
#include <linux/backing-dev.h>
#include <linux/bit_spinlock.h>
#include <linux/rcupdate.h>
#include <linux/limits.h>
#include <linux/export.h>
#include <linux/mutex.h>
#include <linux/rbtree.h>
#include <linux/slab.h>
#include <linux/swap.h>
#include <linux/swapops.h>
#include <linux/spinlock.h>
#include <linux/eventfd.h>
#include <linux/poll.h>
#include <linux/sort.h>
#include <linux/fs.h>
#include <linux/seq_file.h>
#include <linux/vmpressure.h>
#include <linux/mm_inline.h>
#include <linux/swap_cgroup.h>
#include <linux/cpu.h>
#include <linux/oom.h>
#include <linux/lockdep.h>
#include <linux/file.h>
#include <linux/tracehook.h>
#include "internal.h"
#include <net/sock.h>
#include <net/ip.h>
#include "slab.h"

#include <linux/uaccess.h>

#include <trace/events/vmscan.h>
struct cgroup_subsys memory_cgrp_subsys __read_mostly;
EXPORT_SYMBOL(memory_cgrp_subsys);

struct mem_cgroup *root_mem_cgroup __read_mostly;

#define MEM_CGROUP_RECLAIM_RETRIES	5

/* Socket memory accounting disabled? */
static bool cgroup_memory_nosocket;

/* Kernel memory accounting disabled? */
static bool cgroup_memory_nokmem;

/* Whether the swap controller is active */
#ifdef CONFIG_MEMCG_SWAP
int do_swap_account __read_mostly;
#else
#define do_swap_account		0
#endif

/* Whether legacy memory+swap accounting is active */
static bool do_memsw_account(void)
{
	return !cgroup_subsys_on_dfl(memory_cgrp_subsys) && do_swap_account;
}

static const char *const mem_cgroup_lru_names[] = {
	"inactive_anon",
	"active_anon",
	"inactive_file",
	"active_file",
	"unevictable",
};

#define THRESHOLDS_EVENTS_TARGET 128
#define SOFTLIMIT_EVENTS_TARGET 1024
#define NUMAINFO_EVENTS_TARGET	1024

/*
 * Cgroups above their limits are maintained in a RB-Tree, independent of
 * their hierarchy representation
 */

struct mem_cgroup_tree_per_node {
	struct rb_root rb_root;
	spinlock_t lock;
};

struct mem_cgroup_tree {
	struct mem_cgroup_tree_per_node *rb_tree_per_node[MAX_NUMNODES];
};

static struct mem_cgroup_tree soft_limit_tree __read_mostly;

/* for OOM */
struct mem_cgroup_eventfd_list {
	struct list_head list;
	struct eventfd_ctx *eventfd;
};

/*
 * cgroup_event represents events which userspace want to receive.
 */
struct mem_cgroup_event {
	/*
	 * memcg which the event belongs to.
	 */
	struct mem_cgroup *memcg;
	/*
	 * eventfd to signal userspace about the event.
	 */
	struct eventfd_ctx *eventfd;
	/*
	 * Each of these stored in a list by the cgroup.
	 */
	struct list_head list;
	/*
	 * register_event() callback will be used to add new userspace
	 * waiter for changes related to this event.  Use eventfd_signal()
	 * on eventfd to send notification to userspace.
	 */
	int (*register_event)(struct mem_cgroup *memcg,
			      struct eventfd_ctx *eventfd, const char *args);
	/*
	 * unregister_event() callback will be called when userspace closes
	 * the eventfd or on cgroup removing.  This callback must be set,
	 * if you want provide notification functionality.
	 */
	void (*unregister_event)(struct mem_cgroup *memcg,
				 struct eventfd_ctx *eventfd);
	/*
	 * All fields below needed to unregister event when
	 * userspace closes eventfd.
	 */
	poll_table pt;
	wait_queue_head_t *wqh;
	wait_queue_t wait;
	struct work_struct remove;
};

static void mem_cgroup_threshold(struct mem_cgroup *memcg);
static void mem_cgroup_oom_notify(struct mem_cgroup *memcg);

/*
 * Types of charges to be moved at task migration.
 */
#define MOVE_ANON	0x1U
#define MOVE_FILE	0x2U
#define MOVE_MASK	(MOVE_ANON | MOVE_FILE)

/* "mc" and its members are protected by cgroup_mutex */
static struct move_charge_struct {
	spinlock_t	  lock; /* for from, to */
	struct mm_struct  *mm;
	struct mem_cgroup *from;
	struct mem_cgroup *to;
	unsigned long flags;
	unsigned long precharge;
	unsigned long moved_charge;
	unsigned long moved_swap;
	struct task_struct *moving_task;	/* a task moving charges */
	wait_queue_head_t waitq;		/* a waitq for other context */
} mc = {
	.lock = __SPIN_LOCK_UNLOCKED(mc.lock),
	.waitq = __WAIT_QUEUE_HEAD_INITIALIZER(mc.waitq),
};

/*
 * Maximum loops in mem_cgroup_hierarchical_reclaim(), limits number of
 * limit reclaim to prevent infinite loops, if they ever occur.
 */
#define	MEM_CGROUP_MAX_RECLAIM_LOOPS		100
#define	MEM_CGROUP_MAX_SOFT_LIMIT_RECLAIM_LOOPS	2

enum charge_type {
	MEM_CGROUP_CHARGE_TYPE_CACHE = 0,
	MEM_CGROUP_CHARGE_TYPE_ANON,
	MEM_CGROUP_CHARGE_TYPE_SWAPOUT,	/* for accounting swapcache */
	MEM_CGROUP_CHARGE_TYPE_DROP,	/* a page was unused swap cache */
	NR_CHARGE_TYPE,
};

/* for encoding cft->private value on file */
enum res_type {
	_MEM,
	_MEMSWAP,
	_OOM_TYPE,
	_KMEM,
	_TCP,
};

#define MEMFILE_PRIVATE(x, val)	((x) << 16 | (val))
#define MEMFILE_TYPE(val)	((val) >> 16 & 0xffff)
#define MEMFILE_ATTR(val)	((val) & 0xffff)
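
/*
 * Illustrative note (not in the original source): cft->private packs an
 * enum res_type in the high 16 bits and a RES_* attribute in the low 16.
 * A cftype describing the hard limit of the "memory" counter is declared
 * with:
 *
 *	.private = MEMFILE_PRIVATE(_MEM, RES_LIMIT),
 *
 * and the read/write handlers recover the halves with
 * MEMFILE_TYPE(cft->private) == _MEM and
 * MEMFILE_ATTR(cft->private) == RES_LIMIT.
 */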

#define OOM_CONTROL		(0)

/* Some nice accessors for the vmpressure. */
struct vmpressure *memcg_to_vmpressure(struct mem_cgroup *memcg)
{
	if (!memcg)
		memcg = root_mem_cgroup;
	return &memcg->vmpressure;
}

struct cgroup_subsys_state *vmpressure_to_css(struct vmpressure *vmpr)
{
	return &container_of(vmpr, struct mem_cgroup, vmpressure)->css;
}

static inline bool mem_cgroup_is_root(struct mem_cgroup *memcg)
{
	return (memcg == root_mem_cgroup);
}

#ifndef CONFIG_SLOB
/*
 * This will be the memcg's index in each cache's ->memcg_params.memcg_caches.
 * The main reason for not using cgroup id for this:
 *  this works better in sparse environments, where we have a lot of memcgs,
 *  but only a few kmem-limited. Or also, if we have, for instance, 200
 *  memcgs, and none but the 200th is kmem-limited, we'd have to have a
 *  200 entry array for that.
 *
 * The current size of the caches array is stored in memcg_nr_cache_ids. It
 * will double each time we have to increase it.
 */
static DEFINE_IDA(memcg_cache_ida);
int memcg_nr_cache_ids;

/* Protects memcg_nr_cache_ids */
static DECLARE_RWSEM(memcg_cache_ids_sem);

void memcg_get_cache_ids(void)
{
	down_read(&memcg_cache_ids_sem);
}

void memcg_put_cache_ids(void)
{
	up_read(&memcg_cache_ids_sem);
}

/*
 * MIN_SIZE is different than 1, because we would like to avoid going through
 * the alloc/free process all the time. In a small machine, 4 kmem-limited
 * cgroups is a reasonable guess. In the future, it could be a parameter or
 * tunable at compile time.
 *
 * MAX_SIZE should be as large as the number of cgrp_ids. Ideally, we could
 * get this constant directly from cgroup, but it is understandable that this
 * is better kept as an internal representation in cgroup.c. In any case, the
 * cgrp_id space is not getting any smaller, and we don't have to necessarily
 * increase ours as well if it increases.
 */
#define MEMCG_CACHES_MIN_SIZE 4
#define MEMCG_CACHES_MAX_SIZE MEM_CGROUP_ID_MAX

/*
 * A lot of the calls to the cache allocation functions are expected to be
 * inlined by the compiler. Since the calls to memcg_kmem_get_cache are
 * conditional to this static branch, we'll have to allow modules that does
 * kmem_cache_alloc and the such to see this symbol as well
 */
DEFINE_STATIC_KEY_FALSE(memcg_kmem_enabled_key);
EXPORT_SYMBOL(memcg_kmem_enabled_key);

struct workqueue_struct *memcg_kmem_cache_wq;

#endif /* !CONFIG_SLOB */

/**
 * mem_cgroup_css_from_page - css of the memcg associated with a page
 * @page: page of interest
 *
 * If memcg is bound to the default hierarchy, css of the memcg associated
 * with @page is returned.  The returned css remains associated with @page
 * until it is released.
 *
 * If memcg is bound to a traditional hierarchy, the css of root_mem_cgroup
 * is returned.
 */
struct cgroup_subsys_state *mem_cgroup_css_from_page(struct page *page)
{
	struct mem_cgroup *memcg;

	memcg = page->mem_cgroup;

	if (!memcg || !cgroup_subsys_on_dfl(memory_cgrp_subsys))
		memcg = root_mem_cgroup;

	return &memcg->css;
}

/**
 * page_cgroup_ino - return inode number of the memcg a page is charged to
 * @page: the page
 *
 * Look up the closest online ancestor of the memory cgroup @page is charged to
 * and return its inode number or 0 if @page is not charged to any cgroup. It
 * is safe to call this function without holding a reference to @page.
 *
 * Note, this function is inherently racy, because there is nothing to prevent
 * the cgroup inode from getting torn down and potentially reallocated a moment
 * after page_cgroup_ino() returns, so it only should be used by callers that
 * do not care (such as procfs interfaces).
 */
ino_t page_cgroup_ino(struct page *page)
{
	struct mem_cgroup *memcg;
	unsigned long ino = 0;

	rcu_read_lock();
	memcg = READ_ONCE(page->mem_cgroup);
	while (memcg && !(memcg->css.flags & CSS_ONLINE))
		memcg = parent_mem_cgroup(memcg);
	if (memcg)
		ino = cgroup_ino(memcg->css.cgroup);
	rcu_read_unlock();
	return ino;
}

static struct mem_cgroup_per_node *
mem_cgroup_page_nodeinfo(struct mem_cgroup *memcg, struct page *page)
{
	int nid = page_to_nid(page);

	return memcg->nodeinfo[nid];
}

static struct mem_cgroup_tree_per_node *
soft_limit_tree_node(int nid)
{
	return soft_limit_tree.rb_tree_per_node[nid];
}

static struct mem_cgroup_tree_per_node *
soft_limit_tree_from_page(struct page *page)
{
	int nid = page_to_nid(page);

	return soft_limit_tree.rb_tree_per_node[nid];
}

static void __mem_cgroup_insert_exceeded(struct mem_cgroup_per_node *mz,
					 struct mem_cgroup_tree_per_node *mctz,
					 unsigned long new_usage_in_excess)
{
	struct rb_node **p = &mctz->rb_root.rb_node;
	struct rb_node *parent = NULL;
	struct mem_cgroup_per_node *mz_node;

	if (mz->on_tree)
		return;

	mz->usage_in_excess = new_usage_in_excess;
	if (!mz->usage_in_excess)
		return;
	while (*p) {
		parent = *p;
		mz_node = rb_entry(parent, struct mem_cgroup_per_node,
					tree_node);
		if (mz->usage_in_excess < mz_node->usage_in_excess)
			p = &(*p)->rb_left;
		/*
		 * We can't avoid mem cgroups that are over their soft
		 * limit by the same amount
		 */
		else if (mz->usage_in_excess >= mz_node->usage_in_excess)
			p = &(*p)->rb_right;
	}
	rb_link_node(&mz->tree_node, parent, p);
	rb_insert_color(&mz->tree_node, &mctz->rb_root);
	mz->on_tree = true;
}

static void __mem_cgroup_remove_exceeded(struct mem_cgroup_per_node *mz,
					 struct mem_cgroup_tree_per_node *mctz)
{
	if (!mz->on_tree)
		return;
	rb_erase(&mz->tree_node, &mctz->rb_root);
	mz->on_tree = false;
}

static void mem_cgroup_remove_exceeded(struct mem_cgroup_per_node *mz,
				       struct mem_cgroup_tree_per_node *mctz)
{
	unsigned long flags;

	spin_lock_irqsave(&mctz->lock, flags);
	__mem_cgroup_remove_exceeded(mz, mctz);
	spin_unlock_irqrestore(&mctz->lock, flags);
}

static unsigned long soft_limit_excess(struct mem_cgroup *memcg)
{
	unsigned long nr_pages = page_counter_read(&memcg->memory);
	unsigned long soft_limit = READ_ONCE(memcg->soft_limit);
	unsigned long excess = 0;

	if (nr_pages > soft_limit)
		excess = nr_pages - soft_limit;

	return excess;
}
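
/*
 * Worked example (illustrative, not in the original source): with 4KiB
 * pages, a cgroup using 300MiB (76800 pages) against a soft limit of
 * 256MiB (65536 pages) has an excess of 11264 pages; a cgroup at or
 * below its soft limit reports an excess of 0 and is kept off the
 * soft-limit RB-tree.
 */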

static void mem_cgroup_update_tree(struct mem_cgroup *memcg, struct page *page)
{
	unsigned long excess;
	struct mem_cgroup_per_node *mz;
	struct mem_cgroup_tree_per_node *mctz;

	mctz = soft_limit_tree_from_page(page);
	if (!mctz)
		return;
	/*
	 * Necessary to update all ancestors when hierarchy is used,
	 * because their event counter is not touched.
	 */
	for (; memcg; memcg = parent_mem_cgroup(memcg)) {
		mz = mem_cgroup_page_nodeinfo(memcg, page);
		excess = soft_limit_excess(memcg);
		/*
		 * We have to update the tree if mz is on RB-tree or
		 * mem is over its softlimit.
		 */
		if (excess || mz->on_tree) {
			unsigned long flags;

			spin_lock_irqsave(&mctz->lock, flags);
			/* if on-tree, remove it */
			if (mz->on_tree)
				__mem_cgroup_remove_exceeded(mz, mctz);
			/*
			 * Insert again. mz->usage_in_excess will be updated.
			 * If excess is 0, no tree ops.
			 */
			__mem_cgroup_insert_exceeded(mz, mctz, excess);
			spin_unlock_irqrestore(&mctz->lock, flags);
		}
	}
}

static void mem_cgroup_remove_from_trees(struct mem_cgroup *memcg)
{
	struct mem_cgroup_tree_per_node *mctz;
	struct mem_cgroup_per_node *mz;
	int nid;

	for_each_node(nid) {
		mz = mem_cgroup_nodeinfo(memcg, nid);
		mctz = soft_limit_tree_node(nid);
		if (mctz)
			mem_cgroup_remove_exceeded(mz, mctz);
	}
}

static struct mem_cgroup_per_node *
__mem_cgroup_largest_soft_limit_node(struct mem_cgroup_tree_per_node *mctz)
{
	struct rb_node *rightmost = NULL;
	struct mem_cgroup_per_node *mz;

retry:
	mz = NULL;
	rightmost = rb_last(&mctz->rb_root);
	if (!rightmost)
		goto done;		/* Nothing to reclaim from */

	mz = rb_entry(rightmost, struct mem_cgroup_per_node, tree_node);
	/*
	 * Remove the node now but someone else can add it back,
	 * we will add it back at the end of reclaim to its correct
	 * position in the tree.
	 */
	__mem_cgroup_remove_exceeded(mz, mctz);
	if (!soft_limit_excess(mz->memcg) ||
	    !css_tryget_online(&mz->memcg->css))
		goto retry;
done:
	return mz;
}

static struct mem_cgroup_per_node *
mem_cgroup_largest_soft_limit_node(struct mem_cgroup_tree_per_node *mctz)
{
	struct mem_cgroup_per_node *mz;

	spin_lock_irq(&mctz->lock);
	mz = __mem_cgroup_largest_soft_limit_node(mctz);
	spin_unlock_irq(&mctz->lock);
	return mz;
}

/*
 * Return event count for single (non recursive) @memcg.
 *
 * Implementation Note: reading percpu statistics for memcg.
 *
 * Both of vmstat[] and percpu_counter has threshold and do periodic
 * synchronization to implement "quick" read. There are trade-off between
 * reading cost and precision of value.
 *
 * But this _read() function is used for user interface now. The user accounts
 * memory usage by memory cgroup and he _always_ requires exact value because
 * he accounts memory. Even if we provide quick-and-fuzzy read, we always
 * have to visit all online cpus and make sum. So, for now, unnecessary
 * synchronization is not implemented. (just implemented for cpu hotplug)
 *
 * If there are kernel internal actions which can make use of some not-exact
 * value, and reading all cpu value can be performance bottleneck in some
 * common workload, threshold and synchronization as vmstat[] should be
 * implemented.
 */
static unsigned long memcg_sum_events(struct mem_cgroup *memcg,
				      enum memcg_event_item event)
{
	unsigned long val = 0;
	int cpu;

	for_each_possible_cpu(cpu)
		val += per_cpu(memcg->stat->events[event], cpu);
	return val;
}

static void mem_cgroup_charge_statistics(struct mem_cgroup *memcg,
					 struct page *page,
					 bool compound, int nr_pages)
{
	/*
	 * Here, RSS means 'mapped anon' and anon's SwapCache. Shmem/tmpfs is
	 * counted as CACHE even if it's on ANON LRU.
	 */
	if (PageAnon(page))
		__this_cpu_add(memcg->stat->count[MEMCG_RSS], nr_pages);
	else {
		__this_cpu_add(memcg->stat->count[MEMCG_CACHE], nr_pages);
		if (PageSwapBacked(page))
			__this_cpu_add(memcg->stat->count[NR_SHMEM], nr_pages);
	}

	if (compound) {
		VM_BUG_ON_PAGE(!PageTransHuge(page), page);
		__this_cpu_add(memcg->stat->count[MEMCG_RSS_HUGE], nr_pages);
	}

	/* pagein of a big page is an event. So, ignore page size */
	if (nr_pages > 0)
		__this_cpu_inc(memcg->stat->events[PGPGIN]);
	else {
		__this_cpu_inc(memcg->stat->events[PGPGOUT]);
		nr_pages = -nr_pages; /* for event */
	}

	__this_cpu_add(memcg->stat->nr_page_events, nr_pages);
}

unsigned long mem_cgroup_node_nr_lru_pages(struct mem_cgroup *memcg,
					   int nid, unsigned int lru_mask)
{
	struct lruvec *lruvec = mem_cgroup_lruvec(NODE_DATA(nid), memcg);
	unsigned long nr = 0;
	enum lru_list lru;

	VM_BUG_ON((unsigned)nid >= nr_node_ids);

	for_each_lru(lru) {
		if (!(BIT(lru) & lru_mask))
			continue;
		nr += mem_cgroup_get_lru_size(lruvec, lru);
	}
	return nr;
}

static unsigned long mem_cgroup_nr_lru_pages(struct mem_cgroup *memcg,
					     unsigned int lru_mask)
{
	unsigned long nr = 0;
	int nid;

	for_each_node_state(nid, N_MEMORY)
		nr += mem_cgroup_node_nr_lru_pages(memcg, nid, lru_mask);
	return nr;
}

static bool mem_cgroup_event_ratelimit(struct mem_cgroup *memcg,
				       enum mem_cgroup_events_target target)
{
	unsigned long val, next;

	val = __this_cpu_read(memcg->stat->nr_page_events);
	next = __this_cpu_read(memcg->stat->targets[target]);
	/* from time_after() in jiffies.h */
	if ((long)next - (long)val < 0) {
		switch (target) {
		case MEM_CGROUP_TARGET_THRESH:
			next = val + THRESHOLDS_EVENTS_TARGET;
			break;
		case MEM_CGROUP_TARGET_SOFTLIMIT:
			next = val + SOFTLIMIT_EVENTS_TARGET;
			break;
		case MEM_CGROUP_TARGET_NUMAINFO:
			next = val + NUMAINFO_EVENTS_TARGET;
			break;
		default:
			break;
		}
		__this_cpu_write(memcg->stat->targets[target], next);
		return true;
	}
	return false;
}
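
/*
 * Illustrative note (not in the original source): the target counters
 * implement a cheap per-cpu ratelimit.  With THRESHOLDS_EVENTS_TARGET
 * of 128, if nr_page_events has advanced to 1000 while the stored
 * target is 900, the signed difference 900 - 1000 < 0 fires the event
 * and the target is rearmed at 1000 + 128 = 1128.
 */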

/*
 * Check events in order.
 */
static void memcg_check_events(struct mem_cgroup *memcg, struct page *page)
{
	/* threshold event is triggered in finer grain than soft limit */
	if (unlikely(mem_cgroup_event_ratelimit(memcg,
						MEM_CGROUP_TARGET_THRESH))) {
		bool do_softlimit;
		bool do_numainfo __maybe_unused;

		do_softlimit = mem_cgroup_event_ratelimit(memcg,
						MEM_CGROUP_TARGET_SOFTLIMIT);
#if MAX_NUMNODES > 1
		do_numainfo = mem_cgroup_event_ratelimit(memcg,
						MEM_CGROUP_TARGET_NUMAINFO);
#endif
		mem_cgroup_threshold(memcg);
		if (unlikely(do_softlimit))
			mem_cgroup_update_tree(memcg, page);
#if MAX_NUMNODES > 1
		if (unlikely(do_numainfo))
			atomic_inc(&memcg->numainfo_events);
#endif
	}
}

struct mem_cgroup *mem_cgroup_from_task(struct task_struct *p)
{
	/*
	 * mm_update_next_owner() may clear mm->owner to NULL
	 * if it races with swapoff, page migration, etc.
	 * So this can be called with p == NULL.
	 */
	if (unlikely(!p))
		return NULL;

	return mem_cgroup_from_css(task_css(p, memory_cgrp_id));
}
EXPORT_SYMBOL(mem_cgroup_from_task);

static struct mem_cgroup *get_mem_cgroup_from_mm(struct mm_struct *mm)
{
	struct mem_cgroup *memcg = NULL;

	rcu_read_lock();
	do {
		/*
		 * Page cache insertions can happen without an
		 * actual mm context, e.g. during disk probing
		 * on boot, loopback IO, acct() writes etc.
		 */
		if (unlikely(!mm))
			memcg = root_mem_cgroup;
		else {
			memcg = mem_cgroup_from_task(rcu_dereference(mm->owner));
			if (unlikely(!memcg))
				memcg = root_mem_cgroup;
		}
	} while (!css_tryget_online(&memcg->css));
	rcu_read_unlock();
	return memcg;
}

/**
 * mem_cgroup_iter - iterate over memory cgroup hierarchy
 * @root: hierarchy root
 * @prev: previously returned memcg, NULL on first invocation
 * @reclaim: cookie for shared reclaim walks, NULL for full walks
 *
 * Returns references to children of the hierarchy below @root, or
 * @root itself, or %NULL after a full round-trip.
 *
 * Caller must pass the return value in @prev on subsequent
 * invocations for reference counting, or use mem_cgroup_iter_break()
 * to cancel a hierarchy walk before the round-trip is complete.
 *
 * Reclaimers can specify a node and a priority level in @reclaim to
 * divide up the memcgs in the hierarchy among all concurrent
 * reclaimers operating on the same node and priority.
 */
struct mem_cgroup *mem_cgroup_iter(struct mem_cgroup *root,
				   struct mem_cgroup *prev,
				   struct mem_cgroup_reclaim_cookie *reclaim)
{
	struct mem_cgroup_reclaim_iter *uninitialized_var(iter);
	struct cgroup_subsys_state *css = NULL;
	struct mem_cgroup *memcg = NULL;
	struct mem_cgroup *pos = NULL;

	if (mem_cgroup_disabled())
		return NULL;

	if (!root)
		root = root_mem_cgroup;

	if (prev && !reclaim)
		pos = prev;

	if (!root->use_hierarchy && root != root_mem_cgroup) {
		if (prev)
			goto out;
		return root;
	}

	rcu_read_lock();

	if (reclaim) {
		struct mem_cgroup_per_node *mz;

		mz = mem_cgroup_nodeinfo(root, reclaim->pgdat->node_id);
		iter = &mz->iter[reclaim->priority];

		if (prev && reclaim->generation != iter->generation)
			goto out_unlock;

		while (1) {
			pos = READ_ONCE(iter->position);
			if (!pos || css_tryget(&pos->css))
				break;
			/*
			 * css reference reached zero, so iter->position will
			 * be cleared by ->css_released. However, we should not
			 * rely on this happening soon, because ->css_released
			 * is called from a work queue, and by busy-waiting we
			 * might block it. So we clear iter->position right
			 * away.
			 */
			(void)cmpxchg(&iter->position, pos, NULL);
		}
	}

	if (pos)
		css = &pos->css;

	for (;;) {
		css = css_next_descendant_pre(css, &root->css);
		if (!css) {
			/*
			 * Reclaimers share the hierarchy walk, and a
			 * new one might jump in right at the end of
			 * the hierarchy - make sure they see at least
			 * one group and restart from the beginning.
			 */
			if (!prev)
				continue;
			break;
		}

		/*
		 * Verify the css and acquire a reference.  The root
		 * is provided by the caller, so we know it's alive
		 * and kicking, and don't take an extra reference.
		 */
		memcg = mem_cgroup_from_css(css);

		if (css == &root->css)
			break;

		if (css_tryget(css))
			break;

		memcg = NULL;
	}

	if (reclaim) {
		/*
		 * The position could have already been updated by a competing
		 * thread, so check that the value hasn't changed since we read
		 * it to avoid reclaiming from the same cgroup twice.
		 */
		(void)cmpxchg(&iter->position, pos, memcg);

		if (pos)
			css_put(&pos->css);

		if (!memcg)
			iter->generation++;
		else if (!prev)
			reclaim->generation = iter->generation;
	}

out_unlock:
	rcu_read_unlock();
out:
	if (prev && prev != root)
		css_put(&prev->css);

	return memcg;
}
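
/*
 * Usage sketch (illustrative, not in the original source): a full
 * hierarchy walk that tallies descendants of @root.  Breaking out of
 * the loop early would require mem_cgroup_iter_break() to drop the
 * reference held on the last returned memcg:
 *
 *	struct mem_cgroup *iter = NULL;
 *	int nr = 0;
 *
 *	while ((iter = mem_cgroup_iter(root, iter, NULL)))
 *		nr++;
 */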

/**
 * mem_cgroup_iter_break - abort a hierarchy walk prematurely
 * @root: hierarchy root
 * @prev: last visited hierarchy member as returned by mem_cgroup_iter()
 */
void mem_cgroup_iter_break(struct mem_cgroup *root,
			   struct mem_cgroup *prev)
{
	if (!root)
		root = root_mem_cgroup;
	if (prev && prev != root)
		css_put(&prev->css);
}

static void invalidate_reclaim_iterators(struct mem_cgroup *dead_memcg)
{
	struct mem_cgroup *memcg = dead_memcg;
	struct mem_cgroup_reclaim_iter *iter;
	struct mem_cgroup_per_node *mz;
	int nid;
	int i;

	while ((memcg = parent_mem_cgroup(memcg))) {
		for_each_node(nid) {
			mz = mem_cgroup_nodeinfo(memcg, nid);
			for (i = 0; i <= DEF_PRIORITY; i++) {
				iter = &mz->iter[i];
				cmpxchg(&iter->position,
					dead_memcg, NULL);
			}
		}
	}
}

/*
 * Iteration constructs for visiting all cgroups (under a tree).  If
 * loops are exited prematurely (break), mem_cgroup_iter_break() must
 * be used for cleanup.
 */
#define for_each_mem_cgroup_tree(iter, root)		\
	for (iter = mem_cgroup_iter(root, NULL, NULL);	\
	     iter != NULL;				\
	     iter = mem_cgroup_iter(root, iter, NULL))

#define for_each_mem_cgroup(iter)			\
	for (iter = mem_cgroup_iter(NULL, NULL, NULL);	\
	     iter != NULL;				\
	     iter = mem_cgroup_iter(NULL, iter, NULL))

/**
 * mem_cgroup_scan_tasks - iterate over tasks of a memory cgroup hierarchy
 * @memcg: hierarchy root
 * @fn: function to call for each task
 * @arg: argument passed to @fn
 *
 * This function iterates over tasks attached to @memcg or to any of its
 * descendants and calls @fn for each task. If @fn returns a non-zero
 * value, the function breaks the iteration loop and returns the value.
 * Otherwise, it will iterate over all tasks and return 0.
 *
 * This function must not be called for the root memory cgroup.
 */
int mem_cgroup_scan_tasks(struct mem_cgroup *memcg,
			  int (*fn)(struct task_struct *, void *), void *arg)
{
	struct mem_cgroup *iter;
	int ret = 0;

	BUG_ON(memcg == root_mem_cgroup);

	for_each_mem_cgroup_tree(iter, memcg) {
		struct css_task_iter it;
		struct task_struct *task;

		css_task_iter_start(&iter->css, &it);
		while (!ret && (task = css_task_iter_next(&it)))
			ret = fn(task, arg);
		css_task_iter_end(&it);
		if (ret) {
			mem_cgroup_iter_break(memcg, iter);
			break;
		}
	}
	return ret;
}

/**
 * mem_cgroup_page_lruvec - return lruvec for isolating/putting an LRU page
 * @page: the page
 * @pgdat: pgdat of the page
 *
 * This function is only safe when following the LRU page isolation
 * and putback protocol: the LRU lock must be held, and the page must
 * either be PageLRU() or the caller must have isolated/allocated it.
 */
struct lruvec *mem_cgroup_page_lruvec(struct page *page, struct pglist_data *pgdat)
{
	struct mem_cgroup_per_node *mz;
	struct mem_cgroup *memcg;
	struct lruvec *lruvec;

	if (mem_cgroup_disabled()) {
		lruvec = &pgdat->lruvec;
		goto out;
	}

	memcg = page->mem_cgroup;
	/*
	 * Swapcache readahead pages are added to the LRU - and
	 * possibly migrated - before they are charged.
	 */
	if (!memcg)
		memcg = root_mem_cgroup;

	mz = mem_cgroup_page_nodeinfo(memcg, page);
	lruvec = &mz->lruvec;
out:
	/*
	 * Since a node can be onlined after the mem_cgroup was created,
	 * we have to be prepared to initialize lruvec->pgdat here;
	 * and if offlined then reonlined, we need to reinitialize it.
	 */
	if (unlikely(lruvec->pgdat != pgdat))
		lruvec->pgdat = pgdat;
	return lruvec;
}

/**
 * mem_cgroup_update_lru_size - account for adding or removing an lru page
 * @lruvec: mem_cgroup per zone lru vector
 * @lru: index of lru list the page is sitting on
 * @zid: zone id of the accounted pages
 * @nr_pages: positive when adding or negative when removing
 *
 * This function must be called under lru_lock, just before a page is added
 * to or just after a page is removed from an lru list (that ordering being
 * so as to allow it to check that lru_size 0 is consistent with list_empty).
 */
void mem_cgroup_update_lru_size(struct lruvec *lruvec, enum lru_list lru,
				int zid, int nr_pages)
{
	struct mem_cgroup_per_node *mz;
	unsigned long *lru_size;
	long size;

	if (mem_cgroup_disabled())
		return;

	mz = container_of(lruvec, struct mem_cgroup_per_node, lruvec);
	lru_size = &mz->lru_zone_size[zid][lru];

	if (nr_pages < 0)
		*lru_size += nr_pages;

	size = *lru_size;
	if (WARN_ONCE(size < 0,
		"%s(%p, %d, %d): lru_size %ld\n",
		__func__, lruvec, lru, nr_pages, size)) {
		VM_BUG_ON(1);
		*lru_size = 0;
	}

	if (nr_pages > 0)
		*lru_size += nr_pages;
}

bool task_in_mem_cgroup(struct task_struct *task, struct mem_cgroup *memcg)
{
	struct mem_cgroup *task_memcg;
	struct task_struct *p;
	bool ret;

	p = find_lock_task_mm(task);
	if (p) {
		task_memcg = get_mem_cgroup_from_mm(p->mm);
		task_unlock(p);
	} else {
		/*
		 * All threads may have already detached their mm's, but the oom
		 * killer still needs to detect if they have already been oom
		 * killed to prevent needlessly killing additional tasks.
		 */
		rcu_read_lock();
		task_memcg = mem_cgroup_from_task(task);
		css_get(&task_memcg->css);
		rcu_read_unlock();
	}
	ret = mem_cgroup_is_descendant(task_memcg, memcg);
	css_put(&task_memcg->css);
	return ret;
}

/**
 * mem_cgroup_margin - calculate chargeable space of a memory cgroup
 * @memcg: the memory cgroup
 *
 * Returns the maximum amount of memory @mem can be charged with, in
 * pages.
 */
static unsigned long mem_cgroup_margin(struct mem_cgroup *memcg)
{
	unsigned long margin = 0;
	unsigned long count;
	unsigned long limit;

	count = page_counter_read(&memcg->memory);
	limit = READ_ONCE(memcg->memory.limit);
	if (count < limit)
		margin = limit - count;

	if (do_memsw_account()) {
		count = page_counter_read(&memcg->memsw);
		limit = READ_ONCE(memcg->memsw.limit);
		if (count <= limit)
			margin = min(margin, limit - count);
		else
			margin = 0;
	}

	return margin;
}
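
/*
 * Worked example (illustrative, not in the original source): with
 * memory.limit at 1000 pages and usage at 900, the memory margin is
 * 100 pages.  If memsw.limit is 1050 with memsw usage at 1000, the
 * combined margin is min(100, 50) = 50 pages, the most that can be
 * charged without breaching either counter.
 */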

/*
 * A routine for checking "mem" is under move_account() or not.
 *
 * Checking a cgroup is mc.from or mc.to or under hierarchy of
 * moving cgroups. This is for waiting at high-memory pressure
 * caused by "move".
 */
static bool mem_cgroup_under_move(struct mem_cgroup *memcg)
{
	struct mem_cgroup *from;
	struct mem_cgroup *to;
	bool ret = false;
	/*
	 * Unlike task_move routines, we access mc.to, mc.from not under
	 * mutual exclusion by cgroup_mutex. Here, we take spinlock instead.
	 */
	spin_lock(&mc.lock);
	from = mc.from;
	to = mc.to;
	if (!from)
		goto unlock;

	ret = mem_cgroup_is_descendant(from, memcg) ||
		mem_cgroup_is_descendant(to, memcg);
unlock:
	spin_unlock(&mc.lock);
	return ret;
}

static bool mem_cgroup_wait_acct_move(struct mem_cgroup *memcg)
{
	if (mc.moving_task && current != mc.moving_task) {
		if (mem_cgroup_under_move(memcg)) {
			DEFINE_WAIT(wait);
			prepare_to_wait(&mc.waitq, &wait, TASK_INTERRUPTIBLE);
			/* moving charge context might have finished. */
			if (mc.moving_task)
				schedule();
			finish_wait(&mc.waitq, &wait);
			return true;
		}
	}
	return false;
}

unsigned int memcg1_stats[] = {
	MEMCG_CACHE,
	MEMCG_RSS,
	MEMCG_RSS_HUGE,
	NR_SHMEM,
	NR_FILE_MAPPED,
	NR_FILE_DIRTY,
	NR_WRITEBACK,
	MEMCG_SWAP,
};

static const char *const memcg1_stat_names[] = {
	"cache",
	"rss",
	"rss_huge",
	"shmem",
	"mapped_file",
	"dirty",
	"writeback",
	"swap",
};

#define K(x) ((x) << (PAGE_SHIFT-10))
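
/*
 * Illustrative note (not in the original source): K() converts a page
 * count to kibibytes.  With 4KiB pages (PAGE_SHIFT == 12), K(x) is
 * x << 2, so K(25) == 100kB; on 64KiB-page configurations it becomes
 * x << 6 instead.
 */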

/**
 * mem_cgroup_print_oom_info: Print OOM information relevant to memory
 * controller.
 * @memcg: The memory cgroup that went over limit
 * @p: Task that is going to be killed
 *
 * NOTE: @memcg and @p's mem_cgroup can be different when hierarchy is
 * enabled
 */
void mem_cgroup_print_oom_info(struct mem_cgroup *memcg, struct task_struct *p)
{
	struct mem_cgroup *iter;
	unsigned int i;

	rcu_read_lock();

	if (p) {
		pr_info("Task in ");
		pr_cont_cgroup_path(task_cgroup(p, memory_cgrp_id));
		pr_cont(" killed as a result of limit of ");
	} else {
		pr_info("Memory limit reached of cgroup ");
	}

	pr_cont_cgroup_path(memcg->css.cgroup);
	pr_cont("\n");

	rcu_read_unlock();

	pr_info("memory: usage %llukB, limit %llukB, failcnt %lu\n",
		K((u64)page_counter_read(&memcg->memory)),
		K((u64)memcg->memory.limit), memcg->memory.failcnt);
	pr_info("memory+swap: usage %llukB, limit %llukB, failcnt %lu\n",
		K((u64)page_counter_read(&memcg->memsw)),
		K((u64)memcg->memsw.limit), memcg->memsw.failcnt);
	pr_info("kmem: usage %llukB, limit %llukB, failcnt %lu\n",
		K((u64)page_counter_read(&memcg->kmem)),
		K((u64)memcg->kmem.limit), memcg->kmem.failcnt);

	for_each_mem_cgroup_tree(iter, memcg) {
		pr_info("Memory cgroup stats for ");
		pr_cont_cgroup_path(iter->css.cgroup);
		pr_cont(":");

		for (i = 0; i < ARRAY_SIZE(memcg1_stats); i++) {
			if (memcg1_stats[i] == MEMCG_SWAP && !do_swap_account)
				continue;
			pr_cont(" %s:%luKB", memcg1_stat_names[i],
				K(memcg_page_state(iter, memcg1_stats[i])));
		}

		for (i = 0; i < NR_LRU_LISTS; i++)
			pr_cont(" %s:%luKB", mem_cgroup_lru_names[i],
				K(mem_cgroup_nr_lru_pages(iter, BIT(i))));

		pr_cont("\n");
	}
}

/*
 * This function returns the number of memcg under hierarchy tree. Returns
 * 1(self count) if no children.
 */
static int mem_cgroup_count_children(struct mem_cgroup *memcg)
{
	int num = 0;
	struct mem_cgroup *iter;

	for_each_mem_cgroup_tree(iter, memcg)
		num++;
	return num;
}

/*
 * Return the memory (and swap, if configured) limit for a memcg.
 */
unsigned long mem_cgroup_get_limit(struct mem_cgroup *memcg)
{
	unsigned long limit;

	limit = memcg->memory.limit;
	if (mem_cgroup_swappiness(memcg)) {
		unsigned long memsw_limit;
		unsigned long swap_limit;

		memsw_limit = memcg->memsw.limit;
		swap_limit = memcg->swap.limit;
		swap_limit = min(swap_limit, (unsigned long)total_swap_pages);
		limit = min(limit + swap_limit, memsw_limit);
	}
	return limit;
}

static bool mem_cgroup_out_of_memory(struct mem_cgroup *memcg, gfp_t gfp_mask,
				     int order)
{
	struct oom_control oc = {
		.zonelist = NULL,
		.nodemask = NULL,
		.memcg = memcg,
		.gfp_mask = gfp_mask,
		.order = order,
	};
	bool ret;

	mutex_lock(&oom_lock);
	ret = out_of_memory(&oc);
	mutex_unlock(&oom_lock);
	return ret;
}

#if MAX_NUMNODES > 1

/**
 * test_mem_cgroup_node_reclaimable
 * @memcg: the target memcg
 * @nid: the node ID to be checked.
 * @noswap : specify true here if the user wants file only information.
 *
 * This function returns whether the specified memcg contains any
 * reclaimable pages on a node. Returns true if there are any reclaimable
 * pages in the node.
 */
static bool test_mem_cgroup_node_reclaimable(struct mem_cgroup *memcg,
		int nid, bool noswap)
{
	if (mem_cgroup_node_nr_lru_pages(memcg, nid, LRU_ALL_FILE))
		return true;
	if (noswap || !total_swap_pages)
		return false;
	if (mem_cgroup_node_nr_lru_pages(memcg, nid, LRU_ALL_ANON))
		return true;
	return false;
}

/*
 * Always updating the nodemask is not very good - even if we have an empty
 * list or the wrong list here, we can start from some node and traverse all
 * nodes based on the zonelist. So update the list loosely once per 10 secs.
 */
static void mem_cgroup_may_update_nodemask(struct mem_cgroup *memcg)
{
	int nid;
	/*
	 * numainfo_events > 0 means there was at least NUMAINFO_EVENTS_TARGET
	 * pagein/pageout changes since the last update.
	 */
	if (!atomic_read(&memcg->numainfo_events))
		return;
	if (atomic_inc_return(&memcg->numainfo_updating) > 1)
		return;

	/* make a nodemask where this memcg uses memory from */
	memcg->scan_nodes = node_states[N_MEMORY];

	for_each_node_mask(nid, node_states[N_MEMORY]) {
		if (!test_mem_cgroup_node_reclaimable(memcg, nid, false))
			node_clear(nid, memcg->scan_nodes);
	}

	atomic_set(&memcg->numainfo_events, 0);
	atomic_set(&memcg->numainfo_updating, 0);
}

/*
 * Selecting a node where we start reclaim from. Because what we need is just
 * reducing usage counter, start from anywhere is OK. Considering
 * memory reclaim from current node, there are pros. and cons.
 *
 * Freeing memory from current node means freeing memory from a node which
 * we'll use or we've used. So, it may make LRU bad. And if several threads
 * hit limits, it will see a contention on a node. But freeing from remote
 * node means more costs for memory reclaim because of memory latency.
 *
 * Now, we use round-robin. Better algorithm is welcomed.
 */
int mem_cgroup_select_victim_node(struct mem_cgroup *memcg)
{
	int node;

	mem_cgroup_may_update_nodemask(memcg);
	node = memcg->last_scanned_node;

	node = next_node_in(node, memcg->scan_nodes);
	/*
	 * mem_cgroup_may_update_nodemask might have seen no reclaimable pages
	 * last time it really checked all the LRUs due to rate limiting.
	 * Fallback to the current node in that case for simplicity.
	 */
	if (unlikely(node == MAX_NUMNODES))
		node = numa_node_id();

	memcg->last_scanned_node = node;
	return node;
}
#else
int mem_cgroup_select_victim_node(struct mem_cgroup *memcg)
{
	return 0;
}
#endif

static int mem_cgroup_soft_reclaim(struct mem_cgroup *root_memcg,
				   pg_data_t *pgdat,
				   gfp_t gfp_mask,
				   unsigned long *total_scanned)
{
	struct mem_cgroup *victim = NULL;
	int total = 0;
	int loop = 0;
	unsigned long excess;
	unsigned long nr_scanned;
	struct mem_cgroup_reclaim_cookie reclaim = {
		.pgdat = pgdat,
		.priority = 0,
	};

	excess = soft_limit_excess(root_memcg);

	while (1) {
		victim = mem_cgroup_iter(root_memcg, victim, &reclaim);
		if (!victim) {
			loop++;
			if (loop >= 2) {
				/*
				 * If we have not been able to reclaim
				 * anything, it might because there are
				 * no reclaimable pages under this hierarchy
				 */
				if (!total)
					break;
				/*
				 * We want to do more targeted reclaim.
				 * excess >> 2 is not to excessive so as to
				 * reclaim too much, nor too less that we keep
				 * coming back to reclaim from this cgroup
				 */
				if (total >= (excess >> 2) ||
					(loop > MEM_CGROUP_MAX_RECLAIM_LOOPS))
					break;
			}
			continue;
		}
		total += mem_cgroup_shrink_node(victim, gfp_mask, false,
					pgdat, &nr_scanned);
		*total_scanned += nr_scanned;
		if (!soft_limit_excess(root_memcg))
			break;
	}
	mem_cgroup_iter_break(root_memcg, victim);
	return total;
}

#ifdef CONFIG_LOCKDEP
static struct lockdep_map memcg_oom_lock_dep_map = {
	.name = "memcg_oom_lock",
};
#endif

static DEFINE_SPINLOCK(memcg_oom_lock);

/*
 * Check OOM-Killer is already running under our hierarchy.
 * If someone is running, return false.
 */
static bool mem_cgroup_oom_trylock(struct mem_cgroup *memcg)
{
	struct mem_cgroup *iter, *failed = NULL;

	spin_lock(&memcg_oom_lock);

	for_each_mem_cgroup_tree(iter, memcg) {
		if (iter->oom_lock) {
			/*
			 * this subtree of our hierarchy is already locked
			 * so we cannot give a lock.
			 */
			failed = iter;
			mem_cgroup_iter_break(memcg, iter);
			break;
		} else
			iter->oom_lock = true;
	}

	if (failed) {
		/*
		 * OK, we failed to lock the whole subtree so we have
		 * to clean up what we set up to the failing subtree
		 */
		for_each_mem_cgroup_tree(iter, memcg) {
			if (iter == failed) {
				mem_cgroup_iter_break(memcg, iter);
				break;
			}
			iter->oom_lock = false;
		}
	} else
		mutex_acquire(&memcg_oom_lock_dep_map, 0, 1, _RET_IP_);

	spin_unlock(&memcg_oom_lock);

	return !failed;
}

static void mem_cgroup_oom_unlock(struct mem_cgroup *memcg)
{
	struct mem_cgroup *iter;

	spin_lock(&memcg_oom_lock);
	mutex_release(&memcg_oom_lock_dep_map, 1, _RET_IP_);
	for_each_mem_cgroup_tree(iter, memcg)
		iter->oom_lock = false;
	spin_unlock(&memcg_oom_lock);
}

static void mem_cgroup_mark_under_oom(struct mem_cgroup *memcg)
{
	struct mem_cgroup *iter;

	spin_lock(&memcg_oom_lock);
	for_each_mem_cgroup_tree(iter, memcg)
		iter->under_oom++;
	spin_unlock(&memcg_oom_lock);
}

static void mem_cgroup_unmark_under_oom(struct mem_cgroup *memcg)
{
	struct mem_cgroup *iter;

	/*
	 * When a new child is created while the hierarchy is under oom,
	 * mem_cgroup_oom_lock() may not be called. Watch for underflow.
	 */
	spin_lock(&memcg_oom_lock);
	for_each_mem_cgroup_tree(iter, memcg)
		if (iter->under_oom > 0)
			iter->under_oom--;
	spin_unlock(&memcg_oom_lock);
}

static DECLARE_WAIT_QUEUE_HEAD(memcg_oom_waitq);

struct oom_wait_info {
	struct mem_cgroup *memcg;
	wait_queue_t	wait;
};

static int memcg_oom_wake_function(wait_queue_t *wait,
	unsigned mode, int sync, void *arg)
{
	struct mem_cgroup *wake_memcg = (struct mem_cgroup *)arg;
	struct mem_cgroup *oom_wait_memcg;
	struct oom_wait_info *oom_wait_info;

	oom_wait_info = container_of(wait, struct oom_wait_info, wait);
	oom_wait_memcg = oom_wait_info->memcg;

	if (!mem_cgroup_is_descendant(wake_memcg, oom_wait_memcg) &&
	    !mem_cgroup_is_descendant(oom_wait_memcg, wake_memcg))
		return 0;
	return autoremove_wake_function(wait, mode, sync, arg);
}

static void memcg_oom_recover(struct mem_cgroup *memcg)
{
	/*
	 * For the following lockless ->under_oom test, the only required
	 * guarantee is that it must see the state asserted by an OOM when
	 * this function is called as a result of userland actions
	 * triggered by the notification of the OOM.  This is trivially
	 * achieved by invoking mem_cgroup_mark_under_oom() before
	 * triggering notification.
	 */
	if (memcg && memcg->under_oom)
		__wake_up(&memcg_oom_waitq, TASK_NORMAL, 0, memcg);
}

static void mem_cgroup_oom(struct mem_cgroup *memcg, gfp_t mask, int order)
{
	if (!current->memcg_may_oom)
		return;
	/*
	 * We are in the middle of the charge context here, so we
	 * don't want to block when potentially sitting on a callstack
	 * that holds all kinds of filesystem and mm locks.
	 *
	 * Also, the caller may handle a failed allocation gracefully
	 * (like optional page cache readahead) and so an OOM killer
	 * invocation might not even be necessary.
	 *
	 * That's why we don't do anything here except remember the
	 * OOM context and then deal with it at the end of the page
	 * fault when the stack is unwound, the locks are released,
	 * and when we know whether the fault was overall successful.
	 */
	css_get(&memcg->css);
	current->memcg_in_oom = memcg;
	current->memcg_oom_gfp_mask = mask;
	current->memcg_oom_order = order;
}

/**
 * mem_cgroup_oom_synchronize - complete memcg OOM handling
 * @handle: actually kill/wait or just clean up the OOM state
 *
 * This has to be called at the end of a page fault if the memcg OOM
 * handler was enabled.
 *
 * Memcg supports userspace OOM handling where failed allocations must
 * sleep on a waitqueue until the userspace task resolves the
 * situation.  Sleeping directly in the charge context is problematic
 * because of all the locks held during the allocation.
 *
 * Returns %true if an ongoing memcg OOM situation was detected and
 * completed, %false otherwise.
 */
bool mem_cgroup_oom_synchronize(bool handle)
{
	struct mem_cgroup *memcg = current->memcg_in_oom;
	struct oom_wait_info owait;
	bool locked;

	/* OOM is global, do not handle */
	if (!memcg)
		return false;

	if (!handle)
		goto cleanup;

	owait.memcg = memcg;
	owait.wait.flags = 0;
	owait.wait.func = memcg_oom_wake_function;
	owait.wait.private = current;
	INIT_LIST_HEAD(&owait.wait.task_list);

	prepare_to_wait(&memcg_oom_waitq, &owait.wait, TASK_KILLABLE);
	mem_cgroup_mark_under_oom(memcg);

	locked = mem_cgroup_oom_trylock(memcg);

	if (locked)
		mem_cgroup_oom_notify(memcg);

	if (locked && !memcg->oom_kill_disable) {
		mem_cgroup_unmark_under_oom(memcg);
		finish_wait(&memcg_oom_waitq, &owait.wait);
		mem_cgroup_out_of_memory(memcg, current->memcg_oom_gfp_mask,
					 current->memcg_oom_order);
	} else {
		schedule();
		mem_cgroup_unmark_under_oom(memcg);
		finish_wait(&memcg_oom_waitq, &owait.wait);
	}

	if (locked) {
		mem_cgroup_oom_unlock(memcg);
		/*
		 * There is no guarantee that an OOM-lock contender
		 * sees the wakeups triggered by the OOM kill
		 * uncharges.  Wake any sleepers explicitly.
		 */
		memcg_oom_recover(memcg);
	}
cleanup:
	current->memcg_in_oom = NULL;
	css_put(&memcg->css);
	return true;
}

/**
 * lock_page_memcg - lock a page->mem_cgroup binding
 * @page: the page
 *
 * This function protects unlocked LRU pages from being moved to
 * another cgroup and stabilizes their page->mem_cgroup binding.
 */
void lock_page_memcg(struct page *page)
{
	struct mem_cgroup *memcg;
	unsigned long flags;

	/*
	 * The RCU lock is held throughout the transaction.  The fast
	 * path can get away without acquiring the memcg->move_lock
	 * because page moving starts with an RCU grace period.
	 */
	rcu_read_lock();

	if (mem_cgroup_disabled())
		return;
again:
	memcg = page->mem_cgroup;
	if (unlikely(!memcg))
		return;

	if (atomic_read(&memcg->moving_account) <= 0)
		return;

	spin_lock_irqsave(&memcg->move_lock, flags);
	if (memcg != page->mem_cgroup) {
		spin_unlock_irqrestore(&memcg->move_lock, flags);
		goto again;
	}

	/*
	 * When charge migration first begins, we can have locked and
	 * unlocked page stat updates across a nested transaction
	 * group, so the interrupts must be disabled.
	 */
	memcg->move_lock_task = current;
	memcg->move_lock_flags = flags;

	return;
}
EXPORT_SYMBOL(lock_page_memcg);

/**
 * unlock_page_memcg - unlock a page->mem_cgroup binding
 * @page: the page
 */
void unlock_page_memcg(struct page *page)
{
	struct mem_cgroup *memcg = page->mem_cgroup;

	if (memcg && memcg->move_lock_task == current) {
		unsigned long flags = memcg->move_lock_flags;

		memcg->move_lock_task = NULL;
		memcg->move_lock_flags = 0;

		spin_unlock_irqrestore(&memcg->move_lock, flags);
	}

	rcu_read_unlock();
}
EXPORT_SYMBOL(unlock_page_memcg);

/*
 * size of first charge trial. "32" comes from vmscan.c's magic value.
 * TODO: maybe necessary to use big numbers in big irons.
 */
#define CHARGE_BATCH	32U
struct memcg_stock_pcp {
	struct mem_cgroup *cached; /* this never be root cgroup */
	unsigned int nr_pages;
	struct work_struct work;
	unsigned long flags;
#define FLUSHING_CACHED_CHARGE	0
};
static DEFINE_PER_CPU(struct memcg_stock_pcp, memcg_stock);
static DEFINE_MUTEX(percpu_charge_mutex);

/**
 * consume_stock: Try to consume stocked charge on this cpu.
 * @memcg: memcg to consume from.
 * @nr_pages: how many pages to charge.
 *
 * The charges will only happen if @memcg matches the current cpu's memcg
 * stock, and at least @nr_pages are available in that stock.  Failure to
 * service an allocation will refill the stock.
 *
 * returns true if successful, false otherwise.
 */
static bool consume_stock(struct mem_cgroup *memcg, unsigned int nr_pages)
{
	struct memcg_stock_pcp *stock;
	unsigned long flags;
	bool ret = false;

	if (nr_pages > CHARGE_BATCH)
		return ret;

	local_irq_save(flags);

	stock = this_cpu_ptr(&memcg_stock);
	if (memcg == stock->cached && stock->nr_pages >= nr_pages) {
		stock->nr_pages -= nr_pages;
		ret = true;
	}

	local_irq_restore(flags);

	return ret;
}

/*
 * Returns stocks cached in percpu and reset cached information.
 */
static void drain_stock(struct memcg_stock_pcp *stock)
{
	struct mem_cgroup *old = stock->cached;

	if (stock->nr_pages) {
		page_counter_uncharge(&old->memory, stock->nr_pages);
		if (do_memsw_account())
			page_counter_uncharge(&old->memsw, stock->nr_pages);
		css_put_many(&old->css, stock->nr_pages);
		stock->nr_pages = 0;
	}
	stock->cached = NULL;
}

static void drain_local_stock(struct work_struct *dummy)
{
	struct memcg_stock_pcp *stock;
	unsigned long flags;

	local_irq_save(flags);

	stock = this_cpu_ptr(&memcg_stock);
	drain_stock(stock);
	clear_bit(FLUSHING_CACHED_CHARGE, &stock->flags);

	local_irq_restore(flags);
}

/*
 * Cache charges(val) to local per_cpu area.
 * This will be consumed by consume_stock() function, later.
 */
static void refill_stock(struct mem_cgroup *memcg, unsigned int nr_pages)
{
	struct memcg_stock_pcp *stock;
	unsigned long flags;

	local_irq_save(flags);

	stock = this_cpu_ptr(&memcg_stock);
	if (stock->cached != memcg) { /* reset if necessary */
		drain_stock(stock);
		stock->cached = memcg;
	}
	stock->nr_pages += nr_pages;

	local_irq_restore(flags);
}
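
/*
 * Illustrative note (not in the original source): the per-cpu stock
 * amortizes page_counter updates.  If a task charges 1 page while the
 * batch is 32, try_charge() charges all 32 pages to the counters and
 * parks the 31 unused pages in this cpu's stock; subsequent
 * single-page charges from the same memcg on this cpu are then served
 * by consume_stock() without touching any shared counter.
 */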

/*
 * Drains all per-CPU charge caches for given root_memcg resp. subtree
 * of the hierarchy under it.
 */
static void drain_all_stock(struct mem_cgroup *root_memcg)
{
	int cpu, curcpu;

	/* If someone's already draining, avoid adding running more workers. */
	if (!mutex_trylock(&percpu_charge_mutex))
		return;
	/* Notify other cpus that system-wide "drain" is running */
	get_online_cpus();
	curcpu = get_cpu();
	for_each_online_cpu(cpu) {
		struct memcg_stock_pcp *stock = &per_cpu(memcg_stock, cpu);
		struct mem_cgroup *memcg;

		memcg = stock->cached;
		if (!memcg || !stock->nr_pages)
			continue;
		if (!mem_cgroup_is_descendant(memcg, root_memcg))
			continue;
		if (!test_and_set_bit(FLUSHING_CACHED_CHARGE, &stock->flags)) {
			if (cpu == curcpu)
				drain_local_stock(&stock->work);
			else
				schedule_work_on(cpu, &stock->work);
		}
	}
	put_cpu();
	put_online_cpus();
	mutex_unlock(&percpu_charge_mutex);
}

static int memcg_hotplug_cpu_dead(unsigned int cpu)
{
	struct memcg_stock_pcp *stock;

	stock = &per_cpu(memcg_stock, cpu);
	drain_stock(stock);
	return 0;
}

static void reclaim_high(struct mem_cgroup *memcg,
			 unsigned int nr_pages,
			 gfp_t gfp_mask)
{
	do {
		if (page_counter_read(&memcg->memory) <= memcg->high)
			continue;
		mem_cgroup_event(memcg, MEMCG_HIGH);
		try_to_free_mem_cgroup_pages(memcg, nr_pages, gfp_mask, true);
	} while ((memcg = parent_mem_cgroup(memcg)));
}

static void high_work_func(struct work_struct *work)
{
	struct mem_cgroup *memcg;

	memcg = container_of(work, struct mem_cgroup, high_work);
	reclaim_high(memcg, CHARGE_BATCH, GFP_KERNEL);
}

/*
 * Scheduled by try_charge() to be executed from the userland return path
 * and reclaims memory over the high limit.
 */
void mem_cgroup_handle_over_high(void)
{
	unsigned int nr_pages = current->memcg_nr_pages_over_high;
	struct mem_cgroup *memcg;

	if (likely(!nr_pages))
		return;

	memcg = get_mem_cgroup_from_mm(current->mm);
	reclaim_high(memcg, nr_pages, GFP_KERNEL);
	css_put(&memcg->css);
	current->memcg_nr_pages_over_high = 0;
}

static int try_charge(struct mem_cgroup *memcg, gfp_t gfp_mask,
		      unsigned int nr_pages)
{
	unsigned int batch = max(CHARGE_BATCH, nr_pages);
	int nr_retries = MEM_CGROUP_RECLAIM_RETRIES;
	struct mem_cgroup *mem_over_limit;
	struct page_counter *counter;
	unsigned long nr_reclaimed;
	bool may_swap = true;
	bool drained = false;

	if (mem_cgroup_is_root(memcg))
		return 0;
retry:
	if (consume_stock(memcg, nr_pages))
		return 0;

	if (!do_memsw_account() ||
	    page_counter_try_charge(&memcg->memsw, batch, &counter)) {
		if (page_counter_try_charge(&memcg->memory, batch, &counter))
			goto done_restock;
		if (do_memsw_account())
			page_counter_uncharge(&memcg->memsw, batch);
		mem_over_limit = mem_cgroup_from_counter(counter, memory);
	} else {
		mem_over_limit = mem_cgroup_from_counter(counter, memsw);
		may_swap = false;
	}

	if (batch > nr_pages) {
		batch = nr_pages;
		goto retry;
	}

	/*
	 * Unlike in global OOM situations, memcg is not in a physical
	 * memory shortage.  Allow dying and OOM-killed tasks to
	 * bypass the last charges so that they can exit quickly and
	 * free their memory.
	 */
	if (unlikely(test_thread_flag(TIF_MEMDIE) ||
		     fatal_signal_pending(current) ||
		     current->flags & PF_EXITING))
		goto force;

	/*
	 * Prevent unbounded recursion when reclaim operations need to
	 * allocate memory. This might exceed the limits temporarily,
	 * but we prefer facilitating memory reclaim and getting back
	 * under the limit over triggering OOM kills in these cases.
	 */
	if (unlikely(current->flags & PF_MEMALLOC))
		goto force;

	if (unlikely(task_in_memcg_oom(current)))
		goto nomem;

	if (!gfpflags_allow_blocking(gfp_mask))
		goto nomem;

	mem_cgroup_event(mem_over_limit, MEMCG_MAX);

	nr_reclaimed = try_to_free_mem_cgroup_pages(mem_over_limit, nr_pages,
						    gfp_mask, may_swap);

	if (mem_cgroup_margin(mem_over_limit) >= nr_pages)
		goto retry;

	if (!drained) {
		drain_all_stock(mem_over_limit);
		drained = true;
		goto retry;
	}

	if (gfp_mask & __GFP_NORETRY)
		goto nomem;
	/*
	 * Even though the limit is exceeded at this point, reclaim
	 * may have been able to free some pages.  Retry the charge
	 * before killing the task.
	 *
	 * Only for regular pages, though: huge pages are rather
	 * unlikely to succeed so close to the limit, and we fall back
	 * to regular pages anyway in case of failure.
	 */
	if (nr_reclaimed && nr_pages <= (1 << PAGE_ALLOC_COSTLY_ORDER))
		goto retry;
	/*
	 * At task move, charge accounts can be doubly counted. So, it's
	 * better to wait until the end of task_move if something is going on.
	 */
	if (mem_cgroup_wait_acct_move(mem_over_limit))
		goto retry;

	if (nr_retries--)
		goto retry;

	if (gfp_mask & __GFP_NOFAIL)
		goto force;

	if (fatal_signal_pending(current))
		goto force;

	mem_cgroup_event(mem_over_limit, MEMCG_OOM);

	mem_cgroup_oom(mem_over_limit, gfp_mask,
		       get_order(nr_pages * PAGE_SIZE));
nomem:
	if (!(gfp_mask & __GFP_NOFAIL))
		return -ENOMEM;
force:
	/*
	 * The allocation either can't fail or will lead to more memory
	 * being freed very soon.  Allow memory usage go over the limit
	 * temporarily by force charging it.
	 */
	page_counter_charge(&memcg->memory, nr_pages);
	if (do_memsw_account())
		page_counter_charge(&memcg->memsw, nr_pages);
	css_get_many(&memcg->css, nr_pages);

	return 0;

done_restock:
	css_get_many(&memcg->css, batch);
	if (batch > nr_pages)
		refill_stock(memcg, batch - nr_pages);

	/*
	 * If the hierarchy is above the normal consumption range, schedule
	 * reclaim on returning to userland.  We can perform reclaim here
	 * if __GFP_RECLAIM but let's always punt for simplicity and so that
	 * GFP_KERNEL can consistently be used during reclaim.  @memcg is
	 * not recorded as it most likely matches current's and won't
	 * change in the meantime.  As high limit is checked again before
	 * reclaim, the cost of mismatch is negligible.
	 */
	do {
		if (page_counter_read(&memcg->memory) > memcg->high) {
			/* Don't bother a random interrupted task */
			if (in_interrupt()) {
				schedule_work(&memcg->high_work);
				break;
			}
			current->memcg_nr_pages_over_high += batch;
			set_notify_resume(current);
			break;
		}
	} while ((memcg = parent_mem_cgroup(memcg)));

	return 0;
}
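
/*
 * Usage sketch (illustrative, not in the original source): charging
 * follows a two-phase protocol.  A caller such as a fault handler
 * first reserves pages against the counters, then binds them to the
 * page once the operation can no longer fail, or backs out:
 *
 *	if (try_charge(memcg, GFP_KERNEL, nr_pages))
 *		return -ENOMEM;
 *	if (some_operation_failed)
 *		cancel_charge(memcg, nr_pages);
 *	else
 *		commit_charge(page, memcg, lrucare);
 */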

static void cancel_charge(struct mem_cgroup *memcg, unsigned int nr_pages)
{
	if (mem_cgroup_is_root(memcg))
		return;

	page_counter_uncharge(&memcg->memory, nr_pages);
	if (do_memsw_account())
		page_counter_uncharge(&memcg->memsw, nr_pages);

	css_put_many(&memcg->css, nr_pages);
}

static void lock_page_lru(struct page *page, int *isolated)
{
	struct zone *zone = page_zone(page);

	spin_lock_irq(zone_lru_lock(zone));
	if (PageLRU(page)) {
		struct lruvec *lruvec;

		lruvec = mem_cgroup_page_lruvec(page, zone->zone_pgdat);
		ClearPageLRU(page);
		del_page_from_lru_list(page, lruvec, page_lru(page));
		*isolated = 1;
	} else
		*isolated = 0;
}

static void unlock_page_lru(struct page *page, int isolated)
{
	struct zone *zone = page_zone(page);

	if (isolated) {
		struct lruvec *lruvec;

		lruvec = mem_cgroup_page_lruvec(page, zone->zone_pgdat);
		VM_BUG_ON_PAGE(PageLRU(page), page);
		SetPageLRU(page);
		add_page_to_lru_list(page, lruvec, page_lru(page));
	}
	spin_unlock_irq(zone_lru_lock(zone));
}

static void commit_charge(struct page *page, struct mem_cgroup *memcg,
			  bool lrucare)
{
	int isolated;

	VM_BUG_ON_PAGE(page->mem_cgroup, page);

	/*
	 * In some cases, SwapCache and FUSE(splice_buf->radixtree), the page
	 * may already be on some other mem_cgroup's LRU. Take care of it.
	 */
	if (lrucare)
		lock_page_lru(page, &isolated);

	/*
	 * Nobody should be changing or seriously looking at
	 * page->mem_cgroup at this point:
	 *
	 * - the page is uncharged
	 *
	 * - the page is off-LRU
	 *
	 * - an anonymous fault has exclusive page access, except for
	 *   a locked page table
	 *
	 * - a page cache insertion, a swapin fault, or a migration
	 *   have the page locked
	 */
	page->mem_cgroup = memcg;

	if (lrucare)
		unlock_page_lru(page, isolated);
}

#ifndef CONFIG_SLOB
static int memcg_alloc_cache_id(void)
{
	int id, size;
	int err;

	id = ida_simple_get(&memcg_cache_ida,
			    0, MEMCG_CACHES_MAX_SIZE, GFP_KERNEL);
	if (id < 0)
		return id;

	if (id < memcg_nr_cache_ids)
		return id;

	/*
	 * There's no space for the new id in memcg_caches arrays,
	 * so we have to grow them.
	 */
	down_write(&memcg_cache_ids_sem);

	size = 2 * (id + 1);
	if (size < MEMCG_CACHES_MIN_SIZE)
		size = MEMCG_CACHES_MIN_SIZE;
	else if (size > MEMCG_CACHES_MAX_SIZE)
		size = MEMCG_CACHES_MAX_SIZE;

	err = memcg_update_all_caches(size);
	if (!err)
		err = memcg_update_all_list_lrus(size);
	if (!err)
		memcg_nr_cache_ids = size;

	up_write(&memcg_cache_ids_sem);

	if (err) {
		ida_simple_remove(&memcg_cache_ida, id);
		return err;
	}
	return id;
}
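
/*
 * Illustrative note (not in the original source): the caches arrays
 * grow geometrically.  Starting from memcg_nr_cache_ids == 4
 * (MEMCG_CACHES_MIN_SIZE), the first id that does not fit, id 4,
 * resizes the arrays to 2 * (4 + 1) = 10 slots; id 10 would resize
 * them to 22, and so on, capped at MEMCG_CACHES_MAX_SIZE.
 */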

static void memcg_free_cache_id(int id)
{
	ida_simple_remove(&memcg_cache_ida, id);
}

struct memcg_kmem_cache_create_work {
	struct mem_cgroup *memcg;
	struct kmem_cache *cachep;
	struct work_struct work;
};

static void memcg_kmem_cache_create_func(struct work_struct *w)
{
	struct memcg_kmem_cache_create_work *cw =
		container_of(w, struct memcg_kmem_cache_create_work, work);
	struct mem_cgroup *memcg = cw->memcg;
	struct kmem_cache *cachep = cw->cachep;

	memcg_create_kmem_cache(memcg, cachep);

	css_put(&memcg->css);
	kfree(cw);
}

/*
 * Enqueue the creation of a per-memcg kmem_cache.
 */
static void __memcg_schedule_kmem_cache_create(struct mem_cgroup *memcg,
					       struct kmem_cache *cachep)
{
	struct memcg_kmem_cache_create_work *cw;

	cw = kmalloc(sizeof(*cw), GFP_NOWAIT);
	if (!cw)
		return;

	css_get(&memcg->css);

	cw->memcg = memcg;
	cw->cachep = cachep;
	INIT_WORK(&cw->work, memcg_kmem_cache_create_func);

	queue_work(memcg_kmem_cache_wq, &cw->work);
}

static void memcg_schedule_kmem_cache_create(struct mem_cgroup *memcg,
					     struct kmem_cache *cachep)
{
	/*
	 * We need to stop accounting when we kmalloc, because if the
	 * corresponding kmalloc cache is not yet created, the first allocation
	 * in __memcg_schedule_kmem_cache_create will recurse.
	 *
	 * However, it is better to enclose the whole function. Depending on
	 * the debugging options enabled, INIT_WORK(), for instance, can
	 * trigger an allocation. This too, will make us recurse. Because at
	 * this point we can't allow ourselves back into memcg_kmem_get_cache,
	 * the safest choice is to do it like this, wrapping the whole function.
	 */
	current->memcg_kmem_skip_account = 1;
	__memcg_schedule_kmem_cache_create(memcg, cachep);
	current->memcg_kmem_skip_account = 0;
}

static inline bool memcg_kmem_bypass(void)
{
	if (in_interrupt() || !current->mm || (current->flags & PF_KTHREAD))
		return true;
	return false;
}

/**
 * memcg_kmem_get_cache: select the correct per-memcg cache for allocation
 * @cachep: the original global kmem cache
 *
 * Return the kmem_cache we're supposed to use for a slab allocation.
 * We try to use the current memcg's version of the cache.
 *
 * If the cache does not exist yet, if we are the first user of it, we
 * create it asynchronously in a workqueue and let the current allocation
 * go through with the original cache.
 *
 * This function takes a reference to the cache it returns to assure it
 * won't get destroyed while we are working with it. Once the caller is
 * done with it, memcg_kmem_put_cache() must be called to release the
 * reference.
 */
struct kmem_cache *memcg_kmem_get_cache(struct kmem_cache *cachep)
{
	struct mem_cgroup *memcg;
	struct kmem_cache *memcg_cachep;
	int kmemcg_id;

	VM_BUG_ON(!is_root_cache(cachep));

	if (memcg_kmem_bypass())
		return cachep;

	if (current->memcg_kmem_skip_account)
		return cachep;

	memcg = get_mem_cgroup_from_mm(current->mm);
	kmemcg_id = READ_ONCE(memcg->kmemcg_id);
	if (kmemcg_id < 0)
		goto out;

	memcg_cachep = cache_from_memcg_idx(cachep, kmemcg_id);
	if (likely(memcg_cachep))
		return memcg_cachep;

	/*
	 * If we are in a safe context (can wait, and not in interrupt
	 * context), we could be predictable and create the cache right
	 * away.  That would guarantee that the allocation being
	 * performed already belongs in the new cache.
	 *
	 * However, cache creation can take locks that may already be
	 * held in the current allocation context, so it is safer to
	 * defer the actual creation to a workqueue and let this
	 * allocation go through with the original root cache.
	 */
	memcg_schedule_kmem_cache_create(memcg, cachep);
out:
	css_put(&memcg->css);
	return cachep;
}

/**
 * memcg_kmem_put_cache: drop reference taken by memcg_kmem_get_cache
 * @cachep: the cache returned by memcg_kmem_get_cache
 */
void memcg_kmem_put_cache(struct kmem_cache *cachep)
{
	if (!is_root_cache(cachep))
		css_put(&cachep->memcg_params.memcg->css);
}

/**
 * memcg_kmem_charge_memcg: charge a kmem page
 * @page: page to charge
 * @gfp: reclaim mode
 * @order: allocation order
 * @memcg: memory cgroup to charge
 *
 * Returns 0 on success, an error code on failure.
 */
int memcg_kmem_charge_memcg(struct page *page, gfp_t gfp, int order,
			    struct mem_cgroup *memcg)
{
	unsigned int nr_pages = 1 << order;
	struct page_counter *counter;
	int ret;

	ret = try_charge(memcg, gfp, nr_pages);
	if (ret)
		return ret;

	if (!cgroup_subsys_on_dfl(memory_cgrp_subsys) &&
	    !page_counter_try_charge(&memcg->kmem, nr_pages, &counter)) {
		cancel_charge(memcg, nr_pages);
		return -ENOMEM;
	}

	page->mem_cgroup = memcg;

	return 0;
}

/**
 * memcg_kmem_charge: charge a kmem page to the current memory cgroup
 * @page: page to charge
 * @gfp: reclaim mode
 * @order: allocation order
 *
 * Returns 0 on success, an error code on failure.
 */
int memcg_kmem_charge(struct page *page, gfp_t gfp, int order)
{
	struct mem_cgroup *memcg;
	int ret = 0;

	if (memcg_kmem_bypass())
		return 0;

	memcg = get_mem_cgroup_from_mm(current->mm);
	if (!mem_cgroup_is_root(memcg)) {
		ret = memcg_kmem_charge_memcg(page, gfp, order, memcg);
		if (!ret)
			__SetPageKmemcg(page);
	}
	css_put(&memcg->css);
	return ret;
}

/**
 * memcg_kmem_uncharge: uncharge a kmem page
 * @page: page to uncharge
 * @order: allocation order
 */
void memcg_kmem_uncharge(struct page *page, int order)
{
	struct mem_cgroup *memcg = page->mem_cgroup;
	unsigned int nr_pages = 1 << order;

	if (!memcg)
		return;

	VM_BUG_ON_PAGE(mem_cgroup_is_root(memcg), page);

	if (!cgroup_subsys_on_dfl(memory_cgrp_subsys))
		page_counter_uncharge(&memcg->kmem, nr_pages);

	page_counter_uncharge(&memcg->memory, nr_pages);
	if (do_memsw_account())
		page_counter_uncharge(&memcg->memsw, nr_pages);

	page->mem_cgroup = NULL;

	/* slab pages do not have PageKmemcg flag set */
	if (PageKmemcg(page))
		__ClearPageKmemcg(page);

	css_put_many(&memcg->css, nr_pages);
}
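
/*
 * Usage sketch (illustrative, not in the original source): the page
 * allocator pairs these for __GFP_ACCOUNT allocations, roughly:
 *
 *	page = alloc_pages(gfp, order);
 *	if (page && memcg_kmem_charge(page, gfp, order)) {
 *		__free_pages(page, order);
 *		page = NULL;
 *	}
 *	...
 *	memcg_kmem_uncharge(page, order);	// at free time
 */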
2354#endif
2355
2356#ifdef CONFIG_TRANSPARENT_HUGEPAGE
2357
2358
2359
2360
2361
2362void mem_cgroup_split_huge_fixup(struct page *head)
2363{
2364 int i;
2365
2366 if (mem_cgroup_disabled())
2367 return;
2368
2369 for (i = 1; i < HPAGE_PMD_NR; i++)
2370 head[i].mem_cgroup = head->mem_cgroup;
2371
2372 __this_cpu_sub(head->mem_cgroup->stat->count[MEMCG_RSS_HUGE],
2373 HPAGE_PMD_NR);
2374}
2375#endif
2376
2377#ifdef CONFIG_MEMCG_SWAP
2378static void mem_cgroup_swap_statistics(struct mem_cgroup *memcg,
2379 bool charge)
2380{
2381 int val = (charge) ? 1 : -1;
2382 this_cpu_add(memcg->stat->count[MEMCG_SWAP], val);
2383}
2384
2385
2386
2387
2388
2389
2390
2391
2392
2393
2394
2395
2396
2397
2398
2399static int mem_cgroup_move_swap_account(swp_entry_t entry,
2400 struct mem_cgroup *from, struct mem_cgroup *to)
2401{
2402 unsigned short old_id, new_id;
2403
2404 old_id = mem_cgroup_id(from);
2405 new_id = mem_cgroup_id(to);
2406
2407 if (swap_cgroup_cmpxchg(entry, old_id, new_id) == old_id) {
2408 mem_cgroup_swap_statistics(from, false);
2409 mem_cgroup_swap_statistics(to, true);
2410 return 0;
2411 }
2412 return -EINVAL;
2413}
2414#else
2415static inline int mem_cgroup_move_swap_account(swp_entry_t entry,
2416 struct mem_cgroup *from, struct mem_cgroup *to)
2417{
2418 return -EINVAL;
2419}
2420#endif
2421
2422static DEFINE_MUTEX(memcg_limit_mutex);
2423
2424static int mem_cgroup_resize_limit(struct mem_cgroup *memcg,
2425 unsigned long limit)
2426{
2427 unsigned long curusage;
2428 unsigned long oldusage;
2429 bool enlarge = false;
2430 int retry_count;
2431 int ret;
2432
2433
2434
2435
2436
2437
2438 retry_count = MEM_CGROUP_RECLAIM_RETRIES *
2439 mem_cgroup_count_children(memcg);
2440
2441 oldusage = page_counter_read(&memcg->memory);
2442
2443 do {
2444 if (signal_pending(current)) {
2445 ret = -EINTR;
2446 break;
2447 }
2448
2449 mutex_lock(&memcg_limit_mutex);
2450 if (limit > memcg->memsw.limit) {
2451 mutex_unlock(&memcg_limit_mutex);
2452 ret = -EINVAL;
2453 break;
2454 }
2455 if (limit > memcg->memory.limit)
2456 enlarge = true;
2457 ret = page_counter_limit(&memcg->memory, limit);
2458 mutex_unlock(&memcg_limit_mutex);
2459
2460 if (!ret)
2461 break;
2462
2463 try_to_free_mem_cgroup_pages(memcg, 1, GFP_KERNEL, true);
2464
2465 curusage = page_counter_read(&memcg->memory);
2466
2467 if (curusage >= oldusage)
2468 retry_count--;
2469 else
2470 oldusage = curusage;
2471 } while (retry_count);
2472
2473 if (!ret && enlarge)
2474 memcg_oom_recover(memcg);
2475
2476 return ret;
2477}
2478
2479static int mem_cgroup_resize_memsw_limit(struct mem_cgroup *memcg,
2480 unsigned long limit)
2481{
2482 unsigned long curusage;
2483 unsigned long oldusage;
2484 bool enlarge = false;
2485 int retry_count;
2486 int ret;
2487
2488
2489 retry_count = MEM_CGROUP_RECLAIM_RETRIES *
2490 mem_cgroup_count_children(memcg);
2491
2492 oldusage = page_counter_read(&memcg->memsw);
2493
2494 do {
2495 if (signal_pending(current)) {
2496 ret = -EINTR;
2497 break;
2498 }
2499
2500 mutex_lock(&memcg_limit_mutex);
2501 if (limit < memcg->memory.limit) {
2502 mutex_unlock(&memcg_limit_mutex);
2503 ret = -EINVAL;
2504 break;
2505 }
2506 if (limit > memcg->memsw.limit)
2507 enlarge = true;
2508 ret = page_counter_limit(&memcg->memsw, limit);
2509 mutex_unlock(&memcg_limit_mutex);
2510
2511 if (!ret)
2512 break;
2513
2514 try_to_free_mem_cgroup_pages(memcg, 1, GFP_KERNEL, false);
2515
2516 curusage = page_counter_read(&memcg->memsw);
2517
2518 if (curusage >= oldusage)
2519 retry_count--;
2520 else
2521 oldusage = curusage;
2522 } while (retry_count);
2523
2524 if (!ret && enlarge)
2525 memcg_oom_recover(memcg);
2526
2527 return ret;
2528}
2529
2530unsigned long mem_cgroup_soft_limit_reclaim(pg_data_t *pgdat, int order,
2531 gfp_t gfp_mask,
2532 unsigned long *total_scanned)
2533{
2534 unsigned long nr_reclaimed = 0;
2535 struct mem_cgroup_per_node *mz, *next_mz = NULL;
2536 unsigned long reclaimed;
2537 int loop = 0;
2538 struct mem_cgroup_tree_per_node *mctz;
2539 unsigned long excess;
2540 unsigned long nr_scanned;
2541
2542 if (order > 0)
2543 return 0;
2544
2545 mctz = soft_limit_tree_node(pgdat->node_id);
2546
2547
2548
2549
2550
2551
2552 if (!mctz || RB_EMPTY_ROOT(&mctz->rb_root))
2553 return 0;
2554
2555
2556
2557
2558
2559
2560 do {
2561 if (next_mz)
2562 mz = next_mz;
2563 else
2564 mz = mem_cgroup_largest_soft_limit_node(mctz);
2565 if (!mz)
2566 break;
2567
2568 nr_scanned = 0;
2569 reclaimed = mem_cgroup_soft_reclaim(mz->memcg, pgdat,
2570 gfp_mask, &nr_scanned);
2571 nr_reclaimed += reclaimed;
2572 *total_scanned += nr_scanned;
2573 spin_lock_irq(&mctz->lock);
2574 __mem_cgroup_remove_exceeded(mz, mctz);
2575
2576
2577
2578
2579
2580 next_mz = NULL;
2581 if (!reclaimed)
2582 next_mz = __mem_cgroup_largest_soft_limit_node(mctz);
2583
2584 excess = soft_limit_excess(mz->memcg);
2585
2586
2587
2588
2589
2590
2591
2592
2593
2594 __mem_cgroup_insert_exceeded(mz, mctz, excess);
2595 spin_unlock_irq(&mctz->lock);
2596 css_put(&mz->memcg->css);
2597 loop++;
2598
2599
2600
2601
2602
2603 if (!nr_reclaimed &&
2604 (next_mz == NULL ||
2605 loop > MEM_CGROUP_MAX_SOFT_LIMIT_RECLAIM_LOOPS))
2606 break;
2607 } while (!nr_reclaimed);
2608 if (next_mz)
2609 css_put(&next_mz->memcg->css);
2610 return nr_reclaimed;
2611}
2612
2613
2614
2615
2616
2617
2618
2619static inline bool memcg_has_children(struct mem_cgroup *memcg)
2620{
2621 bool ret;
2622
2623 rcu_read_lock();
2624 ret = css_next_child(NULL, &memcg->css);
2625 rcu_read_unlock();
2626 return ret;
2627}
2628
2629
2630
2631
2632
2633
2634static int mem_cgroup_force_empty(struct mem_cgroup *memcg)
2635{
2636 int nr_retries = MEM_CGROUP_RECLAIM_RETRIES;
2637
2638
2639 lru_add_drain_all();
2640
2641 while (nr_retries && page_counter_read(&memcg->memory)) {
2642 int progress;
2643
2644 if (signal_pending(current))
2645 return -EINTR;
2646
2647 progress = try_to_free_mem_cgroup_pages(memcg, 1,
2648 GFP_KERNEL, true);
2649 if (!progress) {
2650 nr_retries--;
2651
2652 congestion_wait(BLK_RW_ASYNC, HZ/10);
2653 }
2654
2655 }
2656
2657 return 0;
2658}
2659
2660static ssize_t mem_cgroup_force_empty_write(struct kernfs_open_file *of,
2661 char *buf, size_t nbytes,
2662 loff_t off)
2663{
2664 struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of));
2665
2666 if (mem_cgroup_is_root(memcg))
2667 return -EINVAL;
2668 return mem_cgroup_force_empty(memcg) ?: nbytes;
2669}
2670
2671static u64 mem_cgroup_hierarchy_read(struct cgroup_subsys_state *css,
2672 struct cftype *cft)
2673{
2674 return mem_cgroup_from_css(css)->use_hierarchy;
2675}
2676
2677static int mem_cgroup_hierarchy_write(struct cgroup_subsys_state *css,
2678 struct cftype *cft, u64 val)
2679{
2680 int retval = 0;
2681 struct mem_cgroup *memcg = mem_cgroup_from_css(css);
2682 struct mem_cgroup *parent_memcg = mem_cgroup_from_css(memcg->css.parent);
2683
2684 if (memcg->use_hierarchy == val)
2685 return 0;
2686
2687
2688
2689
2690
2691
2692
2693
2694
2695 if ((!parent_memcg || !parent_memcg->use_hierarchy) &&
2696 (val == 1 || val == 0)) {
2697 if (!memcg_has_children(memcg))
2698 memcg->use_hierarchy = val;
2699 else
2700 retval = -EBUSY;
2701 } else
2702 retval = -EINVAL;
2703
2704 return retval;
2705}
2706
2707static void tree_stat(struct mem_cgroup *memcg, unsigned long *stat)
2708{
2709 struct mem_cgroup *iter;
2710 int i;
2711
2712 memset(stat, 0, sizeof(*stat) * MEMCG_NR_STAT);
2713
2714 for_each_mem_cgroup_tree(iter, memcg) {
2715 for (i = 0; i < MEMCG_NR_STAT; i++)
2716 stat[i] += memcg_page_state(iter, i);
2717 }
2718}
2719
2720static void tree_events(struct mem_cgroup *memcg, unsigned long *events)
2721{
2722 struct mem_cgroup *iter;
2723 int i;
2724
2725 memset(events, 0, sizeof(*events) * MEMCG_NR_EVENTS);
2726
2727 for_each_mem_cgroup_tree(iter, memcg) {
2728 for (i = 0; i < MEMCG_NR_EVENTS; i++)
2729 events[i] += memcg_sum_events(iter, i);
2730 }
2731}
2732
2733static unsigned long mem_cgroup_usage(struct mem_cgroup *memcg, bool swap)
2734{
2735 unsigned long val = 0;
2736
2737 if (mem_cgroup_is_root(memcg)) {
2738 struct mem_cgroup *iter;
2739
2740 for_each_mem_cgroup_tree(iter, memcg) {
2741 val += memcg_page_state(iter, MEMCG_CACHE);
2742 val += memcg_page_state(iter, MEMCG_RSS);
2743 if (swap)
2744 val += memcg_page_state(iter, MEMCG_SWAP);
2745 }
2746 } else {
2747 if (!swap)
2748 val = page_counter_read(&memcg->memory);
2749 else
2750 val = page_counter_read(&memcg->memsw);
2751 }
2752 return val;
2753}
2754
2755enum {
2756 RES_USAGE,
2757 RES_LIMIT,
2758 RES_MAX_USAGE,
2759 RES_FAILCNT,
2760 RES_SOFT_LIMIT,
2761};
2762
2763static u64 mem_cgroup_read_u64(struct cgroup_subsys_state *css,
2764 struct cftype *cft)
2765{
2766 struct mem_cgroup *memcg = mem_cgroup_from_css(css);
2767 struct page_counter *counter;
2768
2769 switch (MEMFILE_TYPE(cft->private)) {
2770 case _MEM:
2771 counter = &memcg->memory;
2772 break;
2773 case _MEMSWAP:
2774 counter = &memcg->memsw;
2775 break;
2776 case _KMEM:
2777 counter = &memcg->kmem;
2778 break;
2779 case _TCP:
2780 counter = &memcg->tcpmem;
2781 break;
2782 default:
2783 BUG();
2784 }
2785
2786 switch (MEMFILE_ATTR(cft->private)) {
2787 case RES_USAGE:
2788 if (counter == &memcg->memory)
2789 return (u64)mem_cgroup_usage(memcg, false) * PAGE_SIZE;
2790 if (counter == &memcg->memsw)
2791 return (u64)mem_cgroup_usage(memcg, true) * PAGE_SIZE;
2792 return (u64)page_counter_read(counter) * PAGE_SIZE;
2793 case RES_LIMIT:
2794 return (u64)counter->limit * PAGE_SIZE;
2795 case RES_MAX_USAGE:
2796 return (u64)counter->watermark * PAGE_SIZE;
2797 case RES_FAILCNT:
2798 return counter->failcnt;
2799 case RES_SOFT_LIMIT:
2800 return (u64)memcg->soft_limit * PAGE_SIZE;
2801 default:
2802 BUG();
2803 }
2804}

#ifndef CONFIG_SLOB
static int memcg_online_kmem(struct mem_cgroup *memcg)
{
	int memcg_id;

	if (cgroup_memory_nokmem)
		return 0;

	BUG_ON(memcg->kmemcg_id >= 0);
	BUG_ON(memcg->kmem_state);

	memcg_id = memcg_alloc_cache_id();
	if (memcg_id < 0)
		return memcg_id;

	static_branch_inc(&memcg_kmem_enabled_key);
	/*
	 * A memory cgroup is considered kmem-online as soon as it gets
	 * kmemcg_id. Setting the id after enabling static branching will
	 * guarantee no one starts accounting before all call sites are
	 * patched.
	 */
	memcg->kmemcg_id = memcg_id;
	memcg->kmem_state = KMEM_ONLINE;
	INIT_LIST_HEAD(&memcg->kmem_caches);

	return 0;
}

static void memcg_offline_kmem(struct mem_cgroup *memcg)
{
	struct cgroup_subsys_state *css;
	struct mem_cgroup *parent, *child;
	int kmemcg_id;

	if (memcg->kmem_state != KMEM_ONLINE)
		return;
	/*
	 * Clear the online state before deactivating the caches, so that
	 * new kmem allocations can no longer be accounted to this
	 * half-dead cgroup.
	 */
	memcg->kmem_state = KMEM_ALLOCATED;

	memcg_deactivate_kmem_caches(memcg);

	kmemcg_id = memcg->kmemcg_id;
	BUG_ON(kmemcg_id < 0);

	parent = parent_mem_cgroup(memcg);
	if (!parent)
		parent = root_mem_cgroup;

	/*
	 * Change kmemcg_id of this cgroup and all its descendants to the
	 * parent's id, and then move all entries from this cgroup's list_lrus
	 * to ones of the parent. After we have finished, all list_lrus
	 * corresponding to this cgroup are guaranteed to remain empty. The
	 * ordering is imposed by list_lru_node->lock taken by
	 * memcg_drain_all_list_lrus().
	 */
	rcu_read_lock();
	css_for_each_descendant_pre(css, &memcg->css) {
		child = mem_cgroup_from_css(css);
		BUG_ON(child->kmemcg_id != kmemcg_id);
		child->kmemcg_id = parent->kmemcg_id;
		if (!memcg->use_hierarchy)
			break;
	}
	rcu_read_unlock();

	memcg_drain_all_list_lrus(kmemcg_id, parent->kmemcg_id);

	memcg_free_cache_id(kmemcg_id);
}

static void memcg_free_kmem(struct mem_cgroup *memcg)
{
	/* css_alloc() failed, offlining didn't happen */
	if (unlikely(memcg->kmem_state == KMEM_ONLINE))
		memcg_offline_kmem(memcg);

	if (memcg->kmem_state == KMEM_ALLOCATED) {
		memcg_destroy_kmem_caches(memcg);
		static_branch_dec(&memcg_kmem_enabled_key);
		WARN_ON(page_counter_read(&memcg->kmem));
	}
}
#else
static int memcg_online_kmem(struct mem_cgroup *memcg)
{
	return 0;
}
static void memcg_offline_kmem(struct mem_cgroup *memcg)
{
}
static void memcg_free_kmem(struct mem_cgroup *memcg)
{
}
#endif /* !CONFIG_SLOB */

static int memcg_update_kmem_limit(struct mem_cgroup *memcg,
				   unsigned long limit)
{
	int ret;

	mutex_lock(&memcg_limit_mutex);
	ret = page_counter_limit(&memcg->kmem, limit);
	mutex_unlock(&memcg_limit_mutex);
	return ret;
}

static int memcg_update_tcp_limit(struct mem_cgroup *memcg, unsigned long limit)
{
	int ret;

	mutex_lock(&memcg_limit_mutex);

	ret = page_counter_limit(&memcg->tcpmem, limit);
	if (ret)
		goto out;

	if (!memcg->tcpmem_active) {
		/*
		 * The active flag needs to be written after the static_key
		 * update. This is what guarantees that the socket activation
		 * function is the last one to run. See mem_cgroup_sk_alloc()
		 * for details, and note that we don't mark any socket as
		 * belonging to this memcg until that flag is up.
		 *
		 * We need to do this, because static_keys will span multiple
		 * sites, but we can't control their order. If we mark a socket
		 * as accounted, but the accounting functions are not patched
		 * in yet, we'll lose accounting.
		 *
		 * We never race with the readers in mem_cgroup_sk_alloc(),
		 * because when this value changes, the code to process it is
		 * not patched in yet.
		 */
		static_branch_inc(&memcg_sockets_enabled_key);
		memcg->tcpmem_active = true;
	}
out:
	mutex_unlock(&memcg_limit_mutex);
	return ret;
}

/*
 * Common handler for writes to the cgroup v1 limit files:
 * memory.{limit,soft_limit}_in_bytes and the memsw/kmem/tcp variants.
 * The value is parsed by page_counter_memparse(), with "-1" meaning
 * "no limit".
 */
static ssize_t mem_cgroup_write(struct kernfs_open_file *of,
				char *buf, size_t nbytes, loff_t off)
{
	struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of));
	unsigned long nr_pages;
	int ret;

	buf = strstrip(buf);
	ret = page_counter_memparse(buf, "-1", &nr_pages);
	if (ret)
		return ret;

	switch (MEMFILE_ATTR(of_cft(of)->private)) {
	case RES_LIMIT:
		if (mem_cgroup_is_root(memcg)) { /* Can't set limit on root */
			ret = -EINVAL;
			break;
		}
		switch (MEMFILE_TYPE(of_cft(of)->private)) {
		case _MEM:
			ret = mem_cgroup_resize_limit(memcg, nr_pages);
			break;
		case _MEMSWAP:
			ret = mem_cgroup_resize_memsw_limit(memcg, nr_pages);
			break;
		case _KMEM:
			ret = memcg_update_kmem_limit(memcg, nr_pages);
			break;
		case _TCP:
			ret = memcg_update_tcp_limit(memcg, nr_pages);
			break;
		}
		break;
	case RES_SOFT_LIMIT:
		memcg->soft_limit = nr_pages;
		ret = 0;
		break;
	}
	return ret ?: nbytes;
}

static ssize_t mem_cgroup_reset(struct kernfs_open_file *of, char *buf,
				size_t nbytes, loff_t off)
{
	struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of));
	struct page_counter *counter;

	switch (MEMFILE_TYPE(of_cft(of)->private)) {
	case _MEM:
		counter = &memcg->memory;
		break;
	case _MEMSWAP:
		counter = &memcg->memsw;
		break;
	case _KMEM:
		counter = &memcg->kmem;
		break;
	case _TCP:
		counter = &memcg->tcpmem;
		break;
	default:
		BUG();
	}

	switch (MEMFILE_ATTR(of_cft(of)->private)) {
	case RES_MAX_USAGE:
		page_counter_reset_watermark(counter);
		break;
	case RES_FAILCNT:
		counter->failcnt = 0;
		break;
	default:
		BUG();
	}

	return nbytes;
}
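/*
 * Editor's note (illustrative, path hypothetical): any write to the
 * corresponding v1 file triggers the reset above, e.g.:
 *
 *	echo 0 > /sys/fs/cgroup/memory/mygroup/memory.max_usage_in_bytes
 *
 * clears the high-watermark, and writing to memory.failcnt zeroes the
 * limit-hit counter; the written value itself is ignored.
 */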

static u64 mem_cgroup_move_charge_read(struct cgroup_subsys_state *css,
				       struct cftype *cft)
{
	return mem_cgroup_from_css(css)->move_charge_at_immigrate;
}

#ifdef CONFIG_MMU
static int mem_cgroup_move_charge_write(struct cgroup_subsys_state *css,
					struct cftype *cft, u64 val)
{
	struct mem_cgroup *memcg = mem_cgroup_from_css(css);

	if (val & ~MOVE_MASK)
		return -EINVAL;

	/*
	 * No kind of locking is needed in here, because ->can_attach() will
	 * check this value once in the beginning of the process, and then
	 * carry on with stale data. This means that changes to this value
	 * will only affect task migrations starting after the change.
	 */
	memcg->move_charge_at_immigrate = val;
	return 0;
}
#else
static int mem_cgroup_move_charge_write(struct cgroup_subsys_state *css,
					struct cftype *cft, u64 val)
{
	return -ENOSYS;
}
#endif

#ifdef CONFIG_NUMA
static int memcg_numa_stat_show(struct seq_file *m, void *v)
{
	struct numa_stat {
		const char *name;
		unsigned int lru_mask;
	};

	static const struct numa_stat stats[] = {
		{ "total", LRU_ALL },
		{ "file", LRU_ALL_FILE },
		{ "anon", LRU_ALL_ANON },
		{ "unevictable", BIT(LRU_UNEVICTABLE) },
	};
	const struct numa_stat *stat;
	int nid;
	unsigned long nr;
	struct mem_cgroup *memcg = mem_cgroup_from_css(seq_css(m));

	for (stat = stats; stat < stats + ARRAY_SIZE(stats); stat++) {
		nr = mem_cgroup_nr_lru_pages(memcg, stat->lru_mask);
		seq_printf(m, "%s=%lu", stat->name, nr);
		for_each_node_state(nid, N_MEMORY) {
			nr = mem_cgroup_node_nr_lru_pages(memcg, nid,
							  stat->lru_mask);
			seq_printf(m, " N%d=%lu", nid, nr);
		}
		seq_putc(m, '\n');
	}

	for (stat = stats; stat < stats + ARRAY_SIZE(stats); stat++) {
		struct mem_cgroup *iter;

		nr = 0;
		for_each_mem_cgroup_tree(iter, memcg)
			nr += mem_cgroup_nr_lru_pages(iter, stat->lru_mask);
		seq_printf(m, "hierarchical_%s=%lu", stat->name, nr);
		for_each_node_state(nid, N_MEMORY) {
			nr = 0;
			for_each_mem_cgroup_tree(iter, memcg)
				nr += mem_cgroup_node_nr_lru_pages(
					iter, nid, stat->lru_mask);
			seq_printf(m, " N%d=%lu", nid, nr);
		}
		seq_putc(m, '\n');
	}

	return 0;
}
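/*
 * Editor's note (illustrative): memory.numa_stat emits one line per row
 * above, in page counts, e.g. on a two-node machine:
 *
 *	total=120 N0=70 N1=50
 *	...
 *	hierarchical_total=360 N0=200 N1=160
 */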
#endif

/* Universal VM events cgroup1 shows, original sort order */
unsigned int memcg1_events[] = {
	PGPGIN,
	PGPGOUT,
	PGFAULT,
	PGMAJFAULT,
};

static const char *const memcg1_event_names[] = {
	"pgpgin",
	"pgpgout",
	"pgfault",
	"pgmajfault",
};

static int memcg_stat_show(struct seq_file *m, void *v)
{
	struct mem_cgroup *memcg = mem_cgroup_from_css(seq_css(m));
	unsigned long memory, memsw;
	struct mem_cgroup *mi;
	unsigned int i;

	BUILD_BUG_ON(ARRAY_SIZE(memcg1_stat_names) != ARRAY_SIZE(memcg1_stats));
	BUILD_BUG_ON(ARRAY_SIZE(mem_cgroup_lru_names) != NR_LRU_LISTS);

	for (i = 0; i < ARRAY_SIZE(memcg1_stats); i++) {
		if (memcg1_stats[i] == MEMCG_SWAP && !do_memsw_account())
			continue;
		seq_printf(m, "%s %lu\n", memcg1_stat_names[i],
			   memcg_page_state(memcg, memcg1_stats[i]) *
			   PAGE_SIZE);
	}

	for (i = 0; i < ARRAY_SIZE(memcg1_events); i++)
		seq_printf(m, "%s %lu\n", memcg1_event_names[i],
			   memcg_sum_events(memcg, memcg1_events[i]));

	for (i = 0; i < NR_LRU_LISTS; i++)
		seq_printf(m, "%s %lu\n", mem_cgroup_lru_names[i],
			   mem_cgroup_nr_lru_pages(memcg, BIT(i)) * PAGE_SIZE);

	/* Hierarchical information */
	memory = memsw = PAGE_COUNTER_MAX;
	for (mi = memcg; mi; mi = parent_mem_cgroup(mi)) {
		memory = min(memory, mi->memory.limit);
		memsw = min(memsw, mi->memsw.limit);
	}
	seq_printf(m, "hierarchical_memory_limit %llu\n",
		   (u64)memory * PAGE_SIZE);
	if (do_memsw_account())
		seq_printf(m, "hierarchical_memsw_limit %llu\n",
			   (u64)memsw * PAGE_SIZE);

	for (i = 0; i < ARRAY_SIZE(memcg1_stats); i++) {
		unsigned long long val = 0;

		if (memcg1_stats[i] == MEMCG_SWAP && !do_memsw_account())
			continue;
		for_each_mem_cgroup_tree(mi, memcg)
			val += memcg_page_state(mi, memcg1_stats[i]) *
			PAGE_SIZE;
		seq_printf(m, "total_%s %llu\n", memcg1_stat_names[i], val);
	}

	for (i = 0; i < ARRAY_SIZE(memcg1_events); i++) {
		unsigned long long val = 0;

		for_each_mem_cgroup_tree(mi, memcg)
			val += memcg_sum_events(mi, memcg1_events[i]);
		seq_printf(m, "total_%s %llu\n", memcg1_event_names[i], val);
	}

	for (i = 0; i < NR_LRU_LISTS; i++) {
		unsigned long long val = 0;

		for_each_mem_cgroup_tree(mi, memcg)
			val += mem_cgroup_nr_lru_pages(mi, BIT(i)) * PAGE_SIZE;
		seq_printf(m, "total_%s %llu\n", mem_cgroup_lru_names[i], val);
	}

#ifdef CONFIG_DEBUG_VM
	{
		pg_data_t *pgdat;
		struct mem_cgroup_per_node *mz;
		struct zone_reclaim_stat *rstat;
		unsigned long recent_rotated[2] = {0, 0};
		unsigned long recent_scanned[2] = {0, 0};

		for_each_online_pgdat(pgdat) {
			mz = mem_cgroup_nodeinfo(memcg, pgdat->node_id);
			rstat = &mz->lruvec.reclaim_stat;

			recent_rotated[0] += rstat->recent_rotated[0];
			recent_rotated[1] += rstat->recent_rotated[1];
			recent_scanned[0] += rstat->recent_scanned[0];
			recent_scanned[1] += rstat->recent_scanned[1];
		}
		seq_printf(m, "recent_rotated_anon %lu\n", recent_rotated[0]);
		seq_printf(m, "recent_rotated_file %lu\n", recent_rotated[1]);
		seq_printf(m, "recent_scanned_anon %lu\n", recent_scanned[0]);
		seq_printf(m, "recent_scanned_file %lu\n", recent_scanned[1]);
	}
#endif

	return 0;
}

static u64 mem_cgroup_swappiness_read(struct cgroup_subsys_state *css,
				      struct cftype *cft)
{
	struct mem_cgroup *memcg = mem_cgroup_from_css(css);

	return mem_cgroup_swappiness(memcg);
}

static int mem_cgroup_swappiness_write(struct cgroup_subsys_state *css,
				       struct cftype *cft, u64 val)
{
	struct mem_cgroup *memcg = mem_cgroup_from_css(css);

	if (val > 100)
		return -EINVAL;

	if (css->parent)
		memcg->swappiness = val;
	else
		vm_swappiness = val;

	return 0;
}

static void __mem_cgroup_threshold(struct mem_cgroup *memcg, bool swap)
{
	struct mem_cgroup_threshold_ary *t;
	unsigned long usage;
	int i;

	rcu_read_lock();
	if (!swap)
		t = rcu_dereference(memcg->thresholds.primary);
	else
		t = rcu_dereference(memcg->memsw_thresholds.primary);

	if (!t)
		goto unlock;

	usage = mem_cgroup_usage(memcg, swap);

	/*
	 * current_threshold points to threshold just below or equal to usage.
	 * If it's not true, a threshold was crossed after last
	 * call of __mem_cgroup_threshold().
	 */
	i = t->current_threshold;

	/*
	 * Iterate backward over array of thresholds starting from
	 * current_threshold and check if a threshold is crossed.
	 * If none of thresholds below usage is crossed, we read
	 * only one element of the array here.
	 */
	for (; i >= 0 && unlikely(t->entries[i].threshold > usage); i--)
		eventfd_signal(t->entries[i].eventfd, 1);

	/* i = current_threshold + 1 */
	i++;

	/*
	 * Iterate forward over array of thresholds starting from
	 * current_threshold+1 and check if a threshold is crossed.
	 * If none of thresholds above usage is crossed, we read
	 * only one element of the array here.
	 */
	for (; i < t->size && unlikely(t->entries[i].threshold <= usage); i++)
		eventfd_signal(t->entries[i].eventfd, 1);

	/* Update current_threshold */
	t->current_threshold = i - 1;
unlock:
	rcu_read_unlock();
}

static void mem_cgroup_threshold(struct mem_cgroup *memcg)
{
	while (memcg) {
		__mem_cgroup_threshold(memcg, false);
		if (do_memsw_account())
			__mem_cgroup_threshold(memcg, true);

		memcg = parent_mem_cgroup(memcg);
	}
}

static int compare_thresholds(const void *a, const void *b)
{
	const struct mem_cgroup_threshold *_a = a;
	const struct mem_cgroup_threshold *_b = b;

	if (_a->threshold > _b->threshold)
		return 1;

	if (_a->threshold < _b->threshold)
		return -1;

	return 0;
}

static int mem_cgroup_oom_notify_cb(struct mem_cgroup *memcg)
{
	struct mem_cgroup_eventfd_list *ev;

	spin_lock(&memcg_oom_lock);

	list_for_each_entry(ev, &memcg->oom_notify, list)
		eventfd_signal(ev->eventfd, 1);

	spin_unlock(&memcg_oom_lock);
	return 0;
}

static void mem_cgroup_oom_notify(struct mem_cgroup *memcg)
{
	struct mem_cgroup *iter;

	for_each_mem_cgroup_tree(iter, memcg)
		mem_cgroup_oom_notify_cb(iter);
}

static int __mem_cgroup_usage_register_event(struct mem_cgroup *memcg,
	struct eventfd_ctx *eventfd, const char *args, enum res_type type)
{
	struct mem_cgroup_thresholds *thresholds;
	struct mem_cgroup_threshold_ary *new;
	unsigned long threshold;
	unsigned long usage;
	int i, size, ret;

	ret = page_counter_memparse(args, "-1", &threshold);
	if (ret)
		return ret;

	mutex_lock(&memcg->thresholds_lock);

	if (type == _MEM) {
		thresholds = &memcg->thresholds;
		usage = mem_cgroup_usage(memcg, false);
	} else if (type == _MEMSWAP) {
		thresholds = &memcg->memsw_thresholds;
		usage = mem_cgroup_usage(memcg, true);
	} else
		BUG();

	/* Check if a threshold crossed before adding a new one */
	if (thresholds->primary)
		__mem_cgroup_threshold(memcg, type == _MEMSWAP);

	size = thresholds->primary ? thresholds->primary->size + 1 : 1;

	/* Allocate memory for new array of thresholds */
	new = kmalloc(sizeof(*new) + size * sizeof(struct mem_cgroup_threshold),
		      GFP_KERNEL);
	if (!new) {
		ret = -ENOMEM;
		goto unlock;
	}
	new->size = size;

	/* Copy thresholds (if any) to new array */
	if (thresholds->primary) {
		memcpy(new->entries, thresholds->primary->entries, (size - 1) *
		       sizeof(struct mem_cgroup_threshold));
	}

	/* Add new threshold */
	new->entries[size - 1].eventfd = eventfd;
	new->entries[size - 1].threshold = threshold;

	/* Sort thresholds. Registering of new threshold isn't time-critical */
	sort(new->entries, size, sizeof(struct mem_cgroup_threshold),
	     compare_thresholds, NULL);

	/* Find current threshold */
	new->current_threshold = -1;
	for (i = 0; i < size; i++) {
		if (new->entries[i].threshold <= usage) {
			/*
			 * new->current_threshold will not be used until
			 * rcu_assign_pointer(), so it's safe to increment
			 * it here.
			 */
			++new->current_threshold;
		} else
			break;
	}

	/* Free old spare buffer and save old primary buffer as spare */
	kfree(thresholds->spare);
	thresholds->spare = thresholds->primary;

	rcu_assign_pointer(thresholds->primary, new);

	/* To be sure that nobody uses thresholds */
	synchronize_rcu();

unlock:
	mutex_unlock(&memcg->thresholds_lock);

	return ret;
}
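/*
 * Editor's note (illustrative): registration above follows the classic
 * RCU copy-and-update pattern: build a new, larger sorted array off to
 * the side, publish it with rcu_assign_pointer(), keep the old primary
 * array as the preallocated "spare" for the next unregister, and only
 * reuse buffers after synchronize_rcu() guarantees no reader can still
 * see them.
 */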

static int mem_cgroup_usage_register_event(struct mem_cgroup *memcg,
	struct eventfd_ctx *eventfd, const char *args)
{
	return __mem_cgroup_usage_register_event(memcg, eventfd, args, _MEM);
}

static int memsw_cgroup_usage_register_event(struct mem_cgroup *memcg,
	struct eventfd_ctx *eventfd, const char *args)
{
	return __mem_cgroup_usage_register_event(memcg, eventfd, args, _MEMSWAP);
}

static void __mem_cgroup_usage_unregister_event(struct mem_cgroup *memcg,
	struct eventfd_ctx *eventfd, enum res_type type)
{
	struct mem_cgroup_thresholds *thresholds;
	struct mem_cgroup_threshold_ary *new;
	unsigned long usage;
	int i, j, size;

	mutex_lock(&memcg->thresholds_lock);

	if (type == _MEM) {
		thresholds = &memcg->thresholds;
		usage = mem_cgroup_usage(memcg, false);
	} else if (type == _MEMSWAP) {
		thresholds = &memcg->memsw_thresholds;
		usage = mem_cgroup_usage(memcg, true);
	} else
		BUG();

	if (!thresholds->primary)
		goto unlock;

	/* Check if a threshold crossed before removing */
	__mem_cgroup_threshold(memcg, type == _MEMSWAP);

	/* Calculate new number of threshold */
	size = 0;
	for (i = 0; i < thresholds->primary->size; i++) {
		if (thresholds->primary->entries[i].eventfd != eventfd)
			size++;
	}

	new = thresholds->spare;

	/* Set thresholds array to NULL if we don't have thresholds */
	if (!size) {
		kfree(new);
		new = NULL;
		goto swap_buffers;
	}

	new->size = size;

	/* Copy thresholds and find current threshold */
	new->current_threshold = -1;
	for (i = 0, j = 0; i < thresholds->primary->size; i++) {
		if (thresholds->primary->entries[i].eventfd == eventfd)
			continue;

		new->entries[j] = thresholds->primary->entries[i];
		if (new->entries[j].threshold <= usage) {
			/*
			 * new->current_threshold will not be used
			 * until rcu_assign_pointer(), so it's safe to increment
			 * it here.
			 */
			++new->current_threshold;
		}
		j++;
	}

swap_buffers:
	/* Swap primary and spare array */
	thresholds->spare = thresholds->primary;

	rcu_assign_pointer(thresholds->primary, new);

	/* To be sure that nobody uses thresholds */
	synchronize_rcu();

	/* If all events are unregistered, free the spare array */
	if (!new) {
		kfree(thresholds->spare);
		thresholds->spare = NULL;
	}
unlock:
	mutex_unlock(&memcg->thresholds_lock);
}

static void mem_cgroup_usage_unregister_event(struct mem_cgroup *memcg,
	struct eventfd_ctx *eventfd)
{
	return __mem_cgroup_usage_unregister_event(memcg, eventfd, _MEM);
}

static void memsw_cgroup_usage_unregister_event(struct mem_cgroup *memcg,
	struct eventfd_ctx *eventfd)
{
	return __mem_cgroup_usage_unregister_event(memcg, eventfd, _MEMSWAP);
}

static int mem_cgroup_oom_register_event(struct mem_cgroup *memcg,
	struct eventfd_ctx *eventfd, const char *args)
{
	struct mem_cgroup_eventfd_list *event;

	event = kmalloc(sizeof(*event), GFP_KERNEL);
	if (!event)
		return -ENOMEM;

	spin_lock(&memcg_oom_lock);

	event->eventfd = eventfd;
	list_add(&event->list, &memcg->oom_notify);

	/* already in OOM ? */
	if (memcg->under_oom)
		eventfd_signal(eventfd, 1);
	spin_unlock(&memcg_oom_lock);

	return 0;
}

static void mem_cgroup_oom_unregister_event(struct mem_cgroup *memcg,
	struct eventfd_ctx *eventfd)
{
	struct mem_cgroup_eventfd_list *ev, *tmp;

	spin_lock(&memcg_oom_lock);

	list_for_each_entry_safe(ev, tmp, &memcg->oom_notify, list) {
		if (ev->eventfd == eventfd) {
			list_del(&ev->list);
			kfree(ev);
		}
	}

	spin_unlock(&memcg_oom_lock);
}

static int mem_cgroup_oom_control_read(struct seq_file *sf, void *v)
{
	struct mem_cgroup *memcg = mem_cgroup_from_css(seq_css(sf));

	seq_printf(sf, "oom_kill_disable %d\n", memcg->oom_kill_disable);
	seq_printf(sf, "under_oom %d\n", (bool)memcg->under_oom);
	return 0;
}

static int mem_cgroup_oom_control_write(struct cgroup_subsys_state *css,
	struct cftype *cft, u64 val)
{
	struct mem_cgroup *memcg = mem_cgroup_from_css(css);

	/* cannot set to root cgroup and only 0 and 1 are allowed */
	if (!css->parent || !((val == 0) || (val == 1)))
		return -EINVAL;

	memcg->oom_kill_disable = val;
	if (!val)
		memcg_oom_recover(memcg);

	return 0;
}
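/*
 * Editor's note (illustrative, path hypothetical): writing to the v1 file
 *
 *	echo 1 > /sys/fs/cgroup/memory/mygroup/memory.oom_control
 *
 * disables the OOM killer for the group; tasks that hit the limit then
 * wait until memory is freed or the limit is raised, and reads of the
 * same file report "under_oom 1" while they wait.
 */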

#ifdef CONFIG_CGROUP_WRITEBACK

struct list_head *mem_cgroup_cgwb_list(struct mem_cgroup *memcg)
{
	return &memcg->cgwb_list;
}

static int memcg_wb_domain_init(struct mem_cgroup *memcg, gfp_t gfp)
{
	return wb_domain_init(&memcg->cgwb_domain, gfp);
}

static void memcg_wb_domain_exit(struct mem_cgroup *memcg)
{
	wb_domain_exit(&memcg->cgwb_domain);
}

static void memcg_wb_domain_size_changed(struct mem_cgroup *memcg)
{
	wb_domain_size_changed(&memcg->cgwb_domain);
}

struct wb_domain *mem_cgroup_wb_domain(struct bdi_writeback *wb)
{
	struct mem_cgroup *memcg = mem_cgroup_from_css(wb->memcg_css);

	if (!memcg->css.parent)
		return NULL;

	return &memcg->cgwb_domain;
}

/**
 * mem_cgroup_wb_stats - retrieve writeback related stats from its memcg
 * @wb: bdi_writeback in question
 * @pfilepages: out parameter for number of file pages
 * @pheadroom: out parameter for number of allocatable pages according to memcg
 * @pdirty: out parameter for number of dirty pages
 * @pwriteback: out parameter for number of pages under writeback
 *
 * Determine the numbers of file, headroom, dirty, and writeback pages in
 * @wb's memcg.  File, dirty and writeback are self-explanatory.  Headroom
 * is a bit more involved.
 *
 * A memcg's headroom is "min(max, high) - used".  In the hierarchy, the
 * headroom is calculated as the lowest headroom of itself and the
 * ancestors.  Note that this doesn't consider the actual amount of
 * available memory in the system.  The caller should further cap
 * *@pheadroom accordingly.
 */
void mem_cgroup_wb_stats(struct bdi_writeback *wb, unsigned long *pfilepages,
			 unsigned long *pheadroom, unsigned long *pdirty,
			 unsigned long *pwriteback)
{
	struct mem_cgroup *memcg = mem_cgroup_from_css(wb->memcg_css);
	struct mem_cgroup *parent;

	*pdirty = memcg_page_state(memcg, NR_FILE_DIRTY);

	/* this should eventually include NR_UNSTABLE_NFS */
	*pwriteback = memcg_page_state(memcg, NR_WRITEBACK);
	*pfilepages = mem_cgroup_nr_lru_pages(memcg, (1 << LRU_INACTIVE_FILE) |
						     (1 << LRU_ACTIVE_FILE));
	*pheadroom = PAGE_COUNTER_MAX;

	while ((parent = parent_mem_cgroup(memcg))) {
		unsigned long ceiling = min(memcg->memory.limit, memcg->high);
		unsigned long used = page_counter_read(&memcg->memory);

		*pheadroom = min(*pheadroom, ceiling - min(ceiling, used));
		memcg = parent;
	}
}

#else	/* CONFIG_CGROUP_WRITEBACK */

static int memcg_wb_domain_init(struct mem_cgroup *memcg, gfp_t gfp)
{
	return 0;
}

static void memcg_wb_domain_exit(struct mem_cgroup *memcg)
{
}

static void memcg_wb_domain_size_changed(struct mem_cgroup *memcg)
{
}

#endif	/* CONFIG_CGROUP_WRITEBACK */

/*
 * DO NOT USE IN NEW FILES.
 *
 * "cgroup.event_control" implementation.
 *
 * This is way over-engineered.  It tries to support fully configurable
 * events for each user.  Such level of flexibility is completely
 * unnecessary especially in the light of the planned unified hierarchy.
 *
 * Please deprecate this and replace with something simpler.
 */

/*
 * Unregister event and free resources.
 *
 * Gets called from workqueue.
 */
static void memcg_event_remove(struct work_struct *work)
{
	struct mem_cgroup_event *event =
		container_of(work, struct mem_cgroup_event, remove);
	struct mem_cgroup *memcg = event->memcg;

	remove_wait_queue(event->wqh, &event->wait);

	event->unregister_event(memcg, event->eventfd);

	/* Notify userspace the event is going away. */
	eventfd_signal(event->eventfd, 1);

	eventfd_ctx_put(event->eventfd);
	kfree(event);
	css_put(&memcg->css);
}

/*
 * Gets called on POLLHUP on eventfd when user closes it.
 *
 * Called with wqh->lock held and interrupts disabled.
 */
static int memcg_event_wake(wait_queue_t *wait, unsigned mode,
			    int sync, void *key)
{
	struct mem_cgroup_event *event =
		container_of(wait, struct mem_cgroup_event, wait);
	struct mem_cgroup *memcg = event->memcg;
	unsigned long flags = (unsigned long)key;

	if (flags & POLLHUP) {
		/*
		 * If the event has been detached at cgroup removal, we
		 * can simply return knowing the other side will cleanup
		 * for us.
		 *
		 * We can't race against event freeing since the other
		 * side will require wqh->lock via remove_wait_queue(),
		 * which we hold.
		 */
		spin_lock(&memcg->event_list_lock);
		if (!list_empty(&event->list)) {
			list_del_init(&event->list);
			/*
			 * We are in atomic context, but cgroup_event_remove()
			 * may sleep, so we have to call it in workqueue.
			 */
			schedule_work(&event->remove);
		}
		spin_unlock(&memcg->event_list_lock);
	}

	return 0;
}

static void memcg_event_ptable_queue_proc(struct file *file,
					  wait_queue_head_t *wqh, poll_table *pt)
{
	struct mem_cgroup_event *event =
		container_of(pt, struct mem_cgroup_event, pt);

	event->wqh = wqh;
	add_wait_queue(wqh, &event->wait);
}

/*
 * DO NOT USE IN NEW FILES.
 *
 * Parse input and register new cgroup event handler.
 *
 * Input must be in format '<event_fd> <control_fd> <args>'.
 * Interpretation of args is defined by control file implementation.
 */
static ssize_t memcg_write_event_control(struct kernfs_open_file *of,
					 char *buf, size_t nbytes, loff_t off)
{
	struct cgroup_subsys_state *css = of_css(of);
	struct mem_cgroup *memcg = mem_cgroup_from_css(css);
	struct mem_cgroup_event *event;
	struct cgroup_subsys_state *cfile_css;
	unsigned int efd, cfd;
	struct fd efile;
	struct fd cfile;
	const char *name;
	char *endp;
	int ret;

	buf = strstrip(buf);

	efd = simple_strtoul(buf, &endp, 10);
	if (*endp != ' ')
		return -EINVAL;
	buf = endp + 1;

	cfd = simple_strtoul(buf, &endp, 10);
	if ((*endp != ' ') && (*endp != '\0'))
		return -EINVAL;
	buf = endp + 1;

	event = kzalloc(sizeof(*event), GFP_KERNEL);
	if (!event)
		return -ENOMEM;

	event->memcg = memcg;
	INIT_LIST_HEAD(&event->list);
	init_poll_funcptr(&event->pt, memcg_event_ptable_queue_proc);
	init_waitqueue_func_entry(&event->wait, memcg_event_wake);
	INIT_WORK(&event->remove, memcg_event_remove);

	efile = fdget(efd);
	if (!efile.file) {
		ret = -EBADF;
		goto out_kfree;
	}

	event->eventfd = eventfd_ctx_fileget(efile.file);
	if (IS_ERR(event->eventfd)) {
		ret = PTR_ERR(event->eventfd);
		goto out_put_efile;
	}

	cfile = fdget(cfd);
	if (!cfile.file) {
		ret = -EBADF;
		goto out_put_eventfd;
	}

	/* the process need read permission on control file */
	/* AV: shouldn't we check that it's been opened for read instead? */
	ret = inode_permission(file_inode(cfile.file), MAY_READ);
	if (ret < 0)
		goto out_put_cfile;

	/*
	 * Determine the event callbacks and set them in @event.  This used
	 * to be done via struct cftype but cgroup core no longer knows
	 * about these events.  The following is crude but the whole thing
	 * is for compatibility anyway.
	 *
	 * DO NOT ADD NEW FILES.
	 */
	name = cfile.file->f_path.dentry->d_name.name;

	if (!strcmp(name, "memory.usage_in_bytes")) {
		event->register_event = mem_cgroup_usage_register_event;
		event->unregister_event = mem_cgroup_usage_unregister_event;
	} else if (!strcmp(name, "memory.oom_control")) {
		event->register_event = mem_cgroup_oom_register_event;
		event->unregister_event = mem_cgroup_oom_unregister_event;
	} else if (!strcmp(name, "memory.pressure_level")) {
		event->register_event = vmpressure_register_event;
		event->unregister_event = vmpressure_unregister_event;
	} else if (!strcmp(name, "memory.memsw.usage_in_bytes")) {
		event->register_event = memsw_cgroup_usage_register_event;
		event->unregister_event = memsw_cgroup_usage_unregister_event;
	} else {
		ret = -EINVAL;
		goto out_put_cfile;
	}

	/*
	 * Verify @cfile should belong to @css.  Also, remaining events are
	 * automatically removed on cgroup destruction but the removal is
	 * asynchronous, so take an extra ref on @css.
	 */
	cfile_css = css_tryget_online_from_dir(cfile.file->f_path.dentry->d_parent,
					       &memory_cgrp_subsys);
	ret = -EINVAL;
	if (IS_ERR(cfile_css))
		goto out_put_cfile;
	if (cfile_css != css) {
		css_put(cfile_css);
		goto out_put_cfile;
	}

	ret = event->register_event(memcg, event->eventfd, buf);
	if (ret)
		goto out_put_css;

	efile.file->f_op->poll(efile.file, &event->pt);

	spin_lock(&memcg->event_list_lock);
	list_add(&event->list, &memcg->event_list);
	spin_unlock(&memcg->event_list_lock);

	fdput(cfile);
	fdput(efile);

	return nbytes;

out_put_css:
	css_put(css);
out_put_cfile:
	fdput(cfile);
out_put_eventfd:
	eventfd_ctx_put(event->eventfd);
out_put_efile:
	fdput(efile);
out_kfree:
	kfree(event);

	return ret;
}
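/*
 * Editor's note (illustrative sketch, descriptors hypothetical): per the
 * format comment above, userspace arms a threshold event by writing
 * "<event_fd> <control_fd> [args]" to cgroup.event_control, e.g.:
 *
 *	efd = eventfd(0, 0);
 *	cfd = open("<grp>/memory.usage_in_bytes", O_RDONLY);
 *	ecfd = open("<grp>/cgroup.event_control", O_WRONLY);
 *	dprintf(ecfd, "%d %d %llu", efd, cfd, 50ULL << 20);
 *
 * after which a read() on efd blocks until usage crosses the 50M mark.
 */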
3909
3910static struct cftype mem_cgroup_legacy_files[] = {
3911 {
3912 .name = "usage_in_bytes",
3913 .private = MEMFILE_PRIVATE(_MEM, RES_USAGE),
3914 .read_u64 = mem_cgroup_read_u64,
3915 },
3916 {
3917 .name = "max_usage_in_bytes",
3918 .private = MEMFILE_PRIVATE(_MEM, RES_MAX_USAGE),
3919 .write = mem_cgroup_reset,
3920 .read_u64 = mem_cgroup_read_u64,
3921 },
3922 {
3923 .name = "limit_in_bytes",
3924 .private = MEMFILE_PRIVATE(_MEM, RES_LIMIT),
3925 .write = mem_cgroup_write,
3926 .read_u64 = mem_cgroup_read_u64,
3927 },
3928 {
3929 .name = "soft_limit_in_bytes",
3930 .private = MEMFILE_PRIVATE(_MEM, RES_SOFT_LIMIT),
3931 .write = mem_cgroup_write,
3932 .read_u64 = mem_cgroup_read_u64,
3933 },
3934 {
3935 .name = "failcnt",
3936 .private = MEMFILE_PRIVATE(_MEM, RES_FAILCNT),
3937 .write = mem_cgroup_reset,
3938 .read_u64 = mem_cgroup_read_u64,
3939 },
3940 {
3941 .name = "stat",
3942 .seq_show = memcg_stat_show,
3943 },
3944 {
3945 .name = "force_empty",
3946 .write = mem_cgroup_force_empty_write,
3947 },
3948 {
3949 .name = "use_hierarchy",
3950 .write_u64 = mem_cgroup_hierarchy_write,
3951 .read_u64 = mem_cgroup_hierarchy_read,
3952 },
3953 {
3954 .name = "cgroup.event_control",
3955 .write = memcg_write_event_control,
3956 .flags = CFTYPE_NO_PREFIX | CFTYPE_WORLD_WRITABLE,
3957 },
3958 {
3959 .name = "swappiness",
3960 .read_u64 = mem_cgroup_swappiness_read,
3961 .write_u64 = mem_cgroup_swappiness_write,
3962 },
3963 {
3964 .name = "move_charge_at_immigrate",
3965 .read_u64 = mem_cgroup_move_charge_read,
3966 .write_u64 = mem_cgroup_move_charge_write,
3967 },
3968 {
3969 .name = "oom_control",
3970 .seq_show = mem_cgroup_oom_control_read,
3971 .write_u64 = mem_cgroup_oom_control_write,
3972 .private = MEMFILE_PRIVATE(_OOM_TYPE, OOM_CONTROL),
3973 },
3974 {
3975 .name = "pressure_level",
3976 },
3977#ifdef CONFIG_NUMA
3978 {
3979 .name = "numa_stat",
3980 .seq_show = memcg_numa_stat_show,
3981 },
3982#endif
3983 {
3984 .name = "kmem.limit_in_bytes",
3985 .private = MEMFILE_PRIVATE(_KMEM, RES_LIMIT),
3986 .write = mem_cgroup_write,
3987 .read_u64 = mem_cgroup_read_u64,
3988 },
3989 {
3990 .name = "kmem.usage_in_bytes",
3991 .private = MEMFILE_PRIVATE(_KMEM, RES_USAGE),
3992 .read_u64 = mem_cgroup_read_u64,
3993 },
3994 {
3995 .name = "kmem.failcnt",
3996 .private = MEMFILE_PRIVATE(_KMEM, RES_FAILCNT),
3997 .write = mem_cgroup_reset,
3998 .read_u64 = mem_cgroup_read_u64,
3999 },
4000 {
4001 .name = "kmem.max_usage_in_bytes",
4002 .private = MEMFILE_PRIVATE(_KMEM, RES_MAX_USAGE),
4003 .write = mem_cgroup_reset,
4004 .read_u64 = mem_cgroup_read_u64,
4005 },
4006#ifdef CONFIG_SLABINFO
4007 {
4008 .name = "kmem.slabinfo",
4009 .seq_start = memcg_slab_start,
4010 .seq_next = memcg_slab_next,
4011 .seq_stop = memcg_slab_stop,
4012 .seq_show = memcg_slab_show,
4013 },
4014#endif
4015 {
4016 .name = "kmem.tcp.limit_in_bytes",
4017 .private = MEMFILE_PRIVATE(_TCP, RES_LIMIT),
4018 .write = mem_cgroup_write,
4019 .read_u64 = mem_cgroup_read_u64,
4020 },
4021 {
4022 .name = "kmem.tcp.usage_in_bytes",
4023 .private = MEMFILE_PRIVATE(_TCP, RES_USAGE),
4024 .read_u64 = mem_cgroup_read_u64,
4025 },
4026 {
4027 .name = "kmem.tcp.failcnt",
4028 .private = MEMFILE_PRIVATE(_TCP, RES_FAILCNT),
4029 .write = mem_cgroup_reset,
4030 .read_u64 = mem_cgroup_read_u64,
4031 },
4032 {
4033 .name = "kmem.tcp.max_usage_in_bytes",
4034 .private = MEMFILE_PRIVATE(_TCP, RES_MAX_USAGE),
4035 .write = mem_cgroup_reset,
4036 .read_u64 = mem_cgroup_read_u64,
4037 },
4038 { },
4039};

/*
 * Private memory cgroup IDR
 *
 * Swap-out records and page cache shadow entries need to store memcg
 * references in constrained space, so they use short IDs from this IDR
 * instead of pointers.  An ID stays pinned for as long as such references
 * may exist (see mem_cgroup_id_get_many()/mem_cgroup_id_put_many() below);
 * only when the last reference is dropped is the ID removed and the css
 * reference it holds released, even if the cgroup itself went offline
 * much earlier.
 */
static DEFINE_IDR(mem_cgroup_idr);

static void mem_cgroup_id_get_many(struct mem_cgroup *memcg, unsigned int n)
{
	VM_BUG_ON(atomic_read(&memcg->id.ref) <= 0);
	atomic_add(n, &memcg->id.ref);
}

static void mem_cgroup_id_put_many(struct mem_cgroup *memcg, unsigned int n)
{
	VM_BUG_ON(atomic_read(&memcg->id.ref) < n);
	if (atomic_sub_and_test(n, &memcg->id.ref)) {
		idr_remove(&mem_cgroup_idr, memcg->id.id);
		memcg->id.id = 0;

		/* Memcg ID pins CSS */
		css_put(&memcg->css);
	}
}

static inline void mem_cgroup_id_get(struct mem_cgroup *memcg)
{
	mem_cgroup_id_get_many(memcg, 1);
}

static inline void mem_cgroup_id_put(struct mem_cgroup *memcg)
{
	mem_cgroup_id_put_many(memcg, 1);
}

/**
 * mem_cgroup_from_id - look up a memcg from a memcg id
 * @id: the memcg id to look up
 *
 * Caller must hold rcu_read_lock().
 */
struct mem_cgroup *mem_cgroup_from_id(unsigned short id)
{
	WARN_ON_ONCE(!rcu_read_lock_held());
	return idr_find(&mem_cgroup_idr, id);
}
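/*
 * Editor's note (illustrative): the typical lookup pattern for an
 * ID-based reference, mirroring what the swapin charge path further
 * below does:
 *
 *	rcu_read_lock();
 *	memcg = mem_cgroup_from_id(id);
 *	if (memcg && !css_tryget_online(&memcg->css))
 *		memcg = NULL;	// raced with offlining
 *	rcu_read_unlock();
 */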

static int alloc_mem_cgroup_per_node_info(struct mem_cgroup *memcg, int node)
{
	struct mem_cgroup_per_node *pn;
	int tmp = node;
	/*
	 * This routine is called against possible nodes.
	 * But it's BUG to call kmalloc() against offline node.
	 *
	 * TODO: this routine can waste much memory for nodes which will
	 *       never be onlined. It's better to use memory hotplug callback
	 *       function.
	 */
	if (!node_state(node, N_NORMAL_MEMORY))
		tmp = -1;
	pn = kzalloc_node(sizeof(*pn), GFP_KERNEL, tmp);
	if (!pn)
		return 1;

	lruvec_init(&pn->lruvec);
	pn->usage_in_excess = 0;
	pn->on_tree = false;
	pn->memcg = memcg;

	memcg->nodeinfo[node] = pn;
	return 0;
}

static void free_mem_cgroup_per_node_info(struct mem_cgroup *memcg, int node)
{
	kfree(memcg->nodeinfo[node]);
}

static void __mem_cgroup_free(struct mem_cgroup *memcg)
{
	int node;

	for_each_node(node)
		free_mem_cgroup_per_node_info(memcg, node);
	free_percpu(memcg->stat);
	kfree(memcg);
}

static void mem_cgroup_free(struct mem_cgroup *memcg)
{
	memcg_wb_domain_exit(memcg);
	__mem_cgroup_free(memcg);
}

static struct mem_cgroup *mem_cgroup_alloc(void)
{
	struct mem_cgroup *memcg;
	size_t size;
	int node;

	size = sizeof(struct mem_cgroup);
	size += nr_node_ids * sizeof(struct mem_cgroup_per_node *);

	memcg = kzalloc(size, GFP_KERNEL);
	if (!memcg)
		return NULL;

	memcg->id.id = idr_alloc(&mem_cgroup_idr, NULL,
				 1, MEM_CGROUP_ID_MAX,
				 GFP_KERNEL);
	if (memcg->id.id < 0)
		goto fail;

	memcg->stat = alloc_percpu(struct mem_cgroup_stat_cpu);
	if (!memcg->stat)
		goto fail;

	for_each_node(node)
		if (alloc_mem_cgroup_per_node_info(memcg, node))
			goto fail;

	if (memcg_wb_domain_init(memcg, GFP_KERNEL))
		goto fail;

	INIT_WORK(&memcg->high_work, high_work_func);
	memcg->last_scanned_node = MAX_NUMNODES;
	INIT_LIST_HEAD(&memcg->oom_notify);
	mutex_init(&memcg->thresholds_lock);
	spin_lock_init(&memcg->move_lock);
	vmpressure_init(&memcg->vmpressure);
	INIT_LIST_HEAD(&memcg->event_list);
	spin_lock_init(&memcg->event_list_lock);
	memcg->socket_pressure = jiffies;
#ifndef CONFIG_SLOB
	memcg->kmemcg_id = -1;
#endif
#ifdef CONFIG_CGROUP_WRITEBACK
	INIT_LIST_HEAD(&memcg->cgwb_list);
#endif
	idr_replace(&mem_cgroup_idr, memcg, memcg->id.id);
	return memcg;
fail:
	if (memcg->id.id > 0)
		idr_remove(&mem_cgroup_idr, memcg->id.id);
	__mem_cgroup_free(memcg);
	return NULL;
}

static struct cgroup_subsys_state * __ref
mem_cgroup_css_alloc(struct cgroup_subsys_state *parent_css)
{
	struct mem_cgroup *parent = mem_cgroup_from_css(parent_css);
	struct mem_cgroup *memcg;
	long error = -ENOMEM;

	memcg = mem_cgroup_alloc();
	if (!memcg)
		return ERR_PTR(error);

	memcg->high = PAGE_COUNTER_MAX;
	memcg->soft_limit = PAGE_COUNTER_MAX;
	if (parent) {
		memcg->swappiness = mem_cgroup_swappiness(parent);
		memcg->oom_kill_disable = parent->oom_kill_disable;
	}
	if (parent && parent->use_hierarchy) {
		memcg->use_hierarchy = true;
		page_counter_init(&memcg->memory, &parent->memory);
		page_counter_init(&memcg->swap, &parent->swap);
		page_counter_init(&memcg->memsw, &parent->memsw);
		page_counter_init(&memcg->kmem, &parent->kmem);
		page_counter_init(&memcg->tcpmem, &parent->tcpmem);
	} else {
		page_counter_init(&memcg->memory, NULL);
		page_counter_init(&memcg->swap, NULL);
		page_counter_init(&memcg->memsw, NULL);
		page_counter_init(&memcg->kmem, NULL);
		page_counter_init(&memcg->tcpmem, NULL);
		/*
		 * Deeper hierarchy with use_hierarchy == false doesn't make
		 * much sense so let cgroup subsystem know about this
		 * unexpected state we are about to start up.
		 */
		if (parent != root_mem_cgroup)
			memory_cgrp_subsys.broken_hierarchy = true;
	}

	/* The following stuff does not apply to the root */
	if (!parent) {
		root_mem_cgroup = memcg;
		return &memcg->css;
	}

	error = memcg_online_kmem(memcg);
	if (error)
		goto fail;

	if (cgroup_subsys_on_dfl(memory_cgrp_subsys) && !cgroup_memory_nosocket)
		static_branch_inc(&memcg_sockets_enabled_key);

	return &memcg->css;
fail:
	mem_cgroup_free(memcg);
	return ERR_PTR(-ENOMEM);
}

static int mem_cgroup_css_online(struct cgroup_subsys_state *css)
{
	struct mem_cgroup *memcg = mem_cgroup_from_css(css);

	/* Online state pins memcg ID, memcg ID pins CSS */
	atomic_set(&memcg->id.ref, 1);
	css_get(css);
	return 0;
}

static void mem_cgroup_css_offline(struct cgroup_subsys_state *css)
{
	struct mem_cgroup *memcg = mem_cgroup_from_css(css);
	struct mem_cgroup_event *event, *tmp;

	/*
	 * Unregister events and notify userspace.
	 * Notify userspace about cgroup removing only after rmdir of cgroup
	 * directory to avoid race between userspace and kernelspace.
	 */
	spin_lock(&memcg->event_list_lock);
	list_for_each_entry_safe(event, tmp, &memcg->event_list, list) {
		list_del_init(&event->list);
		schedule_work(&event->remove);
	}
	spin_unlock(&memcg->event_list_lock);

	memcg_offline_kmem(memcg);
	wb_memcg_offline(memcg);

	mem_cgroup_id_put(memcg);
}

static void mem_cgroup_css_released(struct cgroup_subsys_state *css)
{
	struct mem_cgroup *memcg = mem_cgroup_from_css(css);

	invalidate_reclaim_iterators(memcg);
}

static void mem_cgroup_css_free(struct cgroup_subsys_state *css)
{
	struct mem_cgroup *memcg = mem_cgroup_from_css(css);

	if (cgroup_subsys_on_dfl(memory_cgrp_subsys) && !cgroup_memory_nosocket)
		static_branch_dec(&memcg_sockets_enabled_key);

	if (!cgroup_subsys_on_dfl(memory_cgrp_subsys) && memcg->tcpmem_active)
		static_branch_dec(&memcg_sockets_enabled_key);

	vmpressure_cleanup(&memcg->vmpressure);
	cancel_work_sync(&memcg->high_work);
	mem_cgroup_remove_from_trees(memcg);
	memcg_free_kmem(memcg);
	mem_cgroup_free(memcg);
}

/**
 * mem_cgroup_css_reset - reset the states of a mem_cgroup
 * @css: the target css
 *
 * Reset the states of the mem_cgroup associated with @css.  This is
 * invoked when the userland requests disabling on the default hierarchy
 * but the memcg is pinned through dependency.  The memcg should stop
 * applying policies and should revert to the vanilla state as it may be
 * made visible again.
 *
 * The current implementation only resets the essential configurations.
 * This needs to be done in a future patch.
 */
static void mem_cgroup_css_reset(struct cgroup_subsys_state *css)
{
	struct mem_cgroup *memcg = mem_cgroup_from_css(css);

	page_counter_limit(&memcg->memory, PAGE_COUNTER_MAX);
	page_counter_limit(&memcg->swap, PAGE_COUNTER_MAX);
	page_counter_limit(&memcg->memsw, PAGE_COUNTER_MAX);
	page_counter_limit(&memcg->kmem, PAGE_COUNTER_MAX);
	page_counter_limit(&memcg->tcpmem, PAGE_COUNTER_MAX);
	memcg->low = 0;
	memcg->high = PAGE_COUNTER_MAX;
	memcg->soft_limit = PAGE_COUNTER_MAX;
	memcg_wb_domain_size_changed(memcg);
}

#ifdef CONFIG_MMU
/* Handlers for move charge at task migration. */
static int mem_cgroup_do_precharge(unsigned long count)
{
	int ret;

	/* Try a single bulk charge without reclaim first, kswapd may wake */
	ret = try_charge(mc.to, GFP_KERNEL & ~__GFP_DIRECT_RECLAIM, count);
	if (!ret) {
		mc.precharge += count;
		return ret;
	}

	/* Try charges one by one with reclaim, but do not retry */
	while (count--) {
		ret = try_charge(mc.to, GFP_KERNEL | __GFP_NORETRY, 1);
		if (ret)
			return ret;
		mc.precharge++;
		cond_resched();
	}
	return 0;
}

union mc_target {
	struct page	*page;
	swp_entry_t	ent;
};

enum mc_target_type {
	MC_TARGET_NONE = 0,
	MC_TARGET_PAGE,
	MC_TARGET_SWAP,
};

static struct page *mc_handle_present_pte(struct vm_area_struct *vma,
						unsigned long addr, pte_t ptent)
{
	struct page *page = vm_normal_page(vma, addr, ptent);

	if (!page || !page_mapped(page))
		return NULL;
	if (PageAnon(page)) {
		if (!(mc.flags & MOVE_ANON))
			return NULL;
	} else {
		if (!(mc.flags & MOVE_FILE))
			return NULL;
	}
	if (!get_page_unless_zero(page))
		return NULL;

	return page;
}

#ifdef CONFIG_SWAP
static struct page *mc_handle_swap_pte(struct vm_area_struct *vma,
				       pte_t ptent, swp_entry_t *entry)
{
	struct page *page = NULL;
	swp_entry_t ent = pte_to_swp_entry(ptent);

	if (!(mc.flags & MOVE_ANON) || non_swap_entry(ent))
		return NULL;
	/*
	 * Because lookup_swap_cache() updates some statistics counter,
	 * we call find_get_page() with swapper_space directly.
	 */
	page = find_get_page(swap_address_space(ent), swp_offset(ent));
	if (do_memsw_account())
		entry->val = ent.val;

	return page;
}
#else
static struct page *mc_handle_swap_pte(struct vm_area_struct *vma,
				       pte_t ptent, swp_entry_t *entry)
{
	return NULL;
}
#endif

static struct page *mc_handle_file_pte(struct vm_area_struct *vma,
			unsigned long addr, pte_t ptent, swp_entry_t *entry)
{
	struct page *page = NULL;
	struct address_space *mapping;
	pgoff_t pgoff;

	if (!vma->vm_file) /* anonymous vma */
		return NULL;
	if (!(mc.flags & MOVE_FILE))
		return NULL;

	mapping = vma->vm_file->f_mapping;
	pgoff = linear_page_index(vma, addr);

	/* page is moved even if it's not RSS of this task(page-faulted). */
#ifdef CONFIG_SWAP
	/* shmem/tmpfs may report page out on swap: account for that too. */
	if (shmem_mapping(mapping)) {
		page = find_get_entry(mapping, pgoff);
		if (radix_tree_exceptional_entry(page)) {
			swp_entry_t swp = radix_to_swp_entry(page);
			if (do_memsw_account())
				*entry = swp;
			page = find_get_page(swap_address_space(swp),
					     swp_offset(swp));
		}
	} else
		page = find_get_page(mapping, pgoff);
#else
	page = find_get_page(mapping, pgoff);
#endif
	return page;
}

/**
 * mem_cgroup_move_account - move account of the page
 * @page: the page
 * @compound: charge the page as compound or small page
 * @from: mem_cgroup which the page is moved from.
 * @to:	mem_cgroup which the page is moved to. @from != @to.
 *
 * The caller must make sure the page is not on LRU (isolate_lru_page() is
 * useful when the page is charged).
 *
 * This function doesn't do "charge" to new cgroup and doesn't do "uncharge"
 * from old cgroup.
 */
static int mem_cgroup_move_account(struct page *page,
				   bool compound,
				   struct mem_cgroup *from,
				   struct mem_cgroup *to)
{
	unsigned long flags;
	unsigned int nr_pages = compound ? hpage_nr_pages(page) : 1;
	int ret;
	bool anon;

	VM_BUG_ON(from == to);
	VM_BUG_ON_PAGE(PageLRU(page), page);
	VM_BUG_ON(compound && !PageTransHuge(page));

	/*
	 * Prevent mem_cgroup_migrate() from looking at
	 * page->mem_cgroup of its source page while we change it.
	 */
	ret = -EBUSY;
	if (!trylock_page(page))
		goto out;

	ret = -EINVAL;
	if (page->mem_cgroup != from)
		goto out_unlock;

	anon = PageAnon(page);

	spin_lock_irqsave(&from->move_lock, flags);

	if (!anon && page_mapped(page)) {
		__this_cpu_sub(from->stat->count[NR_FILE_MAPPED], nr_pages);
		__this_cpu_add(to->stat->count[NR_FILE_MAPPED], nr_pages);
	}

	/*
	 * move_lock grabbed above and caller set from->moving_account, so
	 * mod_memcg_page_state will serialize updates to PageDirty.
	 * So mapping should be stable for dirty pages.
	 */
	if (!anon && PageDirty(page)) {
		struct address_space *mapping = page_mapping(page);

		if (mapping_cap_account_dirty(mapping)) {
			__this_cpu_sub(from->stat->count[NR_FILE_DIRTY],
				       nr_pages);
			__this_cpu_add(to->stat->count[NR_FILE_DIRTY],
				       nr_pages);
		}
	}

	if (PageWriteback(page)) {
		__this_cpu_sub(from->stat->count[NR_WRITEBACK], nr_pages);
		__this_cpu_add(to->stat->count[NR_WRITEBACK], nr_pages);
	}

	/*
	 * It is safe to change page->mem_cgroup here because the page
	 * is referenced, charged, and isolated - we can't race with
	 * uncharging, charging, migration, or LRU putback.
	 */

	/* caller should have done css_get */
	page->mem_cgroup = to;
	spin_unlock_irqrestore(&from->move_lock, flags);

	ret = 0;

	local_irq_disable();
	mem_cgroup_charge_statistics(to, page, compound, nr_pages);
	memcg_check_events(to, page);
	mem_cgroup_charge_statistics(from, page, compound, -nr_pages);
	memcg_check_events(from, page);
	local_irq_enable();
out_unlock:
	unlock_page(page);
out:
	return ret;
}

/**
 * get_mctgt_type - get target type of moving charge
 * @vma: the vma the pte to be checked belongs
 * @addr: the address corresponding to the pte to be checked
 * @ptent: the pte to be checked
 * @target: the pointer the target page or swap ent will be stored(can be NULL)
 *
 * Returns
 *   0(MC_TARGET_NONE): if the pte is not a target for move charge.
 *   1(MC_TARGET_PAGE): if the page corresponding to this pte is a target for
 *     move charge. if @target is not NULL, the page is stored in target->page
 *     with extra refcnt got(Callers should handle it).
 *   2(MC_TARGET_SWAP): if the swap entry corresponding to this pte is a
 *     target for charge migration. if @target is not NULL, the entry is stored
 *     in target->ent.
 *
 * Called with pte lock held.
 */
static enum mc_target_type get_mctgt_type(struct vm_area_struct *vma,
		unsigned long addr, pte_t ptent, union mc_target *target)
{
	struct page *page = NULL;
	enum mc_target_type ret = MC_TARGET_NONE;
	swp_entry_t ent = { .val = 0 };

	if (pte_present(ptent))
		page = mc_handle_present_pte(vma, addr, ptent);
	else if (is_swap_pte(ptent))
		page = mc_handle_swap_pte(vma, ptent, &ent);
	else if (pte_none(ptent))
		page = mc_handle_file_pte(vma, addr, ptent, &ent);

	if (!page && !ent.val)
		return ret;
	if (page) {
		/*
		 * Do only loose check w/o serialization.
		 * mem_cgroup_move_account() checks the page is valid or
		 * not under LRU exclusion.
		 */
		if (page->mem_cgroup == mc.from) {
			ret = MC_TARGET_PAGE;
			if (target)
				target->page = page;
		}
		if (!ret || !target)
			put_page(page);
	}
	/* There is a swap entry and a page doesn't exist or isn't charged */
	if (ent.val && !ret &&
	    mem_cgroup_id(mc.from) == lookup_swap_cgroup_id(ent)) {
		ret = MC_TARGET_SWAP;
		if (target)
			target->ent = ent;
	}
	return ret;
}

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
/*
 * We don't consider swapping or file mapped pages because THP does not
 * support them for now.
 * Caller should make sure that pmd_trans_huge(pmd) is true.
 */
static enum mc_target_type get_mctgt_type_thp(struct vm_area_struct *vma,
		unsigned long addr, pmd_t pmd, union mc_target *target)
{
	struct page *page = NULL;
	enum mc_target_type ret = MC_TARGET_NONE;

	page = pmd_page(pmd);
	VM_BUG_ON_PAGE(!page || !PageHead(page), page);
	if (!(mc.flags & MOVE_ANON))
		return ret;
	if (page->mem_cgroup == mc.from) {
		ret = MC_TARGET_PAGE;
		if (target) {
			get_page(page);
			target->page = page;
		}
	}
	return ret;
}
#else
static inline enum mc_target_type get_mctgt_type_thp(struct vm_area_struct *vma,
		unsigned long addr, pmd_t pmd, union mc_target *target)
{
	return MC_TARGET_NONE;
}
#endif

static int mem_cgroup_count_precharge_pte_range(pmd_t *pmd,
					unsigned long addr, unsigned long end,
					struct mm_walk *walk)
{
	struct vm_area_struct *vma = walk->vma;
	pte_t *pte;
	spinlock_t *ptl;

	ptl = pmd_trans_huge_lock(pmd, vma);
	if (ptl) {
		if (get_mctgt_type_thp(vma, addr, *pmd, NULL) == MC_TARGET_PAGE)
			mc.precharge += HPAGE_PMD_NR;
		spin_unlock(ptl);
		return 0;
	}

	if (pmd_trans_unstable(pmd))
		return 0;
	pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl);
	for (; addr != end; pte++, addr += PAGE_SIZE)
		if (get_mctgt_type(vma, addr, *pte, NULL))
			mc.precharge++;
	pte_unmap_unlock(pte - 1, ptl);
	cond_resched();

	return 0;
}

static unsigned long mem_cgroup_count_precharge(struct mm_struct *mm)
{
	unsigned long precharge;

	struct mm_walk mem_cgroup_count_precharge_walk = {
		.pmd_entry = mem_cgroup_count_precharge_pte_range,
		.mm = mm,
	};
	down_read(&mm->mmap_sem);
	walk_page_range(0, mm->highest_vm_end,
			&mem_cgroup_count_precharge_walk);
	up_read(&mm->mmap_sem);

	precharge = mc.precharge;
	mc.precharge = 0;

	return precharge;
}

static int mem_cgroup_precharge_mc(struct mm_struct *mm)
{
	unsigned long precharge = mem_cgroup_count_precharge(mm);

	VM_BUG_ON(mc.moving_task);
	mc.moving_task = current;
	return mem_cgroup_do_precharge(precharge);
}

/* cancels all extra charges on mc.from and mc.to, and wakes up all waiters. */
static void __mem_cgroup_clear_mc(void)
{
	struct mem_cgroup *from = mc.from;
	struct mem_cgroup *to = mc.to;

	/* we must uncharge all the leftover precharges from mc.to */
	if (mc.precharge) {
		cancel_charge(mc.to, mc.precharge);
		mc.precharge = 0;
	}
	/*
	 * we didn't uncharge from mc.to at mem_cgroup_move_account(), so
	 * we must uncharge here.
	 */
	if (mc.moved_charge) {
		cancel_charge(mc.from, mc.moved_charge);
		mc.moved_charge = 0;
	}
	/* we must fixup refcnts and charges */
	if (mc.moved_swap) {
		/* uncharge swap account from the old cgroup */
		if (!mem_cgroup_is_root(mc.from))
			page_counter_uncharge(&mc.from->memsw, mc.moved_swap);

		mem_cgroup_id_put_many(mc.from, mc.moved_swap);

		/*
		 * we charged both to->memory and to->memsw, so we
		 * should uncharge to->memory.
		 */
		if (!mem_cgroup_is_root(mc.to))
			page_counter_uncharge(&mc.to->memory, mc.moved_swap);

		mem_cgroup_id_get_many(mc.to, mc.moved_swap);
		css_put_many(&mc.to->css, mc.moved_swap);

		mc.moved_swap = 0;
	}
	memcg_oom_recover(from);
	memcg_oom_recover(to);
	wake_up_all(&mc.waitq);
}

static void mem_cgroup_clear_mc(void)
{
	struct mm_struct *mm = mc.mm;

	/*
	 * we must clear moving_task before waking up waiters at the end of
	 * task migration.
	 */
	mc.moving_task = NULL;
	__mem_cgroup_clear_mc();
	spin_lock(&mc.lock);
	mc.from = NULL;
	mc.to = NULL;
	mc.mm = NULL;
	spin_unlock(&mc.lock);

	mmput(mm);
}

static int mem_cgroup_can_attach(struct cgroup_taskset *tset)
{
	struct cgroup_subsys_state *css;
	struct mem_cgroup *memcg = NULL; /* unneeded init to make gcc happy */
	struct mem_cgroup *from;
	struct task_struct *leader, *p;
	struct mm_struct *mm;
	unsigned long move_flags;
	int ret = 0;

	/* charge immigration isn't supported on the default hierarchy */
	if (cgroup_subsys_on_dfl(memory_cgrp_subsys))
		return 0;

	/*
	 * Multi-process migrations only happen on the default hierarchy
	 * where charge immigration is not used.  Perform charge
	 * immigration if @tset contains a leader and whine if there are
	 * others.
	 */
	p = NULL;
	cgroup_taskset_for_each_leader(leader, css, tset) {
		WARN_ON_ONCE(p);
		p = leader;
		memcg = mem_cgroup_from_css(css);
	}
	if (!p)
		return 0;

	/*
	 * Read the flags once; changes made afterwards only affect task
	 * migrations starting after this point, see the comment in
	 * mem_cgroup_move_charge_write().
	 */
	move_flags = READ_ONCE(memcg->move_charge_at_immigrate);
	if (!move_flags)
		return 0;

	from = mem_cgroup_from_task(p);

	VM_BUG_ON(from == memcg);

	mm = get_task_mm(p);
	if (!mm)
		return 0;
	/* We move charges only when we move an owner of the mm */
	if (mm->owner == p) {
		VM_BUG_ON(mc.from);
		VM_BUG_ON(mc.to);
		VM_BUG_ON(mc.precharge);
		VM_BUG_ON(mc.moved_charge);
		VM_BUG_ON(mc.moved_swap);

		spin_lock(&mc.lock);
		mc.mm = mm;
		mc.from = from;
		mc.to = memcg;
		mc.flags = move_flags;
		spin_unlock(&mc.lock);
		/* We set mc.moving_task later */

		ret = mem_cgroup_precharge_mc(mm);
		if (ret)
			mem_cgroup_clear_mc();
	} else {
		mmput(mm);
	}
	return ret;
}

static void mem_cgroup_cancel_attach(struct cgroup_taskset *tset)
{
	if (mc.to)
		mem_cgroup_clear_mc();
}

static int mem_cgroup_move_charge_pte_range(pmd_t *pmd,
				unsigned long addr, unsigned long end,
				struct mm_walk *walk)
{
	int ret = 0;
	struct vm_area_struct *vma = walk->vma;
	pte_t *pte;
	spinlock_t *ptl;
	enum mc_target_type target_type;
	union mc_target target;
	struct page *page;

	ptl = pmd_trans_huge_lock(pmd, vma);
	if (ptl) {
		if (mc.precharge < HPAGE_PMD_NR) {
			spin_unlock(ptl);
			return 0;
		}
		target_type = get_mctgt_type_thp(vma, addr, *pmd, &target);
		if (target_type == MC_TARGET_PAGE) {
			page = target.page;
			if (!isolate_lru_page(page)) {
				if (!mem_cgroup_move_account(page, true,
							     mc.from, mc.to)) {
					mc.precharge -= HPAGE_PMD_NR;
					mc.moved_charge += HPAGE_PMD_NR;
				}
				putback_lru_page(page);
			}
			put_page(page);
		}
		spin_unlock(ptl);
		return 0;
	}

	if (pmd_trans_unstable(pmd))
		return 0;
retry:
	pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl);
	for (; addr != end; addr += PAGE_SIZE) {
		pte_t ptent = *(pte++);
		swp_entry_t ent;

		if (!mc.precharge)
			break;

		switch (get_mctgt_type(vma, addr, ptent, &target)) {
		case MC_TARGET_PAGE:
			page = target.page;
			/*
			 * We can have a part of the split pmd here. Moving it
			 * can be done but it would be too convoluted so simply
			 * ignore such a partial THP and keep it in original
			 * memcg. There should be somebody mapping the head.
			 */
			if (PageTransCompound(page))
				goto put;
			if (isolate_lru_page(page))
				goto put;
			if (!mem_cgroup_move_account(page, false,
						     mc.from, mc.to)) {
				mc.precharge--;
				/* we uncharge from mc.from later. */
				mc.moved_charge++;
			}
			putback_lru_page(page);
put:			/* get_mctgt_type() gets the page */
			put_page(page);
			break;
		case MC_TARGET_SWAP:
			ent = target.ent;
			if (!mem_cgroup_move_swap_account(ent, mc.from, mc.to)) {
				mc.precharge--;
				/* we fixup refcnts and charges later. */
				mc.moved_swap++;
			}
			break;
		default:
			break;
		}
	}
	pte_unmap_unlock(pte - 1, ptl);
	cond_resched();

	if (addr != end) {
		/*
		 * We have consumed all precharges we got in can_attach().
		 * We try charge one by one, but don't do any additional
		 * charges to mc.to if we have failed in charge once in attach()
		 * phase.
		 */
		ret = mem_cgroup_do_precharge(1);
		if (!ret)
			goto retry;
	}

	return ret;
}

static void mem_cgroup_move_charge(void)
{
	struct mm_walk mem_cgroup_move_charge_walk = {
		.pmd_entry = mem_cgroup_move_charge_pte_range,
		.mm = mc.mm,
	};

	lru_add_drain_all();
	/*
	 * Signal lock_page_memcg() to take the memcg's move_lock
	 * while we're moving its pages to another memcg. Then wait
	 * for already started RCU-only updates to finish.
	 */
	atomic_inc(&mc.from->moving_account);
	synchronize_rcu();
retry:
	if (unlikely(!down_read_trylock(&mc.mm->mmap_sem))) {
		/*
		 * Someone who is holding the mmap_sem might be waiting in
		 * waitq. So we cancel all extra charges, wake up all waiters,
		 * and retry. Because we cancel precharges, we might not be able
		 * to move enough charges, but moving charge is a best-effort
		 * feature anyway, so it wouldn't be a big problem.
		 */
		__mem_cgroup_clear_mc();
		cond_resched();
		goto retry;
	}
	/*
	 * When we have consumed all precharges and failed in doing
	 * additional charge, the page walk just aborts.
	 */
	walk_page_range(0, mc.mm->highest_vm_end, &mem_cgroup_move_charge_walk);

	up_read(&mc.mm->mmap_sem);
	atomic_dec(&mc.from->moving_account);
}

static void mem_cgroup_move_task(void)
{
	if (mc.to) {
		mem_cgroup_move_charge();
		mem_cgroup_clear_mc();
	}
}
#else	/* !CONFIG_MMU */
static int mem_cgroup_can_attach(struct cgroup_taskset *tset)
{
	return 0;
}
static void mem_cgroup_cancel_attach(struct cgroup_taskset *tset)
{
}
static void mem_cgroup_move_task(void)
{
}
#endif

/*
 * Cgroup retains root cgroups across [un]mount cycles making it necessary
 * to verify whether we're attached to the default hierarchy on each mount
 * attempt.
 */
static void mem_cgroup_bind(struct cgroup_subsys_state *root_css)
{
	/*
	 * use_hierarchy is forced on the default hierarchy.  cgroup core
	 * guarantees that @root doesn't have any children, so turning it
	 * on for the root memcg is enough.
	 */
	if (cgroup_subsys_on_dfl(memory_cgrp_subsys))
		root_mem_cgroup->use_hierarchy = true;
	else
		root_mem_cgroup->use_hierarchy = false;
}

static u64 memory_current_read(struct cgroup_subsys_state *css,
			       struct cftype *cft)
{
	struct mem_cgroup *memcg = mem_cgroup_from_css(css);

	return (u64)page_counter_read(&memcg->memory) * PAGE_SIZE;
}

static int memory_low_show(struct seq_file *m, void *v)
{
	struct mem_cgroup *memcg = mem_cgroup_from_css(seq_css(m));
	unsigned long low = READ_ONCE(memcg->low);

	if (low == PAGE_COUNTER_MAX)
		seq_puts(m, "max\n");
	else
		seq_printf(m, "%llu\n", (u64)low * PAGE_SIZE);

	return 0;
}

static ssize_t memory_low_write(struct kernfs_open_file *of,
				char *buf, size_t nbytes, loff_t off)
{
	struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of));
	unsigned long low;
	int err;

	buf = strstrip(buf);
	err = page_counter_memparse(buf, "max", &low);
	if (err)
		return err;

	memcg->low = low;

	return nbytes;
}

static int memory_high_show(struct seq_file *m, void *v)
{
	struct mem_cgroup *memcg = mem_cgroup_from_css(seq_css(m));
	unsigned long high = READ_ONCE(memcg->high);

	if (high == PAGE_COUNTER_MAX)
		seq_puts(m, "max\n");
	else
		seq_printf(m, "%llu\n", (u64)high * PAGE_SIZE);

	return 0;
}

static ssize_t memory_high_write(struct kernfs_open_file *of,
				 char *buf, size_t nbytes, loff_t off)
{
	struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of));
	unsigned long nr_pages;
	unsigned long high;
	int err;

	buf = strstrip(buf);
	err = page_counter_memparse(buf, "max", &high);
	if (err)
		return err;

	memcg->high = high;

	nr_pages = page_counter_read(&memcg->memory);
	if (nr_pages > high)
		try_to_free_mem_cgroup_pages(memcg, nr_pages - high,
					     GFP_KERNEL, true);

	memcg_wb_domain_size_changed(memcg);
	return nbytes;
}
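/*
 * Editor's note (illustrative, path hypothetical): memory.high is a soft
 * ceiling; the write above immediately reclaims down to the new value but
 * never fails allocations, unlike memory.max below. E.g.:
 *
 *	echo 500M > /sys/fs/cgroup/mygroup/memory.high
 */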

static int memory_max_show(struct seq_file *m, void *v)
{
	struct mem_cgroup *memcg = mem_cgroup_from_css(seq_css(m));
	unsigned long max = READ_ONCE(memcg->memory.limit);

	if (max == PAGE_COUNTER_MAX)
		seq_puts(m, "max\n");
	else
		seq_printf(m, "%llu\n", (u64)max * PAGE_SIZE);

	return 0;
}

static ssize_t memory_max_write(struct kernfs_open_file *of,
				char *buf, size_t nbytes, loff_t off)
{
	struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of));
	unsigned int nr_reclaims = MEM_CGROUP_RECLAIM_RETRIES;
	bool drained = false;
	unsigned long max;
	int err;

	buf = strstrip(buf);
	err = page_counter_memparse(buf, "max", &max);
	if (err)
		return err;

	xchg(&memcg->memory.limit, max);

	for (;;) {
		unsigned long nr_pages = page_counter_read(&memcg->memory);

		if (nr_pages <= max)
			break;

		if (signal_pending(current)) {
			err = -EINTR;
			break;
		}

		if (!drained) {
			drain_all_stock(memcg);
			drained = true;
			continue;
		}

		if (nr_reclaims) {
			if (!try_to_free_mem_cgroup_pages(memcg, nr_pages - max,
							  GFP_KERNEL, true))
				nr_reclaims--;
			continue;
		}

		mem_cgroup_event(memcg, MEMCG_OOM);
		if (!mem_cgroup_out_of_memory(memcg, GFP_KERNEL, 0))
			break;
	}

	memcg_wb_domain_size_changed(memcg);
	return nbytes;
}
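/*
 * Editor's note (illustrative): the write above first publishes the new
 * limit, then loops: drain the per-cpu charge stock once, retry direct
 * reclaim up to MEM_CGROUP_RECLAIM_RETRIES times, and finally invoke the
 * OOM killer until usage fits. Writing the literal string "max" restores
 * PAGE_COUNTER_MAX, i.e. no limit.
 */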

static int memory_events_show(struct seq_file *m, void *v)
{
	struct mem_cgroup *memcg = mem_cgroup_from_css(seq_css(m));

	seq_printf(m, "low %lu\n", memcg_sum_events(memcg, MEMCG_LOW));
	seq_printf(m, "high %lu\n", memcg_sum_events(memcg, MEMCG_HIGH));
	seq_printf(m, "max %lu\n", memcg_sum_events(memcg, MEMCG_MAX));
	seq_printf(m, "oom %lu\n", memcg_sum_events(memcg, MEMCG_OOM));

	return 0;
}

static int memory_stat_show(struct seq_file *m, void *v)
{
	struct mem_cgroup *memcg = mem_cgroup_from_css(seq_css(m));
	unsigned long stat[MEMCG_NR_STAT];
	unsigned long events[MEMCG_NR_EVENTS];
	int i;

	/*
	 * Provide statistics on the state of the memory subsystem as
	 * well as cumulative event counters that show past behavior.
	 *
	 * This list is ordered following a combination of these gradients:
	 * 1) generic big picture -> specifics and details
	 * 2) reflecting userspace activity -> reflecting kernel heuristics
	 *
	 * Current memory state:
	 */

	tree_stat(memcg, stat);
	tree_events(memcg, events);

	seq_printf(m, "anon %llu\n",
		   (u64)stat[MEMCG_RSS] * PAGE_SIZE);
	seq_printf(m, "file %llu\n",
		   (u64)stat[MEMCG_CACHE] * PAGE_SIZE);
	seq_printf(m, "kernel_stack %llu\n",
		   (u64)stat[MEMCG_KERNEL_STACK_KB] * 1024);
	seq_printf(m, "slab %llu\n",
		   (u64)(stat[MEMCG_SLAB_RECLAIMABLE] +
			 stat[MEMCG_SLAB_UNRECLAIMABLE]) * PAGE_SIZE);
	seq_printf(m, "sock %llu\n",
		   (u64)stat[MEMCG_SOCK] * PAGE_SIZE);

	seq_printf(m, "shmem %llu\n",
		   (u64)stat[NR_SHMEM] * PAGE_SIZE);
	seq_printf(m, "file_mapped %llu\n",
		   (u64)stat[NR_FILE_MAPPED] * PAGE_SIZE);
	seq_printf(m, "file_dirty %llu\n",
		   (u64)stat[NR_FILE_DIRTY] * PAGE_SIZE);
	seq_printf(m, "file_writeback %llu\n",
		   (u64)stat[NR_WRITEBACK] * PAGE_SIZE);

	for (i = 0; i < NR_LRU_LISTS; i++) {
		struct mem_cgroup *mi;
		unsigned long val = 0;

		for_each_mem_cgroup_tree(mi, memcg)
			val += mem_cgroup_nr_lru_pages(mi, BIT(i));
		seq_printf(m, "%s %llu\n",
			   mem_cgroup_lru_names[i], (u64)val * PAGE_SIZE);
	}

	seq_printf(m, "slab_reclaimable %llu\n",
		   (u64)stat[MEMCG_SLAB_RECLAIMABLE] * PAGE_SIZE);
	seq_printf(m, "slab_unreclaimable %llu\n",
		   (u64)stat[MEMCG_SLAB_UNRECLAIMABLE] * PAGE_SIZE);

	/* Accumulated memory events */

	seq_printf(m, "pgfault %lu\n", events[PGFAULT]);
	seq_printf(m, "pgmajfault %lu\n", events[PGMAJFAULT]);

	seq_printf(m, "workingset_refault %lu\n",
		   stat[WORKINGSET_REFAULT]);
	seq_printf(m, "workingset_activate %lu\n",
		   stat[WORKINGSET_ACTIVATE]);
	seq_printf(m, "workingset_nodereclaim %lu\n",
		   stat[WORKINGSET_NODERECLAIM]);

	return 0;
}
5243
static struct cftype memory_files[] = {
	{
		.name = "current",
		.flags = CFTYPE_NOT_ON_ROOT,
		.read_u64 = memory_current_read,
	},
	{
		.name = "low",
		.flags = CFTYPE_NOT_ON_ROOT,
		.seq_show = memory_low_show,
		.write = memory_low_write,
	},
	{
		.name = "high",
		.flags = CFTYPE_NOT_ON_ROOT,
		.seq_show = memory_high_show,
		.write = memory_high_write,
	},
	{
		.name = "max",
		.flags = CFTYPE_NOT_ON_ROOT,
		.seq_show = memory_max_show,
		.write = memory_max_write,
	},
	{
		.name = "events",
		.flags = CFTYPE_NOT_ON_ROOT,
		.file_offset = offsetof(struct mem_cgroup, events_file),
		.seq_show = memory_events_show,
	},
	{
		.name = "stat",
		.flags = CFTYPE_NOT_ON_ROOT,
		.seq_show = memory_stat_show,
	},
	{ }
};
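
/*
 * Illustration (not part of the kernel source): with the unified (v2)
 * hierarchy mounted at /sys/fs/cgroup, the cftypes above surface as
 * per-cgroup control files, e.g.:
 *
 *	echo 512M > /sys/fs/cgroup/mygroup/memory.max
 *	echo 256M > /sys/fs/cgroup/mygroup/memory.high
 *	cat /sys/fs/cgroup/mygroup/memory.current
 *	cat /sys/fs/cgroup/mygroup/memory.stat
 *
 * "mygroup" is a hypothetical cgroup; CFTYPE_NOT_ON_ROOT keeps these
 * files out of the root cgroup's directory.
 */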

struct cgroup_subsys memory_cgrp_subsys = {
	.css_alloc = mem_cgroup_css_alloc,
	.css_online = mem_cgroup_css_online,
	.css_offline = mem_cgroup_css_offline,
	.css_released = mem_cgroup_css_released,
	.css_free = mem_cgroup_css_free,
	.css_reset = mem_cgroup_css_reset,
	.can_attach = mem_cgroup_can_attach,
	.cancel_attach = mem_cgroup_cancel_attach,
	.post_attach = mem_cgroup_move_task,
	.bind = mem_cgroup_bind,
	.dfl_cftypes = memory_files,
	.legacy_cftypes = mem_cgroup_legacy_files,
	.early_init = 0,
};
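
/*
 * Note: one subsystem definition serves both hierarchies: memory_files
 * is used on the default (v2) hierarchy, while mem_cgroup_legacy_files
 * provides the classic v1 interface.
 */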

/**
 * mem_cgroup_low - check if memory consumption is below the normal range
 * @root: the top ancestor of the sub-tree being checked
 * @memcg: the memory cgroup to check
 *
 * Returns %true if memory consumption of @memcg, and that of all
 * ancestors up to (but not including) @root, is below the normal range.
 */
bool mem_cgroup_low(struct mem_cgroup *root, struct mem_cgroup *memcg)
{
	if (mem_cgroup_disabled())
		return false;

	/*
	 * The toplevel group doesn't have a configurable range, so
	 * it's never low when looked at directly, and it is not
	 * considered an ancestor when assessing the hierarchy.
	 */

	if (memcg == root_mem_cgroup)
		return false;

	if (page_counter_read(&memcg->memory) >= memcg->low)
		return false;

	while (memcg != root) {
		memcg = parent_mem_cgroup(memcg);

		if (memcg == root_mem_cgroup)
			break;

		if (page_counter_read(&memcg->memory) >= memcg->low)
			return false;
	}
	return true;
}

/**
 * mem_cgroup_try_charge - try charging a page
 * @page: page to charge
 * @mm: mm context of the victim
 * @gfp_mask: reclaim mode
 * @memcgp: charged memcg return
 * @compound: charge the page as compound or small page
 *
 * Try to charge @page to the memcg that @mm belongs to, reclaiming
 * pages according to @gfp_mask if necessary.
 *
 * Returns 0 on success, with *@memcgp pointing to the charged memcg.
 * Otherwise, an error code is returned.
 *
 * After page->mapping has been set up, the caller must finalize the
 * charge with mem_cgroup_commit_charge(), or abort the transaction
 * with mem_cgroup_cancel_charge() in case page instantiation fails.
 */
int mem_cgroup_try_charge(struct page *page, struct mm_struct *mm,
			  gfp_t gfp_mask, struct mem_cgroup **memcgp,
			  bool compound)
{
	struct mem_cgroup *memcg = NULL;
	unsigned int nr_pages = compound ? hpage_nr_pages(page) : 1;
	int ret = 0;

	if (mem_cgroup_disabled())
		goto out;

	if (PageSwapCache(page)) {
		/*
		 * Every swap fault against a single page tries to charge the
		 * page, bail as early as possible.  shmem_unuse() encounters
		 * already charged pages, too.  The page lock serializes swap
		 * cache removal, which in turn serializes uncharging.
		 */
		VM_BUG_ON_PAGE(!PageLocked(page), page);
		if (page->mem_cgroup)
			goto out;

		if (do_swap_account) {
			swp_entry_t ent = { .val = page_private(page), };
			unsigned short id = lookup_swap_cgroup_id(ent);

			rcu_read_lock();
			memcg = mem_cgroup_from_id(id);
			if (memcg && !css_tryget_online(&memcg->css))
				memcg = NULL;
			rcu_read_unlock();
		}
	}

	if (!memcg)
		memcg = get_mem_cgroup_from_mm(mm);

	ret = try_charge(memcg, gfp_mask, nr_pages);

	css_put(&memcg->css);
out:
	*memcgp = memcg;
	return ret;
}

/**
 * mem_cgroup_commit_charge - commit a page charge
 * @page: page to charge
 * @memcg: memcg to charge the page to
 * @lrucare: page might be on LRU already
 * @compound: charge the page as compound or small page
 *
 * Finalize a charge transaction started by mem_cgroup_try_charge(),
 * after page->mapping has been set up.  This must happen atomically
 * as part of the page instantiation, i.e. under the page table lock
 * for anonymous pages, under the page lock for page and swap cache.
 *
 * In addition, the page must not be on the LRU during the commit, to
 * prevent racing with task migration.  If it might be, use @lrucare.
 *
 * Use mem_cgroup_cancel_charge() to cancel the transaction instead.
 */
void mem_cgroup_commit_charge(struct page *page, struct mem_cgroup *memcg,
			      bool lrucare, bool compound)
{
	unsigned int nr_pages = compound ? hpage_nr_pages(page) : 1;

	VM_BUG_ON_PAGE(!page->mapping, page);
	VM_BUG_ON_PAGE(PageLRU(page) && !lrucare, page);

	if (mem_cgroup_disabled())
		return;

	/*
	 * Swap faults will attempt to charge the same page multiple
	 * times.  But reuse_swap_page() might have removed the page
	 * from swapcache already, so we can't check PageSwapCache().
	 */
	if (!memcg)
		return;

	commit_charge(page, memcg, lrucare);

	local_irq_disable();
	mem_cgroup_charge_statistics(memcg, page, compound, nr_pages);
	memcg_check_events(memcg, page);
	local_irq_enable();

	if (do_memsw_account() && PageSwapCache(page)) {
		swp_entry_t entry = { .val = page_private(page) };
		/*
		 * The swap entry might not get freed for a long time,
		 * let's not wait for it.  The page already received a
		 * memory+swap charge, drop the swap entry duplicate.
		 */
		mem_cgroup_uncharge_swap(entry);
	}
}

/**
 * mem_cgroup_cancel_charge - cancel a page charge
 * @page: page to charge
 * @memcg: memcg to charge the page to
 * @compound: charge the page as compound or small page
 *
 * Cancel a charge transaction started by mem_cgroup_try_charge().
 */
void mem_cgroup_cancel_charge(struct page *page, struct mem_cgroup *memcg,
			      bool compound)
{
	unsigned int nr_pages = compound ? hpage_nr_pages(page) : 1;

	if (mem_cgroup_disabled())
		return;

	/*
	 * Swap faults will attempt to charge the same page multiple
	 * times.  But reuse_swap_page() might have removed the page
	 * from swapcache already, so we can't check PageSwapCache().
	 */
	if (!memcg)
		return;

	cancel_charge(memcg, nr_pages);
}
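
/*
 * Sketch of the charge protocol as a caller would use it (hypothetical
 * caller, for illustration only; see the anonymous fault path for real
 * usage):
 *
 *	if (mem_cgroup_try_charge(page, mm, GFP_KERNEL, &memcg, false))
 *		return VM_FAULT_OOM;
 *	// ...instantiate the page, set up page->mapping...
 *	if (instantiation_failed) {
 *		mem_cgroup_cancel_charge(page, memcg, false);
 *		return error;
 *	}
 *	mem_cgroup_commit_charge(page, memcg, false, false);
 */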

static void uncharge_batch(struct mem_cgroup *memcg, unsigned long pgpgout,
			   unsigned long nr_anon, unsigned long nr_file,
			   unsigned long nr_kmem, unsigned long nr_huge,
			   unsigned long nr_shmem, struct page *dummy_page)
{
	unsigned long nr_pages = nr_anon + nr_file + nr_kmem;
	unsigned long flags;

	if (!mem_cgroup_is_root(memcg)) {
		page_counter_uncharge(&memcg->memory, nr_pages);
		if (do_memsw_account())
			page_counter_uncharge(&memcg->memsw, nr_pages);
		if (!cgroup_subsys_on_dfl(memory_cgrp_subsys) && nr_kmem)
			page_counter_uncharge(&memcg->kmem, nr_kmem);
		memcg_oom_recover(memcg);
	}

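	/*
	 * The non-atomic __this_cpu operations below rely on interrupts
	 * being disabled to keep the per-CPU counters consistent.
	 */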
	local_irq_save(flags);
	__this_cpu_sub(memcg->stat->count[MEMCG_RSS], nr_anon);
	__this_cpu_sub(memcg->stat->count[MEMCG_CACHE], nr_file);
	__this_cpu_sub(memcg->stat->count[MEMCG_RSS_HUGE], nr_huge);
	__this_cpu_sub(memcg->stat->count[NR_SHMEM], nr_shmem);
	__this_cpu_add(memcg->stat->events[PGPGOUT], pgpgout);
	__this_cpu_add(memcg->stat->nr_page_events, nr_pages);
	memcg_check_events(memcg, dummy_page);
	local_irq_restore(flags);

	if (!mem_cgroup_is_root(memcg))
		css_put_many(&memcg->css, nr_pages);
}

static void uncharge_list(struct list_head *page_list)
{
	struct mem_cgroup *memcg = NULL;
	unsigned long nr_shmem = 0;
	unsigned long nr_anon = 0;
	unsigned long nr_file = 0;
	unsigned long nr_huge = 0;
	unsigned long nr_kmem = 0;
	unsigned long pgpgout = 0;
	struct list_head *next;
	struct page *page;

	/*
	 * Note that the list can be a single page->lru; hence the
	 * do-while loop instead of a simple list_for_each_entry().
	 */
	next = page_list->next;
	do {
		page = list_entry(next, struct page, lru);
		next = page->lru.next;

		VM_BUG_ON_PAGE(PageLRU(page), page);
		VM_BUG_ON_PAGE(!PageHWPoison(page) && page_count(page), page);

		if (!page->mem_cgroup)
			continue;

		/*
		 * Nobody should be changing or seriously looking at
		 * page->mem_cgroup at this point, we have fully
		 * exclusive access to the page.  Batch consecutive
		 * pages of the same cgroup into a single uncharge.
		 */
		if (memcg != page->mem_cgroup) {
			if (memcg) {
				uncharge_batch(memcg, pgpgout, nr_anon, nr_file,
					       nr_kmem, nr_huge, nr_shmem, page);
				pgpgout = nr_anon = nr_file = nr_kmem = 0;
				nr_huge = nr_shmem = 0;
			}
			memcg = page->mem_cgroup;
		}

		if (!PageKmemcg(page)) {
			unsigned int nr_pages = 1;

			if (PageTransHuge(page)) {
				nr_pages <<= compound_order(page);
				nr_huge += nr_pages;
			}
			if (PageAnon(page))
				nr_anon += nr_pages;
			else {
				nr_file += nr_pages;
				if (PageSwapBacked(page))
					nr_shmem += nr_pages;
			}
			pgpgout++;
		} else {
			nr_kmem += 1 << compound_order(page);
			__ClearPageKmemcg(page);
		}

		page->mem_cgroup = NULL;
	} while (next != page_list);

	if (memcg)
		uncharge_batch(memcg, pgpgout, nr_anon, nr_file,
			       nr_kmem, nr_huge, nr_shmem, page);
}

/**
 * mem_cgroup_uncharge - uncharge a page
 * @page: page to uncharge
 *
 * Uncharge a page previously charged with mem_cgroup_try_charge() and
 * mem_cgroup_commit_charge().
 */
void mem_cgroup_uncharge(struct page *page)
{
	if (mem_cgroup_disabled())
		return;

	/* Don't touch page->lru of any random page, pre-check: */
	if (!page->mem_cgroup)
		return;

	INIT_LIST_HEAD(&page->lru);
	uncharge_list(&page->lru);
}

/**
 * mem_cgroup_uncharge_list - uncharge a list of pages
 * @page_list: list of pages to uncharge
 *
 * Uncharge a list of pages previously charged with
 * mem_cgroup_try_charge() and mem_cgroup_commit_charge().
 */
void mem_cgroup_uncharge_list(struct list_head *page_list)
{
	if (mem_cgroup_disabled())
		return;

	if (!list_empty(page_list))
		uncharge_list(page_list);
}

/**
 * mem_cgroup_migrate - charge a page's replacement
 * @oldpage: currently circulating page
 * @newpage: replacement page
 *
 * Charge @newpage as a replacement page for @oldpage.  @oldpage will
 * be uncharged upon free.
 *
 * Both pages must be locked, @newpage->mapping must be set up.
 */
void mem_cgroup_migrate(struct page *oldpage, struct page *newpage)
{
	struct mem_cgroup *memcg;
	unsigned int nr_pages;
	bool compound;
	unsigned long flags;

	VM_BUG_ON_PAGE(!PageLocked(oldpage), oldpage);
	VM_BUG_ON_PAGE(!PageLocked(newpage), newpage);
	VM_BUG_ON_PAGE(PageAnon(oldpage) != PageAnon(newpage), newpage);
	VM_BUG_ON_PAGE(PageTransHuge(oldpage) != PageTransHuge(newpage),
		       newpage);

	if (mem_cgroup_disabled())
		return;

	/* Page cache replacement: new page already charged? */
	if (newpage->mem_cgroup)
		return;

	/* Swapcache readahead pages can get replaced before being charged */
	memcg = oldpage->mem_cgroup;
	if (!memcg)
		return;

	/* Force-charge the new page. The old one will be freed soon */
	compound = PageTransHuge(newpage);
	nr_pages = compound ? hpage_nr_pages(newpage) : 1;

	page_counter_charge(&memcg->memory, nr_pages);
	if (do_memsw_account())
		page_counter_charge(&memcg->memsw, nr_pages);
	css_get_many(&memcg->css, nr_pages);

	commit_charge(newpage, memcg, false);

	local_irq_save(flags);
	mem_cgroup_charge_statistics(memcg, newpage, compound, nr_pages);
	memcg_check_events(memcg, newpage);
	local_irq_restore(flags);
}

DEFINE_STATIC_KEY_FALSE(memcg_sockets_enabled_key);
EXPORT_SYMBOL(memcg_sockets_enabled_key);

void mem_cgroup_sk_alloc(struct sock *sk)
{
	struct mem_cgroup *memcg;

	if (!mem_cgroup_sockets_enabled)
		return;

	/*
	 * Socket cloning can throw us here with sk_memcg already
	 * filled. It won't, however, necessarily happen from
	 * process context, so the test for root memcg given
	 * the current task's memcg won't help us in this case.
	 *
	 * Respecting the original socket's memcg is a better
	 * decision in this case.
	 */
	if (sk->sk_memcg) {
		BUG_ON(mem_cgroup_is_root(sk->sk_memcg));
		css_get(&sk->sk_memcg->css);
		return;
	}

	rcu_read_lock();
	memcg = mem_cgroup_from_task(current);
	if (memcg == root_mem_cgroup)
		goto out;
	if (!cgroup_subsys_on_dfl(memory_cgrp_subsys) && !memcg->tcpmem_active)
		goto out;
	if (css_tryget_online(&memcg->css))
		sk->sk_memcg = memcg;
out:
	rcu_read_unlock();
}

void mem_cgroup_sk_free(struct sock *sk)
{
	if (sk->sk_memcg)
		css_put(&sk->sk_memcg->css);
}

/**
 * mem_cgroup_charge_skmem - charge socket memory
 * @memcg: memcg to charge
 * @nr_pages: number of pages to charge
 *
 * Charges @nr_pages to @memcg. Returns %true if the charge fit within
 * @memcg's configured limit, %false if the charge had to be forced.
 */
bool mem_cgroup_charge_skmem(struct mem_cgroup *memcg, unsigned int nr_pages)
{
	gfp_t gfp_mask = GFP_KERNEL;

	if (!cgroup_subsys_on_dfl(memory_cgrp_subsys)) {
		struct page_counter *fail;

		if (page_counter_try_charge(&memcg->tcpmem, nr_pages, &fail)) {
			memcg->tcpmem_pressure = 0;
			return true;
		}
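		/*
		 * The socket core has already allocated this memory, so
		 * charge it beyond the limit anyway to keep the counter
		 * accurate, and raise the pressure flag so the network
		 * stack backs off.
		 */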
		page_counter_charge(&memcg->tcpmem, nr_pages);
		memcg->tcpmem_pressure = 1;
		return false;
	}

	/* Don't block in the packet receive path */
	if (in_softirq())
		gfp_mask = GFP_NOWAIT;

	this_cpu_add(memcg->stat->count[MEMCG_SOCK], nr_pages);

	if (try_charge(memcg, gfp_mask, nr_pages) == 0)
		return true;

	try_charge(memcg, gfp_mask|__GFP_NOFAIL, nr_pages);
	return false;
}

/**
 * mem_cgroup_uncharge_skmem - uncharge socket memory
 * @memcg: memcg to uncharge
 * @nr_pages: number of pages to uncharge
 */
void mem_cgroup_uncharge_skmem(struct mem_cgroup *memcg, unsigned int nr_pages)
{
	if (!cgroup_subsys_on_dfl(memory_cgrp_subsys)) {
		page_counter_uncharge(&memcg->tcpmem, nr_pages);
		return;
	}

	this_cpu_sub(memcg->stat->count[MEMCG_SOCK], nr_pages);

	page_counter_uncharge(&memcg->memory, nr_pages);
	css_put_many(&memcg->css, nr_pages);
}

static int __init cgroup_memory(char *s)
{
	char *token;

	while ((token = strsep(&s, ",")) != NULL) {
		if (!*token)
			continue;
		if (!strcmp(token, "nosocket"))
			cgroup_memory_nosocket = true;
		if (!strcmp(token, "nokmem"))
			cgroup_memory_nokmem = true;
	}
	return 0;
}
__setup("cgroup.memory=", cgroup_memory);
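
/*
 * Example (kernel command line, not code): socket and kernel memory
 * accounting can be disabled at boot with
 *
 *	cgroup.memory=nosocket,nokmem
 *
 * which sets the two flags parsed above.
 */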

/*
 * subsys_initcall() for memory controller.
 *
 * Some parts like memcg_hotplug_cpu_dead() have to be initialized from this
 * context because of lock dependencies (cgroup_lock -> cpu hotplug) but
 * basically everything that doesn't depend on a specific mem_cgroup structure
 * should be initialized from here.
 */
static int __init mem_cgroup_init(void)
{
	int cpu, node;

#ifndef CONFIG_SLOB
	/*
	 * Kmem cache creation is mostly done with the slab_mutex held,
	 * so use a workqueue with limited concurrency to avoid stalling
	 * all worker threads in case lots of cgroups are created and
	 * destroyed simultaneously.
	 */
	memcg_kmem_cache_wq = alloc_workqueue("memcg_kmem_cache", 0, 1);
	BUG_ON(!memcg_kmem_cache_wq);
#endif

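	/*
	 * When a CPU goes offline, memcg_hotplug_cpu_dead() flushes the
	 * dead CPU's charge stock back to the page counters.
	 */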
	cpuhp_setup_state_nocalls(CPUHP_MM_MEMCQ_DEAD, "mm/memctrl:dead", NULL,
				  memcg_hotplug_cpu_dead);

	for_each_possible_cpu(cpu)
		INIT_WORK(&per_cpu_ptr(&memcg_stock, cpu)->work,
			  drain_local_stock);

	for_each_node(node) {
		struct mem_cgroup_tree_per_node *rtpn;

		rtpn = kzalloc_node(sizeof(*rtpn), GFP_KERNEL,
				    node_online(node) ? node : NUMA_NO_NODE);

		rtpn->rb_root = RB_ROOT;
		spin_lock_init(&rtpn->lock);
		soft_limit_tree.rb_tree_per_node[node] = rtpn;
	}

	return 0;
}
subsys_initcall(mem_cgroup_init);

#ifdef CONFIG_MEMCG_SWAP
static struct mem_cgroup *mem_cgroup_id_get_online(struct mem_cgroup *memcg)
{
	while (!atomic_inc_not_zero(&memcg->id.ref)) {
		/*
		 * The root cgroup cannot be destroyed, so its refcount
		 * must always be >= 1.
		 */
		if (WARN_ON_ONCE(memcg == root_mem_cgroup)) {
			VM_BUG_ON(1);
			break;
		}
		memcg = parent_mem_cgroup(memcg);
		if (!memcg)
			memcg = root_mem_cgroup;
	}
	return memcg;
}

/**
 * mem_cgroup_swapout - transfer a memsw charge to swap
 * @page: page whose memsw charge to transfer
 * @entry: swap entry to move the charge to
 *
 * Transfer the memsw charge of @page to @entry.
 */
void mem_cgroup_swapout(struct page *page, swp_entry_t entry)
{
	struct mem_cgroup *memcg, *swap_memcg;
	unsigned short oldid;

	VM_BUG_ON_PAGE(PageLRU(page), page);
	VM_BUG_ON_PAGE(page_count(page), page);

	if (!do_memsw_account())
		return;

	memcg = page->mem_cgroup;

	/* Readahead page, never charged */
	if (!memcg)
		return;

	/*
	 * In case the memcg owning these pages has been offlined and doesn't
	 * have an ID allocated to it anymore, charge the closest online
	 * ancestor for the swap instead and transfer the memory+swap charge.
	 */
	swap_memcg = mem_cgroup_id_get_online(memcg);
	oldid = swap_cgroup_record(entry, mem_cgroup_id(swap_memcg));
	VM_BUG_ON_PAGE(oldid, page);
	mem_cgroup_swap_statistics(swap_memcg, true);

	page->mem_cgroup = NULL;

	if (!mem_cgroup_is_root(memcg))
		page_counter_uncharge(&memcg->memory, 1);

	if (memcg != swap_memcg) {
		if (!mem_cgroup_is_root(swap_memcg))
			page_counter_charge(&swap_memcg->memsw, 1);
		page_counter_uncharge(&memcg->memsw, 1);
	}

	/*
	 * Interrupts should be disabled here because the caller holds the
	 * mapping->tree_lock lock which is taken with interrupts-off. It is
	 * important here to have the interrupts disabled because it is the
	 * only synchronisation we have for updating the per-CPU variables.
	 */
	VM_BUG_ON(!irqs_disabled());
	mem_cgroup_charge_statistics(memcg, page, false, -1);
	memcg_check_events(memcg, page);

	if (!mem_cgroup_is_root(memcg))
		css_put(&memcg->css);
}

/**
 * mem_cgroup_try_charge_swap - try charging a swap entry
 * @page: page being added to swap
 * @entry: swap entry to charge
 *
 * Try to charge @page's memcg for the swap space at @entry.
 *
 * Returns 0 on success, -ENOMEM on failure.
 */
int mem_cgroup_try_charge_swap(struct page *page, swp_entry_t entry)
{
	struct mem_cgroup *memcg;
	struct page_counter *counter;
	unsigned short oldid;

	if (!cgroup_subsys_on_dfl(memory_cgrp_subsys) || !do_swap_account)
		return 0;

	memcg = page->mem_cgroup;

	/* Readahead page, never charged */
	if (!memcg)
		return 0;

	memcg = mem_cgroup_id_get_online(memcg);

	if (!mem_cgroup_is_root(memcg) &&
	    !page_counter_try_charge(&memcg->swap, 1, &counter)) {
		mem_cgroup_id_put(memcg);
		return -ENOMEM;
	}

	oldid = swap_cgroup_record(entry, mem_cgroup_id(memcg));
	VM_BUG_ON_PAGE(oldid, page);
	mem_cgroup_swap_statistics(memcg, true);

	return 0;
}

/**
 * mem_cgroup_uncharge_swap - uncharge a swap entry
 * @entry: swap entry to uncharge
 *
 * Drop the swap charge associated with @entry.
 */
void mem_cgroup_uncharge_swap(swp_entry_t entry)
{
	struct mem_cgroup *memcg;
	unsigned short id;

	if (!do_swap_account)
		return;

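	/* Clear the entry's record; this returns the previous owner's id */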
	id = swap_cgroup_record(entry, 0);
	rcu_read_lock();
	memcg = mem_cgroup_from_id(id);
	if (memcg) {
		if (!mem_cgroup_is_root(memcg)) {
			if (cgroup_subsys_on_dfl(memory_cgrp_subsys))
				page_counter_uncharge(&memcg->swap, 1);
			else
				page_counter_uncharge(&memcg->memsw, 1);
		}
		mem_cgroup_swap_statistics(memcg, false);
		mem_cgroup_id_put(memcg);
	}
	rcu_read_unlock();
}

long mem_cgroup_get_nr_swap_pages(struct mem_cgroup *memcg)
{
	long nr_swap_pages = get_nr_swap_pages();

	if (!do_swap_account || !cgroup_subsys_on_dfl(memory_cgrp_subsys))
		return nr_swap_pages;
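	/* The tightest swap limit on the path to the root caps the headroom */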
	for (; memcg != root_mem_cgroup; memcg = parent_mem_cgroup(memcg))
		nr_swap_pages = min_t(long, nr_swap_pages,
				      READ_ONCE(memcg->swap.limit) -
				      page_counter_read(&memcg->swap));
	return nr_swap_pages;
}

bool mem_cgroup_swap_full(struct page *page)
{
	struct mem_cgroup *memcg;

	VM_BUG_ON_PAGE(!PageLocked(page), page);

	if (vm_swap_full())
		return true;
	if (!do_swap_account || !cgroup_subsys_on_dfl(memory_cgrp_subsys))
		return false;

	memcg = page->mem_cgroup;
	if (!memcg)
		return false;

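	/*
	 * Mirror the global vm_swap_full() heuristic: consider a cgroup's
	 * swap full once usage reaches half of its limit anywhere in the
	 * hierarchy.
	 */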
	for (; memcg != root_mem_cgroup; memcg = parent_mem_cgroup(memcg))
		if (page_counter_read(&memcg->swap) * 2 >= memcg->swap.limit)
			return true;

	return false;
}

/* Remember the "swapaccount=" boot option */
#ifdef CONFIG_MEMCG_SWAP_ENABLED
static int really_do_swap_account __initdata = 1;
#else
static int really_do_swap_account __initdata;
#endif

static int __init enable_swap_account(char *s)
{
	if (!strcmp(s, "1"))
		really_do_swap_account = 1;
	else if (!strcmp(s, "0"))
		really_do_swap_account = 0;
	return 1;
}
__setup("swapaccount=", enable_swap_account);

static u64 swap_current_read(struct cgroup_subsys_state *css,
			     struct cftype *cft)
{
	struct mem_cgroup *memcg = mem_cgroup_from_css(css);

	return (u64)page_counter_read(&memcg->swap) * PAGE_SIZE;
}

static int swap_max_show(struct seq_file *m, void *v)
{
	struct mem_cgroup *memcg = mem_cgroup_from_css(seq_css(m));
	unsigned long max = READ_ONCE(memcg->swap.limit);

	if (max == PAGE_COUNTER_MAX)
		seq_puts(m, "max\n");
	else
		seq_printf(m, "%llu\n", (u64)max * PAGE_SIZE);

	return 0;
}

static ssize_t swap_max_write(struct kernfs_open_file *of,
			      char *buf, size_t nbytes, loff_t off)
{
	struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of));
	unsigned long max;
	int err;

	buf = strstrip(buf);
	err = page_counter_memparse(buf, "max", &max);
	if (err)
		return err;

	mutex_lock(&memcg_limit_mutex);
	err = page_counter_limit(&memcg->swap, max);
	mutex_unlock(&memcg_limit_mutex);
	if (err)
		return err;

	return nbytes;
}

static struct cftype swap_files[] = {
	{
		.name = "swap.current",
		.flags = CFTYPE_NOT_ON_ROOT,
		.read_u64 = swap_current_read,
	},
	{
		.name = "swap.max",
		.flags = CFTYPE_NOT_ON_ROOT,
		.seq_show = swap_max_show,
		.write = swap_max_write,
	},
	{ }
};
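
/*
 * Illustration (not part of the kernel source): on the v2 hierarchy
 * these appear as memory.swap.current and memory.swap.max, e.g.:
 *
 *	echo 1G > /sys/fs/cgroup/mygroup/memory.swap.max
 *	cat /sys/fs/cgroup/mygroup/memory.swap.current
 *
 * where "mygroup" is a hypothetical cgroup directory.
 */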

static struct cftype memsw_cgroup_files[] = {
	{
		.name = "memsw.usage_in_bytes",
		.private = MEMFILE_PRIVATE(_MEMSWAP, RES_USAGE),
		.read_u64 = mem_cgroup_read_u64,
	},
	{
		.name = "memsw.max_usage_in_bytes",
		.private = MEMFILE_PRIVATE(_MEMSWAP, RES_MAX_USAGE),
		.write = mem_cgroup_reset,
		.read_u64 = mem_cgroup_read_u64,
	},
	{
		.name = "memsw.limit_in_bytes",
		.private = MEMFILE_PRIVATE(_MEMSWAP, RES_LIMIT),
		.write = mem_cgroup_write,
		.read_u64 = mem_cgroup_read_u64,
	},
	{
		.name = "memsw.failcnt",
		.private = MEMFILE_PRIVATE(_MEMSWAP, RES_FAILCNT),
		.write = mem_cgroup_reset,
		.read_u64 = mem_cgroup_read_u64,
	},
	{ },
};

static int __init mem_cgroup_swap_init(void)
{
	if (!mem_cgroup_disabled() && really_do_swap_account) {
		do_swap_account = 1;
		WARN_ON(cgroup_add_dfl_cftypes(&memory_cgrp_subsys,
					       swap_files));
		WARN_ON(cgroup_add_legacy_cftypes(&memory_cgrp_subsys,
						  memsw_cgroup_files));
	}
	return 0;
}
subsys_initcall(mem_cgroup_swap_init);

#endif /* CONFIG_MEMCG_SWAP */