/* memcontrol.c - Memory Controller
 *
 * Copyright IBM Corporation, 2007
 * Author Balbir Singh <balbir@linux.vnet.ibm.com>
 *
 * Copyright 2007 OpenVZ SWsoft Inc
 * Author: Pavel Emelianov <xemul@openvz.org>
 *
 * Memory thresholds
 * Copyright (C) 2009 Nokia Corporation
 * Author: Kirill A. Shutemov
 *
 * Kernel Memory Controller
 * Copyright (C) 2012 Parallels Inc. and Google Inc.
 * Authors: Glauber Costa and Suleiman Souhlal
 *
 * Native page reclaim
 * Charge lifetime sanitation
 * Lockless page tracking & accounting
 * Unified hierarchy configuration model
 * Copyright (C) 2015 Red Hat, Inc., Johannes Weiner
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 */
#include <linux/page_counter.h>
#include <linux/memcontrol.h>
#include <linux/cgroup.h>
#include <linux/mm.h>
#include <linux/hugetlb.h>
#include <linux/pagemap.h>
#include <linux/smp.h>
#include <linux/page-flags.h>
#include <linux/backing-dev.h>
#include <linux/bit_spinlock.h>
#include <linux/rcupdate.h>
#include <linux/limits.h>
#include <linux/export.h>
#include <linux/mutex.h>
#include <linux/rbtree.h>
#include <linux/slab.h>
#include <linux/swap.h>
#include <linux/swapops.h>
#include <linux/spinlock.h>
#include <linux/eventfd.h>
#include <linux/poll.h>
#include <linux/sort.h>
#include <linux/fs.h>
#include <linux/seq_file.h>
#include <linux/vmpressure.h>
#include <linux/mm_inline.h>
#include <linux/swap_cgroup.h>
#include <linux/cpu.h>
#include <linux/oom.h>
#include <linux/lockdep.h>
#include <linux/file.h>
#include <linux/tracehook.h>
#include "internal.h"
#include <net/sock.h>
#include <net/ip.h>
#include "slab.h"

#include <asm/uaccess.h>

#include <trace/events/vmscan.h>

struct cgroup_subsys memory_cgrp_subsys __read_mostly;
EXPORT_SYMBOL(memory_cgrp_subsys);

struct mem_cgroup *root_mem_cgroup __read_mostly;

#define MEM_CGROUP_RECLAIM_RETRIES	5

/* Socket memory accounting disabled? */
static bool cgroup_memory_nosocket;

/* Kernel memory accounting disabled? */
static bool cgroup_memory_nokmem;

/* Whether the swap controller is active */
#ifdef CONFIG_MEMCG_SWAP
int do_swap_account __read_mostly;
#else
#define do_swap_account		0
#endif

/* Whether legacy memory+swap accounting is active */
static bool do_memsw_account(void)
{
	return !cgroup_subsys_on_dfl(memory_cgrp_subsys) && do_swap_account;
}

static const char * const mem_cgroup_stat_names[] = {
	"cache",
	"rss",
	"rss_huge",
	"mapped_file",
	"dirty",
	"writeback",
	"swap",
};

static const char * const mem_cgroup_events_names[] = {
	"pgpgin",
	"pgpgout",
	"pgfault",
	"pgmajfault",
};

static const char * const mem_cgroup_lru_names[] = {
	"inactive_anon",
	"active_anon",
	"inactive_file",
	"active_file",
	"unevictable",
};

#define THRESHOLDS_EVENTS_TARGET 128
#define SOFTLIMIT_EVENTS_TARGET 1024
#define NUMAINFO_EVENTS_TARGET	1024

/*
 * Cgroups above their limits are maintained in a RB-Tree, independent of
 * their hierarchy representation
 */
struct mem_cgroup_tree_per_zone {
	struct rb_root rb_root;
	spinlock_t lock;
};

struct mem_cgroup_tree_per_node {
	struct mem_cgroup_tree_per_zone rb_tree_per_zone[MAX_NR_ZONES];
};

struct mem_cgroup_tree {
	struct mem_cgroup_tree_per_node *rb_tree_per_node[MAX_NUMNODES];
};

static struct mem_cgroup_tree soft_limit_tree __read_mostly;

/* for OOM */
struct mem_cgroup_eventfd_list {
	struct list_head list;
	struct eventfd_ctx *eventfd;
};

/*
 * cgroup_event represents events which userspace want to receive.
 */
struct mem_cgroup_event {
	/*
	 * memcg which the event belongs to.
	 */
	struct mem_cgroup *memcg;
	/*
	 * eventfd to signal userspace about the event.
	 */
	struct eventfd_ctx *eventfd;
	/*
	 * Each of these stored in a list by the cgroup.
	 */
	struct list_head list;
	/*
	 * register_event() callback will be used to add new userspace
	 * waiter for changes related to this event.  Use eventfd_signal()
	 * on eventfd to send notification to userspace.
	 */
	int (*register_event)(struct mem_cgroup *memcg,
			      struct eventfd_ctx *eventfd, const char *args);
	/*
	 * unregister_event() callback will be called when userspace closes
	 * the eventfd or on cgroup removing.  This callback must be set,
	 * if you want to provide notification functionality.
	 */
	void (*unregister_event)(struct mem_cgroup *memcg,
				 struct eventfd_ctx *eventfd);
	/*
	 * All fields below needed to unregister event when
	 * userspace closes eventfd.
	 */
	poll_table pt;
	wait_queue_head_t *wqh;
	wait_queue_t wait;
	struct work_struct remove;
};

static void mem_cgroup_threshold(struct mem_cgroup *memcg);
static void mem_cgroup_oom_notify(struct mem_cgroup *memcg);

/* Stuffs for move charges at task migration. */
/*
 * Types of charges to be moved.
 */
#define MOVE_ANON	0x1U
#define MOVE_FILE	0x2U
#define MOVE_MASK	(MOVE_ANON | MOVE_FILE)

/* "mc" and its members are protected by cgroup_mutex */
static struct move_charge_struct {
	spinlock_t	  lock; /* for from, to */
	struct mm_struct  *mm;
	struct mem_cgroup *from;
	struct mem_cgroup *to;
	unsigned long flags;
	unsigned long precharge;
	unsigned long moved_charge;
	unsigned long moved_swap;
	struct task_struct *moving_task;	/* a task moving charges */
	wait_queue_head_t waitq;		/* a waitq for other context */
} mc = {
	.lock = __SPIN_LOCK_UNLOCKED(mc.lock),
	.waitq = __WAIT_QUEUE_HEAD_INITIALIZER(mc.waitq),
};

/*
 * Maximum loops in reclaim, used for soft limit reclaim to prevent
 * infinite loops, if they ever occur.
 */
#define	MEM_CGROUP_MAX_RECLAIM_LOOPS		100
#define	MEM_CGROUP_MAX_SOFT_LIMIT_RECLAIM_LOOPS	2

enum charge_type {
	MEM_CGROUP_CHARGE_TYPE_CACHE = 0,
	MEM_CGROUP_CHARGE_TYPE_ANON,
	MEM_CGROUP_CHARGE_TYPE_SWAPOUT,	/* for accounting swapcache */
	MEM_CGROUP_CHARGE_TYPE_DROP,	/* a page was unused swap cache */
	NR_CHARGE_TYPE,
};

/* for encoding cft->private value on file */
enum res_type {
	_MEM,
	_MEMSWAP,
	_OOM_TYPE,
	_KMEM,
	_TCP,
};

#define MEMFILE_PRIVATE(x, val)	((x) << 16 | (val))
#define MEMFILE_TYPE(val)	((val) >> 16 & 0xffff)
#define MEMFILE_ATTR(val)	((val) & 0xffff)
/* Used for OOM notifier */
#define OOM_CONTROL		(0)

/* Some nice accessors for the vmpressure. */
struct vmpressure *memcg_to_vmpressure(struct mem_cgroup *memcg)
{
	if (!memcg)
		memcg = root_mem_cgroup;
	return &memcg->vmpressure;
}

struct cgroup_subsys_state *vmpressure_to_css(struct vmpressure *vmpr)
{
	return &container_of(vmpr, struct mem_cgroup, vmpressure)->css;
}

static inline bool mem_cgroup_is_root(struct mem_cgroup *memcg)
{
	return (memcg == root_mem_cgroup);
}

#ifndef CONFIG_SLOB
/*
 * This will be the memcg's index in each cache's ->memcg_params.memcg_caches.
 * The main reason for not using cgroup id for this:
 *  this works better in sparse environments, where we have a lot of memcgs,
 *  but only a few kmem-limited. Or also, if we have, for instance, 200
 *  memcgs, and none but the 200th is kmem-limited, we'd have to have a
 *  200 entry array for that.
 *
 * The current size of the caches array is stored in memcg_nr_cache_ids. It
 * will double each time we have to increase it.
 */
static DEFINE_IDA(memcg_cache_ida);
int memcg_nr_cache_ids;

/* Protects memcg_nr_cache_ids */
static DECLARE_RWSEM(memcg_cache_ids_sem);

void memcg_get_cache_ids(void)
{
	down_read(&memcg_cache_ids_sem);
}

void memcg_put_cache_ids(void)
{
	up_read(&memcg_cache_ids_sem);
}

/*
 * MIN_SIZE is different than 1, because we would like to avoid going through
 * the alloc/free process all the time. In a small machine, 4 kmem-limited
 * cgroups is a reasonable guess. In the future, it could be a parameter or
 * tunable, but that is strictly not necessary.
 *
 * MAX_SIZE should be as large as the number of cgrp_ids. Ideally, we could
 * get this constant directly from cgroup, but it is understandable that this
 * is better kept as an internal representation in cgroup.c. In any case, the
 * cgrp_id space is not getting cheaper, so this won't need to grow beyond
 * MEM_CGROUP_ID_MAX.
 */
#define MEMCG_CACHES_MIN_SIZE 4
#define MEMCG_CACHES_MAX_SIZE MEM_CGROUP_ID_MAX

/*
 * A lot of the calls to the cache allocation functions are expected to be
 * inlined by the compiler. Since the calls to memcg_kmem_get_cache are
 * conditional to this static branch, we'll have to allow modules that does
 * kmem_cache_alloc and the such to see this symbol as well
 */
DEFINE_STATIC_KEY_FALSE(memcg_kmem_enabled_key);
EXPORT_SYMBOL(memcg_kmem_enabled_key);

#endif /* !CONFIG_SLOB */

static struct mem_cgroup_per_zone *
mem_cgroup_zone_zoneinfo(struct mem_cgroup *memcg, struct zone *zone)
{
	int nid = zone_to_nid(zone);
	int zid = zone_idx(zone);

	return &memcg->nodeinfo[nid]->zoneinfo[zid];
}

/**
 * mem_cgroup_css_from_page - css of the memcg associated with a page
 * @page: page of interest
 *
 * If memcg is bound to the default hierarchy, css of the memcg associated
 * with @page is returned.  The returned css remains associated with @page
 * until it is released.
 *
 * If memcg is bound to a traditional hierarchy, the css of root_mem_cgroup
 * is returned.
 */
struct cgroup_subsys_state *mem_cgroup_css_from_page(struct page *page)
{
	struct mem_cgroup *memcg;

	memcg = page->mem_cgroup;

	if (!memcg || !cgroup_subsys_on_dfl(memory_cgrp_subsys))
		memcg = root_mem_cgroup;

	return &memcg->css;
}

/**
 * page_cgroup_ino - return inode number of the memcg a page is charged to
 * @page: the page
 *
 * Look up the closest online ancestor of the memory cgroup @page is charged
 * to and return its inode number or 0 if @page is not charged to any cgroup.
 * It is safe to call this function without holding a reference to @page.
 *
 * Note, this function is inherently racy, because there is nothing to prevent
 * the page from being uncharged or migrated to a different cgroup right after
 * the lookup, so the returned value is only a point-in-time snapshot.
 */
ino_t page_cgroup_ino(struct page *page)
{
	struct mem_cgroup *memcg;
	unsigned long ino = 0;

	rcu_read_lock();
	memcg = READ_ONCE(page->mem_cgroup);
	while (memcg && !(memcg->css.flags & CSS_ONLINE))
		memcg = parent_mem_cgroup(memcg);
	if (memcg)
		ino = cgroup_ino(memcg->css.cgroup);
	rcu_read_unlock();
	return ino;
}

static struct mem_cgroup_per_zone *
mem_cgroup_page_zoneinfo(struct mem_cgroup *memcg, struct page *page)
{
	int nid = page_to_nid(page);
	int zid = page_zonenum(page);

	return &memcg->nodeinfo[nid]->zoneinfo[zid];
}

static struct mem_cgroup_tree_per_zone *
soft_limit_tree_node_zone(int nid, int zid)
{
	return &soft_limit_tree.rb_tree_per_node[nid]->rb_tree_per_zone[zid];
}

static struct mem_cgroup_tree_per_zone *
soft_limit_tree_from_page(struct page *page)
{
	int nid = page_to_nid(page);
	int zid = page_zonenum(page);

	return &soft_limit_tree.rb_tree_per_node[nid]->rb_tree_per_zone[zid];
}

static void __mem_cgroup_insert_exceeded(struct mem_cgroup_per_zone *mz,
					 struct mem_cgroup_tree_per_zone *mctz,
					 unsigned long new_usage_in_excess)
{
	struct rb_node **p = &mctz->rb_root.rb_node;
	struct rb_node *parent = NULL;
	struct mem_cgroup_per_zone *mz_node;

	if (mz->on_tree)
		return;

	mz->usage_in_excess = new_usage_in_excess;
	if (!mz->usage_in_excess)
		return;
	while (*p) {
		parent = *p;
		mz_node = rb_entry(parent, struct mem_cgroup_per_zone,
					tree_node);
		if (mz->usage_in_excess < mz_node->usage_in_excess)
			p = &(*p)->rb_left;
		/*
		 * We can't avoid mem cgroups that are over their soft
		 * limit by the same amount
		 */
		else if (mz->usage_in_excess >= mz_node->usage_in_excess)
			p = &(*p)->rb_right;
	}
	rb_link_node(&mz->tree_node, parent, p);
	rb_insert_color(&mz->tree_node, &mctz->rb_root);
	mz->on_tree = true;
}

static void __mem_cgroup_remove_exceeded(struct mem_cgroup_per_zone *mz,
					 struct mem_cgroup_tree_per_zone *mctz)
{
	if (!mz->on_tree)
		return;
	rb_erase(&mz->tree_node, &mctz->rb_root);
	mz->on_tree = false;
}

static void mem_cgroup_remove_exceeded(struct mem_cgroup_per_zone *mz,
				       struct mem_cgroup_tree_per_zone *mctz)
{
	unsigned long flags;

	spin_lock_irqsave(&mctz->lock, flags);
	__mem_cgroup_remove_exceeded(mz, mctz);
	spin_unlock_irqrestore(&mctz->lock, flags);
}

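/* Number of pages by which usage exceeds the soft limit, 0 if not exceeded */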
static unsigned long soft_limit_excess(struct mem_cgroup *memcg)
{
	unsigned long nr_pages = page_counter_read(&memcg->memory);
	unsigned long soft_limit = READ_ONCE(memcg->soft_limit);
	unsigned long excess = 0;

	if (nr_pages > soft_limit)
		excess = nr_pages - soft_limit;

	return excess;
}

static void mem_cgroup_update_tree(struct mem_cgroup *memcg, struct page *page)
{
	unsigned long excess;
	struct mem_cgroup_per_zone *mz;
	struct mem_cgroup_tree_per_zone *mctz;

	mctz = soft_limit_tree_from_page(page);
	/*
	 * Necessary to update all ancestors when hierarchy is used,
	 * because their event counter is not touched.
	 */
	for (; memcg; memcg = parent_mem_cgroup(memcg)) {
		mz = mem_cgroup_page_zoneinfo(memcg, page);
		excess = soft_limit_excess(memcg);
		/*
		 * We have to update the tree if mz is on RB-tree or
		 * mem is over its softlimit.
		 */
		if (excess || mz->on_tree) {
			unsigned long flags;

			spin_lock_irqsave(&mctz->lock, flags);
			/* if on-tree, remove it */
			if (mz->on_tree)
				__mem_cgroup_remove_exceeded(mz, mctz);
			/*
			 * Insert again. mz->usage_in_excess will be updated.
			 * If excess is 0, no tree ops.
			 */
			__mem_cgroup_insert_exceeded(mz, mctz, excess);
			spin_unlock_irqrestore(&mctz->lock, flags);
		}
	}
}

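/* Remove this memcg's per-zone nodes from every soft limit tree */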
static void mem_cgroup_remove_from_trees(struct mem_cgroup *memcg)
{
	struct mem_cgroup_tree_per_zone *mctz;
	struct mem_cgroup_per_zone *mz;
	int nid, zid;

	for_each_node(nid) {
		for (zid = 0; zid < MAX_NR_ZONES; zid++) {
			mz = &memcg->nodeinfo[nid]->zoneinfo[zid];
			mctz = soft_limit_tree_node_zone(nid, zid);
			mem_cgroup_remove_exceeded(mz, mctz);
		}
	}
}

static struct mem_cgroup_per_zone *
__mem_cgroup_largest_soft_limit_node(struct mem_cgroup_tree_per_zone *mctz)
{
	struct rb_node *rightmost = NULL;
	struct mem_cgroup_per_zone *mz;

retry:
	mz = NULL;
	rightmost = rb_last(&mctz->rb_root);
	if (!rightmost)
		goto done;		/* Nothing to reclaim from */

	mz = rb_entry(rightmost, struct mem_cgroup_per_zone, tree_node);
	/*
	 * Remove the node now but someone else can add it back,
	 * we will add it back at the end of reclaim to its correct
	 * position in the tree.
	 */
	__mem_cgroup_remove_exceeded(mz, mctz);
	if (!soft_limit_excess(mz->memcg) ||
	    !css_tryget_online(&mz->memcg->css))
		goto retry;
done:
	return mz;
}

static struct mem_cgroup_per_zone *
mem_cgroup_largest_soft_limit_node(struct mem_cgroup_tree_per_zone *mctz)
{
	struct mem_cgroup_per_zone *mz;

	spin_lock_irq(&mctz->lock);
	mz = __mem_cgroup_largest_soft_limit_node(mctz);
	spin_unlock_irq(&mctz->lock);
	return mz;
}

/*
 * Implementation Note: reading percpu statistics for memcg.
 *
 * Both of vmstat[] and percpu_counter has threshold and do periodic
 * synchronization to implement "quick" read. There are trade-off between
 * reading cost and precision of value. Then, we may have a chance to
 * implement a periodic synchronization of counter in memcg's counter.
 *
 * But this _read() function is used for user interface now. The user accounts
 * memory usage by memory cgroup and he _always_ requires exact value because
 * he accounts memory. Even if we provide quick-and-fuzzy read, we always
 * have to visit all online cpus and make sum. So, for now, unnecessary
 * synchronization is not implemented. (just implemented for cpu hotplug)
 *
 * If there are kernel internal actions which can make use of some not-exact
 * value, and reading all cpu value can be performance bottleneck in some
 * common workload, threshold and synchronization as vmstat[] should be
 * implemented.
 */
static unsigned long
mem_cgroup_read_stat(struct mem_cgroup *memcg, enum mem_cgroup_stat_index idx)
{
	long val = 0;
	int cpu;

	/* Per-cpu values can be negative, use a signed accumulator */
	for_each_possible_cpu(cpu)
		val += per_cpu(memcg->stat->count[idx], cpu);
	/*
	 * Summing races with updates, so val may be negative.  Avoid exposing
	 * transient negative values.
	 */
	if (val < 0)
		val = 0;
	return val;
}

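/* Sum an event counter over all possible CPUs */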
static unsigned long mem_cgroup_read_events(struct mem_cgroup *memcg,
					    enum mem_cgroup_events_index idx)
{
	unsigned long val = 0;
	int cpu;

	for_each_possible_cpu(cpu)
		val += per_cpu(memcg->stat->events[idx], cpu);
	return val;
}

static void mem_cgroup_charge_statistics(struct mem_cgroup *memcg,
					 struct page *page,
					 bool compound, int nr_pages)
{
	/*
	 * Here, RSS means 'mapped anon' and anon's SwapCache. Shmem/tmpfs is
	 * counted as CACHE even if it's on ANON LRU.
	 */
	if (PageAnon(page))
		__this_cpu_add(memcg->stat->count[MEM_CGROUP_STAT_RSS],
				nr_pages);
	else
		__this_cpu_add(memcg->stat->count[MEM_CGROUP_STAT_CACHE],
				nr_pages);

	if (compound) {
		VM_BUG_ON_PAGE(!PageTransHuge(page), page);
		__this_cpu_add(memcg->stat->count[MEM_CGROUP_STAT_RSS_HUGE],
				nr_pages);
	}

	/* pagein of a big page is an event. So, ignore page size */
	if (nr_pages > 0)
		__this_cpu_inc(memcg->stat->events[MEM_CGROUP_EVENTS_PGPGIN]);
	else {
		__this_cpu_inc(memcg->stat->events[MEM_CGROUP_EVENTS_PGPGOUT]);
		nr_pages = -nr_pages; /* for event */
	}

	__this_cpu_add(memcg->stat->nr_page_events, nr_pages);
}

unsigned long mem_cgroup_node_nr_lru_pages(struct mem_cgroup *memcg,
					   int nid, unsigned int lru_mask)
{
	unsigned long nr = 0;
	int zid;

	VM_BUG_ON((unsigned)nid >= nr_node_ids);

	for (zid = 0; zid < MAX_NR_ZONES; zid++) {
		struct mem_cgroup_per_zone *mz;
		enum lru_list lru;

		for_each_lru(lru) {
			if (!(BIT(lru) & lru_mask))
				continue;
			mz = &memcg->nodeinfo[nid]->zoneinfo[zid];
			nr += mz->lru_size[lru];
		}
	}
	return nr;
}

static unsigned long mem_cgroup_nr_lru_pages(struct mem_cgroup *memcg,
			unsigned int lru_mask)
{
	unsigned long nr = 0;
	int nid;

	for_each_node_state(nid, N_MEMORY)
		nr += mem_cgroup_node_nr_lru_pages(memcg, nid, lru_mask);
	return nr;
}

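/*
 * Returns true when the given event target is due and advances the
 * per-cpu target for the next period.
 */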
static bool mem_cgroup_event_ratelimit(struct mem_cgroup *memcg,
				       enum mem_cgroup_events_target target)
{
	unsigned long val, next;

	val = __this_cpu_read(memcg->stat->nr_page_events);
	next = __this_cpu_read(memcg->stat->targets[target]);
	/* from time_after() in jiffies.h */
	if ((long)next - (long)val < 0) {
		switch (target) {
		case MEM_CGROUP_TARGET_THRESH:
			next = val + THRESHOLDS_EVENTS_TARGET;
			break;
		case MEM_CGROUP_TARGET_SOFTLIMIT:
			next = val + SOFTLIMIT_EVENTS_TARGET;
			break;
		case MEM_CGROUP_TARGET_NUMAINFO:
			next = val + NUMAINFO_EVENTS_TARGET;
			break;
		default:
			break;
		}
		__this_cpu_write(memcg->stat->targets[target], next);
		return true;
	}
	return false;
}

/*
 * Check events in order.
 *
 */
static void memcg_check_events(struct mem_cgroup *memcg, struct page *page)
{
	/* threshold event is triggered in finer grain than soft limit */
	if (unlikely(mem_cgroup_event_ratelimit(memcg,
						MEM_CGROUP_TARGET_THRESH))) {
		bool do_softlimit;
		bool do_numainfo __maybe_unused;

		do_softlimit = mem_cgroup_event_ratelimit(memcg,
						MEM_CGROUP_TARGET_SOFTLIMIT);
#if MAX_NUMNODES > 1
		do_numainfo = mem_cgroup_event_ratelimit(memcg,
						MEM_CGROUP_TARGET_NUMAINFO);
#endif
		mem_cgroup_threshold(memcg);
		if (unlikely(do_softlimit))
			mem_cgroup_update_tree(memcg, page);
#if MAX_NUMNODES > 1
		if (unlikely(do_numainfo))
			atomic_inc(&memcg->numainfo_events);
#endif
	}
}

struct mem_cgroup *mem_cgroup_from_task(struct task_struct *p)
{
	/*
	 * mm_update_next_owner() may clear mm->owner to NULL
	 * if it races with swapoff, page migration, etc.
	 * So this can be called with p == NULL.
	 */
	if (unlikely(!p))
		return NULL;

	return mem_cgroup_from_css(task_css(p, memory_cgrp_id));
}
EXPORT_SYMBOL(mem_cgroup_from_task);

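/*
 * Return the memcg that @mm is accounted to, with an elevated css
 * reference count.  Falls back to root_mem_cgroup when @mm is NULL or
 * the owner task has no memcg.  The caller must css_put() the result.
 */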
static struct mem_cgroup *get_mem_cgroup_from_mm(struct mm_struct *mm)
{
	struct mem_cgroup *memcg = NULL;

	rcu_read_lock();
	do {
		/*
		 * Page cache insertions can happen without an
		 * actual mm context, e.g. during disk probing
		 * on boot, loopback IO, acct() writes etc.
		 */
		if (unlikely(!mm))
			memcg = root_mem_cgroup;
		else {
			memcg = mem_cgroup_from_task(rcu_dereference(mm->owner));
			if (unlikely(!memcg))
				memcg = root_mem_cgroup;
		}
	} while (!css_tryget_online(&memcg->css));
	rcu_read_unlock();
	return memcg;
}

/**
 * mem_cgroup_iter - iterate over memory cgroup hierarchy
 * @root: hierarchy root
 * @prev: previously returned memcg, NULL on first invocation
 * @reclaim: cookie for shared reclaim walks, NULL for full walks
 *
 * Returns references to children of the hierarchy below @root, or
 * @root itself, or %NULL after a full round-trip.
 *
 * Caller must pass the return value in @prev on subsequent
 * invocations for reference counting, or use mem_cgroup_iter_break()
 * to cancel a hierarchy walk before the round-trip is complete.
 *
 * Reclaimers can specify a zone and a priority level in @reclaim to
 * divide up the memcgs in the hierarchy among all concurrent
 * reclaimers operating on the same zone and priority.
 */
struct mem_cgroup *mem_cgroup_iter(struct mem_cgroup *root,
				   struct mem_cgroup *prev,
				   struct mem_cgroup_reclaim_cookie *reclaim)
{
	struct mem_cgroup_reclaim_iter *uninitialized_var(iter);
	struct cgroup_subsys_state *css = NULL;
	struct mem_cgroup *memcg = NULL;
	struct mem_cgroup *pos = NULL;

	if (mem_cgroup_disabled())
		return NULL;

	if (!root)
		root = root_mem_cgroup;

	if (prev && !reclaim)
		pos = prev;

	if (!root->use_hierarchy && root != root_mem_cgroup) {
		if (prev)
			goto out;
		return root;
	}

	rcu_read_lock();

	if (reclaim) {
		struct mem_cgroup_per_zone *mz;

		mz = mem_cgroup_zone_zoneinfo(root, reclaim->zone);
		iter = &mz->iter[reclaim->priority];

		if (prev && reclaim->generation != iter->generation)
			goto out_unlock;

		while (1) {
			pos = READ_ONCE(iter->position);
			if (!pos || css_tryget(&pos->css))
				break;
			/*
			 * css reference reached zero, so iter->position will
			 * be cleared by ->css_released. However, we should not
			 * rely on this happening soon, because ->css_released
			 * is called from a work queue, and by busy-waiting we
			 * might block it. So we clear iter->position right
			 * away.
			 */
			(void)cmpxchg(&iter->position, pos, NULL);
		}
	}

	if (pos)
		css = &pos->css;

	for (;;) {
		css = css_next_descendant_pre(css, &root->css);
		if (!css) {
			/*
			 * Reclaimers share the hierarchy walk, and a
			 * new one might jump in right at the end of
			 * the hierarchy - make sure they see at least
			 * one group and restart from the beginning.
			 */
			if (!prev)
				continue;
			break;
		}

		/*
		 * Verify the css and acquire a reference.  The root
		 * is provided by the caller, so we know it's alive
		 * and kicking, and don't take an extra reference.
		 */
		memcg = mem_cgroup_from_css(css);

		if (css == &root->css)
			break;

		if (css_tryget(css))
			break;

		memcg = NULL;
	}

	if (reclaim) {
		/*
		 * The position could have already been updated by a competing
		 * thread, so check that the value hasn't changed since we read
		 * it to avoid reclaiming from the same cgroup twice.
		 */
		(void)cmpxchg(&iter->position, pos, memcg);

		if (pos)
			css_put(&pos->css);

		if (!memcg)
			iter->generation++;
		else if (!prev)
			reclaim->generation = iter->generation;
	}

out_unlock:
	rcu_read_unlock();
out:
	if (prev && prev != root)
		css_put(&prev->css);

	return memcg;
}

/**
 * mem_cgroup_iter_break - abort a hierarchy walk prematurely
 * @root: hierarchy root
 * @prev: last visited hierarchy member as returned by mem_cgroup_iter()
 */
void mem_cgroup_iter_break(struct mem_cgroup *root,
			   struct mem_cgroup *prev)
{
	if (!root)
		root = root_mem_cgroup;
	if (prev && prev != root)
		css_put(&prev->css);
}

static void invalidate_reclaim_iterators(struct mem_cgroup *dead_memcg)
{
	struct mem_cgroup *memcg = dead_memcg;
	struct mem_cgroup_reclaim_iter *iter;
	struct mem_cgroup_per_zone *mz;
	int nid, zid;
	int i;

	while ((memcg = parent_mem_cgroup(memcg))) {
		for_each_node(nid) {
			for (zid = 0; zid < MAX_NR_ZONES; zid++) {
				mz = &memcg->nodeinfo[nid]->zoneinfo[zid];
				for (i = 0; i <= DEF_PRIORITY; i++) {
					iter = &mz->iter[i];
					cmpxchg(&iter->position,
						dead_memcg, NULL);
				}
			}
		}
	}
}

/*
 * Iteration constructs for visiting all cgroups (under a tree).  If
 * loops are exited prematurely (break), mem_cgroup_iter_break() must
 * be used for reference counting.
 */
#define for_each_mem_cgroup_tree(iter, root)		\
	for (iter = mem_cgroup_iter(root, NULL, NULL);	\
	     iter != NULL;				\
	     iter = mem_cgroup_iter(root, iter, NULL))

#define for_each_mem_cgroup(iter)			\
	for (iter = mem_cgroup_iter(NULL, NULL, NULL);	\
	     iter != NULL;				\
	     iter = mem_cgroup_iter(NULL, iter, NULL))

/**
 * mem_cgroup_zone_lruvec - get the lru list vector for a zone and memcg
 * @zone: zone of the wanted lruvec
 * @memcg: memcg of the wanted lruvec
 *
 * Returns the lru list vector holding pages for the given @zone and
 * @memcg.  This can be the global zone lruvec, if the memory controller
 * is disabled.
 */
struct lruvec *mem_cgroup_zone_lruvec(struct zone *zone,
				      struct mem_cgroup *memcg)
{
	struct mem_cgroup_per_zone *mz;
	struct lruvec *lruvec;

	if (mem_cgroup_disabled()) {
		lruvec = &zone->lruvec;
		goto out;
	}

	mz = mem_cgroup_zone_zoneinfo(memcg, zone);
	lruvec = &mz->lruvec;
out:
	/*
	 * Since a node can be onlined after the mem_cgroup was created,
	 * we have to be prepared to initialize lruvec->zone here;
	 * and if offlined then reonlined, we need to reinitialize it.
	 */
	if (unlikely(lruvec->zone != zone))
		lruvec->zone = zone;
	return lruvec;
}

/**
 * mem_cgroup_page_lruvec - return lruvec for isolating/putting an LRU page
 * @page: the page
 * @zone: zone of the page
 *
 * This function is only safe when following the LRU page isolation
 * and putback protocol: the LRU lock must be held, and the page must
 * either be PageLRU() or the caller must have isolated/allocated it.
 */
struct lruvec *mem_cgroup_page_lruvec(struct page *page, struct zone *zone)
{
	struct mem_cgroup_per_zone *mz;
	struct mem_cgroup *memcg;
	struct lruvec *lruvec;

	if (mem_cgroup_disabled()) {
		lruvec = &zone->lruvec;
		goto out;
	}

	memcg = page->mem_cgroup;
	/*
	 * Swapcache readahead pages are added to the LRU - and
	 * possibly migrated - before they are charged.
	 */
	if (!memcg)
		memcg = root_mem_cgroup;

	mz = mem_cgroup_page_zoneinfo(memcg, page);
	lruvec = &mz->lruvec;
out:
	/*
	 * Since a node can be onlined after the mem_cgroup was created,
	 * we have to be prepared to initialize lruvec->zone here;
	 * and if offlined then reonlined, we need to reinitialize it.
	 */
	if (unlikely(lruvec->zone != zone))
		lruvec->zone = zone;
	return lruvec;
}

/**
 * mem_cgroup_update_lru_size - account for adding or removing an lru page
 * @lruvec: mem_cgroup per zone lru vector
 * @lru: index of lru list the page is sitting on
 * @nr_pages: positive when adding or negative when removing
 *
 * This function must be called when a page is added to or removed from an
 * lru list.
 */
void mem_cgroup_update_lru_size(struct lruvec *lruvec, enum lru_list lru,
				int nr_pages)
{
	struct mem_cgroup_per_zone *mz;
	unsigned long *lru_size;

	if (mem_cgroup_disabled())
		return;

	mz = container_of(lruvec, struct mem_cgroup_per_zone, lruvec);
	lru_size = mz->lru_size + lru;
	*lru_size += nr_pages;
	VM_BUG_ON((long)(*lru_size) < 0);
}

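/* Test whether @task's mm is accounted to @memcg or one of its descendants */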
bool task_in_mem_cgroup(struct task_struct *task, struct mem_cgroup *memcg)
{
	struct mem_cgroup *task_memcg;
	struct task_struct *p;
	bool ret;

	p = find_lock_task_mm(task);
	if (p) {
		task_memcg = get_mem_cgroup_from_mm(p->mm);
		task_unlock(p);
	} else {
		/*
		 * All threads may have already detached their mm's, but the
		 * oom killer still needs to detect if they have already been
		 * oom killed to prevent needlessly killing additional tasks.
		 */
		rcu_read_lock();
		task_memcg = mem_cgroup_from_task(task);
		css_get(&task_memcg->css);
		rcu_read_unlock();
	}
	ret = mem_cgroup_is_descendant(task_memcg, memcg);
	css_put(&task_memcg->css);
	return ret;
}

/**
 * mem_cgroup_margin - calculate chargeable space of a memory cgroup
 * @memcg: the memory cgroup
 *
 * Returns the maximum amount of memory @memcg can be charged with, in
 * pages.
 */
static unsigned long mem_cgroup_margin(struct mem_cgroup *memcg)
{
	unsigned long margin = 0;
	unsigned long count;
	unsigned long limit;

	count = page_counter_read(&memcg->memory);
	limit = READ_ONCE(memcg->memory.limit);
	if (count < limit)
		margin = limit - count;

	if (do_memsw_account()) {
		count = page_counter_read(&memcg->memsw);
		limit = READ_ONCE(memcg->memsw.limit);
		if (count <= limit)
			margin = min(margin, limit - count);
	}

	return margin;
}

/*
 * A routine for checking "mem" is under move_account() or not.
 *
 * Checking a cgroup is mc.from or mc.to or under hierarchy of
 * moving cgroups. This is for waiting at high-memory pressure
 * caused by "move".
 */
static bool mem_cgroup_under_move(struct mem_cgroup *memcg)
{
	struct mem_cgroup *from;
	struct mem_cgroup *to;
	bool ret = false;
	/*
	 * Unlike task_move routines, we access mc.to, mc.from not under
	 * mutual exclusion by cgroup_mutex. Here, we take spinlock instead.
	 */
	spin_lock(&mc.lock);
	from = mc.from;
	to = mc.to;
	if (!from)
		goto unlock;

	ret = mem_cgroup_is_descendant(from, memcg) ||
		mem_cgroup_is_descendant(to, memcg);
unlock:
	spin_unlock(&mc.lock);
	return ret;
}

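/*
 * Sleep until a pending charge move that involves @memcg completes, so
 * that charging does not race with move_account().  Returns true if we
 * waited.
 */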
static bool mem_cgroup_wait_acct_move(struct mem_cgroup *memcg)
{
	if (mc.moving_task && current != mc.moving_task) {
		if (mem_cgroup_under_move(memcg)) {
			DEFINE_WAIT(wait);
			prepare_to_wait(&mc.waitq, &wait, TASK_INTERRUPTIBLE);
			/* moving charge context might have finished. */
			if (mc.moving_task)
				schedule();
			finish_wait(&mc.waitq, &wait);
			return true;
		}
	}
	return false;
}

#define K(x) ((x) << (PAGE_SHIFT-10))
/**
 * mem_cgroup_print_oom_info: Print OOM information relevant to memory
 * controller.
 * @memcg: The memory cgroup that went over limit
 * @p: Task that is going to be killed
 *
 * NOTE: @memcg and @p's mem_cgroup can be different when hierarchy is
 * enabled
 */
void mem_cgroup_print_oom_info(struct mem_cgroup *memcg, struct task_struct *p)
{
	struct mem_cgroup *iter;
	unsigned int i;

	rcu_read_lock();

	if (p) {
		pr_info("Task in ");
		pr_cont_cgroup_path(task_cgroup(p, memory_cgrp_id));
		pr_cont(" killed as a result of limit of ");
	} else {
		pr_info("Memory limit reached of cgroup ");
	}

	pr_cont_cgroup_path(memcg->css.cgroup);
	pr_cont("\n");

	rcu_read_unlock();

	pr_info("memory: usage %llukB, limit %llukB, failcnt %lu\n",
		K((u64)page_counter_read(&memcg->memory)),
		K((u64)memcg->memory.limit), memcg->memory.failcnt);
	pr_info("memory+swap: usage %llukB, limit %llukB, failcnt %lu\n",
		K((u64)page_counter_read(&memcg->memsw)),
		K((u64)memcg->memsw.limit), memcg->memsw.failcnt);
	pr_info("kmem: usage %llukB, limit %llukB, failcnt %lu\n",
		K((u64)page_counter_read(&memcg->kmem)),
		K((u64)memcg->kmem.limit), memcg->kmem.failcnt);

	for_each_mem_cgroup_tree(iter, memcg) {
		pr_info("Memory cgroup stats for ");
		pr_cont_cgroup_path(iter->css.cgroup);
		pr_cont(":");

		for (i = 0; i < MEM_CGROUP_STAT_NSTATS; i++) {
			if (i == MEM_CGROUP_STAT_SWAP && !do_swap_account)
				continue;
			pr_cont(" %s:%luKB", mem_cgroup_stat_names[i],
				K(mem_cgroup_read_stat(iter, i)));
		}

		for (i = 0; i < NR_LRU_LISTS; i++)
			pr_cont(" %s:%luKB", mem_cgroup_lru_names[i],
				K(mem_cgroup_nr_lru_pages(iter, BIT(i))));

		pr_cont("\n");
	}
}

/*
 * This function returns the number of memcg under hierarchy tree. Returns
 * 1(self count) if no children.
 */
static int mem_cgroup_count_children(struct mem_cgroup *memcg)
{
	int num = 0;
	struct mem_cgroup *iter;

	for_each_mem_cgroup_tree(iter, memcg)
		num++;
	return num;
}

/*
 * Return the memory (and swap, if configured) limit for a memcg.
 */
static unsigned long mem_cgroup_get_limit(struct mem_cgroup *memcg)
{
	unsigned long limit;

	limit = memcg->memory.limit;
	if (mem_cgroup_swappiness(memcg)) {
		unsigned long memsw_limit;
		unsigned long swap_limit;

		memsw_limit = memcg->memsw.limit;
		swap_limit = memcg->swap.limit;
		swap_limit = min(swap_limit, (unsigned long)total_swap_pages);
		limit = min(limit + swap_limit, memsw_limit);
	}
	return limit;
}

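/*
 * Select and kill the worst-scoring task in @memcg's hierarchy, like
 * the global OOM killer does.  Returns true if a victim was chosen.
 */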
static bool mem_cgroup_out_of_memory(struct mem_cgroup *memcg, gfp_t gfp_mask,
				     int order)
{
	struct oom_control oc = {
		.zonelist = NULL,
		.nodemask = NULL,
		.gfp_mask = gfp_mask,
		.order = order,
	};
	struct mem_cgroup *iter;
	unsigned long chosen_points = 0;
	unsigned long totalpages;
	unsigned int points = 0;
	struct task_struct *chosen = NULL;

	mutex_lock(&oom_lock);

	/*
	 * If current has a pending SIGKILL or is exiting, then automatically
	 * select it.  The goal is to allow it to allocate so that it may
	 * quickly exit and free its memory.
	 */
	if (fatal_signal_pending(current) || task_will_free_mem(current)) {
		mark_oom_victim(current);
		goto unlock;
	}

	check_panic_on_oom(&oc, CONSTRAINT_MEMCG, memcg);
	totalpages = mem_cgroup_get_limit(memcg) ? : 1;
	for_each_mem_cgroup_tree(iter, memcg) {
		struct css_task_iter it;
		struct task_struct *task;

		css_task_iter_start(&iter->css, &it);
		while ((task = css_task_iter_next(&it))) {
			switch (oom_scan_process_thread(&oc, task, totalpages)) {
			case OOM_SCAN_SELECT:
				if (chosen)
					put_task_struct(chosen);
				chosen = task;
				chosen_points = ULONG_MAX;
				get_task_struct(chosen);
				/* fall through */
			case OOM_SCAN_CONTINUE:
				continue;
			case OOM_SCAN_ABORT:
				css_task_iter_end(&it);
				mem_cgroup_iter_break(memcg, iter);
				if (chosen)
					put_task_struct(chosen);
				goto unlock;
			case OOM_SCAN_OK:
				break;
			};
			points = oom_badness(task, memcg, NULL, totalpages);
			if (!points || points < chosen_points)
				continue;
			/* Prefer thread group leaders for display purposes */
			if (points == chosen_points &&
			    thread_group_leader(chosen))
				continue;

			if (chosen)
				put_task_struct(chosen);
			chosen = task;
			chosen_points = points;
			get_task_struct(chosen);
		}
		css_task_iter_end(&it);
	}

	if (chosen) {
		points = chosen_points * 1000 / totalpages;
		oom_kill_process(&oc, chosen, points, totalpages, memcg,
				 "Memory cgroup out of memory");
	}
unlock:
	mutex_unlock(&oom_lock);
	return chosen;
}

#if MAX_NUMNODES > 1

/**
 * test_mem_cgroup_node_reclaimable
 * @memcg: the target memcg
 * @nid: the node ID to be checked.
 * @noswap : specify true here if the user wants file only information.
 *
 * This function returns whether the specified memcg contains any
 * reclaimable pages on a node. Returns true if there are any reclaimable
 * pages in the node.
 */
static bool test_mem_cgroup_node_reclaimable(struct mem_cgroup *memcg,
		int nid, bool noswap)
{
	if (mem_cgroup_node_nr_lru_pages(memcg, nid, LRU_ALL_FILE))
		return true;
	if (noswap || !total_swap_pages)
		return false;
	if (mem_cgroup_node_nr_lru_pages(memcg, nid, LRU_ALL_ANON))
		return true;
	return false;

}

/*
 * Always updating the nodemask is not very good - even if we have an empty
 * list or the wrong list here, we can start from some node and traverse all
 * nodes based on the zonelist. So update the list loosely once in a while.
 */
static void mem_cgroup_may_update_nodemask(struct mem_cgroup *memcg)
{
	int nid;
	/*
	 * numainfo_events > 0 means there was at least NUMAINFO_EVENTS_TARGET
	 * pagein/pageout changes since the last update.
	 */
	if (!atomic_read(&memcg->numainfo_events))
		return;
	if (atomic_inc_return(&memcg->numainfo_updating) > 1)
		return;

	/* make a nodemask where this memcg uses memory from */
	memcg->scan_nodes = node_states[N_MEMORY];

	for_each_node_mask(nid, node_states[N_MEMORY]) {

		if (!test_mem_cgroup_node_reclaimable(memcg, nid, false))
			node_clear(nid, memcg->scan_nodes);
	}

	atomic_set(&memcg->numainfo_events, 0);
	atomic_set(&memcg->numainfo_updating, 0);
}

/*
 * Selecting a node where we start reclaim from. Because what we need is just
 * reducing usage counter, start from anywhere is O.K. Considering
 * memory reclaim from current node, there are pros. and cons.
 *
 * Freeing memory from current node means freeing memory from a node which
 * we'll use or we've used. So, it may make LRU bad. And if several threads
 * hit limits, it will see a contention on a node. But freeing from remote
 * node means more costs for memory reclaim because of memory latency.
 *
 * Now, we use round-robin. Better algorithm is welcomed.
 */
int mem_cgroup_select_victim_node(struct mem_cgroup *memcg)
{
	int node;

	mem_cgroup_may_update_nodemask(memcg);
	node = memcg->last_scanned_node;

	node = next_node(node, memcg->scan_nodes);
	if (node == MAX_NUMNODES)
		node = first_node(memcg->scan_nodes);
	/*
	 * We call this when we hit limit, not when pages are added to LRU.
	 * No LRU may hold pages because all pages are UNEVICTABLE or
	 * memcg is too small and all pages are not on LRU. In that case,
	 * we use the current node.
	 */
	if (unlikely(node == MAX_NUMNODES))
		node = numa_node_id();

	memcg->last_scanned_node = node;
	return node;
}
#else
int mem_cgroup_select_victim_node(struct mem_cgroup *memcg)
{
	return 0;
}
#endif

static int mem_cgroup_soft_reclaim(struct mem_cgroup *root_memcg,
				   struct zone *zone,
				   gfp_t gfp_mask,
				   unsigned long *total_scanned)
{
	struct mem_cgroup *victim = NULL;
	int total = 0;
	int loop = 0;
	unsigned long excess;
	unsigned long nr_scanned;
	struct mem_cgroup_reclaim_cookie reclaim = {
		.zone = zone,
		.priority = 0,
	};

	excess = soft_limit_excess(root_memcg);

	while (1) {
		victim = mem_cgroup_iter(root_memcg, victim, &reclaim);
		if (!victim) {
			loop++;
			if (loop >= 2) {
				/*
				 * If we have not been able to reclaim
				 * anything, it might be because there are
				 * no reclaimable pages under this hierarchy
				 */
				if (!total)
					break;
				/*
				 * We want to do more targeted reclaim.
				 * excess >> 2 is not too excessive so as to
				 * reclaim too much, nor too little that we
				 * keep coming back to reclaim from this cgroup
				 */
				if (total >= (excess >> 2) ||
					(loop > MEM_CGROUP_MAX_RECLAIM_LOOPS))
					break;
			}
			continue;
		}
		total += mem_cgroup_shrink_node_zone(victim, gfp_mask, false,
						     zone, &nr_scanned);
		*total_scanned += nr_scanned;
		if (!soft_limit_excess(root_memcg))
			break;
	}
	mem_cgroup_iter_break(root_memcg, victim);
	return total;
}

#ifdef CONFIG_LOCKDEP
static struct lockdep_map memcg_oom_lock_dep_map = {
	.name = "memcg_oom_lock",
};
#endif

static DEFINE_SPINLOCK(memcg_oom_lock);

/*
 * Check OOM-Killer is already running under our hierarchy.
 * If someone is running, return false.
 */
static bool mem_cgroup_oom_trylock(struct mem_cgroup *memcg)
{
	struct mem_cgroup *iter, *failed = NULL;

	spin_lock(&memcg_oom_lock);

	for_each_mem_cgroup_tree(iter, memcg) {
		if (iter->oom_lock) {
			/*
			 * this subtree of our hierarchy is already locked
			 * so we cannot give a lock.
			 */
			failed = iter;
			mem_cgroup_iter_break(memcg, iter);
			break;
		} else
			iter->oom_lock = true;
	}

	if (failed) {
		/*
		 * OK, we failed to lock the whole subtree so we have
		 * to clean up what we set up to the failing subtree
		 */
		for_each_mem_cgroup_tree(iter, memcg) {
			if (iter == failed) {
				mem_cgroup_iter_break(memcg, iter);
				break;
			}
			iter->oom_lock = false;
		}
	} else
		mutex_acquire(&memcg_oom_lock_dep_map, 0, 1, _RET_IP_);

	spin_unlock(&memcg_oom_lock);

	return !failed;
}

static void mem_cgroup_oom_unlock(struct mem_cgroup *memcg)
{
	struct mem_cgroup *iter;

	spin_lock(&memcg_oom_lock);
	mutex_release(&memcg_oom_lock_dep_map, 1, _RET_IP_);
	for_each_mem_cgroup_tree(iter, memcg)
		iter->oom_lock = false;
	spin_unlock(&memcg_oom_lock);
}

static void mem_cgroup_mark_under_oom(struct mem_cgroup *memcg)
{
	struct mem_cgroup *iter;

	spin_lock(&memcg_oom_lock);
	for_each_mem_cgroup_tree(iter, memcg)
		iter->under_oom++;
	spin_unlock(&memcg_oom_lock);
}

static void mem_cgroup_unmark_under_oom(struct mem_cgroup *memcg)
{
	struct mem_cgroup *iter;

	/*
	 * When a new child is created while the hierarchy is under oom,
	 * mem_cgroup_oom_lock() may not be called. Watch for underflow.
	 */
	spin_lock(&memcg_oom_lock);
	for_each_mem_cgroup_tree(iter, memcg)
		if (iter->under_oom > 0)
			iter->under_oom--;
	spin_unlock(&memcg_oom_lock);
}

static DECLARE_WAIT_QUEUE_HEAD(memcg_oom_waitq);

struct oom_wait_info {
	struct mem_cgroup *memcg;
	wait_queue_t	wait;
};

static int memcg_oom_wake_function(wait_queue_t *wait,
	unsigned mode, int sync, void *arg)
{
	struct mem_cgroup *wake_memcg = (struct mem_cgroup *)arg;
	struct mem_cgroup *oom_wait_memcg;
	struct oom_wait_info *oom_wait_info;

	oom_wait_info = container_of(wait, struct oom_wait_info, wait);
	oom_wait_memcg = oom_wait_info->memcg;

	if (!mem_cgroup_is_descendant(wake_memcg, oom_wait_memcg) &&
	    !mem_cgroup_is_descendant(oom_wait_memcg, wake_memcg))
		return 0;
	return autoremove_wake_function(wait, mode, sync, arg);
}

static void memcg_oom_recover(struct mem_cgroup *memcg)
{
	/*
	 * For the following lockless ->under_oom test, the only required
	 * guarantee is that it must see the state asserted by an OOM when
	 * this function is called as a result of userland actions
	 * triggered by the notification of the OOM.  This is trivially
	 * achieved by invoking mem_cgroup_mark_under_oom() before
	 * triggering notification.
	 */
	if (memcg && memcg->under_oom)
		__wake_up(&memcg_oom_waitq, TASK_NORMAL, 0, memcg);
}

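/*
 * Remember the OOM context of a failed charge; the actual OOM handling
 * is deferred to mem_cgroup_oom_synchronize() at the end of the page
 * fault.
 */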
static void mem_cgroup_oom(struct mem_cgroup *memcg, gfp_t mask, int order)
{
	if (!current->memcg_may_oom)
		return;
	/*
	 * We are in the middle of the charge context here, so we
	 * don't want to block when potentially sitting on a callstack
	 * that holds all kinds of filesystem and mm locks.
	 *
	 * Also, the caller may handle a failed allocation gracefully
	 * (like optional page cache readahead) and so an OOM killer
	 * invocation might not even be necessary.
	 *
	 * That's why we don't do anything here except remember the
	 * OOM context and then deal with it at the end of the page
	 * fault when the stack is unwound, the locks are released,
	 * and when we know whether the fault was overall successful.
	 */
	css_get(&memcg->css);
	current->memcg_in_oom = memcg;
	current->memcg_oom_gfp_mask = mask;
	current->memcg_oom_order = order;
}

/**
 * mem_cgroup_oom_synchronize - complete memcg OOM handling
 * @handle: actually kill/wait or just clean up the OOM state
 *
 * This has to be called at the end of a page fault if the memcg OOM
 * handler was enabled.
 *
 * Memcg supports userspace OOM handling where failed allocations must
 * sleep on a waitqueue until the userspace task resolves the
 * situation.  Sleeping directly in the charge context with all kinds
 * of locks held is not a good idea, instead we remember an OOM state
 * in the task and mem_cgroup_oom_synchronize() has to be called at
 * the end of the page fault to complete the OOM handling.
 *
 * Returns %true if an ongoing memcg OOM situation was detected and
 * completed, %false otherwise.
 */
bool mem_cgroup_oom_synchronize(bool handle)
{
	struct mem_cgroup *memcg = current->memcg_in_oom;
	struct oom_wait_info owait;
	bool locked;

	/* OOM is global, do not handle */
	if (!memcg)
		return false;

	if (!handle || oom_killer_disabled)
		goto cleanup;

	owait.memcg = memcg;
	owait.wait.flags = 0;
	owait.wait.func = memcg_oom_wake_function;
	owait.wait.private = current;
	INIT_LIST_HEAD(&owait.wait.task_list);

	prepare_to_wait(&memcg_oom_waitq, &owait.wait, TASK_KILLABLE);
	mem_cgroup_mark_under_oom(memcg);

	locked = mem_cgroup_oom_trylock(memcg);

	if (locked)
		mem_cgroup_oom_notify(memcg);

	if (locked && !memcg->oom_kill_disable) {
		mem_cgroup_unmark_under_oom(memcg);
		finish_wait(&memcg_oom_waitq, &owait.wait);
		mem_cgroup_out_of_memory(memcg, current->memcg_oom_gfp_mask,
					 current->memcg_oom_order);
	} else {
		schedule();
		mem_cgroup_unmark_under_oom(memcg);
		finish_wait(&memcg_oom_waitq, &owait.wait);
	}

	if (locked) {
		mem_cgroup_oom_unlock(memcg);
		/*
		 * There is no guarantee that an OOM-lock contender
		 * sees the wakeups triggered by the OOM kill
		 * uncharges.  Wake any sleepers explicitly.
		 */
		memcg_oom_recover(memcg);
	}
cleanup:
	current->memcg_in_oom = NULL;
	css_put(&memcg->css);
	return true;
}

/**
 * lock_page_memcg - lock a page->mem_cgroup binding
 * @page: the page
 *
 * This function protects unlocked LRU pages from being moved to
 * another cgroup and stabilizes their page->mem_cgroup binding.
 */
void lock_page_memcg(struct page *page)
{
	struct mem_cgroup *memcg;
	unsigned long flags;

	/*
	 * The RCU lock is held throughout the transaction.  The fast
	 * path can get away without acquiring the memcg->move_lock
	 * because page moving starts with an RCU grace period.
	 */
	rcu_read_lock();

	if (mem_cgroup_disabled())
		return;
again:
	memcg = page->mem_cgroup;
	if (unlikely(!memcg))
		return;

	if (atomic_read(&memcg->moving_account) <= 0)
		return;

	spin_lock_irqsave(&memcg->move_lock, flags);
	if (memcg != page->mem_cgroup) {
		spin_unlock_irqrestore(&memcg->move_lock, flags);
		goto again;
	}

	/*
	 * When charge migration first begins, we can have locked and
	 * unlocked page stat updates happening concurrently.  Track
	 * the task who has the lock for unlock_page_memcg().
	 */
	memcg->move_lock_task = current;
	memcg->move_lock_flags = flags;

	return;
}
EXPORT_SYMBOL(lock_page_memcg);

/**
 * unlock_page_memcg - unlock a page->mem_cgroup binding
 * @page: the page
 */
void unlock_page_memcg(struct page *page)
{
	struct mem_cgroup *memcg = page->mem_cgroup;

	if (memcg && memcg->move_lock_task == current) {
		unsigned long flags = memcg->move_lock_flags;

		memcg->move_lock_task = NULL;
		memcg->move_lock_flags = 0;

		spin_unlock_irqrestore(&memcg->move_lock, flags);
	}

	rcu_read_unlock();
}
EXPORT_SYMBOL(unlock_page_memcg);

/*
 * size of first charge trial. "32" comes from vmscan.c's magic value.
 * TODO: maybe necessary to use big numbers in big irons.
 */
#define CHARGE_BATCH	32U
struct memcg_stock_pcp {
	struct mem_cgroup *cached; /* this never be root cgroup */
	unsigned int nr_pages;
	struct work_struct work;
	unsigned long flags;
#define FLUSHING_CACHED_CHARGE	0
};
static DEFINE_PER_CPU(struct memcg_stock_pcp, memcg_stock);
static DEFINE_MUTEX(percpu_charge_mutex);

/**
 * consume_stock: Try to consume stocked charge on this cpu.
 * @memcg: memcg to consume from.
 * @nr_pages: how many pages to charge.
 *
 * The charges will only happen if @memcg matches the current cpu's memcg
 * stock, and at least @nr_pages are available in that stock.  Failure to
 * service an allocation will refill the stock.
 *
 * returns true if successful, false otherwise.
 */
static bool consume_stock(struct mem_cgroup *memcg, unsigned int nr_pages)
{
	struct memcg_stock_pcp *stock;
	bool ret = false;

	if (nr_pages > CHARGE_BATCH)
		return ret;

	stock = &get_cpu_var(memcg_stock);
	if (memcg == stock->cached && stock->nr_pages >= nr_pages) {
		stock->nr_pages -= nr_pages;
		ret = true;
	}
	put_cpu_var(memcg_stock);
	return ret;
}

/*
 * Returns stocks cached in percpu and reset cached information.
 */
static void drain_stock(struct memcg_stock_pcp *stock)
{
	struct mem_cgroup *old = stock->cached;

	if (stock->nr_pages) {
		page_counter_uncharge(&old->memory, stock->nr_pages);
		if (do_memsw_account())
			page_counter_uncharge(&old->memsw, stock->nr_pages);
		css_put_many(&old->css, stock->nr_pages);
		stock->nr_pages = 0;
	}
	stock->cached = NULL;
}

/*
 * This must be called under preempt disabled or must be called by
 * a thread which is pinned to local cpu.
 */
static void drain_local_stock(struct work_struct *dummy)
{
	struct memcg_stock_pcp *stock = this_cpu_ptr(&memcg_stock);
	drain_stock(stock);
	clear_bit(FLUSHING_CACHED_CHARGE, &stock->flags);
}

/*
 * Cache charges(val) to local per_cpu area.
 * This will be consumed by consume_stock() function, later.
 */
static void refill_stock(struct mem_cgroup *memcg, unsigned int nr_pages)
{
	struct memcg_stock_pcp *stock = &get_cpu_var(memcg_stock);

	if (stock->cached != memcg) { /* reset if necessary */
		drain_stock(stock);
		stock->cached = memcg;
	}
	stock->nr_pages += nr_pages;
	put_cpu_var(memcg_stock);
}

/*
 * Drains all per-CPU charge caches for given root_memcg resp. subtree
 * of the hierarchy under it.
 */
static void drain_all_stock(struct mem_cgroup *root_memcg)
{
	int cpu, curcpu;

	/* If someone's already draining, avoid adding running more workers. */
	if (!mutex_trylock(&percpu_charge_mutex))
		return;
	/* Notify other cpus that system-wide "drain" is running */
	get_online_cpus();
	curcpu = get_cpu();
	for_each_online_cpu(cpu) {
		struct memcg_stock_pcp *stock = &per_cpu(memcg_stock, cpu);
		struct mem_cgroup *memcg;

		memcg = stock->cached;
		if (!memcg || !stock->nr_pages)
			continue;
		if (!mem_cgroup_is_descendant(memcg, root_memcg))
			continue;
		if (!test_and_set_bit(FLUSHING_CACHED_CHARGE, &stock->flags)) {
			if (cpu == curcpu)
				drain_local_stock(&stock->work);
			else
				schedule_work_on(cpu, &stock->work);
		}
	}
	put_cpu();
	put_online_cpus();
	mutex_unlock(&percpu_charge_mutex);
}

static int memcg_cpu_hotplug_callback(struct notifier_block *nb,
					unsigned long action,
					void *hcpu)
{
	int cpu = (unsigned long)hcpu;
	struct memcg_stock_pcp *stock;

	if (action == CPU_ONLINE)
		return NOTIFY_OK;

	if (action != CPU_DEAD && action != CPU_DEAD_FROZEN)
		return NOTIFY_OK;

	stock = &per_cpu(memcg_stock, cpu);
	drain_stock(stock);
	return NOTIFY_OK;
}

static void reclaim_high(struct mem_cgroup *memcg,
			 unsigned int nr_pages,
			 gfp_t gfp_mask)
{
	do {
		if (page_counter_read(&memcg->memory) <= memcg->high)
			continue;
		mem_cgroup_events(memcg, MEMCG_HIGH, 1);
		try_to_free_mem_cgroup_pages(memcg, nr_pages, gfp_mask, true);
	} while ((memcg = parent_mem_cgroup(memcg)));
}

static void high_work_func(struct work_struct *work)
{
	struct mem_cgroup *memcg;

	memcg = container_of(work, struct mem_cgroup, high_work);
	reclaim_high(memcg, CHARGE_BATCH, GFP_KERNEL);
}

/*
 * Scheduled by try_charge() to be executed from the userland return path
 * and reclaims memory over the high limit.
 */
void mem_cgroup_handle_over_high(void)
{
	unsigned int nr_pages = current->memcg_nr_pages_over_high;
	struct mem_cgroup *memcg;

	if (likely(!nr_pages))
		return;

	memcg = get_mem_cgroup_from_mm(current->mm);
	reclaim_high(memcg, nr_pages, GFP_KERNEL);
	css_put(&memcg->css);
	current->memcg_nr_pages_over_high = 0;
}

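/*
 * Try to charge @nr_pages to @memcg, honoring the hierarchical limits.
 * On success the charge is batched into the per-cpu stock; on pressure,
 * direct reclaim and, as a last resort, the memcg OOM killer are used.
 */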
static int try_charge(struct mem_cgroup *memcg, gfp_t gfp_mask,
		      unsigned int nr_pages)
{
	unsigned int batch = max(CHARGE_BATCH, nr_pages);
	int nr_retries = MEM_CGROUP_RECLAIM_RETRIES;
	struct mem_cgroup *mem_over_limit;
	struct page_counter *counter;
	unsigned long nr_reclaimed;
	bool may_swap = true;
	bool drained = false;

	if (mem_cgroup_is_root(memcg))
		return 0;
retry:
	if (consume_stock(memcg, nr_pages))
		return 0;

	if (!do_memsw_account() ||
	    page_counter_try_charge(&memcg->memsw, batch, &counter)) {
		if (page_counter_try_charge(&memcg->memory, batch, &counter))
			goto done_restock;
		if (do_memsw_account())
			page_counter_uncharge(&memcg->memsw, batch);
		mem_over_limit = mem_cgroup_from_counter(counter, memory);
	} else {
		mem_over_limit = mem_cgroup_from_counter(counter, memsw);
		may_swap = false;
	}

	if (batch > nr_pages) {
		batch = nr_pages;
		goto retry;
	}

	/*
	 * Unlike in global OOM situations, memcg is not in a physical
	 * memory shortage.  Allow dying and OOM-killed tasks to
	 * bypass the last charges so that they can exit quickly and
	 * free their memory.
	 */
	if (unlikely(test_thread_flag(TIF_MEMDIE) ||
		     fatal_signal_pending(current) ||
		     current->flags & PF_EXITING))
		goto force;

	if (unlikely(task_in_memcg_oom(current)))
		goto nomem;

	if (!gfpflags_allow_blocking(gfp_mask))
		goto nomem;

	mem_cgroup_events(mem_over_limit, MEMCG_MAX, 1);

	nr_reclaimed = try_to_free_mem_cgroup_pages(mem_over_limit, nr_pages,
						    gfp_mask, may_swap);

	if (mem_cgroup_margin(mem_over_limit) >= nr_pages)
		goto retry;

	if (!drained) {
		drain_all_stock(mem_over_limit);
		drained = true;
		goto retry;
	}

	if (gfp_mask & __GFP_NORETRY)
		goto nomem;
	/*
	 * Even though the limit is exceeded at this point, reclaim
	 * may have been able to free some pages.  Retry the charge
	 * before killing the task.
	 *
	 * Only for regular pages, though: huge pages are rather
	 * unlikely to succeed so close to the limit, and we fall back
	 * to regular pages anyway in case of failure.
	 */
	if (nr_reclaimed && nr_pages <= (1 << PAGE_ALLOC_COSTLY_ORDER))
		goto retry;
	/*
	 * At task move, charge accounts can be doubly counted. So, it's
	 * better to wait until the end of task_move if something is going on.
	 */
	if (mem_cgroup_wait_acct_move(mem_over_limit))
		goto retry;

	if (nr_retries--)
		goto retry;

	if (gfp_mask & __GFP_NOFAIL)
		goto force;

	if (fatal_signal_pending(current))
		goto force;

	mem_cgroup_events(mem_over_limit, MEMCG_OOM, 1);

	mem_cgroup_oom(mem_over_limit, gfp_mask,
		       get_order(nr_pages * PAGE_SIZE));
nomem:
	if (!(gfp_mask & __GFP_NOFAIL))
		return -ENOMEM;
force:
	/*
	 * The allocation either can't fail or will lead to more memory
	 * being freed very soon.  Allow memory usage go over the limit
	 * temporarily by force charging it.
	 */
	page_counter_charge(&memcg->memory, nr_pages);
	if (do_memsw_account())
		page_counter_charge(&memcg->memsw, nr_pages);
	css_get_many(&memcg->css, nr_pages);

	return 0;

done_restock:
	css_get_many(&memcg->css, batch);
	if (batch > nr_pages)
		refill_stock(memcg, batch - nr_pages);

	/*
	 * If the hierarchy is above the normal consumption range, schedule
	 * reclaim on returning to userland.  We can perform reclaim here
	 * if __GFP_RECLAIM but let's always punt for simplicity and so that
	 * GFP_KERNEL can consistently be used during reclaim.  @memcg is
	 * not recorded as it most likely matches current's and won't
	 * change in the meantime.  As high limit is checked again before
	 * reclaim, the cost of mismatch is negligible.
	 */
	do {
		if (page_counter_read(&memcg->memory) > memcg->high) {
			/* Don't bother a random interrupted task */
			if (in_interrupt()) {
				schedule_work(&memcg->high_work);
				break;
			}
			current->memcg_nr_pages_over_high += batch;
			set_notify_resume(current);
			break;
		}
	} while ((memcg = parent_mem_cgroup(memcg)));

	return 0;
}

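/* Undo a charge made by try_charge() */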
static void cancel_charge(struct mem_cgroup *memcg, unsigned int nr_pages)
{
	if (mem_cgroup_is_root(memcg))
		return;

	page_counter_uncharge(&memcg->memory, nr_pages);
	if (do_memsw_account())
		page_counter_uncharge(&memcg->memsw, nr_pages);

	css_put_many(&memcg->css, nr_pages);
}

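/*
 * Isolate the page from the LRU under zone->lru_lock; paired with
 * unlock_page_lru().
 */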
static void lock_page_lru(struct page *page, int *isolated)
{
	struct zone *zone = page_zone(page);

	spin_lock_irq(&zone->lru_lock);
	if (PageLRU(page)) {
		struct lruvec *lruvec;

		lruvec = mem_cgroup_page_lruvec(page, zone);
		ClearPageLRU(page);
		del_page_from_lru_list(page, lruvec, page_lru(page));
		*isolated = 1;
	} else
		*isolated = 0;
}

static void unlock_page_lru(struct page *page, int isolated)
{
	struct zone *zone = page_zone(page);

	if (isolated) {
		struct lruvec *lruvec;

		lruvec = mem_cgroup_page_lruvec(page, zone);
		VM_BUG_ON_PAGE(PageLRU(page), page);
		SetPageLRU(page);
		add_page_to_lru_list(page, lruvec, page_lru(page));
	}
	spin_unlock_irq(&zone->lru_lock);
}

static void commit_charge(struct page *page, struct mem_cgroup *memcg,
			  bool lrucare)
{
	int isolated;

	VM_BUG_ON_PAGE(page->mem_cgroup, page);

	/*
	 * In some cases, SwapCache and FUSE(splice_buf->radixtree), the page
	 * may already be on some other mem_cgroup's LRU.  Take care of it.
	 */
	if (lrucare)
		lock_page_lru(page, &isolated);

	/*
	 * Nobody should be changing or seriously looking at
	 * page->mem_cgroup at this point:
	 *
	 * - the page is uncharged
	 *
	 * - the page is off-LRU
	 *
	 * - an anonymous fault has exclusive page access, except for
	 *   a locked page table
	 *
	 * - a page cache insertion, a swapin fault, or a migration
	 *   have the page locked
	 */
	page->mem_cgroup = memcg;

	if (lrucare)
		unlock_page_lru(page, isolated);
}

#ifndef CONFIG_SLOB
static int memcg_alloc_cache_id(void)
{
	int id, size;
	int err;

	id = ida_simple_get(&memcg_cache_ida,
			    0, MEMCG_CACHES_MAX_SIZE, GFP_KERNEL);
	if (id < 0)
		return id;

	if (id < memcg_nr_cache_ids)
		return id;

	/*
	 * There's no space for the new id in memcg_caches arrays,
	 * so we have to grow them.
	 */
	down_write(&memcg_cache_ids_sem);

	size = 2 * (id + 1);
	if (size < MEMCG_CACHES_MIN_SIZE)
		size = MEMCG_CACHES_MIN_SIZE;
	else if (size > MEMCG_CACHES_MAX_SIZE)
		size = MEMCG_CACHES_MAX_SIZE;

	err = memcg_update_all_caches(size);
	if (!err)
		err = memcg_update_all_list_lrus(size);
	if (!err)
		memcg_nr_cache_ids = size;

	up_write(&memcg_cache_ids_sem);

	if (err) {
		ida_simple_remove(&memcg_cache_ida, id);
		return err;
	}
	return id;
}

static void memcg_free_cache_id(int id)
{
	ida_simple_remove(&memcg_cache_ida, id);
}

struct memcg_kmem_cache_create_work {
	struct mem_cgroup *memcg;
	struct kmem_cache *cachep;
	struct work_struct work;
};

static void memcg_kmem_cache_create_func(struct work_struct *w)
{
	struct memcg_kmem_cache_create_work *cw =
		container_of(w, struct memcg_kmem_cache_create_work, work);
	struct mem_cgroup *memcg = cw->memcg;
	struct kmem_cache *cachep = cw->cachep;

	memcg_create_kmem_cache(memcg, cachep);

	css_put(&memcg->css);
	kfree(cw);
}

/*
 * Enqueue the creation of a per-memcg kmem_cache.
 */
static void __memcg_schedule_kmem_cache_create(struct mem_cgroup *memcg,
					       struct kmem_cache *cachep)
{
	struct memcg_kmem_cache_create_work *cw;

	cw = kmalloc(sizeof(*cw), GFP_NOWAIT);
	if (!cw)
		return;

	css_get(&memcg->css);

	cw->memcg = memcg;
	cw->cachep = cachep;
	INIT_WORK(&cw->work, memcg_kmem_cache_create_func);

	schedule_work(&cw->work);
}

static void memcg_schedule_kmem_cache_create(struct mem_cgroup *memcg,
					     struct kmem_cache *cachep)
{
	/*
	 * We need to stop accounting when we kmalloc, because if the
	 * corresponding kmalloc cache is not yet created, the first allocation
	 * in __memcg_schedule_kmem_cache_create will recurse.
	 *
	 * However, it is better to enclose the whole function. Depending on
	 * the debugging options enabled, INIT_WORK(), for instance, can
	 * trigger an allocation. This too, will make us recurse. Because at
	 * this point we can't allow ourselves back into memcg_kmem_get_cache,
	 * the safest choice is to do it like this, wrapping the whole
	 * function.
	 */
	current->memcg_kmem_skip_account = 1;
	__memcg_schedule_kmem_cache_create(memcg, cachep);
	current->memcg_kmem_skip_account = 0;
}

/*
 * Return the kmem_cache we're supposed to use for a slab allocation.
 * We try to use the current memcg's version of the cache.
 *
 * If the cache does not exist yet, if we are the first user of it,
 * we either create it immediately, if possible, or create it asynchronously
 * in a workqueue.
 * In the latter case, we will let the current allocation go through with
 * the original cache.
 *
 * Can't be called in interrupt context or from kernel threads.
 */
struct kmem_cache *__memcg_kmem_get_cache(struct kmem_cache *cachep, gfp_t gfp)
{
	struct mem_cgroup *memcg;
	struct kmem_cache *memcg_cachep;
	int kmemcg_id;

	VM_BUG_ON(!is_root_cache(cachep));

	if (cachep->flags & SLAB_ACCOUNT)
		gfp |= __GFP_ACCOUNT;

	if (!(gfp & __GFP_ACCOUNT))
		return cachep;

	if (current->memcg_kmem_skip_account)
		return cachep;

	memcg = get_mem_cgroup_from_mm(current->mm);
	kmemcg_id = READ_ONCE(memcg->kmemcg_id);
	if (kmemcg_id < 0)
		goto out;

	memcg_cachep = cache_from_memcg_idx(cachep, kmemcg_id);
	if (likely(memcg_cachep))
		return memcg_cachep;

	/*
	 * The corresponding per-memcg cache doesn't exist yet.  Schedule
	 * its creation asynchronously and let the current allocation go
	 * through with the root cache: creating it here could recurse
	 * into memory allocation.
	 */
	memcg_schedule_kmem_cache_create(memcg, cachep);
out:
	css_put(&memcg->css);
	return cachep;
}

void __memcg_kmem_put_cache(struct kmem_cache *cachep)
{
	if (!is_root_cache(cachep))
		css_put(&cachep->memcg_params.memcg->css);
}

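/*
 * Charge @page, allocated at @order, to @memcg as kernel memory.
 * Returns 0 on success, -ENOMEM otherwise.
 */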
int __memcg_kmem_charge_memcg(struct page *page, gfp_t gfp, int order,
			      struct mem_cgroup *memcg)
{
	unsigned int nr_pages = 1 << order;
	struct page_counter *counter;
	int ret;

	ret = try_charge(memcg, gfp, nr_pages);
	if (ret)
		return ret;

	if (!cgroup_subsys_on_dfl(memory_cgrp_subsys) &&
	    !page_counter_try_charge(&memcg->kmem, nr_pages, &counter)) {
		cancel_charge(memcg, nr_pages);
		return -ENOMEM;
	}

	page->mem_cgroup = memcg;

	return 0;
}

int __memcg_kmem_charge(struct page *page, gfp_t gfp, int order)
{
	struct mem_cgroup *memcg;
	int ret = 0;

	memcg = get_mem_cgroup_from_mm(current->mm);
	if (!mem_cgroup_is_root(memcg))
		ret = __memcg_kmem_charge_memcg(page, gfp, order, memcg);
	css_put(&memcg->css);
	return ret;
}

void __memcg_kmem_uncharge(struct page *page, int order)
{
	struct mem_cgroup *memcg = page->mem_cgroup;
	unsigned int nr_pages = 1 << order;

	if (!memcg)
		return;

	VM_BUG_ON_PAGE(mem_cgroup_is_root(memcg), page);

	if (!cgroup_subsys_on_dfl(memory_cgrp_subsys))
		page_counter_uncharge(&memcg->kmem, nr_pages);

	page_counter_uncharge(&memcg->memory, nr_pages);
	if (do_memsw_account())
		page_counter_uncharge(&memcg->memsw, nr_pages);

	page->mem_cgroup = NULL;
	css_put_many(&memcg->css, nr_pages);
}
#endif /* !CONFIG_SLOB */

#ifdef CONFIG_TRANSPARENT_HUGEPAGE

/*
 * Because tail pages are not marked as "used", set it. We're under
 * zone->lru_lock and migration entries setup in all page mappings.
 */
void mem_cgroup_split_huge_fixup(struct page *head)
{
	int i;

	if (mem_cgroup_disabled())
		return;

	for (i = 1; i < HPAGE_PMD_NR; i++)
		head[i].mem_cgroup = head->mem_cgroup;

	__this_cpu_sub(head->mem_cgroup->stat->count[MEM_CGROUP_STAT_RSS_HUGE],
		       HPAGE_PMD_NR);
}
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */

#ifdef CONFIG_MEMCG_SWAP
static void mem_cgroup_swap_statistics(struct mem_cgroup *memcg,
					 bool charge)
{
	int val = (charge) ? 1 : -1;
	this_cpu_add(memcg->stat->count[MEM_CGROUP_STAT_SWAP], val);
}

/**
 * mem_cgroup_move_swap_account - move swap charge and swap_cgroup's record.
 * @entry: swap entry to be moved
 * @from:  mem_cgroup which the entry is moved from
 * @to:  mem_cgroup which the entry is moved to
 *
 * It succeeds only when the swap_cgroup's record for this entry is the same
 * as the mem_cgroup's id of @from.
 *
 * Returns 0 on success, -EINVAL on failure.
 *
 * The caller must have charged to @to, IOW, called page_counter_charge()
 * about both res and memsw, and called css_get().
 */
static int mem_cgroup_move_swap_account(swp_entry_t entry,
				struct mem_cgroup *from, struct mem_cgroup *to)
{
	unsigned short old_id, new_id;

	old_id = mem_cgroup_id(from);
	new_id = mem_cgroup_id(to);

	if (swap_cgroup_cmpxchg(entry, old_id, new_id) == old_id) {
		mem_cgroup_swap_statistics(from, false);
		mem_cgroup_swap_statistics(to, true);
		return 0;
	}
	return -EINVAL;
}
#else
static inline int mem_cgroup_move_swap_account(swp_entry_t entry,
				struct mem_cgroup *from, struct mem_cgroup *to)
{
	return -EINVAL;
}
#endif /* CONFIG_MEMCG_SWAP */

static DEFINE_MUTEX(memcg_limit_mutex);

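/*
 * Raise or lower the memory limit, reclaiming as needed to make a lower
 * limit fit.  Returns 0 on success, or a negative errno (-EBUSY, -EINVAL,
 * -EINTR) on failure.
 */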
2442static int mem_cgroup_resize_limit(struct mem_cgroup *memcg,
2443 unsigned long limit)
2444{
2445 unsigned long curusage;
2446 unsigned long oldusage;
2447 bool enlarge = false;
2448 int retry_count;
2449 int ret;
2450
2451
2452
2453
2454
2455
2456 retry_count = MEM_CGROUP_RECLAIM_RETRIES *
2457 mem_cgroup_count_children(memcg);
2458
2459 oldusage = page_counter_read(&memcg->memory);
2460
2461 do {
2462 if (signal_pending(current)) {
2463 ret = -EINTR;
2464 break;
2465 }
2466
2467 mutex_lock(&memcg_limit_mutex);
2468 if (limit > memcg->memsw.limit) {
2469 mutex_unlock(&memcg_limit_mutex);
2470 ret = -EINVAL;
2471 break;
2472 }
2473 if (limit > memcg->memory.limit)
2474 enlarge = true;
2475 ret = page_counter_limit(&memcg->memory, limit);
2476 mutex_unlock(&memcg_limit_mutex);
2477
2478 if (!ret)
2479 break;
2480
2481 try_to_free_mem_cgroup_pages(memcg, 1, GFP_KERNEL, true);
2482
2483 curusage = page_counter_read(&memcg->memory);
2484
2485 if (curusage >= oldusage)
2486 retry_count--;
2487 else
2488 oldusage = curusage;
2489 } while (retry_count);
2490
2491 if (!ret && enlarge)
2492 memcg_oom_recover(memcg);
2493
2494 return ret;
2495}
2496
2497static int mem_cgroup_resize_memsw_limit(struct mem_cgroup *memcg,
2498 unsigned long limit)
2499{
2500 unsigned long curusage;
2501 unsigned long oldusage;
2502 bool enlarge = false;
2503 int retry_count;
2504 int ret;
2505
2506
2507 retry_count = MEM_CGROUP_RECLAIM_RETRIES *
2508 mem_cgroup_count_children(memcg);
2509
2510 oldusage = page_counter_read(&memcg->memsw);
2511
2512 do {
2513 if (signal_pending(current)) {
2514 ret = -EINTR;
2515 break;
2516 }
2517
2518 mutex_lock(&memcg_limit_mutex);
2519 if (limit < memcg->memory.limit) {
2520 mutex_unlock(&memcg_limit_mutex);
2521 ret = -EINVAL;
2522 break;
2523 }
2524 if (limit > memcg->memsw.limit)
2525 enlarge = true;
2526 ret = page_counter_limit(&memcg->memsw, limit);
2527 mutex_unlock(&memcg_limit_mutex);
2528
2529 if (!ret)
2530 break;
2531
2532 try_to_free_mem_cgroup_pages(memcg, 1, GFP_KERNEL, false);
2533
2534 curusage = page_counter_read(&memcg->memsw);
2535
2536 if (curusage >= oldusage)
2537 retry_count--;
2538 else
2539 oldusage = curusage;
2540 } while (retry_count);
2541
2542 if (!ret && enlarge)
2543 memcg_oom_recover(memcg);
2544
2545 return ret;
2546}
2547
2548unsigned long mem_cgroup_soft_limit_reclaim(struct zone *zone, int order,
2549 gfp_t gfp_mask,
2550 unsigned long *total_scanned)
2551{
2552 unsigned long nr_reclaimed = 0;
2553 struct mem_cgroup_per_zone *mz, *next_mz = NULL;
2554 unsigned long reclaimed;
2555 int loop = 0;
2556 struct mem_cgroup_tree_per_zone *mctz;
2557 unsigned long excess;
2558 unsigned long nr_scanned;
2559
2560 if (order > 0)
2561 return 0;
2562
2563 mctz = soft_limit_tree_node_zone(zone_to_nid(zone), zone_idx(zone));
2564
2565
2566
2567
2568
2569 do {
2570 if (next_mz)
2571 mz = next_mz;
2572 else
2573 mz = mem_cgroup_largest_soft_limit_node(mctz);
2574 if (!mz)
2575 break;
2576
2577 nr_scanned = 0;
2578 reclaimed = mem_cgroup_soft_reclaim(mz->memcg, zone,
2579 gfp_mask, &nr_scanned);
2580 nr_reclaimed += reclaimed;
2581 *total_scanned += nr_scanned;
2582 spin_lock_irq(&mctz->lock);
2583 __mem_cgroup_remove_exceeded(mz, mctz);
2584
2585
2586
2587
2588
2589 next_mz = NULL;
2590 if (!reclaimed)
2591 next_mz = __mem_cgroup_largest_soft_limit_node(mctz);
2592
2593 excess = soft_limit_excess(mz->memcg);
2594
2595
2596
2597
2598
2599
2600
2601
2602
2603 __mem_cgroup_insert_exceeded(mz, mctz, excess);
2604 spin_unlock_irq(&mctz->lock);
2605 css_put(&mz->memcg->css);
2606 loop++;
2607
2608
2609
2610
2611
2612 if (!nr_reclaimed &&
2613 (next_mz == NULL ||
2614 loop > MEM_CGROUP_MAX_SOFT_LIMIT_RECLAIM_LOOPS))
2615 break;
2616 } while (!nr_reclaimed);
2617 if (next_mz)
2618 css_put(&next_mz->memcg->css);
2619 return nr_reclaimed;
2620}
2621
2622
2623
2624
2625
2626
2627
2628static inline bool memcg_has_children(struct mem_cgroup *memcg)
2629{
2630 bool ret;
2631
2632 rcu_read_lock();
2633 ret = css_next_child(NULL, &memcg->css);
2634 rcu_read_unlock();
2635 return ret;
2636}
2637
2638
2639
2640
2641
2642
2643
2644static int mem_cgroup_force_empty(struct mem_cgroup *memcg)
2645{
2646 int nr_retries = MEM_CGROUP_RECLAIM_RETRIES;
2647
2648
2649 lru_add_drain_all();
2650
2651 while (nr_retries && page_counter_read(&memcg->memory)) {
2652 int progress;
2653
2654 if (signal_pending(current))
2655 return -EINTR;
2656
2657 progress = try_to_free_mem_cgroup_pages(memcg, 1,
2658 GFP_KERNEL, true);
2659 if (!progress) {
2660 nr_retries--;
2661
2662 congestion_wait(BLK_RW_ASYNC, HZ/10);
2663 }
2664
2665 }
2666
2667 return 0;
2668}
2669
2670static ssize_t mem_cgroup_force_empty_write(struct kernfs_open_file *of,
2671 char *buf, size_t nbytes,
2672 loff_t off)
2673{
2674 struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of));
2675
2676 if (mem_cgroup_is_root(memcg))
2677 return -EINVAL;
2678 return mem_cgroup_force_empty(memcg) ?: nbytes;
2679}
2680
2681static u64 mem_cgroup_hierarchy_read(struct cgroup_subsys_state *css,
2682 struct cftype *cft)
2683{
2684 return mem_cgroup_from_css(css)->use_hierarchy;
2685}
2686
2687static int mem_cgroup_hierarchy_write(struct cgroup_subsys_state *css,
2688 struct cftype *cft, u64 val)
2689{
2690 int retval = 0;
2691 struct mem_cgroup *memcg = mem_cgroup_from_css(css);
2692 struct mem_cgroup *parent_memcg = mem_cgroup_from_css(memcg->css.parent);
2693
2694 if (memcg->use_hierarchy == val)
2695 return 0;
2696
2697
2698
2699
2700
2701
2702
2703
2704
2705 if ((!parent_memcg || !parent_memcg->use_hierarchy) &&
2706 (val == 1 || val == 0)) {
2707 if (!memcg_has_children(memcg))
2708 memcg->use_hierarchy = val;
2709 else
2710 retval = -EBUSY;
2711 } else
2712 retval = -EINVAL;
2713
2714 return retval;
2715}
2716
2717static void tree_stat(struct mem_cgroup *memcg, unsigned long *stat)
2718{
2719 struct mem_cgroup *iter;
2720 int i;
2721
2722 memset(stat, 0, sizeof(*stat) * MEMCG_NR_STAT);
2723
2724 for_each_mem_cgroup_tree(iter, memcg) {
2725 for (i = 0; i < MEMCG_NR_STAT; i++)
2726 stat[i] += mem_cgroup_read_stat(iter, i);
2727 }
2728}
2729
2730static void tree_events(struct mem_cgroup *memcg, unsigned long *events)
2731{
2732 struct mem_cgroup *iter;
2733 int i;
2734
2735 memset(events, 0, sizeof(*events) * MEMCG_NR_EVENTS);
2736
2737 for_each_mem_cgroup_tree(iter, memcg) {
2738 for (i = 0; i < MEMCG_NR_EVENTS; i++)
2739 events[i] += mem_cgroup_read_events(iter, i);
2740 }
2741}
2742
2743static unsigned long mem_cgroup_usage(struct mem_cgroup *memcg, bool swap)
2744{
2745 unsigned long val = 0;
2746
2747 if (mem_cgroup_is_root(memcg)) {
2748 struct mem_cgroup *iter;
2749
2750 for_each_mem_cgroup_tree(iter, memcg) {
2751 val += mem_cgroup_read_stat(iter,
2752 MEM_CGROUP_STAT_CACHE);
2753 val += mem_cgroup_read_stat(iter,
2754 MEM_CGROUP_STAT_RSS);
2755 if (swap)
2756 val += mem_cgroup_read_stat(iter,
2757 MEM_CGROUP_STAT_SWAP);
2758 }
2759 } else {
2760 if (!swap)
2761 val = page_counter_read(&memcg->memory);
2762 else
2763 val = page_counter_read(&memcg->memsw);
2764 }
2765 return val;
2766}
2767
2768enum {
2769 RES_USAGE,
2770 RES_LIMIT,
2771 RES_MAX_USAGE,
2772 RES_FAILCNT,
2773 RES_SOFT_LIMIT,
2774};
2775
2776static u64 mem_cgroup_read_u64(struct cgroup_subsys_state *css,
2777 struct cftype *cft)
2778{
2779 struct mem_cgroup *memcg = mem_cgroup_from_css(css);
2780 struct page_counter *counter;
2781
2782 switch (MEMFILE_TYPE(cft->private)) {
2783 case _MEM:
2784 counter = &memcg->memory;
2785 break;
2786 case _MEMSWAP:
2787 counter = &memcg->memsw;
2788 break;
2789 case _KMEM:
2790 counter = &memcg->kmem;
2791 break;
2792 case _TCP:
2793 counter = &memcg->tcpmem;
2794 break;
2795 default:
2796 BUG();
2797 }
2798
2799 switch (MEMFILE_ATTR(cft->private)) {
2800 case RES_USAGE:
2801 if (counter == &memcg->memory)
2802 return (u64)mem_cgroup_usage(memcg, false) * PAGE_SIZE;
2803 if (counter == &memcg->memsw)
2804 return (u64)mem_cgroup_usage(memcg, true) * PAGE_SIZE;
2805 return (u64)page_counter_read(counter) * PAGE_SIZE;
2806 case RES_LIMIT:
2807 return (u64)counter->limit * PAGE_SIZE;
2808 case RES_MAX_USAGE:
2809 return (u64)counter->watermark * PAGE_SIZE;
2810 case RES_FAILCNT:
2811 return counter->failcnt;
2812 case RES_SOFT_LIMIT:
2813 return (u64)memcg->soft_limit * PAGE_SIZE;
2814 default:
2815 BUG();
2816 }
2817}
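
/*
 * A sketch of the cftype->private encoding consumed above, assuming the
 * MEMFILE_* macros defined earlier in this file.  For example,
 * "memory.kmem.limit_in_bytes" is registered with
 *
 *	.private = MEMFILE_PRIVATE(_KMEM, RES_LIMIT)
 *
 * so MEMFILE_TYPE() selects &memcg->kmem, MEMFILE_ATTR() selects the
 * RES_LIMIT branch, and the file reads back counter->limit * PAGE_SIZE.
 */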
2818
2819#ifndef CONFIG_SLOB
2820static int memcg_online_kmem(struct mem_cgroup *memcg)
2821{
2822 int memcg_id;
2823
2824 if (cgroup_memory_nokmem)
2825 return 0;
2826
2827 BUG_ON(memcg->kmemcg_id >= 0);
2828 BUG_ON(memcg->kmem_state);
2829
2830 memcg_id = memcg_alloc_cache_id();
2831 if (memcg_id < 0)
2832 return memcg_id;
2833
2834 static_branch_inc(&memcg_kmem_enabled_key);
2835
	/*
	 * A memory cgroup is considered kmem-online as soon as it gets
	 * kmemcg_id. Setting the id after enabling static branching will
	 * guarantee no one starts accounting before all call sites are
	 * patched.
	 */
2841 memcg->kmemcg_id = memcg_id;
2842 memcg->kmem_state = KMEM_ONLINE;
2843
2844 return 0;
2845}
2846
2847static void memcg_offline_kmem(struct mem_cgroup *memcg)
2848{
2849 struct cgroup_subsys_state *css;
2850 struct mem_cgroup *parent, *child;
2851 int kmemcg_id;
2852
2853 if (memcg->kmem_state != KMEM_ONLINE)
2854 return;
2855
	/*
	 * Clear the online state before deactivating the caches, so that
	 * no new kmem caches are created for this cgroup while we
	 * deactivate the existing ones below.
	 */
2861 memcg->kmem_state = KMEM_ALLOCATED;
2862
2863 memcg_deactivate_kmem_caches(memcg);
2864
2865 kmemcg_id = memcg->kmemcg_id;
2866 BUG_ON(kmemcg_id < 0);
2867
2868 parent = parent_mem_cgroup(memcg);
2869 if (!parent)
2870 parent = root_mem_cgroup;
2871
	/*
	 * Change kmemcg_id of this cgroup and all its descendants to the
	 * parent's id, and then move all entries from this cgroup's
	 * list_lrus to ones of the parent. After we have finished, all
	 * list_lrus corresponding to this cgroup are guaranteed to remain
	 * empty. The ordering is imposed by list_lru_node->lock taken by
	 * memcg_drain_all_list_lrus().
	 */
2880 css_for_each_descendant_pre(css, &memcg->css) {
2881 child = mem_cgroup_from_css(css);
2882 BUG_ON(child->kmemcg_id != kmemcg_id);
2883 child->kmemcg_id = parent->kmemcg_id;
2884 if (!memcg->use_hierarchy)
2885 break;
2886 }
2887 memcg_drain_all_list_lrus(kmemcg_id, parent->kmemcg_id);
2888
2889 memcg_free_cache_id(kmemcg_id);
2890}
2891
2892static void memcg_free_kmem(struct mem_cgroup *memcg)
2893{
	/* css_alloc() failed, offlining didn't happen */
2895 if (unlikely(memcg->kmem_state == KMEM_ONLINE))
2896 memcg_offline_kmem(memcg);
2897
2898 if (memcg->kmem_state == KMEM_ALLOCATED) {
2899 memcg_destroy_kmem_caches(memcg);
2900 static_branch_dec(&memcg_kmem_enabled_key);
2901 WARN_ON(page_counter_read(&memcg->kmem));
2902 }
2903}
2904#else
2905static int memcg_online_kmem(struct mem_cgroup *memcg)
2906{
2907 return 0;
2908}
2909static void memcg_offline_kmem(struct mem_cgroup *memcg)
2910{
2911}
2912static void memcg_free_kmem(struct mem_cgroup *memcg)
2913{
2914}
2915#endif
2916
2917static int memcg_update_kmem_limit(struct mem_cgroup *memcg,
2918 unsigned long limit)
2919{
2920 int ret;
2921
2922 mutex_lock(&memcg_limit_mutex);
2923 ret = page_counter_limit(&memcg->kmem, limit);
2924 mutex_unlock(&memcg_limit_mutex);
2925 return ret;
2926}
2927
2928static int memcg_update_tcp_limit(struct mem_cgroup *memcg, unsigned long limit)
2929{
2930 int ret;
2931
2932 mutex_lock(&memcg_limit_mutex);
2933
2934 ret = page_counter_limit(&memcg->tcpmem, limit);
2935 if (ret)
2936 goto out;
2937
2938 if (!memcg->tcpmem_active) {
		/*
		 * The active flag needs to be written after the static_key
		 * update. This is what guarantees that the socket activation
		 * function is the last one to run. See sock_update_memcg() for
		 * details, and note that we don't mark any socket as belonging
		 * to this memcg until that flag is up.
		 *
		 * We need to do this, because static_keys will span multiple
		 * sites, but we can't control their order. If we mark a socket
		 * as accounted, but the accounting functions are not patched
		 * in yet, we'll lose accounting.
		 *
		 * We never race with the readers in sock_update_memcg(),
		 * because when this value changes, the code to process it is
		 * not patched in yet.
		 */
2955 static_branch_inc(&memcg_sockets_enabled_key);
2956 memcg->tcpmem_active = true;
2957 }
2958out:
2959 mutex_unlock(&memcg_limit_mutex);
2960 return ret;
2961}
2962
/*
 * Write handler for the limit and soft limit control files.  Values are
 * parsed with page_counter_memparse(), where "-1" means "no limit".
 */
2967static ssize_t mem_cgroup_write(struct kernfs_open_file *of,
2968 char *buf, size_t nbytes, loff_t off)
2969{
2970 struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of));
2971 unsigned long nr_pages;
2972 int ret;
2973
2974 buf = strstrip(buf);
2975 ret = page_counter_memparse(buf, "-1", &nr_pages);
2976 if (ret)
2977 return ret;
2978
2979 switch (MEMFILE_ATTR(of_cft(of)->private)) {
2980 case RES_LIMIT:
2981 if (mem_cgroup_is_root(memcg)) {
2982 ret = -EINVAL;
2983 break;
2984 }
2985 switch (MEMFILE_TYPE(of_cft(of)->private)) {
2986 case _MEM:
2987 ret = mem_cgroup_resize_limit(memcg, nr_pages);
2988 break;
2989 case _MEMSWAP:
2990 ret = mem_cgroup_resize_memsw_limit(memcg, nr_pages);
2991 break;
2992 case _KMEM:
2993 ret = memcg_update_kmem_limit(memcg, nr_pages);
2994 break;
2995 case _TCP:
2996 ret = memcg_update_tcp_limit(memcg, nr_pages);
2997 break;
2998 }
2999 break;
3000 case RES_SOFT_LIMIT:
3001 memcg->soft_limit = nr_pages;
3002 ret = 0;
3003 break;
3004 }
3005 return ret ?: nbytes;
3006}
3007
3008static ssize_t mem_cgroup_reset(struct kernfs_open_file *of, char *buf,
3009 size_t nbytes, loff_t off)
3010{
3011 struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of));
3012 struct page_counter *counter;
3013
3014 switch (MEMFILE_TYPE(of_cft(of)->private)) {
3015 case _MEM:
3016 counter = &memcg->memory;
3017 break;
3018 case _MEMSWAP:
3019 counter = &memcg->memsw;
3020 break;
3021 case _KMEM:
3022 counter = &memcg->kmem;
3023 break;
3024 case _TCP:
3025 counter = &memcg->tcpmem;
3026 break;
3027 default:
3028 BUG();
3029 }
3030
3031 switch (MEMFILE_ATTR(of_cft(of)->private)) {
3032 case RES_MAX_USAGE:
3033 page_counter_reset_watermark(counter);
3034 break;
3035 case RES_FAILCNT:
3036 counter->failcnt = 0;
3037 break;
3038 default:
3039 BUG();
3040 }
3041
3042 return nbytes;
3043}
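
/*
 * Illustrative usage (cgroup path invented); the written value is
 * ignored, the file name selects which statistic gets reset:
 *
 *	# echo 0 > /sys/fs/cgroup/memory/mygroup/memory.max_usage_in_bytes
 *	# echo 0 > /sys/fs/cgroup/memory/mygroup/memory.failcnt
 */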
3044
3045static u64 mem_cgroup_move_charge_read(struct cgroup_subsys_state *css,
3046 struct cftype *cft)
3047{
3048 return mem_cgroup_from_css(css)->move_charge_at_immigrate;
3049}
3050
3051#ifdef CONFIG_MMU
3052static int mem_cgroup_move_charge_write(struct cgroup_subsys_state *css,
3053 struct cftype *cft, u64 val)
3054{
3055 struct mem_cgroup *memcg = mem_cgroup_from_css(css);
3056
3057 if (val & ~MOVE_MASK)
3058 return -EINVAL;
3059
	/*
	 * No kind of locking is needed in here, because ->can_attach() will
	 * check this value once in the beginning of the process, and then
	 * carry on with stale data. This means that changes to this value
	 * will only affect task migrations starting after the change.
	 */
3066 memcg->move_charge_at_immigrate = val;
3067 return 0;
3068}
3069#else
3070static int mem_cgroup_move_charge_write(struct cgroup_subsys_state *css,
3071 struct cftype *cft, u64 val)
3072{
3073 return -ENOSYS;
3074}
3075#endif
3076
3077#ifdef CONFIG_NUMA
3078static int memcg_numa_stat_show(struct seq_file *m, void *v)
3079{
3080 struct numa_stat {
3081 const char *name;
3082 unsigned int lru_mask;
3083 };
3084
3085 static const struct numa_stat stats[] = {
3086 { "total", LRU_ALL },
3087 { "file", LRU_ALL_FILE },
3088 { "anon", LRU_ALL_ANON },
3089 { "unevictable", BIT(LRU_UNEVICTABLE) },
3090 };
3091 const struct numa_stat *stat;
3092 int nid;
3093 unsigned long nr;
3094 struct mem_cgroup *memcg = mem_cgroup_from_css(seq_css(m));
3095
3096 for (stat = stats; stat < stats + ARRAY_SIZE(stats); stat++) {
3097 nr = mem_cgroup_nr_lru_pages(memcg, stat->lru_mask);
3098 seq_printf(m, "%s=%lu", stat->name, nr);
3099 for_each_node_state(nid, N_MEMORY) {
3100 nr = mem_cgroup_node_nr_lru_pages(memcg, nid,
3101 stat->lru_mask);
3102 seq_printf(m, " N%d=%lu", nid, nr);
3103 }
3104 seq_putc(m, '\n');
3105 }
3106
3107 for (stat = stats; stat < stats + ARRAY_SIZE(stats); stat++) {
3108 struct mem_cgroup *iter;
3109
3110 nr = 0;
3111 for_each_mem_cgroup_tree(iter, memcg)
3112 nr += mem_cgroup_nr_lru_pages(iter, stat->lru_mask);
3113 seq_printf(m, "hierarchical_%s=%lu", stat->name, nr);
3114 for_each_node_state(nid, N_MEMORY) {
3115 nr = 0;
3116 for_each_mem_cgroup_tree(iter, memcg)
3117 nr += mem_cgroup_node_nr_lru_pages(
3118 iter, nid, stat->lru_mask);
3119 seq_printf(m, " N%d=%lu", nid, nr);
3120 }
3121 seq_putc(m, '\n');
3122 }
3123
3124 return 0;
3125}
3126#endif
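
/*
 * The memory.numa_stat output produced above has the following shape
 * (values invented for illustration, two-node machine):
 *
 *	total=365 N0=200 N1=165
 *	file=100 N0=60 N1=40
 *	anon=250 N0=135 N1=115
 *	unevictable=15 N0=5 N1=10
 *	hierarchical_total=365 N0=200 N1=165
 *	...
 */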
3127
3128static int memcg_stat_show(struct seq_file *m, void *v)
3129{
3130 struct mem_cgroup *memcg = mem_cgroup_from_css(seq_css(m));
3131 unsigned long memory, memsw;
3132 struct mem_cgroup *mi;
3133 unsigned int i;
3134
3135 BUILD_BUG_ON(ARRAY_SIZE(mem_cgroup_stat_names) !=
3136 MEM_CGROUP_STAT_NSTATS);
3137 BUILD_BUG_ON(ARRAY_SIZE(mem_cgroup_events_names) !=
3138 MEM_CGROUP_EVENTS_NSTATS);
3139 BUILD_BUG_ON(ARRAY_SIZE(mem_cgroup_lru_names) != NR_LRU_LISTS);
3140
3141 for (i = 0; i < MEM_CGROUP_STAT_NSTATS; i++) {
3142 if (i == MEM_CGROUP_STAT_SWAP && !do_memsw_account())
3143 continue;
3144 seq_printf(m, "%s %lu\n", mem_cgroup_stat_names[i],
3145 mem_cgroup_read_stat(memcg, i) * PAGE_SIZE);
3146 }
3147
3148 for (i = 0; i < MEM_CGROUP_EVENTS_NSTATS; i++)
3149 seq_printf(m, "%s %lu\n", mem_cgroup_events_names[i],
3150 mem_cgroup_read_events(memcg, i));
3151
3152 for (i = 0; i < NR_LRU_LISTS; i++)
3153 seq_printf(m, "%s %lu\n", mem_cgroup_lru_names[i],
3154 mem_cgroup_nr_lru_pages(memcg, BIT(i)) * PAGE_SIZE);
3155
	/* Hierarchical information */
3157 memory = memsw = PAGE_COUNTER_MAX;
3158 for (mi = memcg; mi; mi = parent_mem_cgroup(mi)) {
3159 memory = min(memory, mi->memory.limit);
3160 memsw = min(memsw, mi->memsw.limit);
3161 }
3162 seq_printf(m, "hierarchical_memory_limit %llu\n",
3163 (u64)memory * PAGE_SIZE);
3164 if (do_memsw_account())
3165 seq_printf(m, "hierarchical_memsw_limit %llu\n",
3166 (u64)memsw * PAGE_SIZE);
3167
3168 for (i = 0; i < MEM_CGROUP_STAT_NSTATS; i++) {
3169 unsigned long long val = 0;
3170
3171 if (i == MEM_CGROUP_STAT_SWAP && !do_memsw_account())
3172 continue;
3173 for_each_mem_cgroup_tree(mi, memcg)
3174 val += mem_cgroup_read_stat(mi, i) * PAGE_SIZE;
3175 seq_printf(m, "total_%s %llu\n", mem_cgroup_stat_names[i], val);
3176 }
3177
3178 for (i = 0; i < MEM_CGROUP_EVENTS_NSTATS; i++) {
3179 unsigned long long val = 0;
3180
3181 for_each_mem_cgroup_tree(mi, memcg)
3182 val += mem_cgroup_read_events(mi, i);
3183 seq_printf(m, "total_%s %llu\n",
3184 mem_cgroup_events_names[i], val);
3185 }
3186
3187 for (i = 0; i < NR_LRU_LISTS; i++) {
3188 unsigned long long val = 0;
3189
3190 for_each_mem_cgroup_tree(mi, memcg)
3191 val += mem_cgroup_nr_lru_pages(mi, BIT(i)) * PAGE_SIZE;
3192 seq_printf(m, "total_%s %llu\n", mem_cgroup_lru_names[i], val);
3193 }
3194
3195#ifdef CONFIG_DEBUG_VM
3196 {
3197 int nid, zid;
3198 struct mem_cgroup_per_zone *mz;
3199 struct zone_reclaim_stat *rstat;
3200 unsigned long recent_rotated[2] = {0, 0};
3201 unsigned long recent_scanned[2] = {0, 0};
3202
3203 for_each_online_node(nid)
3204 for (zid = 0; zid < MAX_NR_ZONES; zid++) {
3205 mz = &memcg->nodeinfo[nid]->zoneinfo[zid];
3206 rstat = &mz->lruvec.reclaim_stat;
3207
3208 recent_rotated[0] += rstat->recent_rotated[0];
3209 recent_rotated[1] += rstat->recent_rotated[1];
3210 recent_scanned[0] += rstat->recent_scanned[0];
3211 recent_scanned[1] += rstat->recent_scanned[1];
3212 }
3213 seq_printf(m, "recent_rotated_anon %lu\n", recent_rotated[0]);
3214 seq_printf(m, "recent_rotated_file %lu\n", recent_rotated[1]);
3215 seq_printf(m, "recent_scanned_anon %lu\n", recent_scanned[0]);
3216 seq_printf(m, "recent_scanned_file %lu\n", recent_scanned[1]);
3217 }
3218#endif
3219
3220 return 0;
3221}
3222
3223static u64 mem_cgroup_swappiness_read(struct cgroup_subsys_state *css,
3224 struct cftype *cft)
3225{
3226 struct mem_cgroup *memcg = mem_cgroup_from_css(css);
3227
3228 return mem_cgroup_swappiness(memcg);
3229}
3230
3231static int mem_cgroup_swappiness_write(struct cgroup_subsys_state *css,
3232 struct cftype *cft, u64 val)
3233{
3234 struct mem_cgroup *memcg = mem_cgroup_from_css(css);
3235
3236 if (val > 100)
3237 return -EINVAL;
3238
3239 if (css->parent)
3240 memcg->swappiness = val;
3241 else
3242 vm_swappiness = val;
3243
3244 return 0;
3245}
3246
3247static void __mem_cgroup_threshold(struct mem_cgroup *memcg, bool swap)
3248{
3249 struct mem_cgroup_threshold_ary *t;
3250 unsigned long usage;
3251 int i;
3252
3253 rcu_read_lock();
3254 if (!swap)
3255 t = rcu_dereference(memcg->thresholds.primary);
3256 else
3257 t = rcu_dereference(memcg->memsw_thresholds.primary);
3258
3259 if (!t)
3260 goto unlock;
3261
3262 usage = mem_cgroup_usage(memcg, swap);
3263
	/*
	 * current_threshold points to the threshold just below or equal to
	 * usage. If it's not true, a threshold was crossed after the last
	 * call of __mem_cgroup_threshold().
	 */
3269 i = t->current_threshold;
	/*
	 * Iterate backward over the array of thresholds starting from
	 * current_threshold and check if a threshold is crossed.
	 * If none of the thresholds below usage is crossed, we read
	 * the only one element of the array here.
	 */
3277 for (; i >= 0 && unlikely(t->entries[i].threshold > usage); i--)
3278 eventfd_signal(t->entries[i].eventfd, 1);
3279
	/* i = current_threshold + 1 */
3281 i++;
3282
	/*
	 * Iterate forward over the array of thresholds starting from
	 * current_threshold+1 and check if a threshold is crossed.
	 * If none of the thresholds above usage is crossed, we read
	 * the only one element of the array here.
	 */
3289 for (; i < t->size && unlikely(t->entries[i].threshold <= usage); i++)
3290 eventfd_signal(t->entries[i].eventfd, 1);
3291
	/* Update current_threshold */
3293 t->current_threshold = i - 1;
3294unlock:
3295 rcu_read_unlock();
3296}
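
/*
 * A worked example of the scan above, with invented numbers: with
 * thresholds {4M, 8M, 16M} and current_threshold == 1 (8M), a drop in
 * usage to 5M makes the backward loop signal the 8M entry and stop at
 * 4M; the forward loop finds nothing to signal; and current_threshold
 * ends up at 0 (4M), the largest threshold not above usage.
 */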
3297
3298static void mem_cgroup_threshold(struct mem_cgroup *memcg)
3299{
3300 while (memcg) {
3301 __mem_cgroup_threshold(memcg, false);
3302 if (do_memsw_account())
3303 __mem_cgroup_threshold(memcg, true);
3304
3305 memcg = parent_mem_cgroup(memcg);
3306 }
3307}
3308
3309static int compare_thresholds(const void *a, const void *b)
3310{
3311 const struct mem_cgroup_threshold *_a = a;
3312 const struct mem_cgroup_threshold *_b = b;
3313
3314 if (_a->threshold > _b->threshold)
3315 return 1;
3316
3317 if (_a->threshold < _b->threshold)
3318 return -1;
3319
3320 return 0;
3321}
3322
3323static int mem_cgroup_oom_notify_cb(struct mem_cgroup *memcg)
3324{
3325 struct mem_cgroup_eventfd_list *ev;
3326
3327 spin_lock(&memcg_oom_lock);
3328
3329 list_for_each_entry(ev, &memcg->oom_notify, list)
3330 eventfd_signal(ev->eventfd, 1);
3331
3332 spin_unlock(&memcg_oom_lock);
3333 return 0;
3334}
3335
3336static void mem_cgroup_oom_notify(struct mem_cgroup *memcg)
3337{
3338 struct mem_cgroup *iter;
3339
3340 for_each_mem_cgroup_tree(iter, memcg)
3341 mem_cgroup_oom_notify_cb(iter);
3342}
3343
3344static int __mem_cgroup_usage_register_event(struct mem_cgroup *memcg,
3345 struct eventfd_ctx *eventfd, const char *args, enum res_type type)
3346{
3347 struct mem_cgroup_thresholds *thresholds;
3348 struct mem_cgroup_threshold_ary *new;
3349 unsigned long threshold;
3350 unsigned long usage;
3351 int i, size, ret;
3352
3353 ret = page_counter_memparse(args, "-1", &threshold);
3354 if (ret)
3355 return ret;
3356
3357 mutex_lock(&memcg->thresholds_lock);
3358
3359 if (type == _MEM) {
3360 thresholds = &memcg->thresholds;
3361 usage = mem_cgroup_usage(memcg, false);
3362 } else if (type == _MEMSWAP) {
3363 thresholds = &memcg->memsw_thresholds;
3364 usage = mem_cgroup_usage(memcg, true);
3365 } else
3366 BUG();
3367
	/* Check if a threshold crossed before adding a new one */
3369 if (thresholds->primary)
3370 __mem_cgroup_threshold(memcg, type == _MEMSWAP);
3371
3372 size = thresholds->primary ? thresholds->primary->size + 1 : 1;
3373
	/* Allocate memory for new array of thresholds */
3375 new = kmalloc(sizeof(*new) + size * sizeof(struct mem_cgroup_threshold),
3376 GFP_KERNEL);
3377 if (!new) {
3378 ret = -ENOMEM;
3379 goto unlock;
3380 }
3381 new->size = size;
3382
	/* Copy thresholds (if any) to new array */
3384 if (thresholds->primary) {
3385 memcpy(new->entries, thresholds->primary->entries, (size - 1) *
3386 sizeof(struct mem_cgroup_threshold));
3387 }
3388
	/* Add new threshold */
3390 new->entries[size - 1].eventfd = eventfd;
3391 new->entries[size - 1].threshold = threshold;
3392
	/* Sort thresholds. Registering of new threshold isn't time-critical */
3394 sort(new->entries, size, sizeof(struct mem_cgroup_threshold),
3395 compare_thresholds, NULL);
3396
	/* Find current threshold */
3398 new->current_threshold = -1;
3399 for (i = 0; i < size; i++) {
3400 if (new->entries[i].threshold <= usage) {
			/*
			 * new->current_threshold will not be used until
			 * rcu_assign_pointer(), so it's safe to increment
			 * it here.
			 */
3406 ++new->current_threshold;
3407 } else
3408 break;
3409 }
3410
	/* Free old spare buffer and save old primary buffer as spare */
3412 kfree(thresholds->spare);
3413 thresholds->spare = thresholds->primary;
3414
3415 rcu_assign_pointer(thresholds->primary, new);
3416
	/* To be sure that nobody uses thresholds */
3418 synchronize_rcu();
3419
3420unlock:
3421 mutex_unlock(&memcg->thresholds_lock);
3422
3423 return ret;
3424}
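
/*
 * The primary/spare pair manipulated above is a double-buffering
 * scheme: readers dereference ->primary under RCU, so an update builds
 * a complete new array, publishes it with rcu_assign_pointer(), waits
 * out readers with synchronize_rcu(), and keeps the old primary around
 * as the next spare instead of freeing it immediately.
 */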
3425
3426static int mem_cgroup_usage_register_event(struct mem_cgroup *memcg,
3427 struct eventfd_ctx *eventfd, const char *args)
3428{
3429 return __mem_cgroup_usage_register_event(memcg, eventfd, args, _MEM);
3430}
3431
3432static int memsw_cgroup_usage_register_event(struct mem_cgroup *memcg,
3433 struct eventfd_ctx *eventfd, const char *args)
3434{
3435 return __mem_cgroup_usage_register_event(memcg, eventfd, args, _MEMSWAP);
3436}
3437
3438static void __mem_cgroup_usage_unregister_event(struct mem_cgroup *memcg,
3439 struct eventfd_ctx *eventfd, enum res_type type)
3440{
3441 struct mem_cgroup_thresholds *thresholds;
3442 struct mem_cgroup_threshold_ary *new;
3443 unsigned long usage;
3444 int i, j, size;
3445
3446 mutex_lock(&memcg->thresholds_lock);
3447
3448 if (type == _MEM) {
3449 thresholds = &memcg->thresholds;
3450 usage = mem_cgroup_usage(memcg, false);
3451 } else if (type == _MEMSWAP) {
3452 thresholds = &memcg->memsw_thresholds;
3453 usage = mem_cgroup_usage(memcg, true);
3454 } else
3455 BUG();
3456
3457 if (!thresholds->primary)
3458 goto unlock;
3459
	/* Check if a threshold crossed before removing */
3461 __mem_cgroup_threshold(memcg, type == _MEMSWAP);
3462
	/* Calculate the new number of thresholds */
3464 size = 0;
3465 for (i = 0; i < thresholds->primary->size; i++) {
3466 if (thresholds->primary->entries[i].eventfd != eventfd)
3467 size++;
3468 }
3469
3470 new = thresholds->spare;
3471
	/* Set thresholds array to NULL if we don't have thresholds */
3473 if (!size) {
3474 kfree(new);
3475 new = NULL;
3476 goto swap_buffers;
3477 }
3478
3479 new->size = size;
3480
	/* Copy thresholds and find current threshold */
3482 new->current_threshold = -1;
3483 for (i = 0, j = 0; i < thresholds->primary->size; i++) {
3484 if (thresholds->primary->entries[i].eventfd == eventfd)
3485 continue;
3486
3487 new->entries[j] = thresholds->primary->entries[i];
3488 if (new->entries[j].threshold <= usage) {
			/*
			 * new->current_threshold will not be used
			 * until rcu_assign_pointer(), so it's safe to
			 * increment it here.
			 */
3494 ++new->current_threshold;
3495 }
3496 j++;
3497 }
3498
3499swap_buffers:
	/* Swap primary and spare array */
3501 thresholds->spare = thresholds->primary;
3502
3503 rcu_assign_pointer(thresholds->primary, new);
3504
	/* To be sure that nobody uses thresholds */
3506 synchronize_rcu();
3507
	/* If all events are unregistered, free the spare array */
3509 if (!new) {
3510 kfree(thresholds->spare);
3511 thresholds->spare = NULL;
3512 }
3513unlock:
3514 mutex_unlock(&memcg->thresholds_lock);
3515}
3516
3517static void mem_cgroup_usage_unregister_event(struct mem_cgroup *memcg,
3518 struct eventfd_ctx *eventfd)
3519{
3520 return __mem_cgroup_usage_unregister_event(memcg, eventfd, _MEM);
3521}
3522
3523static void memsw_cgroup_usage_unregister_event(struct mem_cgroup *memcg,
3524 struct eventfd_ctx *eventfd)
3525{
3526 return __mem_cgroup_usage_unregister_event(memcg, eventfd, _MEMSWAP);
3527}
3528
3529static int mem_cgroup_oom_register_event(struct mem_cgroup *memcg,
3530 struct eventfd_ctx *eventfd, const char *args)
3531{
3532 struct mem_cgroup_eventfd_list *event;
3533
3534 event = kmalloc(sizeof(*event), GFP_KERNEL);
3535 if (!event)
3536 return -ENOMEM;
3537
3538 spin_lock(&memcg_oom_lock);
3539
3540 event->eventfd = eventfd;
3541 list_add(&event->list, &memcg->oom_notify);
3542
	/* already in OOM ? */
3544 if (memcg->under_oom)
3545 eventfd_signal(eventfd, 1);
3546 spin_unlock(&memcg_oom_lock);
3547
3548 return 0;
3549}
3550
3551static void mem_cgroup_oom_unregister_event(struct mem_cgroup *memcg,
3552 struct eventfd_ctx *eventfd)
3553{
3554 struct mem_cgroup_eventfd_list *ev, *tmp;
3555
3556 spin_lock(&memcg_oom_lock);
3557
3558 list_for_each_entry_safe(ev, tmp, &memcg->oom_notify, list) {
3559 if (ev->eventfd == eventfd) {
3560 list_del(&ev->list);
3561 kfree(ev);
3562 }
3563 }
3564
3565 spin_unlock(&memcg_oom_lock);
3566}
3567
3568static int mem_cgroup_oom_control_read(struct seq_file *sf, void *v)
3569{
3570 struct mem_cgroup *memcg = mem_cgroup_from_css(seq_css(sf));
3571
3572 seq_printf(sf, "oom_kill_disable %d\n", memcg->oom_kill_disable);
3573 seq_printf(sf, "under_oom %d\n", (bool)memcg->under_oom);
3574 return 0;
3575}
3576
3577static int mem_cgroup_oom_control_write(struct cgroup_subsys_state *css,
3578 struct cftype *cft, u64 val)
3579{
3580 struct mem_cgroup *memcg = mem_cgroup_from_css(css);
3581
	/* cannot set to root cgroup and only 0 and 1 are allowed */
3583 if (!css->parent || !((val == 0) || (val == 1)))
3584 return -EINVAL;
3585
3586 memcg->oom_kill_disable = val;
3587 if (!val)
3588 memcg_oom_recover(memcg);
3589
3590 return 0;
3591}
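
/*
 * Illustrative usage (cgroup path invented).  With oom_kill_disable
 * set, tasks that hit the limit wait for memory instead of being
 * killed, which is why clearing it calls memcg_oom_recover():
 *
 *	# echo 1 > /sys/fs/cgroup/memory/mygroup/memory.oom_control
 *	# cat /sys/fs/cgroup/memory/mygroup/memory.oom_control
 *	oom_kill_disable 1
 *	under_oom 0
 */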
3592
3593#ifdef CONFIG_CGROUP_WRITEBACK
3594
3595struct list_head *mem_cgroup_cgwb_list(struct mem_cgroup *memcg)
3596{
3597 return &memcg->cgwb_list;
3598}
3599
3600static int memcg_wb_domain_init(struct mem_cgroup *memcg, gfp_t gfp)
3601{
3602 return wb_domain_init(&memcg->cgwb_domain, gfp);
3603}
3604
3605static void memcg_wb_domain_exit(struct mem_cgroup *memcg)
3606{
3607 wb_domain_exit(&memcg->cgwb_domain);
3608}
3609
3610static void memcg_wb_domain_size_changed(struct mem_cgroup *memcg)
3611{
3612 wb_domain_size_changed(&memcg->cgwb_domain);
3613}
3614
3615struct wb_domain *mem_cgroup_wb_domain(struct bdi_writeback *wb)
3616{
3617 struct mem_cgroup *memcg = mem_cgroup_from_css(wb->memcg_css);
3618
3619 if (!memcg->css.parent)
3620 return NULL;
3621
3622 return &memcg->cgwb_domain;
3623}
3624
/**
 * mem_cgroup_wb_stats - retrieve writeback related stats from its memcg
 * @wb: bdi_writeback in question
 * @pfilepages: out parameter for number of file pages
 * @pheadroom: out parameter for number of allocatable pages according to memcg
 * @pdirty: out parameter for number of dirty pages
 * @pwriteback: out parameter for number of pages under writeback
 *
 * Determine the numbers of file, headroom, dirty, and writeback pages in
 * @wb's memcg.  File, dirty and writeback are self-explanatory.  Headroom
 * is a bit more involved.
 *
 * A memcg's headroom is "min(max, high) - used".  In the hierarchy, the
 * headroom is calculated as the lowest headroom of itself and the
 * ancestors.  Note that this doesn't consider the actual amount of
 * available memory in the system.  The caller should further cap
 * *@pheadroom accordingly.
 */
3643void mem_cgroup_wb_stats(struct bdi_writeback *wb, unsigned long *pfilepages,
3644 unsigned long *pheadroom, unsigned long *pdirty,
3645 unsigned long *pwriteback)
3646{
3647 struct mem_cgroup *memcg = mem_cgroup_from_css(wb->memcg_css);
3648 struct mem_cgroup *parent;
3649
3650 *pdirty = mem_cgroup_read_stat(memcg, MEM_CGROUP_STAT_DIRTY);
3651
	/* this should eventually include NR_UNSTABLE_NFS */
3653 *pwriteback = mem_cgroup_read_stat(memcg, MEM_CGROUP_STAT_WRITEBACK);
3654 *pfilepages = mem_cgroup_nr_lru_pages(memcg, (1 << LRU_INACTIVE_FILE) |
3655 (1 << LRU_ACTIVE_FILE));
3656 *pheadroom = PAGE_COUNTER_MAX;
3657
3658 while ((parent = parent_mem_cgroup(memcg))) {
3659 unsigned long ceiling = min(memcg->memory.limit, memcg->high);
3660 unsigned long used = page_counter_read(&memcg->memory);
3661
3662 *pheadroom = min(*pheadroom, ceiling - min(ceiling, used));
3663 memcg = parent;
3664 }
3665}
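
/*
 * A worked headroom example with invented numbers: if the bdi_writeback
 * belongs to a memcg with limit 1G, high 512M and 384M used, whose
 * parent (a child of root, with no high limit) has limit 768M and 700M
 * used, the loop above computes min(512M - 384M, 768M - 700M) = 68M,
 * i.e. the tightest "min(limit, high) - used" in the ancestry wins.
 */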
3666
3667#else
3668
3669static int memcg_wb_domain_init(struct mem_cgroup *memcg, gfp_t gfp)
3670{
3671 return 0;
3672}
3673
3674static void memcg_wb_domain_exit(struct mem_cgroup *memcg)
3675{
3676}
3677
3678static void memcg_wb_domain_size_changed(struct mem_cgroup *memcg)
3679{
3680}
3681
3682#endif
3683
/*
 * DO NOT USE IN NEW FILES.
 *
 * "cgroup.event_control" implementation.
 *
 * This is way over-engineered.  It tries to support fully configurable
 * events for each user.  Such level of flexibility is completely
 * unnecessary especially in the light of the planned unified hierarchy.
 *
 * Please deprecate this and replace with something simpler.  Use of this
 * facility should be restricted to code shared between per-controller
 * event mechanism and otherwise lifted out of this file.
 */

/*
 * Unregister event and free resources.
 *
 * Gets called from workqueue.
 */
3702static void memcg_event_remove(struct work_struct *work)
3703{
3704 struct mem_cgroup_event *event =
3705 container_of(work, struct mem_cgroup_event, remove);
3706 struct mem_cgroup *memcg = event->memcg;
3707
3708 remove_wait_queue(event->wqh, &event->wait);
3709
3710 event->unregister_event(memcg, event->eventfd);
3711
	/* Notify userspace the event is going away. */
3713 eventfd_signal(event->eventfd, 1);
3714
3715 eventfd_ctx_put(event->eventfd);
3716 kfree(event);
3717 css_put(&memcg->css);
3718}
3719
/*
 * Gets called on POLLHUP on eventfd when user closes it.
 *
 * Called with wqh->lock held and interrupts disabled.
 */
3725static int memcg_event_wake(wait_queue_t *wait, unsigned mode,
3726 int sync, void *key)
3727{
3728 struct mem_cgroup_event *event =
3729 container_of(wait, struct mem_cgroup_event, wait);
3730 struct mem_cgroup *memcg = event->memcg;
3731 unsigned long flags = (unsigned long)key;
3732
3733 if (flags & POLLHUP) {
		/*
		 * If the event has been detached at cgroup removal, we
		 * can simply return knowing the other side will cleanup
		 * for us.
		 *
		 * We can't race against event freeing since the other
		 * side will require wqh->lock via remove_wait_queue(),
		 * which we hold.
		 */
3743 spin_lock(&memcg->event_list_lock);
3744 if (!list_empty(&event->list)) {
3745 list_del_init(&event->list);
			/*
			 * We are in atomic context, but memcg_event_remove()
			 * may sleep, so we have to call it in a workqueue.
			 */
3750 schedule_work(&event->remove);
3751 }
3752 spin_unlock(&memcg->event_list_lock);
3753 }
3754
3755 return 0;
3756}
3757
3758static void memcg_event_ptable_queue_proc(struct file *file,
3759 wait_queue_head_t *wqh, poll_table *pt)
3760{
3761 struct mem_cgroup_event *event =
3762 container_of(pt, struct mem_cgroup_event, pt);
3763
3764 event->wqh = wqh;
3765 add_wait_queue(wqh, &event->wait);
3766}
3767
/*
 * DO NOT USE IN NEW FILES.
 *
 * Parse input and register new cgroup event handler.
 *
 * Input must be in format '<event_fd> <control_fd> <args>'.
 * Interpretation of args is defined by control file implementation.
 */
3776static ssize_t memcg_write_event_control(struct kernfs_open_file *of,
3777 char *buf, size_t nbytes, loff_t off)
3778{
3779 struct cgroup_subsys_state *css = of_css(of);
3780 struct mem_cgroup *memcg = mem_cgroup_from_css(css);
3781 struct mem_cgroup_event *event;
3782 struct cgroup_subsys_state *cfile_css;
3783 unsigned int efd, cfd;
3784 struct fd efile;
3785 struct fd cfile;
3786 const char *name;
3787 char *endp;
3788 int ret;
3789
3790 buf = strstrip(buf);
3791
3792 efd = simple_strtoul(buf, &endp, 10);
3793 if (*endp != ' ')
3794 return -EINVAL;
3795 buf = endp + 1;
3796
3797 cfd = simple_strtoul(buf, &endp, 10);
3798 if ((*endp != ' ') && (*endp != '\0'))
3799 return -EINVAL;
3800 buf = endp + 1;
3801
3802 event = kzalloc(sizeof(*event), GFP_KERNEL);
3803 if (!event)
3804 return -ENOMEM;
3805
3806 event->memcg = memcg;
3807 INIT_LIST_HEAD(&event->list);
3808 init_poll_funcptr(&event->pt, memcg_event_ptable_queue_proc);
3809 init_waitqueue_func_entry(&event->wait, memcg_event_wake);
3810 INIT_WORK(&event->remove, memcg_event_remove);
3811
3812 efile = fdget(efd);
3813 if (!efile.file) {
3814 ret = -EBADF;
3815 goto out_kfree;
3816 }
3817
3818 event->eventfd = eventfd_ctx_fileget(efile.file);
3819 if (IS_ERR(event->eventfd)) {
3820 ret = PTR_ERR(event->eventfd);
3821 goto out_put_efile;
3822 }
3823
3824 cfile = fdget(cfd);
3825 if (!cfile.file) {
3826 ret = -EBADF;
3827 goto out_put_eventfd;
3828 }
3829
	/* the process needs read permission on the control file */
	/* AV: shouldn't we check that it's been opened for read instead? */
3832 ret = inode_permission(file_inode(cfile.file), MAY_READ);
3833 if (ret < 0)
3834 goto out_put_cfile;
3835
	/*
	 * Determine the event callbacks and set them in @event.  This used
	 * to be done via struct cftype but cgroup core no longer knows
	 * about these events.  The following is crude but the whole thing
	 * is for compatibility anyway.
	 *
	 * DO NOT ADD NEW FILES.
	 */
3844 name = cfile.file->f_path.dentry->d_name.name;
3845
3846 if (!strcmp(name, "memory.usage_in_bytes")) {
3847 event->register_event = mem_cgroup_usage_register_event;
3848 event->unregister_event = mem_cgroup_usage_unregister_event;
3849 } else if (!strcmp(name, "memory.oom_control")) {
3850 event->register_event = mem_cgroup_oom_register_event;
3851 event->unregister_event = mem_cgroup_oom_unregister_event;
3852 } else if (!strcmp(name, "memory.pressure_level")) {
3853 event->register_event = vmpressure_register_event;
3854 event->unregister_event = vmpressure_unregister_event;
3855 } else if (!strcmp(name, "memory.memsw.usage_in_bytes")) {
3856 event->register_event = memsw_cgroup_usage_register_event;
3857 event->unregister_event = memsw_cgroup_usage_unregister_event;
3858 } else {
3859 ret = -EINVAL;
3860 goto out_put_cfile;
3861 }
3862
	/*
	 * Verify @cfile should belong to @css.  Also, remaining events are
	 * automatically removed on cgroup destruction but the removal is
	 * asynchronous, so take an extra ref on @css.
	 */
3868 cfile_css = css_tryget_online_from_dir(cfile.file->f_path.dentry->d_parent,
3869 &memory_cgrp_subsys);
3870 ret = -EINVAL;
3871 if (IS_ERR(cfile_css))
3872 goto out_put_cfile;
3873 if (cfile_css != css) {
3874 css_put(cfile_css);
3875 goto out_put_cfile;
3876 }
3877
3878 ret = event->register_event(memcg, event->eventfd, buf);
3879 if (ret)
3880 goto out_put_css;
3881
3882 efile.file->f_op->poll(efile.file, &event->pt);
3883
3884 spin_lock(&memcg->event_list_lock);
3885 list_add(&event->list, &memcg->event_list);
3886 spin_unlock(&memcg->event_list_lock);
3887
3888 fdput(cfile);
3889 fdput(efile);
3890
3891 return nbytes;
3892
3893out_put_css:
3894 css_put(css);
3895out_put_cfile:
3896 fdput(cfile);
3897out_put_eventfd:
3898 eventfd_ctx_put(event->eventfd);
3899out_put_efile:
3900 fdput(efile);
3901out_kfree:
3902 kfree(event);
3903
3904 return ret;
3905}
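
/*
 * An illustrative userspace sequence for the parser above (names and
 * the 50M argument invented):
 *
 *	int efd = eventfd(0, 0);
 *	int cfd = open("memory.usage_in_bytes", O_RDONLY);
 *	char cmd[64];
 *	int len = snprintf(cmd, sizeof(cmd), "%d %d 50M", efd, cfd);
 *	write(ctl, cmd, len);	-- ctl is an open cgroup.event_control fd
 *
 * Afterwards, reads on efd complete whenever usage crosses the 50M
 * threshold in either direction.
 */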
3906
3907static struct cftype mem_cgroup_legacy_files[] = {
3908 {
3909 .name = "usage_in_bytes",
3910 .private = MEMFILE_PRIVATE(_MEM, RES_USAGE),
3911 .read_u64 = mem_cgroup_read_u64,
3912 },
3913 {
3914 .name = "max_usage_in_bytes",
3915 .private = MEMFILE_PRIVATE(_MEM, RES_MAX_USAGE),
3916 .write = mem_cgroup_reset,
3917 .read_u64 = mem_cgroup_read_u64,
3918 },
3919 {
3920 .name = "limit_in_bytes",
3921 .private = MEMFILE_PRIVATE(_MEM, RES_LIMIT),
3922 .write = mem_cgroup_write,
3923 .read_u64 = mem_cgroup_read_u64,
3924 },
3925 {
3926 .name = "soft_limit_in_bytes",
3927 .private = MEMFILE_PRIVATE(_MEM, RES_SOFT_LIMIT),
3928 .write = mem_cgroup_write,
3929 .read_u64 = mem_cgroup_read_u64,
3930 },
3931 {
3932 .name = "failcnt",
3933 .private = MEMFILE_PRIVATE(_MEM, RES_FAILCNT),
3934 .write = mem_cgroup_reset,
3935 .read_u64 = mem_cgroup_read_u64,
3936 },
3937 {
3938 .name = "stat",
3939 .seq_show = memcg_stat_show,
3940 },
3941 {
3942 .name = "force_empty",
3943 .write = mem_cgroup_force_empty_write,
3944 },
3945 {
3946 .name = "use_hierarchy",
3947 .write_u64 = mem_cgroup_hierarchy_write,
3948 .read_u64 = mem_cgroup_hierarchy_read,
3949 },
3950 {
3951 .name = "cgroup.event_control",
3952 .write = memcg_write_event_control,
3953 .flags = CFTYPE_NO_PREFIX | CFTYPE_WORLD_WRITABLE,
3954 },
3955 {
3956 .name = "swappiness",
3957 .read_u64 = mem_cgroup_swappiness_read,
3958 .write_u64 = mem_cgroup_swappiness_write,
3959 },
3960 {
3961 .name = "move_charge_at_immigrate",
3962 .read_u64 = mem_cgroup_move_charge_read,
3963 .write_u64 = mem_cgroup_move_charge_write,
3964 },
3965 {
3966 .name = "oom_control",
3967 .seq_show = mem_cgroup_oom_control_read,
3968 .write_u64 = mem_cgroup_oom_control_write,
3969 .private = MEMFILE_PRIVATE(_OOM_TYPE, OOM_CONTROL),
3970 },
3971 {
3972 .name = "pressure_level",
3973 },
3974#ifdef CONFIG_NUMA
3975 {
3976 .name = "numa_stat",
3977 .seq_show = memcg_numa_stat_show,
3978 },
3979#endif
3980 {
3981 .name = "kmem.limit_in_bytes",
3982 .private = MEMFILE_PRIVATE(_KMEM, RES_LIMIT),
3983 .write = mem_cgroup_write,
3984 .read_u64 = mem_cgroup_read_u64,
3985 },
3986 {
3987 .name = "kmem.usage_in_bytes",
3988 .private = MEMFILE_PRIVATE(_KMEM, RES_USAGE),
3989 .read_u64 = mem_cgroup_read_u64,
3990 },
3991 {
3992 .name = "kmem.failcnt",
3993 .private = MEMFILE_PRIVATE(_KMEM, RES_FAILCNT),
3994 .write = mem_cgroup_reset,
3995 .read_u64 = mem_cgroup_read_u64,
3996 },
3997 {
3998 .name = "kmem.max_usage_in_bytes",
3999 .private = MEMFILE_PRIVATE(_KMEM, RES_MAX_USAGE),
4000 .write = mem_cgroup_reset,
4001 .read_u64 = mem_cgroup_read_u64,
4002 },
4003#ifdef CONFIG_SLABINFO
4004 {
4005 .name = "kmem.slabinfo",
4006 .seq_start = slab_start,
4007 .seq_next = slab_next,
4008 .seq_stop = slab_stop,
4009 .seq_show = memcg_slab_show,
4010 },
4011#endif
4012 {
4013 .name = "kmem.tcp.limit_in_bytes",
4014 .private = MEMFILE_PRIVATE(_TCP, RES_LIMIT),
4015 .write = mem_cgroup_write,
4016 .read_u64 = mem_cgroup_read_u64,
4017 },
4018 {
4019 .name = "kmem.tcp.usage_in_bytes",
4020 .private = MEMFILE_PRIVATE(_TCP, RES_USAGE),
4021 .read_u64 = mem_cgroup_read_u64,
4022 },
4023 {
4024 .name = "kmem.tcp.failcnt",
4025 .private = MEMFILE_PRIVATE(_TCP, RES_FAILCNT),
4026 .write = mem_cgroup_reset,
4027 .read_u64 = mem_cgroup_read_u64,
4028 },
4029 {
4030 .name = "kmem.tcp.max_usage_in_bytes",
4031 .private = MEMFILE_PRIVATE(_TCP, RES_MAX_USAGE),
4032 .write = mem_cgroup_reset,
4033 .read_u64 = mem_cgroup_read_u64,
4034 },
4035 { },
4036};
4037
4038static int alloc_mem_cgroup_per_zone_info(struct mem_cgroup *memcg, int node)
4039{
4040 struct mem_cgroup_per_node *pn;
4041 struct mem_cgroup_per_zone *mz;
4042 int zone, tmp = node;
4043
	/*
	 * This routine is called against possible nodes.
	 * But it's BUG to call kmalloc() against offline node.
	 *
	 * TODO: this routine can waste much memory for nodes which will
	 *       never be onlined. It's better to use memory hotplug callback
	 *       function.
	 */
4051 if (!node_state(node, N_NORMAL_MEMORY))
4052 tmp = -1;
4053 pn = kzalloc_node(sizeof(*pn), GFP_KERNEL, tmp);
4054 if (!pn)
4055 return 1;
4056
4057 for (zone = 0; zone < MAX_NR_ZONES; zone++) {
4058 mz = &pn->zoneinfo[zone];
4059 lruvec_init(&mz->lruvec);
4060 mz->usage_in_excess = 0;
4061 mz->on_tree = false;
4062 mz->memcg = memcg;
4063 }
4064 memcg->nodeinfo[node] = pn;
4065 return 0;
4066}
4067
4068static void free_mem_cgroup_per_zone_info(struct mem_cgroup *memcg, int node)
4069{
4070 kfree(memcg->nodeinfo[node]);
4071}
4072
4073static void mem_cgroup_free(struct mem_cgroup *memcg)
4074{
4075 int node;
4076
4077 memcg_wb_domain_exit(memcg);
4078 for_each_node(node)
4079 free_mem_cgroup_per_zone_info(memcg, node);
4080 free_percpu(memcg->stat);
4081 kfree(memcg);
4082}
4083
4084static struct mem_cgroup *mem_cgroup_alloc(void)
4085{
4086 struct mem_cgroup *memcg;
4087 size_t size;
4088 int node;
4089
4090 size = sizeof(struct mem_cgroup);
4091 size += nr_node_ids * sizeof(struct mem_cgroup_per_node *);
4092
4093 memcg = kzalloc(size, GFP_KERNEL);
4094 if (!memcg)
4095 return NULL;
4096
4097 memcg->stat = alloc_percpu(struct mem_cgroup_stat_cpu);
4098 if (!memcg->stat)
4099 goto fail;
4100
4101 for_each_node(node)
4102 if (alloc_mem_cgroup_per_zone_info(memcg, node))
4103 goto fail;
4104
4105 if (memcg_wb_domain_init(memcg, GFP_KERNEL))
4106 goto fail;
4107
4108 INIT_WORK(&memcg->high_work, high_work_func);
4109 memcg->last_scanned_node = MAX_NUMNODES;
4110 INIT_LIST_HEAD(&memcg->oom_notify);
4111 mutex_init(&memcg->thresholds_lock);
4112 spin_lock_init(&memcg->move_lock);
4113 vmpressure_init(&memcg->vmpressure);
4114 INIT_LIST_HEAD(&memcg->event_list);
4115 spin_lock_init(&memcg->event_list_lock);
4116 memcg->socket_pressure = jiffies;
4117#ifndef CONFIG_SLOB
4118 memcg->kmemcg_id = -1;
4119#endif
4120#ifdef CONFIG_CGROUP_WRITEBACK
4121 INIT_LIST_HEAD(&memcg->cgwb_list);
4122#endif
4123 return memcg;
4124fail:
4125 mem_cgroup_free(memcg);
4126 return NULL;
4127}
4128
4129static struct cgroup_subsys_state * __ref
4130mem_cgroup_css_alloc(struct cgroup_subsys_state *parent_css)
4131{
4132 struct mem_cgroup *parent = mem_cgroup_from_css(parent_css);
4133 struct mem_cgroup *memcg;
4134 long error = -ENOMEM;
4135
4136 memcg = mem_cgroup_alloc();
4137 if (!memcg)
4138 return ERR_PTR(error);
4139
4140 memcg->high = PAGE_COUNTER_MAX;
4141 memcg->soft_limit = PAGE_COUNTER_MAX;
4142 if (parent) {
4143 memcg->swappiness = mem_cgroup_swappiness(parent);
4144 memcg->oom_kill_disable = parent->oom_kill_disable;
4145 }
4146 if (parent && parent->use_hierarchy) {
4147 memcg->use_hierarchy = true;
4148 page_counter_init(&memcg->memory, &parent->memory);
4149 page_counter_init(&memcg->swap, &parent->swap);
4150 page_counter_init(&memcg->memsw, &parent->memsw);
4151 page_counter_init(&memcg->kmem, &parent->kmem);
4152 page_counter_init(&memcg->tcpmem, &parent->tcpmem);
4153 } else {
4154 page_counter_init(&memcg->memory, NULL);
4155 page_counter_init(&memcg->swap, NULL);
4156 page_counter_init(&memcg->memsw, NULL);
4157 page_counter_init(&memcg->kmem, NULL);
4158 page_counter_init(&memcg->tcpmem, NULL);
		/*
		 * Deeper hierarchy with use_hierarchy == false doesn't make
		 * much sense so let cgroup subsystem know about this
		 * unexpected state we are about to hit.
		 */
4164 if (parent != root_mem_cgroup)
4165 memory_cgrp_subsys.broken_hierarchy = true;
4166 }
4167
	/* The following stuff does not apply to the root */
4169 if (!parent) {
4170 root_mem_cgroup = memcg;
4171 return &memcg->css;
4172 }
4173
4174 error = memcg_online_kmem(memcg);
4175 if (error)
4176 goto fail;
4177
4178 if (cgroup_subsys_on_dfl(memory_cgrp_subsys) && !cgroup_memory_nosocket)
4179 static_branch_inc(&memcg_sockets_enabled_key);
4180
4181 return &memcg->css;
4182fail:
4183 mem_cgroup_free(memcg);
4184 return NULL;
4185}
4186
4187static int
4188mem_cgroup_css_online(struct cgroup_subsys_state *css)
4189{
4190 if (css->id > MEM_CGROUP_ID_MAX)
4191 return -ENOSPC;
4192
4193 return 0;
4194}
4195
4196static void mem_cgroup_css_offline(struct cgroup_subsys_state *css)
4197{
4198 struct mem_cgroup *memcg = mem_cgroup_from_css(css);
4199 struct mem_cgroup_event *event, *tmp;
4200
	/*
	 * Unregister events and notify userspace.
	 * Notify userspace about cgroup removing only after rmdir of cgroup
	 * directory to avoid race between userspace and kernelspace.
	 */
4206 spin_lock(&memcg->event_list_lock);
4207 list_for_each_entry_safe(event, tmp, &memcg->event_list, list) {
4208 list_del_init(&event->list);
4209 schedule_work(&event->remove);
4210 }
4211 spin_unlock(&memcg->event_list_lock);
4212
4213 memcg_offline_kmem(memcg);
4214 wb_memcg_offline(memcg);
4215}
4216
4217static void mem_cgroup_css_released(struct cgroup_subsys_state *css)
4218{
4219 struct mem_cgroup *memcg = mem_cgroup_from_css(css);
4220
4221 invalidate_reclaim_iterators(memcg);
4222}
4223
4224static void mem_cgroup_css_free(struct cgroup_subsys_state *css)
4225{
4226 struct mem_cgroup *memcg = mem_cgroup_from_css(css);
4227
4228 if (cgroup_subsys_on_dfl(memory_cgrp_subsys) && !cgroup_memory_nosocket)
4229 static_branch_dec(&memcg_sockets_enabled_key);
4230
4231 if (!cgroup_subsys_on_dfl(memory_cgrp_subsys) && memcg->tcpmem_active)
4232 static_branch_dec(&memcg_sockets_enabled_key);
4233
4234 vmpressure_cleanup(&memcg->vmpressure);
4235 cancel_work_sync(&memcg->high_work);
4236 mem_cgroup_remove_from_trees(memcg);
4237 memcg_free_kmem(memcg);
4238 mem_cgroup_free(memcg);
4239}
4240
/**
 * mem_cgroup_css_reset - reset the states of a mem_cgroup
 * @css: the target css
 *
 * Reset the states of the mem_cgroup associated with @css.  This is
 * invoked when the userland requests disabling on the default hierarchy
 * but the memcg is pinned through dependency.  The memcg should stop
 * applying policies and should revert to the vanilla state as it may be
 * made visible again.
 *
 * The current implementation only resets the essential configurations.
 * This needs to be expanded to cover all the visible parts.
 */
4254static void mem_cgroup_css_reset(struct cgroup_subsys_state *css)
4255{
4256 struct mem_cgroup *memcg = mem_cgroup_from_css(css);
4257
4258 page_counter_limit(&memcg->memory, PAGE_COUNTER_MAX);
4259 page_counter_limit(&memcg->swap, PAGE_COUNTER_MAX);
4260 page_counter_limit(&memcg->memsw, PAGE_COUNTER_MAX);
4261 page_counter_limit(&memcg->kmem, PAGE_COUNTER_MAX);
4262 page_counter_limit(&memcg->tcpmem, PAGE_COUNTER_MAX);
4263 memcg->low = 0;
4264 memcg->high = PAGE_COUNTER_MAX;
4265 memcg->soft_limit = PAGE_COUNTER_MAX;
4266 memcg_wb_domain_size_changed(memcg);
4267}
4268
4269#ifdef CONFIG_MMU
/* Handlers for move charge at task migration. */
4271static int mem_cgroup_do_precharge(unsigned long count)
4272{
4273 int ret;
4274
	/* Try a single bulk charge without reclaim first, kswapd may wake */
4276 ret = try_charge(mc.to, GFP_KERNEL & ~__GFP_DIRECT_RECLAIM, count);
4277 if (!ret) {
4278 mc.precharge += count;
4279 return ret;
4280 }
4281
	/* Try charges one by one with reclaim */
4283 while (count--) {
4284 ret = try_charge(mc.to, GFP_KERNEL & ~__GFP_NORETRY, 1);
4285 if (ret)
4286 return ret;
4287 mc.precharge++;
4288 cond_resched();
4289 }
4290 return 0;
4291}
4292
/**
 * get_mctgt_type - get target type of moving charge
 * @vma: the vma the pte to be checked belongs
 * @addr: the address corresponding to the pte to be checked
 * @ptent: the pte to be checked
 * @target: the pointer the target page or swap ent will be stored (can be NULL)
 *
 * Returns
 *   0 (MC_TARGET_NONE): if the pte is not a target for move charge.
 *   1 (MC_TARGET_PAGE): if the page corresponding to this pte is a target
 *     for move charge. If @target is not NULL, the page is stored in
 *     target->page with an extra refcount taken (callers should handle it).
 *   2 (MC_TARGET_SWAP): if the swap entry corresponding to this pte is a
 *     target for charge migration. If @target is not NULL, the entry is
 *     stored in target->ent.
 *
 * Called with pte lock held.
 */
4311union mc_target {
4312 struct page *page;
4313 swp_entry_t ent;
4314};
4315
4316enum mc_target_type {
4317 MC_TARGET_NONE = 0,
4318 MC_TARGET_PAGE,
4319 MC_TARGET_SWAP,
4320};
4321
4322static struct page *mc_handle_present_pte(struct vm_area_struct *vma,
4323 unsigned long addr, pte_t ptent)
4324{
4325 struct page *page = vm_normal_page(vma, addr, ptent);
4326
4327 if (!page || !page_mapped(page))
4328 return NULL;
4329 if (PageAnon(page)) {
4330 if (!(mc.flags & MOVE_ANON))
4331 return NULL;
4332 } else {
4333 if (!(mc.flags & MOVE_FILE))
4334 return NULL;
4335 }
4336 if (!get_page_unless_zero(page))
4337 return NULL;
4338
4339 return page;
4340}
4341
4342#ifdef CONFIG_SWAP
4343static struct page *mc_handle_swap_pte(struct vm_area_struct *vma,
4344 unsigned long addr, pte_t ptent, swp_entry_t *entry)
4345{
4346 struct page *page = NULL;
4347 swp_entry_t ent = pte_to_swp_entry(ptent);
4348
4349 if (!(mc.flags & MOVE_ANON) || non_swap_entry(ent))
4350 return NULL;
4351
	/*
	 * Because lookup_swap_cache() updates some statistics counter,
	 * we call find_get_page() with swapper_space directly.
	 */
4355 page = find_get_page(swap_address_space(ent), ent.val);
4356 if (do_memsw_account())
4357 entry->val = ent.val;
4358
4359 return page;
4360}
4361#else
4362static struct page *mc_handle_swap_pte(struct vm_area_struct *vma,
4363 unsigned long addr, pte_t ptent, swp_entry_t *entry)
4364{
4365 return NULL;
4366}
4367#endif
4368
4369static struct page *mc_handle_file_pte(struct vm_area_struct *vma,
4370 unsigned long addr, pte_t ptent, swp_entry_t *entry)
4371{
4372 struct page *page = NULL;
4373 struct address_space *mapping;
4374 pgoff_t pgoff;
4375
4376 if (!vma->vm_file)
4377 return NULL;
4378 if (!(mc.flags & MOVE_FILE))
4379 return NULL;
4380
4381 mapping = vma->vm_file->f_mapping;
4382 pgoff = linear_page_index(vma, addr);
4383
	/* page is moved even if it's not RSS of this task(page-faulted). */
4385#ifdef CONFIG_SWAP
	/* shmem/tmpfs may report page out on swap: account for that too. */
4387 if (shmem_mapping(mapping)) {
4388 page = find_get_entry(mapping, pgoff);
4389 if (radix_tree_exceptional_entry(page)) {
4390 swp_entry_t swp = radix_to_swp_entry(page);
4391 if (do_memsw_account())
4392 *entry = swp;
4393 page = find_get_page(swap_address_space(swp), swp.val);
4394 }
4395 } else
4396 page = find_get_page(mapping, pgoff);
4397#else
4398 page = find_get_page(mapping, pgoff);
4399#endif
4400 return page;
4401}
4402
/**
 * mem_cgroup_move_account - move account of the page
 * @page: the page
 * @compound: charge the page as compound or small page
 * @from: mem_cgroup which the page is moved from.
 * @to: mem_cgroup which the page is moved to. @from != @to.
 *
 * The caller must make sure the page is not on LRU (isolate_lru_page() is
 * useful to do that, too).
 *
 * This function doesn't do "charge" to the new cgroup and doesn't do
 * "uncharge" from the old cgroup.
 */
4415static int mem_cgroup_move_account(struct page *page,
4416 bool compound,
4417 struct mem_cgroup *from,
4418 struct mem_cgroup *to)
4419{
4420 unsigned long flags;
4421 unsigned int nr_pages = compound ? hpage_nr_pages(page) : 1;
4422 int ret;
4423 bool anon;
4424
4425 VM_BUG_ON(from == to);
4426 VM_BUG_ON_PAGE(PageLRU(page), page);
4427 VM_BUG_ON(compound && !PageTransHuge(page));
4428
	/*
	 * Prevent mem_cgroup_migrate() from looking at
	 * page->mem_cgroup of its source page while we change it.
	 */
4433 ret = -EBUSY;
4434 if (!trylock_page(page))
4435 goto out;
4436
4437 ret = -EINVAL;
4438 if (page->mem_cgroup != from)
4439 goto out_unlock;
4440
4441 anon = PageAnon(page);
4442
4443 spin_lock_irqsave(&from->move_lock, flags);
4444
4445 if (!anon && page_mapped(page)) {
4446 __this_cpu_sub(from->stat->count[MEM_CGROUP_STAT_FILE_MAPPED],
4447 nr_pages);
4448 __this_cpu_add(to->stat->count[MEM_CGROUP_STAT_FILE_MAPPED],
4449 nr_pages);
4450 }
4451
	/*
	 * move_lock grabbed above and caller set from->moving_account, so
	 * concurrent page state updates are serialized against us.
	 * So mapping should be stable for dirty pages.
	 */
4457 if (!anon && PageDirty(page)) {
4458 struct address_space *mapping = page_mapping(page);
4459
4460 if (mapping_cap_account_dirty(mapping)) {
4461 __this_cpu_sub(from->stat->count[MEM_CGROUP_STAT_DIRTY],
4462 nr_pages);
4463 __this_cpu_add(to->stat->count[MEM_CGROUP_STAT_DIRTY],
4464 nr_pages);
4465 }
4466 }
4467
4468 if (PageWriteback(page)) {
4469 __this_cpu_sub(from->stat->count[MEM_CGROUP_STAT_WRITEBACK],
4470 nr_pages);
4471 __this_cpu_add(to->stat->count[MEM_CGROUP_STAT_WRITEBACK],
4472 nr_pages);
4473 }
4474
	/*
	 * It is safe to change page->mem_cgroup here because the page
	 * is referenced, charged, and isolated - we can't race with
	 * uncharging, charging, migration, or LRU putback.
	 */

	/* caller should have done css_get */
4482 page->mem_cgroup = to;
4483 spin_unlock_irqrestore(&from->move_lock, flags);
4484
4485 ret = 0;
4486
4487 local_irq_disable();
4488 mem_cgroup_charge_statistics(to, page, compound, nr_pages);
4489 memcg_check_events(to, page);
4490 mem_cgroup_charge_statistics(from, page, compound, -nr_pages);
4491 memcg_check_events(from, page);
4492 local_irq_enable();
4493out_unlock:
4494 unlock_page(page);
4495out:
4496 return ret;
4497}
4498
4499static enum mc_target_type get_mctgt_type(struct vm_area_struct *vma,
4500 unsigned long addr, pte_t ptent, union mc_target *target)
4501{
4502 struct page *page = NULL;
4503 enum mc_target_type ret = MC_TARGET_NONE;
4504 swp_entry_t ent = { .val = 0 };
4505
4506 if (pte_present(ptent))
4507 page = mc_handle_present_pte(vma, addr, ptent);
4508 else if (is_swap_pte(ptent))
4509 page = mc_handle_swap_pte(vma, addr, ptent, &ent);
4510 else if (pte_none(ptent))
4511 page = mc_handle_file_pte(vma, addr, ptent, &ent);
4512
4513 if (!page && !ent.val)
4514 return ret;
4515 if (page) {
		/*
		 * Do only loose check w/o serialization.
		 * mem_cgroup_move_account() checks the page is valid or
		 * not under LRU exclusion.
		 */
4521 if (page->mem_cgroup == mc.from) {
4522 ret = MC_TARGET_PAGE;
4523 if (target)
4524 target->page = page;
4525 }
4526 if (!ret || !target)
4527 put_page(page);
4528 }
4529
4530 if (ent.val && !ret &&
4531 mem_cgroup_id(mc.from) == lookup_swap_cgroup_id(ent)) {
4532 ret = MC_TARGET_SWAP;
4533 if (target)
4534 target->ent = ent;
4535 }
4536 return ret;
4537}
4538
4539#ifdef CONFIG_TRANSPARENT_HUGEPAGE
/*
 * We don't consider swapping or file mapped pages because THP does not
 * support them for now.
 * Caller should make sure that pmd_trans_huge(pmd) is true.
 */
4545static enum mc_target_type get_mctgt_type_thp(struct vm_area_struct *vma,
4546 unsigned long addr, pmd_t pmd, union mc_target *target)
4547{
4548 struct page *page = NULL;
4549 enum mc_target_type ret = MC_TARGET_NONE;
4550
4551 page = pmd_page(pmd);
4552 VM_BUG_ON_PAGE(!page || !PageHead(page), page);
4553 if (!(mc.flags & MOVE_ANON))
4554 return ret;
4555 if (page->mem_cgroup == mc.from) {
4556 ret = MC_TARGET_PAGE;
4557 if (target) {
4558 get_page(page);
4559 target->page = page;
4560 }
4561 }
4562 return ret;
4563}
4564#else
4565static inline enum mc_target_type get_mctgt_type_thp(struct vm_area_struct *vma,
4566 unsigned long addr, pmd_t pmd, union mc_target *target)
4567{
4568 return MC_TARGET_NONE;
4569}
4570#endif
4571
4572static int mem_cgroup_count_precharge_pte_range(pmd_t *pmd,
4573 unsigned long addr, unsigned long end,
4574 struct mm_walk *walk)
4575{
4576 struct vm_area_struct *vma = walk->vma;
4577 pte_t *pte;
4578 spinlock_t *ptl;
4579
4580 ptl = pmd_trans_huge_lock(pmd, vma);
4581 if (ptl) {
4582 if (get_mctgt_type_thp(vma, addr, *pmd, NULL) == MC_TARGET_PAGE)
4583 mc.precharge += HPAGE_PMD_NR;
4584 spin_unlock(ptl);
4585 return 0;
4586 }
4587
4588 if (pmd_trans_unstable(pmd))
4589 return 0;
4590 pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl);
4591 for (; addr != end; pte++, addr += PAGE_SIZE)
4592 if (get_mctgt_type(vma, addr, *pte, NULL))
4593 mc.precharge++;
4594 pte_unmap_unlock(pte - 1, ptl);
4595 cond_resched();
4596
4597 return 0;
4598}
4599
4600static unsigned long mem_cgroup_count_precharge(struct mm_struct *mm)
4601{
4602 unsigned long precharge;
4603
4604 struct mm_walk mem_cgroup_count_precharge_walk = {
4605 .pmd_entry = mem_cgroup_count_precharge_pte_range,
4606 .mm = mm,
4607 };
4608 down_read(&mm->mmap_sem);
4609 walk_page_range(0, ~0UL, &mem_cgroup_count_precharge_walk);
4610 up_read(&mm->mmap_sem);
4611
4612 precharge = mc.precharge;
4613 mc.precharge = 0;
4614
4615 return precharge;
4616}
4617
4618static int mem_cgroup_precharge_mc(struct mm_struct *mm)
4619{
4620 unsigned long precharge = mem_cgroup_count_precharge(mm);
4621
4622 VM_BUG_ON(mc.moving_task);
4623 mc.moving_task = current;
4624 return mem_cgroup_do_precharge(precharge);
4625}
4626
4627
4628static void __mem_cgroup_clear_mc(void)
4629{
4630 struct mem_cgroup *from = mc.from;
4631 struct mem_cgroup *to = mc.to;
4632
	/* we must uncharge all the leftover precharges from mc.to */
4634 if (mc.precharge) {
4635 cancel_charge(mc.to, mc.precharge);
4636 mc.precharge = 0;
4637 }
4638
	/*
	 * we didn't uncharge from mc.from at mem_cgroup_move_account(), so
	 * we must uncharge here.
	 */
4642 if (mc.moved_charge) {
4643 cancel_charge(mc.from, mc.moved_charge);
4644 mc.moved_charge = 0;
4645 }
4646
4647 if (mc.moved_swap) {
		/* swap stored on swap_cgroup and uncharged from memsw */
4649 if (!mem_cgroup_is_root(mc.from))
4650 page_counter_uncharge(&mc.from->memsw, mc.moved_swap);
4651
		/*
		 * we charged both to->memory and to->memsw, we should
		 * uncharge to->memory.
		 */
4656 if (!mem_cgroup_is_root(mc.to))
4657 page_counter_uncharge(&mc.to->memory, mc.moved_swap);
4658
4659 css_put_many(&mc.from->css, mc.moved_swap);
4660
		/* we've already done css_get(mc.to) */
4662 mc.moved_swap = 0;
4663 }
4664 memcg_oom_recover(from);
4665 memcg_oom_recover(to);
4666 wake_up_all(&mc.waitq);
4667}
4668
4669static void mem_cgroup_clear_mc(void)
4670{
4671 struct mm_struct *mm = mc.mm;
4672
	/*
	 * we must clear moving_task before waking up waiters at the end of
	 * task migration.
	 */
4677 mc.moving_task = NULL;
4678 __mem_cgroup_clear_mc();
4679 spin_lock(&mc.lock);
4680 mc.from = NULL;
4681 mc.to = NULL;
4682 mc.mm = NULL;
4683 spin_unlock(&mc.lock);
4684
4685 mmput(mm);
4686}
4687
4688static int mem_cgroup_can_attach(struct cgroup_taskset *tset)
4689{
4690 struct cgroup_subsys_state *css;
4691 struct mem_cgroup *memcg = NULL;
4692 struct mem_cgroup *from;
4693 struct task_struct *leader, *p;
4694 struct mm_struct *mm;
4695 unsigned long move_flags;
4696 int ret = 0;
4697
	/* charge immigration isn't supported on the default hierarchy */
4699 if (cgroup_subsys_on_dfl(memory_cgrp_subsys))
4700 return 0;
4701
	/*
	 * Multi-process migrations only happen on the default hierarchy
	 * where charge immigration is not used.  Perform charge
	 * immigration if @tset contains a leader and whine if there are
	 * multiple.
	 */
4708 p = NULL;
4709 cgroup_taskset_for_each_leader(leader, css, tset) {
4710 WARN_ON_ONCE(p);
4711 p = leader;
4712 memcg = mem_cgroup_from_css(css);
4713 }
4714 if (!p)
4715 return 0;
4716
	/*
	 * We are now committed to this value whatever it is. Changes in this
	 * tunable will only affect upcoming migrations, not the current one.
	 * So we need to save it, and keep it going.
	 */
4722 move_flags = READ_ONCE(memcg->move_charge_at_immigrate);
4723 if (!move_flags)
4724 return 0;
4725
4726 from = mem_cgroup_from_task(p);
4727
4728 VM_BUG_ON(from == memcg);
4729
4730 mm = get_task_mm(p);
4731 if (!mm)
4732 return 0;
4733
4734 if (mm->owner == p) {
4735 VM_BUG_ON(mc.from);
4736 VM_BUG_ON(mc.to);
4737 VM_BUG_ON(mc.precharge);
4738 VM_BUG_ON(mc.moved_charge);
4739 VM_BUG_ON(mc.moved_swap);
4740
4741 spin_lock(&mc.lock);
4742 mc.mm = mm;
4743 mc.from = from;
4744 mc.to = memcg;
4745 mc.flags = move_flags;
4746 spin_unlock(&mc.lock);
		/* We set mc.moving_task later */
4748
4749 ret = mem_cgroup_precharge_mc(mm);
4750 if (ret)
4751 mem_cgroup_clear_mc();
4752 } else {
4753 mmput(mm);
4754 }
4755 return ret;
4756}
4757
4758static void mem_cgroup_cancel_attach(struct cgroup_taskset *tset)
4759{
4760 if (mc.to)
4761 mem_cgroup_clear_mc();
4762}
4763
4764static int mem_cgroup_move_charge_pte_range(pmd_t *pmd,
4765 unsigned long addr, unsigned long end,
4766 struct mm_walk *walk)
4767{
4768 int ret = 0;
4769 struct vm_area_struct *vma = walk->vma;
4770 pte_t *pte;
4771 spinlock_t *ptl;
4772 enum mc_target_type target_type;
4773 union mc_target target;
4774 struct page *page;
4775
4776 ptl = pmd_trans_huge_lock(pmd, vma);
4777 if (ptl) {
4778 if (mc.precharge < HPAGE_PMD_NR) {
4779 spin_unlock(ptl);
4780 return 0;
4781 }
4782 target_type = get_mctgt_type_thp(vma, addr, *pmd, &target);
4783 if (target_type == MC_TARGET_PAGE) {
4784 page = target.page;
4785 if (!isolate_lru_page(page)) {
4786 if (!mem_cgroup_move_account(page, true,
4787 mc.from, mc.to)) {
4788 mc.precharge -= HPAGE_PMD_NR;
4789 mc.moved_charge += HPAGE_PMD_NR;
4790 }
4791 putback_lru_page(page);
4792 }
4793 put_page(page);
4794 }
4795 spin_unlock(ptl);
4796 return 0;
4797 }
4798
4799 if (pmd_trans_unstable(pmd))
4800 return 0;
4801retry:
4802 pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl);
4803 for (; addr != end; addr += PAGE_SIZE) {
4804 pte_t ptent = *(pte++);
4805 swp_entry_t ent;
4806
4807 if (!mc.precharge)
4808 break;
4809
4810 switch (get_mctgt_type(vma, addr, ptent, &target)) {
4811 case MC_TARGET_PAGE:
4812 page = target.page;
			/*
			 * We can have a part of the split pmd here. Moving it
			 * can be done but it would be too convoluted so simply
			 * ignore such a partial THP and keep it in original
			 * memcg. There should be somebody mapping the head.
			 */
4819 if (PageTransCompound(page))
4820 goto put;
4821 if (isolate_lru_page(page))
4822 goto put;
4823 if (!mem_cgroup_move_account(page, false,
4824 mc.from, mc.to)) {
4825 mc.precharge--;
				/* we uncharge from mc.from later. */
4827 mc.moved_charge++;
4828 }
4829 putback_lru_page(page);
4830put:
4831 put_page(page);
4832 break;
4833 case MC_TARGET_SWAP:
4834 ent = target.ent;
4835 if (!mem_cgroup_move_swap_account(ent, mc.from, mc.to)) {
4836 mc.precharge--;
				/* we fixup refcnts and charges later. */
4838 mc.moved_swap++;
4839 }
4840 break;
4841 default:
4842 break;
4843 }
4844 }
4845 pte_unmap_unlock(pte - 1, ptl);
4846 cond_resched();
4847
4848 if (addr != end) {
		/*
		 * We have consumed all precharges we got in can_attach().
		 * We try charge one by one, but don't do any additional
		 * charges to mc.to if we have failed in charge once in
		 * attach() phase.
		 */
4855 ret = mem_cgroup_do_precharge(1);
4856 if (!ret)
4857 goto retry;
4858 }
4859
4860 return ret;
4861}
4862
4863static void mem_cgroup_move_charge(void)
4864{
4865 struct mm_walk mem_cgroup_move_charge_walk = {
4866 .pmd_entry = mem_cgroup_move_charge_pte_range,
4867 .mm = mc.mm,
4868 };
4869
4870 lru_add_drain_all();
	/*
	 * Signal page stat updaters to take the memcg's move_lock
	 * while we're moving its pages to another memcg.
	 * Then wait for already started RCU-only updates to finish.
	 */
4876 atomic_inc(&mc.from->moving_account);
4877 synchronize_rcu();
4878retry:
4879 if (unlikely(!down_read_trylock(&mc.mm->mmap_sem))) {
		/*
		 * Someone who is holding the mmap_sem might be waiting in
		 * waitq. So we cancel all extra charges, wake up all waiters,
		 * and retry. Because we cancel precharges, we might not be
		 * able to move enough charges, but moving charge is a
		 * best-effort feature anyway, so it wouldn't be a big problem.
		 */
4887 __mem_cgroup_clear_mc();
4888 cond_resched();
4889 goto retry;
4890 }
4891
	/*
	 * When we have consumed all precharges and failed in doing
	 * additional charge, the page walk just aborts.
	 */
4895 walk_page_range(0, ~0UL, &mem_cgroup_move_charge_walk);
4896 up_read(&mc.mm->mmap_sem);
4897 atomic_dec(&mc.from->moving_account);
4898}
4899
4900static void mem_cgroup_move_task(void)
4901{
4902 if (mc.to) {
4903 mem_cgroup_move_charge();
4904 mem_cgroup_clear_mc();
4905 }
4906}
4907#else
4908static int mem_cgroup_can_attach(struct cgroup_taskset *tset)
4909{
4910 return 0;
4911}
4912static void mem_cgroup_cancel_attach(struct cgroup_taskset *tset)
4913{
4914}
4915static void mem_cgroup_move_task(void)
4916{
4917}
4918#endif
4919
4920
4921
4922
4923
4924
4925static void mem_cgroup_bind(struct cgroup_subsys_state *root_css)
4926{
	/*
	 * use_hierarchy is forced on the default hierarchy.  cgroup core
	 * guarantees that @root doesn't have any children, so turning it
	 * on for the root memcg is enough.
	 */
4932 if (cgroup_subsys_on_dfl(memory_cgrp_subsys))
4933 root_mem_cgroup->use_hierarchy = true;
4934 else
4935 root_mem_cgroup->use_hierarchy = false;
4936}
4937
4938static u64 memory_current_read(struct cgroup_subsys_state *css,
4939 struct cftype *cft)
4940{
4941 struct mem_cgroup *memcg = mem_cgroup_from_css(css);
4942
4943 return (u64)page_counter_read(&memcg->memory) * PAGE_SIZE;
4944}
4945
4946static int memory_low_show(struct seq_file *m, void *v)
4947{
4948 struct mem_cgroup *memcg = mem_cgroup_from_css(seq_css(m));
4949 unsigned long low = READ_ONCE(memcg->low);
4950
4951 if (low == PAGE_COUNTER_MAX)
4952 seq_puts(m, "max\n");
4953 else
4954 seq_printf(m, "%llu\n", (u64)low * PAGE_SIZE);
4955
4956 return 0;
4957}
4958
4959static ssize_t memory_low_write(struct kernfs_open_file *of,
4960 char *buf, size_t nbytes, loff_t off)
4961{
4962 struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of));
4963 unsigned long low;
4964 int err;
4965
4966 buf = strstrip(buf);
4967 err = page_counter_memparse(buf, "max", &low);
4968 if (err)
4969 return err;
4970
4971 memcg->low = low;
4972
4973 return nbytes;
4974}
4975
4976static int memory_high_show(struct seq_file *m, void *v)
4977{
4978 struct mem_cgroup *memcg = mem_cgroup_from_css(seq_css(m));
4979 unsigned long high = READ_ONCE(memcg->high);
4980
4981 if (high == PAGE_COUNTER_MAX)
4982 seq_puts(m, "max\n");
4983 else
4984 seq_printf(m, "%llu\n", (u64)high * PAGE_SIZE);
4985
4986 return 0;
4987}
4988
4989static ssize_t memory_high_write(struct kernfs_open_file *of,
4990 char *buf, size_t nbytes, loff_t off)
4991{
4992 struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of));
4993 unsigned long nr_pages;
4994 unsigned long high;
4995 int err;
4996
4997 buf = strstrip(buf);
4998 err = page_counter_memparse(buf, "max", &high);
4999 if (err)
5000 return err;
5001
5002 memcg->high = high;
5003
5004 nr_pages = page_counter_read(&memcg->memory);
5005 if (nr_pages > high)
5006 try_to_free_mem_cgroup_pages(memcg, nr_pages - high,
5007 GFP_KERNEL, true);
5008
5009 memcg_wb_domain_size_changed(memcg);
5010 return nbytes;
5011}
5012
5013static int memory_max_show(struct seq_file *m, void *v)
5014{
5015 struct mem_cgroup *memcg = mem_cgroup_from_css(seq_css(m));
5016 unsigned long max = READ_ONCE(memcg->memory.limit);
5017
5018 if (max == PAGE_COUNTER_MAX)
5019 seq_puts(m, "max\n");
5020 else
5021 seq_printf(m, "%llu\n", (u64)max * PAGE_SIZE);
5022
5023 return 0;
5024}
5025
5026static ssize_t memory_max_write(struct kernfs_open_file *of,
5027 char *buf, size_t nbytes, loff_t off)
5028{
5029 struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of));
5030 unsigned int nr_reclaims = MEM_CGROUP_RECLAIM_RETRIES;
5031 bool drained = false;
5032 unsigned long max;
5033 int err;
5034
5035 buf = strstrip(buf);
5036 err = page_counter_memparse(buf, "max", &max);
5037 if (err)
5038 return err;
5039
5040 xchg(&memcg->memory.limit, max);
5041
5042 for (;;) {
5043 unsigned long nr_pages = page_counter_read(&memcg->memory);
5044
5045 if (nr_pages <= max)
5046 break;
5047
5048 if (signal_pending(current)) {
5049 err = -EINTR;
5050 break;
5051 }
5052
5053 if (!drained) {
5054 drain_all_stock(memcg);
5055 drained = true;
5056 continue;
5057 }
5058
5059 if (nr_reclaims) {
5060 if (!try_to_free_mem_cgroup_pages(memcg, nr_pages - max,
5061 GFP_KERNEL, true))
5062 nr_reclaims--;
5063 continue;
5064 }
5065
5066 mem_cgroup_events(memcg, MEMCG_OOM, 1);
5067 if (!mem_cgroup_out_of_memory(memcg, GFP_KERNEL, 0))
5068 break;
5069 }
5070
5071 memcg_wb_domain_size_changed(memcg);
5072 return nbytes;
5073}
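
/*
 * Note on the ordering above: the new limit is installed with xchg()
 * before any reclaim happens, so concurrent charges see it immediately;
 * the loop then reclaims down to it, draining per-cpu charge stocks
 * once and falling back to the OOM killer only after
 * MEM_CGROUP_RECLAIM_RETRIES failed reclaim passes.
 */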
5074
5075static int memory_events_show(struct seq_file *m, void *v)
5076{
5077 struct mem_cgroup *memcg = mem_cgroup_from_css(seq_css(m));
5078
5079 seq_printf(m, "low %lu\n", mem_cgroup_read_events(memcg, MEMCG_LOW));
5080 seq_printf(m, "high %lu\n", mem_cgroup_read_events(memcg, MEMCG_HIGH));
5081 seq_printf(m, "max %lu\n", mem_cgroup_read_events(memcg, MEMCG_MAX));
5082 seq_printf(m, "oom %lu\n", mem_cgroup_read_events(memcg, MEMCG_OOM));
5083
5084 return 0;
5085}
5086
5087static int memory_stat_show(struct seq_file *m, void *v)
5088{
5089 struct mem_cgroup *memcg = mem_cgroup_from_css(seq_css(m));
5090 unsigned long stat[MEMCG_NR_STAT];
5091 unsigned long events[MEMCG_NR_EVENTS];
5092 int i;
5093
	/*
	 * Provide statistics on the state of the memory subsystem as
	 * well as cumulative event counters that show past behavior.
	 *
	 * This list is ordered following a combination of these gradients:
	 * 1) generic big picture -> specifics and details
	 * 2) reflecting userspace activity -> reflecting kernel heuristics
	 *
	 * Current memory state:
	 */
5105 tree_stat(memcg, stat);
5106 tree_events(memcg, events);
5107
5108 seq_printf(m, "anon %llu\n",
5109 (u64)stat[MEM_CGROUP_STAT_RSS] * PAGE_SIZE);
5110 seq_printf(m, "file %llu\n",
5111 (u64)stat[MEM_CGROUP_STAT_CACHE] * PAGE_SIZE);
5112 seq_printf(m, "kernel_stack %llu\n",
5113 (u64)stat[MEMCG_KERNEL_STACK] * PAGE_SIZE);
5114 seq_printf(m, "slab %llu\n",
5115 (u64)(stat[MEMCG_SLAB_RECLAIMABLE] +
5116 stat[MEMCG_SLAB_UNRECLAIMABLE]) * PAGE_SIZE);
5117 seq_printf(m, "sock %llu\n",
5118 (u64)stat[MEMCG_SOCK] * PAGE_SIZE);
5119
5120 seq_printf(m, "file_mapped %llu\n",
5121 (u64)stat[MEM_CGROUP_STAT_FILE_MAPPED] * PAGE_SIZE);
5122 seq_printf(m, "file_dirty %llu\n",
5123 (u64)stat[MEM_CGROUP_STAT_DIRTY] * PAGE_SIZE);
5124 seq_printf(m, "file_writeback %llu\n",
5125 (u64)stat[MEM_CGROUP_STAT_WRITEBACK] * PAGE_SIZE);
5126
5127 for (i = 0; i < NR_LRU_LISTS; i++) {
5128 struct mem_cgroup *mi;
5129 unsigned long val = 0;
5130
5131 for_each_mem_cgroup_tree(mi, memcg)
5132 val += mem_cgroup_nr_lru_pages(mi, BIT(i));
5133 seq_printf(m, "%s %llu\n",
5134 mem_cgroup_lru_names[i], (u64)val * PAGE_SIZE);
5135 }
5136
5137 seq_printf(m, "slab_reclaimable %llu\n",
5138 (u64)stat[MEMCG_SLAB_RECLAIMABLE] * PAGE_SIZE);
5139 seq_printf(m, "slab_unreclaimable %llu\n",
5140 (u64)stat[MEMCG_SLAB_UNRECLAIMABLE] * PAGE_SIZE);
5141
	/* Accumulated memory events */
5143
5144 seq_printf(m, "pgfault %lu\n",
5145 events[MEM_CGROUP_EVENTS_PGFAULT]);
5146 seq_printf(m, "pgmajfault %lu\n",
5147 events[MEM_CGROUP_EVENTS_PGMAJFAULT]);
5148
5149 return 0;
5150}
5151
5152static struct cftype memory_files[] = {
5153 {
5154 .name = "current",
5155 .flags = CFTYPE_NOT_ON_ROOT,
5156 .read_u64 = memory_current_read,
5157 },
5158 {
5159 .name = "low",
5160 .flags = CFTYPE_NOT_ON_ROOT,
5161 .seq_show = memory_low_show,
5162 .write = memory_low_write,
5163 },
5164 {
5165 .name = "high",
5166 .flags = CFTYPE_NOT_ON_ROOT,
5167 .seq_show = memory_high_show,
5168 .write = memory_high_write,
5169 },
5170 {
5171 .name = "max",
5172 .flags = CFTYPE_NOT_ON_ROOT,
5173 .seq_show = memory_max_show,
5174 .write = memory_max_write,
5175 },
5176 {
5177 .name = "events",
5178 .flags = CFTYPE_NOT_ON_ROOT,
5179 .file_offset = offsetof(struct mem_cgroup, events_file),
5180 .seq_show = memory_events_show,
5181 },
5182 {
5183 .name = "stat",
5184 .flags = CFTYPE_NOT_ON_ROOT,
5185 .seq_show = memory_stat_show,
5186 },
5187 { }
5188};

struct cgroup_subsys memory_cgrp_subsys = {
	.css_alloc = mem_cgroup_css_alloc,
	.css_online = mem_cgroup_css_online,
	.css_offline = mem_cgroup_css_offline,
	.css_released = mem_cgroup_css_released,
	.css_free = mem_cgroup_css_free,
	.css_reset = mem_cgroup_css_reset,
	.can_attach = mem_cgroup_can_attach,
	.cancel_attach = mem_cgroup_cancel_attach,
	.post_attach = mem_cgroup_move_task,
	.bind = mem_cgroup_bind,
	.dfl_cftypes = memory_files,
	.legacy_cftypes = mem_cgroup_legacy_files,
	.early_init = 0,
};

/**
 * mem_cgroup_low - check if memory consumption is below the normal range
 * @root: the highest ancestor to consider
 * @memcg: the memory cgroup to check
 *
 * Returns %true if memory consumption of @memcg, and that of all
 * configurable ancestors up to @root, is below the normal range.
 */
bool mem_cgroup_low(struct mem_cgroup *root, struct mem_cgroup *memcg)
{
	if (mem_cgroup_disabled())
		return false;

	/*
	 * The toplevel group doesn't have a configurable range, so
	 * it's never low when looked at directly, and it is not
	 * considered an ancestor when assessing the hierarchy.
	 */
	if (memcg == root_mem_cgroup)
		return false;

	if (page_counter_read(&memcg->memory) >= memcg->low)
		return false;

	while (memcg != root) {
		memcg = parent_mem_cgroup(memcg);

		if (memcg == root_mem_cgroup)
			break;

		if (page_counter_read(&memcg->memory) >= memcg->low)
			return false;
	}
	return true;
}
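
/*
 * Example (illustrative): given a hierarchy root/A/B with
 * A/memory.low set to 2G and B/memory.low set to 1G,
 * mem_cgroup_low(root, B) only returns true while B's usage is below
 * 1G and A's usage is below 2G. Reclaim can use this to skip such
 * groups as long as memory can be freed elsewhere; the concrete
 * sizes here are hypothetical.
 */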

/**
 * mem_cgroup_try_charge - try charging a page
 * @page: page to charge
 * @mm: mm context of the victim
 * @gfp_mask: reclaim mode
 * @memcgp: charged memcg return
 * @compound: charge the page as compound or small page
 *
 * Try to charge @page to the memcg that @mm belongs to, reclaiming
 * pages according to @gfp_mask if necessary.
 *
 * Returns 0 on success, with *@memcgp pointing to the charged memcg.
 * Otherwise, an error code is returned.
 *
 * After page->mapping has been set up, the caller must finalize the
 * charge with mem_cgroup_commit_charge().  Or abort the transaction
 * with mem_cgroup_cancel_charge() in case page instantiation fails.
 */
int mem_cgroup_try_charge(struct page *page, struct mm_struct *mm,
			  gfp_t gfp_mask, struct mem_cgroup **memcgp,
			  bool compound)
{
	struct mem_cgroup *memcg = NULL;
	unsigned int nr_pages = compound ? hpage_nr_pages(page) : 1;
	int ret = 0;

	if (mem_cgroup_disabled())
		goto out;

	if (PageSwapCache(page)) {
		/*
		 * Every swap fault against a single page tries to charge the
		 * page, bail as early as possible.  shmem_unuse() encounters
		 * already charged pages, too.  The USED bit is protected by
		 * the page lock, which serializes swap cache removal, which
		 * in turn serializes uncharging.
		 */
		VM_BUG_ON_PAGE(!PageLocked(page), page);
		if (page->mem_cgroup)
			goto out;

		if (do_swap_account) {
			swp_entry_t ent = { .val = page_private(page), };
			unsigned short id = lookup_swap_cgroup_id(ent);

			rcu_read_lock();
			memcg = mem_cgroup_from_id(id);
			if (memcg && !css_tryget_online(&memcg->css))
				memcg = NULL;
			rcu_read_unlock();
		}
	}

	if (!memcg)
		memcg = get_mem_cgroup_from_mm(mm);

	ret = try_charge(memcg, gfp_mask, nr_pages);

	css_put(&memcg->css);
out:
	*memcgp = memcg;
	return ret;
}

/**
 * mem_cgroup_commit_charge - commit a page charge
 * @page: page to charge
 * @memcg: memcg to charge the page to
 * @lrucare: page might be on LRU already
 * @compound: charge the page as compound or small page
 *
 * Finalize a charge transaction started by mem_cgroup_try_charge(),
 * after page->mapping has been set up.  This must happen atomically
 * as part of the page instantiation, i.e. under the page table lock
 * for anonymous pages, under the page lock for page and swap cache.
 *
 * In addition, the page must not be on the LRU during the commit, to
 * prevent racing with task migration.  If it might be, use @lrucare.
 *
 * Use mem_cgroup_cancel_charge() to cancel the transaction instead.
 */
void mem_cgroup_commit_charge(struct page *page, struct mem_cgroup *memcg,
			      bool lrucare, bool compound)
{
	unsigned int nr_pages = compound ? hpage_nr_pages(page) : 1;

	VM_BUG_ON_PAGE(!page->mapping, page);
	VM_BUG_ON_PAGE(PageLRU(page) && !lrucare, page);

	if (mem_cgroup_disabled())
		return;

	/*
	 * Swap faults will attempt to charge the same page multiple
	 * times.  But reuse_swap_page() might have removed the page
	 * from the swapcache already, so we have to be careful.
	 */
	if (!memcg)
		return;

	commit_charge(page, memcg, lrucare);

	local_irq_disable();
	mem_cgroup_charge_statistics(memcg, page, compound, nr_pages);
	memcg_check_events(memcg, page);
	local_irq_enable();

	if (do_memsw_account() && PageSwapCache(page)) {
		swp_entry_t entry = { .val = page_private(page) };
		/*
		 * The swap entry might not get freed for a long time,
		 * let's not wait for it.  The page already received a
		 * memory+swap charge, drop the swap entry duplicate.
		 */
		mem_cgroup_uncharge_swap(entry);
	}
}

/**
 * mem_cgroup_cancel_charge - cancel a page charge
 * @page: page to charge
 * @memcg: memcg to charge the page to
 * @compound: charge the page as compound or small page
 *
 * Cancel a charge transaction started by mem_cgroup_try_charge().
 */
void mem_cgroup_cancel_charge(struct page *page, struct mem_cgroup *memcg,
		bool compound)
{
	unsigned int nr_pages = compound ? hpage_nr_pages(page) : 1;

	if (mem_cgroup_disabled())
		return;

	/*
	 * Swap faults will attempt to charge the same page multiple
	 * times.  But reuse_swap_page() might have removed the page
	 * from the swapcache already, so we have to be careful.
	 */
	if (!memcg)
		return;

	cancel_charge(memcg, nr_pages);
}
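
/*
 * Example (illustrative sketch of the charge transaction protocol
 * documented above; the surrounding fault handling and the helper
 * install_page_somewhere() are hypothetical):
 *
 *	struct mem_cgroup *memcg;
 *	int error;
 *
 *	error = mem_cgroup_try_charge(page, mm, GFP_KERNEL, &memcg, false);
 *	if (error)
 *		return error;
 *
 *	error = install_page_somewhere(page);	// sets page->mapping
 *	if (error) {
 *		mem_cgroup_cancel_charge(page, memcg, false);
 *		return error;
 *	}
 *	mem_cgroup_commit_charge(page, memcg, false, false);
 */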

static void uncharge_batch(struct mem_cgroup *memcg, unsigned long pgpgout,
			   unsigned long nr_anon, unsigned long nr_file,
			   unsigned long nr_huge, struct page *dummy_page)
{
	unsigned long nr_pages = nr_anon + nr_file;
	unsigned long flags;

	if (!mem_cgroup_is_root(memcg)) {
		page_counter_uncharge(&memcg->memory, nr_pages);
		if (do_memsw_account())
			page_counter_uncharge(&memcg->memsw, nr_pages);
		memcg_oom_recover(memcg);
	}

	local_irq_save(flags);
	__this_cpu_sub(memcg->stat->count[MEM_CGROUP_STAT_RSS], nr_anon);
	__this_cpu_sub(memcg->stat->count[MEM_CGROUP_STAT_CACHE], nr_file);
	__this_cpu_sub(memcg->stat->count[MEM_CGROUP_STAT_RSS_HUGE], nr_huge);
	__this_cpu_add(memcg->stat->events[MEM_CGROUP_EVENTS_PGPGOUT], pgpgout);
	__this_cpu_add(memcg->stat->nr_page_events, nr_pages);
	memcg_check_events(memcg, dummy_page);
	local_irq_restore(flags);

	if (!mem_cgroup_is_root(memcg))
		css_put_many(&memcg->css, nr_pages);
}

static void uncharge_list(struct list_head *page_list)
{
	struct mem_cgroup *memcg = NULL;
	unsigned long nr_anon = 0;
	unsigned long nr_file = 0;
	unsigned long nr_huge = 0;
	unsigned long pgpgout = 0;
	struct list_head *next;
	struct page *page;

	/*
	 * Note that the list can be a single page->lru; hence the
	 * do-while loop instead of a simple list_for_each_entry().
	 */
	next = page_list->next;
	do {
		unsigned int nr_pages = 1;

		page = list_entry(next, struct page, lru);
		next = page->lru.next;

		VM_BUG_ON_PAGE(PageLRU(page), page);
		VM_BUG_ON_PAGE(page_count(page), page);

		if (!page->mem_cgroup)
			continue;

		/*
		 * Nobody should be changing or seriously looking at
		 * page->mem_cgroup at this point, we have fully
		 * exclusive access to the page.
		 */
		if (memcg != page->mem_cgroup) {
			if (memcg) {
				uncharge_batch(memcg, pgpgout, nr_anon, nr_file,
					       nr_huge, page);
				pgpgout = nr_anon = nr_file = nr_huge = 0;
			}
			memcg = page->mem_cgroup;
		}

		if (PageTransHuge(page)) {
			nr_pages <<= compound_order(page);
			VM_BUG_ON_PAGE(!PageTransHuge(page), page);
			nr_huge += nr_pages;
		}

		if (PageAnon(page))
			nr_anon += nr_pages;
		else
			nr_file += nr_pages;

		page->mem_cgroup = NULL;

		pgpgout++;
	} while (next != page_list);

	if (memcg)
		uncharge_batch(memcg, pgpgout, nr_anon, nr_file,
			       nr_huge, page);
}

/**
 * mem_cgroup_uncharge - uncharge a page
 * @page: page to uncharge
 *
 * Uncharge a page previously charged with mem_cgroup_try_charge() and
 * mem_cgroup_commit_charge().
 */
void mem_cgroup_uncharge(struct page *page)
{
	if (mem_cgroup_disabled())
		return;

	/* Don't touch page->lru of any random page, pre-check: */
	if (!page->mem_cgroup)
		return;

	INIT_LIST_HEAD(&page->lru);
	uncharge_list(&page->lru);
}

/**
 * mem_cgroup_uncharge_list - uncharge a list of pages
 * @page_list: list of pages to uncharge
 *
 * Uncharge a list of pages previously charged with
 * mem_cgroup_try_charge() and mem_cgroup_commit_charge().
 */
void mem_cgroup_uncharge_list(struct list_head *page_list)
{
	if (mem_cgroup_disabled())
		return;

	if (!list_empty(page_list))
		uncharge_list(page_list);
}
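
/*
 * Example (illustrative): batch-uncharging a private list of pages
 * whose reference counts have dropped to zero, the way a release
 * path might; the gathering loop is hypothetical:
 *
 *	LIST_HEAD(pages_to_free);
 *
 *	// collect dead pages with list_add(&page->lru, &pages_to_free)
 *
 *	mem_cgroup_uncharge_list(&pages_to_free);
 *	free_hot_cold_page_list(&pages_to_free, false);
 */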

/**
 * mem_cgroup_migrate - charge a page's replacement
 * @oldpage: currently circulating page
 * @newpage: replacement page
 *
 * Charge @newpage as a replacement page for @oldpage. @oldpage will
 * be uncharged upon free.
 *
 * Both pages must be locked, @newpage->mapping must be set up.
 */
void mem_cgroup_migrate(struct page *oldpage, struct page *newpage)
{
	struct mem_cgroup *memcg;
	unsigned int nr_pages;
	bool compound;

	VM_BUG_ON_PAGE(!PageLocked(oldpage), oldpage);
	VM_BUG_ON_PAGE(!PageLocked(newpage), newpage);
	VM_BUG_ON_PAGE(PageAnon(oldpage) != PageAnon(newpage), newpage);
	VM_BUG_ON_PAGE(PageTransHuge(oldpage) != PageTransHuge(newpage),
		       newpage);

	if (mem_cgroup_disabled())
		return;

	/* Page cache replacement: new page already charged? */
	if (newpage->mem_cgroup)
		return;

	/* Swapcache readahead pages can get replaced before being charged */
	memcg = oldpage->mem_cgroup;
	if (!memcg)
		return;

	/* Force-charge the new page. The old one will be freed soon */
	compound = PageTransHuge(newpage);
	nr_pages = compound ? hpage_nr_pages(newpage) : 1;

	page_counter_charge(&memcg->memory, nr_pages);
	if (do_memsw_account())
		page_counter_charge(&memcg->memsw, nr_pages);
	css_get_many(&memcg->css, nr_pages);

	commit_charge(newpage, memcg, false);

	local_irq_disable();
	mem_cgroup_charge_statistics(memcg, newpage, compound, nr_pages);
	memcg_check_events(memcg, newpage);
	local_irq_enable();
}
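
/*
 * Example (illustrative sketch; the replacement path shown is
 * hypothetical): a caller locks both pages and sets up
 * newpage->mapping before transferring the charge:
 *
 *	lock_page(oldpage);
 *	lock_page(newpage);
 *	newpage->mapping = oldpage->mapping;
 *	newpage->index = oldpage->index;
 *	mem_cgroup_migrate(oldpage, newpage);
 *	// publish newpage, then drop the last reference to oldpage,
 *	// which uncharges it on free
 *	unlock_page(newpage);
 *	unlock_page(oldpage);
 */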

DEFINE_STATIC_KEY_FALSE(memcg_sockets_enabled_key);
EXPORT_SYMBOL(memcg_sockets_enabled_key);

void sock_update_memcg(struct sock *sk)
{
	struct mem_cgroup *memcg;

	/*
	 * Socket cloning can throw us here with sk_memcg already
	 * filled. It won't however, necessarily happen from
	 * process context. So the test for root memcg given
	 * the current task's memcg won't help us in this case.
	 *
	 * Respecting the original socket's memcg is a better
	 * decision in this case.
	 */
	if (sk->sk_memcg) {
		BUG_ON(mem_cgroup_is_root(sk->sk_memcg));
		css_get(&sk->sk_memcg->css);
		return;
	}

	rcu_read_lock();
	memcg = mem_cgroup_from_task(current);
	if (memcg == root_mem_cgroup)
		goto out;
	if (!cgroup_subsys_on_dfl(memory_cgrp_subsys) && !memcg->tcpmem_active)
		goto out;
	if (css_tryget_online(&memcg->css))
		sk->sk_memcg = memcg;
out:
	rcu_read_unlock();
}
EXPORT_SYMBOL(sock_update_memcg);

void sock_release_memcg(struct sock *sk)
{
	WARN_ON(!sk->sk_memcg);
	css_put(&sk->sk_memcg->css);
}

/**
 * mem_cgroup_charge_skmem - charge socket memory
 * @memcg: memcg to charge
 * @nr_pages: number of pages to charge
 *
 * Charges @nr_pages to @memcg. Returns %true if the charge fit within
 * @memcg's configured limit, %false if it doesn't.
 */
bool mem_cgroup_charge_skmem(struct mem_cgroup *memcg, unsigned int nr_pages)
{
	gfp_t gfp_mask = GFP_KERNEL;

	if (!cgroup_subsys_on_dfl(memory_cgrp_subsys)) {
		struct page_counter *fail;

		if (page_counter_try_charge(&memcg->tcpmem, nr_pages, &fail)) {
			memcg->tcpmem_pressure = 0;
			return true;
		}
		page_counter_charge(&memcg->tcpmem, nr_pages);
		memcg->tcpmem_pressure = 1;
		return false;
	}

	/* Don't block in the packet receive path */
	if (in_softirq())
		gfp_mask = GFP_NOWAIT;

	this_cpu_add(memcg->stat->count[MEMCG_SOCK], nr_pages);

	if (try_charge(memcg, gfp_mask, nr_pages) == 0)
		return true;

	try_charge(memcg, gfp_mask|__GFP_NOFAIL, nr_pages);
	return false;
}
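
/*
 * Example (illustrative; the caller shown is a hypothetical protocol
 * accounting path): the charge is always applied, so a %false return
 * is a signal to throttle rather than to fail the allocation:
 *
 *	if (mem_cgroup_sockets_enabled && sk->sk_memcg &&
 *	    !mem_cgroup_charge_skmem(sk->sk_memcg, nr_pages)) {
 *		// charge was forced past the limit; enter memory
 *		// pressure so the protocol backs off this socket
 *		sk_enter_memory_pressure(sk);
 *	}
 */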

/**
 * mem_cgroup_uncharge_skmem - uncharge socket memory
 * @memcg: memcg to uncharge
 * @nr_pages: number of pages to uncharge
 */
void mem_cgroup_uncharge_skmem(struct mem_cgroup *memcg, unsigned int nr_pages)
{
	if (!cgroup_subsys_on_dfl(memory_cgrp_subsys)) {
		page_counter_uncharge(&memcg->tcpmem, nr_pages);
		return;
	}

	this_cpu_sub(memcg->stat->count[MEMCG_SOCK], nr_pages);

	page_counter_uncharge(&memcg->memory, nr_pages);
	css_put_many(&memcg->css, nr_pages);
}

static int __init cgroup_memory(char *s)
{
	char *token;

	while ((token = strsep(&s, ",")) != NULL) {
		if (!*token)
			continue;
		if (!strcmp(token, "nosocket"))
			cgroup_memory_nosocket = true;
		if (!strcmp(token, "nokmem"))
			cgroup_memory_nokmem = true;
	}
	return 0;
}
__setup("cgroup.memory=", cgroup_memory);
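
/*
 * Example (illustrative): pass one or both tokens on the kernel
 * command line to opt out of socket and/or kernel memory accounting:
 *
 *	cgroup.memory=nosocket
 *	cgroup.memory=nosocket,nokmem
 */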

/*
 * subsys_initcall() for memory controller.
 *
 * Some parts like hotcpu_notifier() have to be initialized from this context
 * because of lock dependencies (cgroup_lock -> cpu hotplug) but basically
 * everything that doesn't depend on a specific mem_cgroup structure should
 * be initialized from here.
 */
static int __init mem_cgroup_init(void)
{
	int cpu, node;

	hotcpu_notifier(memcg_cpu_hotplug_callback, 0);

	for_each_possible_cpu(cpu)
		INIT_WORK(&per_cpu_ptr(&memcg_stock, cpu)->work,
			  drain_local_stock);

	for_each_node(node) {
		struct mem_cgroup_tree_per_node *rtpn;
		int zone;

		rtpn = kzalloc_node(sizeof(*rtpn), GFP_KERNEL,
				    node_online(node) ? node : NUMA_NO_NODE);

		for (zone = 0; zone < MAX_NR_ZONES; zone++) {
			struct mem_cgroup_tree_per_zone *rtpz;

			rtpz = &rtpn->rb_tree_per_zone[zone];
			rtpz->rb_root = RB_ROOT;
			spin_lock_init(&rtpz->lock);
		}
		soft_limit_tree.rb_tree_per_node[node] = rtpn;
	}

	return 0;
}
subsys_initcall(mem_cgroup_init);

#ifdef CONFIG_MEMCG_SWAP
/**
 * mem_cgroup_swapout - transfer a memsw charge to swap
 * @page: page whose memsw charge to transfer
 * @entry: swap entry to move the charge to
 *
 * Transfer the memsw charge of @page to @entry.
 */
void mem_cgroup_swapout(struct page *page, swp_entry_t entry)
{
	struct mem_cgroup *memcg;
	unsigned short oldid;

	VM_BUG_ON_PAGE(PageLRU(page), page);
	VM_BUG_ON_PAGE(page_count(page), page);

	if (!do_memsw_account())
		return;

	memcg = page->mem_cgroup;

	/* Readahead page, never charged */
	if (!memcg)
		return;

	oldid = swap_cgroup_record(entry, mem_cgroup_id(memcg));
	VM_BUG_ON_PAGE(oldid, page);
	mem_cgroup_swap_statistics(memcg, true);

	page->mem_cgroup = NULL;

	if (!mem_cgroup_is_root(memcg))
		page_counter_uncharge(&memcg->memory, 1);

	/*
	 * Interrupts should be disabled here because the caller holds the
	 * mapping->tree_lock lock which is taken with interrupts-off. It is
	 * important here to have the interrupts disabled because it is the
	 * only synchronisation we have for updating the per-CPU variables.
	 */
	VM_BUG_ON(!irqs_disabled());
	mem_cgroup_charge_statistics(memcg, page, false, -1);
	memcg_check_events(memcg, page);
}

/*
 * mem_cgroup_try_charge_swap - try charging a swap entry
 * @page: page being added to swap
 * @entry: swap entry to charge
 *
 * Try to charge @entry to the memcg that @page belongs to.
 *
 * Returns 0 on success, -ENOMEM on failure.
 */
int mem_cgroup_try_charge_swap(struct page *page, swp_entry_t entry)
{
	struct mem_cgroup *memcg;
	struct page_counter *counter;
	unsigned short oldid;

	if (!cgroup_subsys_on_dfl(memory_cgrp_subsys) || !do_swap_account)
		return 0;

	memcg = page->mem_cgroup;

	/* Readahead page, never charged */
	if (!memcg)
		return 0;

	if (!mem_cgroup_is_root(memcg) &&
	    !page_counter_try_charge(&memcg->swap, 1, &counter))
		return -ENOMEM;

	oldid = swap_cgroup_record(entry, mem_cgroup_id(memcg));
	VM_BUG_ON_PAGE(oldid, page);
	mem_cgroup_swap_statistics(memcg, true);

	css_get(&memcg->css);
	return 0;
}

/**
 * mem_cgroup_uncharge_swap - uncharge a swap entry
 * @entry: swap entry to uncharge
 *
 * Drop the swap charge associated with @entry.
 */
void mem_cgroup_uncharge_swap(swp_entry_t entry)
{
	struct mem_cgroup *memcg;
	unsigned short id;

	if (!do_swap_account)
		return;

	id = swap_cgroup_record(entry, 0);
	rcu_read_lock();
	memcg = mem_cgroup_from_id(id);
	if (memcg) {
		if (!mem_cgroup_is_root(memcg)) {
			if (cgroup_subsys_on_dfl(memory_cgrp_subsys))
				page_counter_uncharge(&memcg->swap, 1);
			else
				page_counter_uncharge(&memcg->memsw, 1);
		}
		mem_cgroup_swap_statistics(memcg, false);
		css_put(&memcg->css);
	}
	rcu_read_unlock();
}

long mem_cgroup_get_nr_swap_pages(struct mem_cgroup *memcg)
{
	long nr_swap_pages = get_nr_swap_pages();

	if (!do_swap_account || !cgroup_subsys_on_dfl(memory_cgrp_subsys))
		return nr_swap_pages;
	for (; memcg != root_mem_cgroup; memcg = parent_mem_cgroup(memcg))
		nr_swap_pages = min_t(long, nr_swap_pages,
				      READ_ONCE(memcg->swap.limit) -
				      page_counter_read(&memcg->swap));
	return nr_swap_pages;
}

bool mem_cgroup_swap_full(struct page *page)
{
	struct mem_cgroup *memcg;

	VM_BUG_ON_PAGE(!PageLocked(page), page);

	if (vm_swap_full())
		return true;
	if (!do_swap_account || !cgroup_subsys_on_dfl(memory_cgrp_subsys))
		return false;

	memcg = page->mem_cgroup;
	if (!memcg)
		return false;

	for (; memcg != root_mem_cgroup; memcg = parent_mem_cgroup(memcg))
		if (page_counter_read(&memcg->swap) * 2 >= memcg->swap.limit)
			return true;

	return false;
}

/* Remember the swapaccount= boot option */
#ifdef CONFIG_MEMCG_SWAP_ENABLED
static int really_do_swap_account __initdata = 1;
#else
static int really_do_swap_account __initdata;
#endif

static int __init enable_swap_account(char *s)
{
	if (!strcmp(s, "1"))
		really_do_swap_account = 1;
	else if (!strcmp(s, "0"))
		really_do_swap_account = 0;
	return 1;
}
__setup("swapaccount=", enable_swap_account);
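
/*
 * Example (illustrative): the compile-time default set by
 * CONFIG_MEMCG_SWAP_ENABLED can be overridden on the kernel command
 * line:
 *
 *	swapaccount=0	(disable swap accounting)
 *	swapaccount=1	(enable swap accounting)
 */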

static u64 swap_current_read(struct cgroup_subsys_state *css,
			     struct cftype *cft)
{
	struct mem_cgroup *memcg = mem_cgroup_from_css(css);

	return (u64)page_counter_read(&memcg->swap) * PAGE_SIZE;
}

static int swap_max_show(struct seq_file *m, void *v)
{
	struct mem_cgroup *memcg = mem_cgroup_from_css(seq_css(m));
	unsigned long max = READ_ONCE(memcg->swap.limit);

	if (max == PAGE_COUNTER_MAX)
		seq_puts(m, "max\n");
	else
		seq_printf(m, "%llu\n", (u64)max * PAGE_SIZE);

	return 0;
}

static ssize_t swap_max_write(struct kernfs_open_file *of,
			      char *buf, size_t nbytes, loff_t off)
{
	struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of));
	unsigned long max;
	int err;

	buf = strstrip(buf);
	err = page_counter_memparse(buf, "max", &max);
	if (err)
		return err;

	mutex_lock(&memcg_limit_mutex);
	err = page_counter_limit(&memcg->swap, max);
	mutex_unlock(&memcg_limit_mutex);
	if (err)
		return err;

	return nbytes;
}
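
/*
 * Example (illustrative): memory.swap.max accepts "max" or a byte
 * value, and page_counter_memparse() understands the usual memparse()
 * suffixes such as K, M and G:
 *
 *	$ echo 1G > /sys/fs/cgroup/<group>/memory.swap.max
 *	$ echo max > /sys/fs/cgroup/<group>/memory.swap.max
 *
 * The cgroup path is hypothetical.
 */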

static struct cftype swap_files[] = {
	{
		.name = "swap.current",
		.flags = CFTYPE_NOT_ON_ROOT,
		.read_u64 = swap_current_read,
	},
	{
		.name = "swap.max",
		.flags = CFTYPE_NOT_ON_ROOT,
		.seq_show = swap_max_show,
		.write = swap_max_write,
	},
	{ }
};

static struct cftype memsw_cgroup_files[] = {
	{
		.name = "memsw.usage_in_bytes",
		.private = MEMFILE_PRIVATE(_MEMSWAP, RES_USAGE),
		.read_u64 = mem_cgroup_read_u64,
	},
	{
		.name = "memsw.max_usage_in_bytes",
		.private = MEMFILE_PRIVATE(_MEMSWAP, RES_MAX_USAGE),
		.write = mem_cgroup_reset,
		.read_u64 = mem_cgroup_read_u64,
	},
	{
		.name = "memsw.limit_in_bytes",
		.private = MEMFILE_PRIVATE(_MEMSWAP, RES_LIMIT),
		.write = mem_cgroup_write,
		.read_u64 = mem_cgroup_read_u64,
	},
	{
		.name = "memsw.failcnt",
		.private = MEMFILE_PRIVATE(_MEMSWAP, RES_FAILCNT),
		.write = mem_cgroup_reset,
		.read_u64 = mem_cgroup_read_u64,
	},
	{ },
};

static int __init mem_cgroup_swap_init(void)
{
	if (!mem_cgroup_disabled() && really_do_swap_account) {
		do_swap_account = 1;
		WARN_ON(cgroup_add_dfl_cftypes(&memory_cgrp_subsys,
					       swap_files));
		WARN_ON(cgroup_add_legacy_cftypes(&memory_cgrp_subsys,
						  memsw_cgroup_files));
	}
	return 0;
}
subsys_initcall(mem_cgroup_swap_init);

#endif /* CONFIG_MEMCG_SWAP */