/*
 * memcontrol.c - memory cgroup controller
 *
 * Charges and limits the memory used by cgroups: page cache, anonymous
 * memory, kernel memory and swap, with soft limits, usage thresholds,
 * OOM handling and charge moving at task migration.
 */
#include <linux/page_counter.h>
#include <linux/memcontrol.h>
#include <linux/cgroup.h>
#include <linux/mm.h>
#include <linux/hugetlb.h>
#include <linux/pagemap.h>
#include <linux/smp.h>
#include <linux/page-flags.h>
#include <linux/backing-dev.h>
#include <linux/bit_spinlock.h>
#include <linux/rcupdate.h>
#include <linux/limits.h>
#include <linux/export.h>
#include <linux/mutex.h>
#include <linux/rbtree.h>
#include <linux/slab.h>
#include <linux/swap.h>
#include <linux/swapops.h>
#include <linux/spinlock.h>
#include <linux/eventfd.h>
#include <linux/poll.h>
#include <linux/sort.h>
#include <linux/fs.h>
#include <linux/seq_file.h>
#include <linux/vmpressure.h>
#include <linux/mm_inline.h>
#include <linux/swap_cgroup.h>
#include <linux/cpu.h>
#include <linux/oom.h>
#include <linux/lockdep.h>
#include <linux/file.h>
#include "internal.h"
#include <net/sock.h>
#include <net/ip.h>
#include <net/tcp_memcontrol.h>
#include "slab.h"

#include <asm/uaccess.h>

#include <trace/events/vmscan.h>

struct cgroup_subsys memory_cgrp_subsys __read_mostly;
EXPORT_SYMBOL(memory_cgrp_subsys);

#define MEM_CGROUP_RECLAIM_RETRIES	5
static struct mem_cgroup *root_mem_cgroup __read_mostly;

/* Whether the swap controller is active */
#ifdef CONFIG_MEMCG_SWAP
int do_swap_account __read_mostly;
#else
#define do_swap_account		0
#endif

static const char * const mem_cgroup_stat_names[] = {
	"cache",
	"rss",
	"rss_huge",
	"mapped_file",
	"writeback",
	"swap",
};

static const char * const mem_cgroup_events_names[] = {
	"pgpgin",
	"pgpgout",
	"pgfault",
	"pgmajfault",
};

static const char * const mem_cgroup_lru_names[] = {
	"inactive_anon",
	"active_anon",
	"inactive_file",
	"active_file",
	"unevictable",
};
/*
 * Per-memcg page event counters are checked against these targets to
 * decide when thresholds, soft limits and NUMA scan info need to be
 * rechecked, so the expensive checks run only every so many events.
 */
enum mem_cgroup_events_target {
	MEM_CGROUP_TARGET_THRESH,
	MEM_CGROUP_TARGET_SOFTLIMIT,
	MEM_CGROUP_TARGET_NUMAINFO,
	MEM_CGROUP_NTARGETS,
};
#define THRESHOLDS_EVENTS_TARGET 128
#define SOFTLIMIT_EVENTS_TARGET 1024
#define NUMAINFO_EVENTS_TARGET	1024

struct mem_cgroup_stat_cpu {
	long count[MEM_CGROUP_STAT_NSTATS];
	unsigned long events[MEMCG_NR_EVENTS];
	unsigned long nr_page_events;
	unsigned long targets[MEM_CGROUP_NTARGETS];
};
struct reclaim_iter {
	struct mem_cgroup *position;
	/* scan generation, increased every round-trip */
	unsigned int generation;
};

/*
 * per-zone information in memory controller.
 */
struct mem_cgroup_per_zone {
	struct lruvec		lruvec;
	unsigned long		lru_size[NR_LRU_LISTS];

	struct reclaim_iter	iter[DEF_PRIORITY + 1];

	struct rb_node		tree_node;	/* RB tree node */
	unsigned long		usage_in_excess;/* Set to the value by which */
						/* the soft limit is exceeded*/
	bool			on_tree;
	struct mem_cgroup	*memcg;		/* Back pointer, we cannot */
						/* use container_of	   */
};

struct mem_cgroup_per_node {
	struct mem_cgroup_per_zone zoneinfo[MAX_NR_ZONES];
};

/*
 * Cgroups above their limits are maintained in a RB-Tree, independent of
 * their hierarchy representation
 */
struct mem_cgroup_tree_per_zone {
	struct rb_root rb_root;
	spinlock_t lock;
};

struct mem_cgroup_tree_per_node {
	struct mem_cgroup_tree_per_zone rb_tree_per_zone[MAX_NR_ZONES];
};

struct mem_cgroup_tree {
	struct mem_cgroup_tree_per_node *rb_tree_per_node[MAX_NUMNODES];
};

static struct mem_cgroup_tree soft_limit_tree __read_mostly;
struct mem_cgroup_threshold {
	struct eventfd_ctx *eventfd;
	unsigned long threshold;
};

/* For threshold */
struct mem_cgroup_threshold_ary {
	/* An array index points to threshold just below or equal to usage. */
	int current_threshold;
	/* Size of entries[] */
	unsigned int size;
	/* Array of thresholds */
	struct mem_cgroup_threshold entries[0];
};

struct mem_cgroup_thresholds {
	/* Primary thresholds array */
	struct mem_cgroup_threshold_ary *primary;
	/*
	 * Spare threshold array.
	 * This is needed to make mem_cgroup_unregister_event() "never fail".
	 * It must be able to store at least primary->size - 1 entries.
	 */
	struct mem_cgroup_threshold_ary *spare;
};

/* for OOM */
struct mem_cgroup_eventfd_list {
	struct list_head list;
	struct eventfd_ctx *eventfd;
};
/*
 * cgroup_event represents events which userspace want to receive.
 */
struct mem_cgroup_event {
	/*
	 * memcg which the event belongs to.
	 */
	struct mem_cgroup *memcg;
	/*
	 * eventfd to signal userspace about the event.
	 */
	struct eventfd_ctx *eventfd;
	/*
	 * Each of these stored in a list by the cgroup.
	 */
	struct list_head list;
	/*
	 * register_event() callback will be used to add new userspace
	 * waiter for changes related to this event.  Use eventfd_signal()
	 * on eventfd to send notification to userspace.
	 */
	int (*register_event)(struct mem_cgroup *memcg,
			      struct eventfd_ctx *eventfd, const char *args);
	/*
	 * unregister_event() callback will be called when userspace closes
	 * the eventfd or on cgroup removal.  This callback must be set,
	 * if you want to provide notification functionality.
	 */
	void (*unregister_event)(struct mem_cgroup *memcg,
				 struct eventfd_ctx *eventfd);
	/*
	 * All fields below needed to detect when userspace closes the
	 * eventfd.
	 */
	poll_table pt;
	wait_queue_head_t *wqh;
	wait_queue_t wait;
	struct work_struct remove;
};

static void mem_cgroup_threshold(struct mem_cgroup *memcg);
static void mem_cgroup_oom_notify(struct mem_cgroup *memcg);
/*
 * The memory controller data structure. The memory controller controls
 * both page cache and RSS per cgroup.
 */
struct mem_cgroup {
	struct cgroup_subsys_state css;

	/* Accounted resources */
	struct page_counter memory;
	struct page_counter memsw;
	struct page_counter kmem;

	/* Normal memory consumption range */
	unsigned long low;
	unsigned long high;

	unsigned long soft_limit;

	/* vmpressure notifications */
	struct vmpressure vmpressure;

	/* css_online() has been completed */
	int initialized;

	/*
	 * Should the accounting and control be hierarchical, per subtree?
	 */
	bool use_hierarchy;

	bool		oom_lock;
	atomic_t	under_oom;
	atomic_t	oom_wakeups;

	int	swappiness;
	/* OOM-Killer disable */
	int		oom_kill_disable;

	/* protect arrays of thresholds */
	struct mutex thresholds_lock;

	/* thresholds for memory usage. RCU-protected */
	struct mem_cgroup_thresholds thresholds;

	/* thresholds for mem+swap usage. RCU-protected */
	struct mem_cgroup_thresholds memsw_thresholds;

	/* For oom notifier event fd */
	struct list_head oom_notify;

	/*
	 * Should we move charges of a task when a task is moved into this
	 * mem_cgroup? And what type of charges should we move?
	 */
	unsigned long move_charge_at_immigrate;
	/*
	 * set > 0 if pages under this cgroup are moving to other cgroup.
	 */
	atomic_t		moving_account;
	/* taken only while moving_account > 0 */
	spinlock_t		move_lock;
	struct task_struct	*move_lock_task;
	unsigned long		move_lock_flags;
	/*
	 * percpu counter.
	 */
	struct mem_cgroup_stat_cpu __percpu *stat;
	/*
	 * used when a cpu is offlined or for other synchronization.
	 * See mem_cgroup_read_stat().
	 */
	struct mem_cgroup_stat_cpu nocpu_base;
	spinlock_t pcp_counter_lock;

#if defined(CONFIG_MEMCG_KMEM) && defined(CONFIG_INET)
	struct cg_proto tcp_mem;
#endif
#if defined(CONFIG_MEMCG_KMEM)
	/* Index in the kmem_cache->memcg_params.memcg_caches array */
	int kmemcg_id;
	bool kmem_acct_activated;
	bool kmem_acct_active;
#endif

	int last_scanned_node;
#if MAX_NUMNODES > 1
	nodemask_t	scan_nodes;
	atomic_t	numainfo_events;
	atomic_t	numainfo_updating;
#endif

	/* List of events which userspace want to receive */
	struct list_head event_list;
	spinlock_t event_list_lock;

	struct mem_cgroup_per_node *nodeinfo[0];
	/* WARNING: nodeinfo must be the last member here */
};

#ifdef CONFIG_MEMCG_KMEM
bool memcg_kmem_is_active(struct mem_cgroup *memcg)
{
	return memcg->kmem_acct_active;
}
#endif
/* Stuffs for move charges at task migration. */
/*
 * Types of charges to be moved.
 */
#define MOVE_ANON	0x1U
#define MOVE_FILE	0x2U
#define MOVE_MASK	(MOVE_ANON | MOVE_FILE)

/* "mc" and its members are protected by cgroup_mutex */
static struct move_charge_struct {
	spinlock_t	  lock; /* for from, to */
	struct mem_cgroup *from;
	struct mem_cgroup *to;
	unsigned long flags;
	unsigned long precharge;
	unsigned long moved_charge;
	unsigned long moved_swap;
	struct task_struct *moving_task;	/* a task moving charges */
	wait_queue_head_t waitq;		/* a waitq for other context */
} mc = {
	.lock = __SPIN_LOCK_UNLOCKED(mc.lock),
	.waitq = __WAIT_QUEUE_HEAD_INITIALIZER(mc.waitq),
};

/*
 * Maximum loops in soft limit reclaim, used to prevent infinite loops
 * if they ever occur.
 */
#define	MEM_CGROUP_MAX_RECLAIM_LOOPS		100
#define	MEM_CGROUP_MAX_SOFT_LIMIT_RECLAIM_LOOPS	2

enum charge_type {
	MEM_CGROUP_CHARGE_TYPE_CACHE = 0,
	MEM_CGROUP_CHARGE_TYPE_ANON,
	MEM_CGROUP_CHARGE_TYPE_SWAPOUT,	/* for accounting swapcache */
	MEM_CGROUP_CHARGE_TYPE_DROP,	/* a page was unused swap cache */
	NR_CHARGE_TYPE,
};

/* for encoding cft->private value on file */
enum res_type {
	_MEM,
	_MEMSWAP,
	_OOM_TYPE,
	_KMEM,
};

#define MEMFILE_PRIVATE(x, val)	((x) << 16 | (val))
#define MEMFILE_TYPE(val)	((val) >> 16 & 0xffff)
#define MEMFILE_ATTR(val)	((val) & 0xffff)
/* Used for OOM notifier */
#define OOM_CONTROL		(0)
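
/*
 * Illustrative worked example of the MEMFILE_* encoding above (this note
 * is not from the original source): the counter type lives in the high
 * 16 bits and the attribute in the low 16 bits of cft->private, so for
 * an arbitrary attribute value 2:
 *
 *	MEMFILE_PRIVATE(_MEMSWAP, 2) == (1 << 16) | 2 == 0x10002
 *	MEMFILE_TYPE(0x10002)        == 1 == _MEMSWAP
 *	MEMFILE_ATTR(0x10002)        == 2
 *
 * which is how a single u64 private field routes a cgroup file to both
 * a page_counter and an operation on it.
 */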
/*
 * The memcg_create_mutex will be held whenever a new cgroup is created.
 * As a consequence, any change that needs to protect against new child
 * cgroups appearing has to hold it as well.
 */
static DEFINE_MUTEX(memcg_create_mutex);

struct mem_cgroup *mem_cgroup_from_css(struct cgroup_subsys_state *s)
{
	return s ? container_of(s, struct mem_cgroup, css) : NULL;
}

/* Some nice accessors for the vmpressure. */
struct vmpressure *memcg_to_vmpressure(struct mem_cgroup *memcg)
{
	if (!memcg)
		memcg = root_mem_cgroup;
	return &memcg->vmpressure;
}

struct cgroup_subsys_state *vmpressure_to_css(struct vmpressure *vmpr)
{
	return &container_of(vmpr, struct mem_cgroup, vmpressure)->css;
}

static inline bool mem_cgroup_is_root(struct mem_cgroup *memcg)
{
	return (memcg == root_mem_cgroup);
}

/*
 * We restrict the id in the range of [1, 65535], so it can fit into
 * an unsigned short.
 */
#define MEM_CGROUP_ID_MAX	USHRT_MAX

static inline unsigned short mem_cgroup_id(struct mem_cgroup *memcg)
{
	return memcg->css.id;
}

/*
 * A helper function to get mem_cgroup from ID. Must be called under
 * rcu_read_lock().  The caller is responsible for calling
 * css_tryget_online() if the mem_cgroup is used for charging. (dropping
 * refcnt from swap can be called against removed memcg.)
 */
static inline struct mem_cgroup *mem_cgroup_from_id(unsigned short id)
{
	struct cgroup_subsys_state *css;

	css = css_from_id(id, &memory_cgrp_subsys);
	return mem_cgroup_from_css(css);
}
/* Writing them here to avoid exposing memcg's inner layout */
#if defined(CONFIG_INET) && defined(CONFIG_MEMCG_KMEM)

void sock_update_memcg(struct sock *sk)
{
	if (mem_cgroup_sockets_enabled) {
		struct mem_cgroup *memcg;
		struct cg_proto *cg_proto;

		BUG_ON(!sk->sk_prot->proto_cgroup);

		/* Socket cloning can throw us here with sk_cgrp already
		 * filled. It won't however, necessarily happen from
		 * process context. So the test for root memcg given
		 * the current task's memcg won't help us in this case.
		 *
		 * Respecting the original socket's memcg is a better
		 * decision in this case.
		 */
		if (sk->sk_cgrp) {
			BUG_ON(mem_cgroup_is_root(sk->sk_cgrp->memcg));
			css_get(&sk->sk_cgrp->memcg->css);
			return;
		}

		rcu_read_lock();
		memcg = mem_cgroup_from_task(current);
		cg_proto = sk->sk_prot->proto_cgroup(memcg);
		if (!mem_cgroup_is_root(memcg) &&
		    memcg_proto_active(cg_proto) &&
		    css_tryget_online(&memcg->css)) {
			sk->sk_cgrp = cg_proto;
		}
		rcu_read_unlock();
	}
}
EXPORT_SYMBOL(sock_update_memcg);

void sock_release_memcg(struct sock *sk)
{
	if (mem_cgroup_sockets_enabled && sk->sk_cgrp) {
		struct mem_cgroup *memcg;
		WARN_ON(!sk->sk_cgrp->memcg);
		memcg = sk->sk_cgrp->memcg;
		css_put(&sk->sk_cgrp->memcg->css);
	}
}

struct cg_proto *tcp_proto_cgroup(struct mem_cgroup *memcg)
{
	if (!memcg || mem_cgroup_is_root(memcg))
		return NULL;

	return &memcg->tcp_mem;
}
EXPORT_SYMBOL(tcp_proto_cgroup);

#endif
#ifdef CONFIG_MEMCG_KMEM
/*
 * This will be used as a memcg's index in each cache's
 * ->memcg_params.memcg_caches array.  A cgroup id is not used directly
 * because only kmem-active memcgs need an entry: with, say, 200 memcgs
 * of which only the 200th is kmem-limited, a cgroup-id-indexed array
 * would need 200 entries.
 *
 * The current size of the caches array is stored in memcg_nr_cache_ids.
 * It will double each time we have to increase it.
 */
static DEFINE_IDA(memcg_cache_ida);
int memcg_nr_cache_ids;

/* Protects memcg_nr_cache_ids */
static DECLARE_RWSEM(memcg_cache_ids_sem);

void memcg_get_cache_ids(void)
{
	down_read(&memcg_cache_ids_sem);
}

void memcg_put_cache_ids(void)
{
	up_read(&memcg_cache_ids_sem);
}

/*
 * MIN_SIZE is different than 1, because we would like to avoid going
 * through the alloc/free process all the time. In a small machine, 4
 * kmem-limited cgroups is a reasonable guess. In the future, it could
 * be a parameter or tunable at boot.
 */
#define MEMCG_CACHES_MIN_SIZE 4
#define MEMCG_CACHES_MAX_SIZE MEM_CGROUP_ID_MAX

/*
 * A lot of the calls to the cache allocation functions are expected to be
 * inlined by the compiler. Since the calls to memcg_kmem_get_cache are
 * conditional to this static branch, we'll have to allow modules that do
 * kmem_cache_alloc and the like to see this symbol as well.
 */
struct static_key memcg_kmem_enabled_key;
EXPORT_SYMBOL(memcg_kmem_enabled_key);

#endif /* CONFIG_MEMCG_KMEM */
static struct mem_cgroup_per_zone *
mem_cgroup_zone_zoneinfo(struct mem_cgroup *memcg, struct zone *zone)
{
	int nid = zone_to_nid(zone);
	int zid = zone_idx(zone);

	return &memcg->nodeinfo[nid]->zoneinfo[zid];
}

struct cgroup_subsys_state *mem_cgroup_css(struct mem_cgroup *memcg)
{
	return &memcg->css;
}

static struct mem_cgroup_per_zone *
mem_cgroup_page_zoneinfo(struct mem_cgroup *memcg, struct page *page)
{
	int nid = page_to_nid(page);
	int zid = page_zonenum(page);

	return &memcg->nodeinfo[nid]->zoneinfo[zid];
}

static struct mem_cgroup_tree_per_zone *
soft_limit_tree_node_zone(int nid, int zid)
{
	return &soft_limit_tree.rb_tree_per_node[nid]->rb_tree_per_zone[zid];
}

static struct mem_cgroup_tree_per_zone *
soft_limit_tree_from_page(struct page *page)
{
	int nid = page_to_nid(page);
	int zid = page_zonenum(page);

	return &soft_limit_tree.rb_tree_per_node[nid]->rb_tree_per_zone[zid];
}
static void __mem_cgroup_insert_exceeded(struct mem_cgroup_per_zone *mz,
					 struct mem_cgroup_tree_per_zone *mctz,
					 unsigned long new_usage_in_excess)
{
	struct rb_node **p = &mctz->rb_root.rb_node;
	struct rb_node *parent = NULL;
	struct mem_cgroup_per_zone *mz_node;

	if (mz->on_tree)
		return;

	mz->usage_in_excess = new_usage_in_excess;
	if (!mz->usage_in_excess)
		return;
	while (*p) {
		parent = *p;
		mz_node = rb_entry(parent, struct mem_cgroup_per_zone,
					tree_node);
		if (mz->usage_in_excess < mz_node->usage_in_excess)
			p = &(*p)->rb_left;
		/*
		 * We can't avoid mem cgroups that are over their soft
		 * limit by the same amount
		 */
		else if (mz->usage_in_excess >= mz_node->usage_in_excess)
			p = &(*p)->rb_right;
	}
	rb_link_node(&mz->tree_node, parent, p);
	rb_insert_color(&mz->tree_node, &mctz->rb_root);
	mz->on_tree = true;
}

static void __mem_cgroup_remove_exceeded(struct mem_cgroup_per_zone *mz,
					 struct mem_cgroup_tree_per_zone *mctz)
{
	if (!mz->on_tree)
		return;
	rb_erase(&mz->tree_node, &mctz->rb_root);
	mz->on_tree = false;
}

static void mem_cgroup_remove_exceeded(struct mem_cgroup_per_zone *mz,
				       struct mem_cgroup_tree_per_zone *mctz)
{
	unsigned long flags;

	spin_lock_irqsave(&mctz->lock, flags);
	__mem_cgroup_remove_exceeded(mz, mctz);
	spin_unlock_irqrestore(&mctz->lock, flags);
}
static unsigned long soft_limit_excess(struct mem_cgroup *memcg)
{
	unsigned long nr_pages = page_counter_read(&memcg->memory);
	unsigned long soft_limit = READ_ONCE(memcg->soft_limit);
	unsigned long excess = 0;

	if (nr_pages > soft_limit)
		excess = nr_pages - soft_limit;

	return excess;
}

static void mem_cgroup_update_tree(struct mem_cgroup *memcg, struct page *page)
{
	unsigned long excess;
	struct mem_cgroup_per_zone *mz;
	struct mem_cgroup_tree_per_zone *mctz;

	mctz = soft_limit_tree_from_page(page);
	/*
	 * Necessary to update all ancestors when hierarchy is used,
	 * because their event counter is not touched.
	 */
	for (; memcg; memcg = parent_mem_cgroup(memcg)) {
		mz = mem_cgroup_page_zoneinfo(memcg, page);
		excess = soft_limit_excess(memcg);
		/*
		 * We have to update the tree if mz is on RB-tree or
		 * mem is over its softlimit.
		 */
		if (excess || mz->on_tree) {
			unsigned long flags;

			spin_lock_irqsave(&mctz->lock, flags);
			/* if on-tree, remove it */
			if (mz->on_tree)
				__mem_cgroup_remove_exceeded(mz, mctz);
			/*
			 * Insert again. mz->usage_in_excess will be updated.
			 * If excess is 0, no tree ops.
			 */
			__mem_cgroup_insert_exceeded(mz, mctz, excess);
			spin_unlock_irqrestore(&mctz->lock, flags);
		}
	}
}

static void mem_cgroup_remove_from_trees(struct mem_cgroup *memcg)
{
	struct mem_cgroup_tree_per_zone *mctz;
	struct mem_cgroup_per_zone *mz;
	int nid, zid;

	for_each_node(nid) {
		for (zid = 0; zid < MAX_NR_ZONES; zid++) {
			mz = &memcg->nodeinfo[nid]->zoneinfo[zid];
			mctz = soft_limit_tree_node_zone(nid, zid);
			mem_cgroup_remove_exceeded(mz, mctz);
		}
	}
}
static struct mem_cgroup_per_zone *
__mem_cgroup_largest_soft_limit_node(struct mem_cgroup_tree_per_zone *mctz)
{
	struct rb_node *rightmost = NULL;
	struct mem_cgroup_per_zone *mz;

retry:
	mz = NULL;
	rightmost = rb_last(&mctz->rb_root);
	if (!rightmost)
		goto done;		/* Nothing to reclaim from */

	mz = rb_entry(rightmost, struct mem_cgroup_per_zone, tree_node);
	/*
	 * Remove the node now but someone else can add it back,
	 * we will add it back at the end of reclaim to its correct
	 * position in the tree.
	 */
	__mem_cgroup_remove_exceeded(mz, mctz);
	if (!soft_limit_excess(mz->memcg) ||
	    !css_tryget_online(&mz->memcg->css))
		goto retry;
done:
	return mz;
}

static struct mem_cgroup_per_zone *
mem_cgroup_largest_soft_limit_node(struct mem_cgroup_tree_per_zone *mctz)
{
	struct mem_cgroup_per_zone *mz;

	spin_lock_irq(&mctz->lock);
	mz = __mem_cgroup_largest_soft_limit_node(mctz);
	spin_unlock_irq(&mctz->lock);
	return mz;
}
/*
 * Implementation Note: reading percpu stats for memcg.
 *
 * Both vmstat[] and percpu_counter use thresholds and periodic
 * synchronization to implement "quick" reads, trading precision for
 * read cost.  We could do something similar for the memcg counters.
 *
 * But this _read() function is used for the user interface, and the
 * user accounts memory with it and always requires an exact value.
 * Even with a quick-and-fuzzy read we would still have to visit all
 * online cpus and sum up, so for now no extra synchronization is
 * implemented (except what cpu hotplug requires).
 */
static long mem_cgroup_read_stat(struct mem_cgroup *memcg,
				 enum mem_cgroup_stat_index idx)
{
	long val = 0;
	int cpu;

	get_online_cpus();
	for_each_online_cpu(cpu)
		val += per_cpu(memcg->stat->count[idx], cpu);
#ifdef CONFIG_HOTPLUG_CPU
	spin_lock(&memcg->pcp_counter_lock);
	val += memcg->nocpu_base.count[idx];
	spin_unlock(&memcg->pcp_counter_lock);
#endif
	put_online_cpus();
	return val;
}

static unsigned long mem_cgroup_read_events(struct mem_cgroup *memcg,
					    enum mem_cgroup_events_index idx)
{
	unsigned long val = 0;
	int cpu;

	get_online_cpus();
	for_each_online_cpu(cpu)
		val += per_cpu(memcg->stat->events[idx], cpu);
#ifdef CONFIG_HOTPLUG_CPU
	spin_lock(&memcg->pcp_counter_lock);
	val += memcg->nocpu_base.events[idx];
	spin_unlock(&memcg->pcp_counter_lock);
#endif
	put_online_cpus();
	return val;
}
static void mem_cgroup_charge_statistics(struct mem_cgroup *memcg,
					 struct page *page,
					 int nr_pages)
{
	/*
	 * Here, RSS means 'mapped anon' and anon's SwapCache. Shmem/tmpfs is
	 * counted as CACHE even if it's on ANON LRU.
	 */
	if (PageAnon(page))
		__this_cpu_add(memcg->stat->count[MEM_CGROUP_STAT_RSS],
				nr_pages);
	else
		__this_cpu_add(memcg->stat->count[MEM_CGROUP_STAT_CACHE],
				nr_pages);

	if (PageTransHuge(page))
		__this_cpu_add(memcg->stat->count[MEM_CGROUP_STAT_RSS_HUGE],
				nr_pages);

	/* pagein of a big page is an event. So, ignore page size */
	if (nr_pages > 0)
		__this_cpu_inc(memcg->stat->events[MEM_CGROUP_EVENTS_PGPGIN]);
	else {
		__this_cpu_inc(memcg->stat->events[MEM_CGROUP_EVENTS_PGPGOUT]);
		nr_pages = -nr_pages; /* for event */
	}

	__this_cpu_add(memcg->stat->nr_page_events, nr_pages);
}

unsigned long mem_cgroup_get_lru_size(struct lruvec *lruvec, enum lru_list lru)
{
	struct mem_cgroup_per_zone *mz;

	mz = container_of(lruvec, struct mem_cgroup_per_zone, lruvec);
	return mz->lru_size[lru];
}

static unsigned long mem_cgroup_node_nr_lru_pages(struct mem_cgroup *memcg,
						  int nid,
						  unsigned int lru_mask)
{
	unsigned long nr = 0;
	int zid;

	VM_BUG_ON((unsigned)nid >= nr_node_ids);

	for (zid = 0; zid < MAX_NR_ZONES; zid++) {
		struct mem_cgroup_per_zone *mz;
		enum lru_list lru;

		for_each_lru(lru) {
			if (!(BIT(lru) & lru_mask))
				continue;
			mz = &memcg->nodeinfo[nid]->zoneinfo[zid];
			nr += mz->lru_size[lru];
		}
	}
	return nr;
}

static unsigned long mem_cgroup_nr_lru_pages(struct mem_cgroup *memcg,
					     unsigned int lru_mask)
{
	unsigned long nr = 0;
	int nid;

	for_each_node_state(nid, N_MEMORY)
		nr += mem_cgroup_node_nr_lru_pages(memcg, nid, lru_mask);
	return nr;
}
static bool mem_cgroup_event_ratelimit(struct mem_cgroup *memcg,
				       enum mem_cgroup_events_target target)
{
	unsigned long val, next;

	val = __this_cpu_read(memcg->stat->nr_page_events);
	next = __this_cpu_read(memcg->stat->targets[target]);
	/* from time_after() in jiffies.h */
	if ((long)next - (long)val < 0) {
		switch (target) {
		case MEM_CGROUP_TARGET_THRESH:
			next = val + THRESHOLDS_EVENTS_TARGET;
			break;
		case MEM_CGROUP_TARGET_SOFTLIMIT:
			next = val + SOFTLIMIT_EVENTS_TARGET;
			break;
		case MEM_CGROUP_TARGET_NUMAINFO:
			next = val + NUMAINFO_EVENTS_TARGET;
			break;
		default:
			break;
		}
		__this_cpu_write(memcg->stat->targets[target], next);
		return true;
	}
	return false;
}
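
/*
 * Illustrative note (not from the original source): the signed
 * subtraction above is the same wraparound-safe comparison trick as
 * time_after().  Even when the event counter overflows, e.g. with a
 * 32-bit long:
 *
 *	val  = 0x00000005	(counter wrapped past ULONG_MAX)
 *	next = 0xfffffff0	(target set shortly before the wrap)
 *
 *	(long)next - (long)val == -21 < 0	=> target is due
 *
 * whereas a naive "val >= next" would wrongly conclude the target is
 * still almost 4 billion events away.
 */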
/*
 * Check events in order.
 */
static void memcg_check_events(struct mem_cgroup *memcg, struct page *page)
{
	/* threshold event is triggered in finer grain than soft limit */
	if (unlikely(mem_cgroup_event_ratelimit(memcg,
						MEM_CGROUP_TARGET_THRESH))) {
		bool do_softlimit;
		bool do_numainfo __maybe_unused;

		do_softlimit = mem_cgroup_event_ratelimit(memcg,
						MEM_CGROUP_TARGET_SOFTLIMIT);
#if MAX_NUMNODES > 1
		do_numainfo = mem_cgroup_event_ratelimit(memcg,
						MEM_CGROUP_TARGET_NUMAINFO);
#endif
		mem_cgroup_threshold(memcg);
		if (unlikely(do_softlimit))
			mem_cgroup_update_tree(memcg, page);
#if MAX_NUMNODES > 1
		if (unlikely(do_numainfo))
			atomic_inc(&memcg->numainfo_events);
#endif
	}
}

struct mem_cgroup *mem_cgroup_from_task(struct task_struct *p)
{
	/*
	 * mm_update_next_owner() may clear mm->owner to NULL
	 * if it races with swapoff, page migration, etc.
	 * So this can be called with p == NULL.
	 */
	if (unlikely(!p))
		return NULL;

	return mem_cgroup_from_css(task_css(p, memory_cgrp_id));
}

static struct mem_cgroup *get_mem_cgroup_from_mm(struct mm_struct *mm)
{
	struct mem_cgroup *memcg = NULL;

	rcu_read_lock();
	do {
		/*
		 * Page cache insertions can happen without an
		 * actual mm context, e.g. during disk probing
		 * on boot, loopback IO, acct() writes etc.
		 */
		if (unlikely(!mm))
			memcg = root_mem_cgroup;
		else {
			memcg = mem_cgroup_from_task(rcu_dereference(mm->owner));
			if (unlikely(!memcg))
				memcg = root_mem_cgroup;
		}
	} while (!css_tryget_online(&memcg->css));
	rcu_read_unlock();
	return memcg;
}
/**
 * mem_cgroup_iter - iterate over memory cgroup hierarchy
 * @root: hierarchy root
 * @prev: previously returned memcg, NULL on first invocation
 * @reclaim: cookie for shared reclaim walks, NULL for full walks
 *
 * Returns references to children of the hierarchy below @root, or
 * @root itself, or %NULL after a full round-trip.
 *
 * Caller must pass the return value in @prev on subsequent
 * invocations for reference counting, or use mem_cgroup_iter_break()
 * to cancel a hierarchy walk before the round-trip is complete.
 *
 * Reclaimers can specify a zone and a priority level in @reclaim to
 * divide up the memcgs in the hierarchy among all concurrent
 * reclaimers operating on the same zone and priority.
 */
struct mem_cgroup *mem_cgroup_iter(struct mem_cgroup *root,
				   struct mem_cgroup *prev,
				   struct mem_cgroup_reclaim_cookie *reclaim)
{
	struct reclaim_iter *uninitialized_var(iter);
	struct cgroup_subsys_state *css = NULL;
	struct mem_cgroup *memcg = NULL;
	struct mem_cgroup *pos = NULL;

	if (mem_cgroup_disabled())
		return NULL;

	if (!root)
		root = root_mem_cgroup;

	if (prev && !reclaim)
		pos = prev;

	if (!root->use_hierarchy && root != root_mem_cgroup) {
		if (prev)
			goto out;
		return root;
	}

	rcu_read_lock();

	if (reclaim) {
		struct mem_cgroup_per_zone *mz;

		mz = mem_cgroup_zone_zoneinfo(root, reclaim->zone);
		iter = &mz->iter[reclaim->priority];

		if (prev && reclaim->generation != iter->generation)
			goto out_unlock;

		do {
			pos = READ_ONCE(iter->position);
			/*
			 * A racing update may change the position and
			 * put the last reference, hence css_tryget(),
			 * or retry to see the updated position.
			 */
		} while (pos && !css_tryget(&pos->css));
	}

	if (pos)
		css = &pos->css;

	for (;;) {
		css = css_next_descendant_pre(css, &root->css);
		if (!css) {
			/*
			 * Reclaimers share the hierarchy walk, and a
			 * new one might jump in right at the end of
			 * the hierarchy - make sure they see at least
			 * one group and restart from the beginning.
			 */
			if (!prev)
				continue;
			break;
		}

		/*
		 * Verify the css and acquire a reference.  The root
		 * is provided by the caller, so we know it's alive
		 * and kicking, and don't take an extra reference.
		 */
		memcg = mem_cgroup_from_css(css);

		if (css == &root->css)
			break;

		if (css_tryget(css)) {
			/*
			 * Make sure the memcg is initialized:
			 * mem_cgroup_css_online() orders the
			 * initialization against setting the flag.
			 */
			if (smp_load_acquire(&memcg->initialized))
				break;

			css_put(css);
		}

		memcg = NULL;
	}

	if (reclaim) {
		if (cmpxchg(&iter->position, pos, memcg) == pos) {
			if (memcg)
				css_get(&memcg->css);
			if (pos)
				css_put(&pos->css);
		}

		/*
		 * pairs with css_tryget when dereferencing iter->position
		 * above.
		 */
		if (pos)
			css_put(&pos->css);

		if (!memcg)
			iter->generation++;
		else if (!prev)
			reclaim->generation = iter->generation;
	}

out_unlock:
	rcu_read_unlock();
out:
	if (prev && prev != root)
		css_put(&prev->css);

	return memcg;
}
/**
 * mem_cgroup_iter_break - abort a hierarchy walk prematurely
 * @root: hierarchy root
 * @prev: last visited hierarchy member as returned by mem_cgroup_iter()
 */
void mem_cgroup_iter_break(struct mem_cgroup *root,
			   struct mem_cgroup *prev)
{
	if (!root)
		root = root_mem_cgroup;
	if (prev && prev != root)
		css_put(&prev->css);
}

/*
 * Iteration constructs for visiting all cgroups (under a tree).  If
 * loops are exited prematurely (break), mem_cgroup_iter_break() must
 * be used for reference counting; see the usage sketch below.
 */
#define for_each_mem_cgroup_tree(iter, root)		\
	for (iter = mem_cgroup_iter(root, NULL, NULL);	\
	     iter != NULL;				\
	     iter = mem_cgroup_iter(root, iter, NULL))

#define for_each_mem_cgroup(iter)			\
	for (iter = mem_cgroup_iter(NULL, NULL, NULL);	\
	     iter != NULL;				\
	     iter = mem_cgroup_iter(NULL, iter, NULL))
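
/*
 * Illustrative usage sketch (not from the original source): a hierarchy
 * walk that bails out early must drop the reference held on the last
 * returned memcg via mem_cgroup_iter_break():
 *
 *	struct mem_cgroup *iter;
 *
 *	for_each_mem_cgroup_tree(iter, memcg) {
 *		if (some_condition(iter)) {
 *			mem_cgroup_iter_break(memcg, iter);
 *			break;
 *		}
 *	}
 *
 * where some_condition() stands in for any caller-specific predicate.
 * Walks that run to completion need no cleanup, since the iterator
 * returns NULL only after releasing its last reference.
 */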
void __mem_cgroup_count_vm_event(struct mm_struct *mm, enum vm_event_item idx)
{
	struct mem_cgroup *memcg;

	rcu_read_lock();
	memcg = mem_cgroup_from_task(rcu_dereference(mm->owner));
	if (unlikely(!memcg))
		goto out;

	switch (idx) {
	case PGFAULT:
		this_cpu_inc(memcg->stat->events[MEM_CGROUP_EVENTS_PGFAULT]);
		break;
	case PGMAJFAULT:
		this_cpu_inc(memcg->stat->events[MEM_CGROUP_EVENTS_PGMAJFAULT]);
		break;
	default:
		BUG();
	}
out:
	rcu_read_unlock();
}
EXPORT_SYMBOL(__mem_cgroup_count_vm_event);

/**
 * mem_cgroup_zone_lruvec - get the lru list vector for a zone and memcg
 * @zone: zone of the wanted lruvec
 * @memcg: memcg of the wanted lruvec
 *
 * Returns the lru list vector holding pages for the given @zone and
 * @memcg.  This can be the global zone lruvec, if the memory controller
 * is disabled.
 */
struct lruvec *mem_cgroup_zone_lruvec(struct zone *zone,
				      struct mem_cgroup *memcg)
{
	struct mem_cgroup_per_zone *mz;
	struct lruvec *lruvec;

	if (mem_cgroup_disabled()) {
		lruvec = &zone->lruvec;
		goto out;
	}

	mz = mem_cgroup_zone_zoneinfo(memcg, zone);
	lruvec = &mz->lruvec;
out:
	/*
	 * Since a node can be onlined after the mem_cgroup was created,
	 * we have to be prepared to initialize lruvec->zone here;
	 * and if offlined then reonlined, we need to reinitialize it.
	 */
	if (unlikely(lruvec->zone != zone))
		lruvec->zone = zone;
	return lruvec;
}
/**
 * mem_cgroup_page_lruvec - return lruvec for isolating/putting an LRU page
 * @page: the page
 * @zone: zone of the page
 */
struct lruvec *mem_cgroup_page_lruvec(struct page *page, struct zone *zone)
{
	struct mem_cgroup_per_zone *mz;
	struct mem_cgroup *memcg;
	struct lruvec *lruvec;

	if (mem_cgroup_disabled()) {
		lruvec = &zone->lruvec;
		goto out;
	}

	memcg = page->mem_cgroup;
	/*
	 * Swapcache readahead pages are added to the LRU - and
	 * possibly migrated - before they are charged.
	 */
	if (!memcg)
		memcg = root_mem_cgroup;

	mz = mem_cgroup_page_zoneinfo(memcg, page);
	lruvec = &mz->lruvec;
out:
	/*
	 * Since a node can be onlined after the mem_cgroup was created,
	 * we have to be prepared to initialize lruvec->zone here;
	 * and if offlined then reonlined, we need to reinitialize it.
	 */
	if (unlikely(lruvec->zone != zone))
		lruvec->zone = zone;
	return lruvec;
}
/**
 * mem_cgroup_update_lru_size - account for adding or removing an lru page
 * @lruvec: mem_cgroup per zone lru vector
 * @lru: index of lru list the page is sitting on
 * @nr_pages: positive when adding or negative when removing
 *
 * This function must be called when a page is added to or removed from an
 * lru list.
 */
void mem_cgroup_update_lru_size(struct lruvec *lruvec, enum lru_list lru,
				int nr_pages)
{
	struct mem_cgroup_per_zone *mz;
	unsigned long *lru_size;

	if (mem_cgroup_disabled())
		return;

	mz = container_of(lruvec, struct mem_cgroup_per_zone, lruvec);
	lru_size = mz->lru_size + lru;
	*lru_size += nr_pages;
	VM_BUG_ON((long)(*lru_size) < 0);
}

bool mem_cgroup_is_descendant(struct mem_cgroup *memcg, struct mem_cgroup *root)
{
	if (root == memcg)
		return true;
	if (!root->use_hierarchy)
		return false;
	return cgroup_is_descendant(memcg->css.cgroup, root->css.cgroup);
}
bool task_in_mem_cgroup(struct task_struct *task, struct mem_cgroup *memcg)
{
	struct mem_cgroup *task_memcg;
	struct task_struct *p;
	bool ret;

	p = find_lock_task_mm(task);
	if (p) {
		task_memcg = get_mem_cgroup_from_mm(p->mm);
		task_unlock(p);
	} else {
		/*
		 * All threads may have already detached their mm's, but the oom
		 * killer still needs to detect if they have already been oom
		 * killed to prevent needlessly killing additional tasks.
		 */
		rcu_read_lock();
		task_memcg = mem_cgroup_from_task(task);
		css_get(&task_memcg->css);
		rcu_read_unlock();
	}
	ret = mem_cgroup_is_descendant(task_memcg, memcg);
	css_put(&task_memcg->css);
	return ret;
}

int mem_cgroup_inactive_anon_is_low(struct lruvec *lruvec)
{
	unsigned long inactive_ratio;
	unsigned long inactive;
	unsigned long active;
	unsigned long gb;

	inactive = mem_cgroup_get_lru_size(lruvec, LRU_INACTIVE_ANON);
	active = mem_cgroup_get_lru_size(lruvec, LRU_ACTIVE_ANON);

	gb = (inactive + active) >> (30 - PAGE_SHIFT);
	if (gb)
		inactive_ratio = int_sqrt(10 * gb);
	else
		inactive_ratio = 1;

	return inactive * inactive_ratio < active;
}
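
/*
 * Illustrative note (not from the original source): the heuristic above
 * lets the inactive anon list shrink, relatively, as the anon working
 * set grows.  With 4K pages and 16GB of anon memory:
 *
 *	gb             = 16
 *	inactive_ratio = int_sqrt(10 * 16) = int_sqrt(160) = 12
 *
 * so the inactive list is reported low (worth deactivating into) only
 * while inactive * 12 < active, i.e. while it holds less than roughly
 * 1/13 of the anon pages.  Below 1GB, inactive merely has to stay
 * smaller than active.
 */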
bool mem_cgroup_lruvec_online(struct lruvec *lruvec)
{
	struct mem_cgroup_per_zone *mz;
	struct mem_cgroup *memcg;

	if (mem_cgroup_disabled())
		return true;

	mz = container_of(lruvec, struct mem_cgroup_per_zone, lruvec);
	memcg = mz->memcg;

	return !!(memcg->css.flags & CSS_ONLINE);
}

#define mem_cgroup_from_counter(counter, member)	\
	container_of(counter, struct mem_cgroup, member)

/**
 * mem_cgroup_margin - calculate chargeable space of a memory cgroup
 * @memcg: the memory cgroup
 *
 * Returns the maximum amount of memory @memcg can be charged with, in
 * pages.
 */
static unsigned long mem_cgroup_margin(struct mem_cgroup *memcg)
{
	unsigned long margin = 0;
	unsigned long count;
	unsigned long limit;

	count = page_counter_read(&memcg->memory);
	limit = READ_ONCE(memcg->memory.limit);
	if (count < limit)
		margin = limit - count;

	if (do_swap_account) {
		count = page_counter_read(&memcg->memsw);
		limit = READ_ONCE(memcg->memsw.limit);
		if (count <= limit)
			margin = min(margin, limit - count);
	}

	return margin;
}
int mem_cgroup_swappiness(struct mem_cgroup *memcg)
{
	/* root ? */
	if (mem_cgroup_disabled() || !memcg->css.parent)
		return vm_swappiness;

	return memcg->swappiness;
}

/*
 * A routine for checking "mem" is under move_account() or not.
 *
 * Checking a cgroup is mc.from or mc.to or under hierarchy of
 * moving cgroups. This is for waiting at high-memory pressure
 * caused by "move".
 */
static bool mem_cgroup_under_move(struct mem_cgroup *memcg)
{
	struct mem_cgroup *from;
	struct mem_cgroup *to;
	bool ret = false;
	/*
	 * Unlike task_move routines, we access mc.to, mc.from not under
	 * mutual exclusion by cgroup_mutex. Here, we take spinlock instead.
	 */
	spin_lock(&mc.lock);
	from = mc.from;
	to = mc.to;
	if (!from)
		goto unlock;

	ret = mem_cgroup_is_descendant(from, memcg) ||
		mem_cgroup_is_descendant(to, memcg);
unlock:
	spin_unlock(&mc.lock);
	return ret;
}

static bool mem_cgroup_wait_acct_move(struct mem_cgroup *memcg)
{
	if (mc.moving_task && current != mc.moving_task) {
		if (mem_cgroup_under_move(memcg)) {
			DEFINE_WAIT(wait);
			prepare_to_wait(&mc.waitq, &wait, TASK_INTERRUPTIBLE);
			/* moving charge context might have finished. */
			if (mc.moving_task)
				schedule();
			finish_wait(&mc.waitq, &wait);
			return true;
		}
	}
	return false;
}
#define K(x) ((x) << (PAGE_SHIFT-10))
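/*
 * Illustrative note (not from the original source): K() converts a page
 * count to kilobytes.  With 4K pages, PAGE_SHIFT - 10 == 2, so
 * K(300) == 300 << 2 == 1200, i.e. 300 pages are printed as 1200kB.
 */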
/**
 * mem_cgroup_print_oom_info: Print OOM information relevant to memory controller.
 * @memcg: The memory cgroup that went over limit
 * @p: Task that is going to be killed
 *
 * NOTE: @memcg and @p's mem_cgroup can be different when hierarchy is
 * enabled
 */
void mem_cgroup_print_oom_info(struct mem_cgroup *memcg, struct task_struct *p)
{
	/* oom_info_lock ensures that parallel ooms do not interleave */
	static DEFINE_MUTEX(oom_info_lock);
	struct mem_cgroup *iter;
	unsigned int i;

	mutex_lock(&oom_info_lock);
	rcu_read_lock();

	if (p) {
		pr_info("Task in ");
		pr_cont_cgroup_path(task_cgroup(p, memory_cgrp_id));
		pr_cont(" killed as a result of limit of ");
	} else {
		pr_info("Memory limit reached of cgroup ");
	}

	pr_cont_cgroup_path(memcg->css.cgroup);
	pr_cont("\n");

	rcu_read_unlock();

	pr_info("memory: usage %llukB, limit %llukB, failcnt %lu\n",
		K((u64)page_counter_read(&memcg->memory)),
		K((u64)memcg->memory.limit), memcg->memory.failcnt);
	pr_info("memory+swap: usage %llukB, limit %llukB, failcnt %lu\n",
		K((u64)page_counter_read(&memcg->memsw)),
		K((u64)memcg->memsw.limit), memcg->memsw.failcnt);
	pr_info("kmem: usage %llukB, limit %llukB, failcnt %lu\n",
		K((u64)page_counter_read(&memcg->kmem)),
		K((u64)memcg->kmem.limit), memcg->kmem.failcnt);

	for_each_mem_cgroup_tree(iter, memcg) {
		pr_info("Memory cgroup stats for ");
		pr_cont_cgroup_path(iter->css.cgroup);
		pr_cont(":");

		for (i = 0; i < MEM_CGROUP_STAT_NSTATS; i++) {
			if (i == MEM_CGROUP_STAT_SWAP && !do_swap_account)
				continue;
			pr_cont(" %s:%ldKB", mem_cgroup_stat_names[i],
				K(mem_cgroup_read_stat(iter, i)));
		}

		for (i = 0; i < NR_LRU_LISTS; i++)
			pr_cont(" %s:%luKB", mem_cgroup_lru_names[i],
				K(mem_cgroup_nr_lru_pages(iter, BIT(i))));

		pr_cont("\n");
	}
	mutex_unlock(&oom_info_lock);
}
/*
 * This function returns the number of memcg under hierarchy tree. Returns
 * 1 (self count) if no children.
 */
static int mem_cgroup_count_children(struct mem_cgroup *memcg)
{
	int num = 0;
	struct mem_cgroup *iter;

	for_each_mem_cgroup_tree(iter, memcg)
		num++;
	return num;
}

/*
 * Return the memory (and swap, if configured) limit for a memcg.
 */
static unsigned long mem_cgroup_get_limit(struct mem_cgroup *memcg)
{
	unsigned long limit;

	limit = memcg->memory.limit;
	if (mem_cgroup_swappiness(memcg)) {
		unsigned long memsw_limit;

		memsw_limit = memcg->memsw.limit;
		limit = min(limit + total_swap_pages, memsw_limit);
	}
	return limit;
}
static void mem_cgroup_out_of_memory(struct mem_cgroup *memcg, gfp_t gfp_mask,
				     int order)
{
	struct mem_cgroup *iter;
	unsigned long chosen_points = 0;
	unsigned long totalpages;
	unsigned int points = 0;
	struct task_struct *chosen = NULL;

	/*
	 * If current has a pending SIGKILL or is exiting, then automatically
	 * select it.  The goal is to allow it to allocate so that it may
	 * quickly exit and free its memory.
	 */
	if (fatal_signal_pending(current) || task_will_free_mem(current)) {
		mark_tsk_oom_victim(current);
		return;
	}

	check_panic_on_oom(CONSTRAINT_MEMCG, gfp_mask, order, NULL, memcg);
	totalpages = mem_cgroup_get_limit(memcg) ? : 1;
	for_each_mem_cgroup_tree(iter, memcg) {
		struct css_task_iter it;
		struct task_struct *task;

		css_task_iter_start(&iter->css, &it);
		while ((task = css_task_iter_next(&it))) {
			switch (oom_scan_process_thread(task, totalpages, NULL,
							false)) {
			case OOM_SCAN_SELECT:
				if (chosen)
					put_task_struct(chosen);
				chosen = task;
				chosen_points = ULONG_MAX;
				get_task_struct(chosen);
				/* fall through */
			case OOM_SCAN_CONTINUE:
				continue;
			case OOM_SCAN_ABORT:
				css_task_iter_end(&it);
				mem_cgroup_iter_break(memcg, iter);
				if (chosen)
					put_task_struct(chosen);
				return;
			case OOM_SCAN_OK:
				break;
			}
			points = oom_badness(task, memcg, NULL, totalpages);
			if (!points || points < chosen_points)
				continue;
			/* Prefer thread group leaders for display purposes */
			if (points == chosen_points &&
			    thread_group_leader(chosen))
				continue;

			if (chosen)
				put_task_struct(chosen);
			chosen = task;
			chosen_points = points;
			get_task_struct(chosen);
		}
		css_task_iter_end(&it);
	}

	if (!chosen)
		return;
	points = chosen_points * 1000 / totalpages;
	oom_kill_process(chosen, gfp_mask, order, points, totalpages, memcg,
			 NULL, "Memory cgroup out of memory");
}
#if MAX_NUMNODES > 1

/**
 * test_mem_cgroup_node_reclaimable
 * @memcg: the target memcg
 * @nid: the node ID to be checked.
 * @noswap: specify true here if the user wants file only information.
 *
 * This function returns whether the specified memcg contains any
 * reclaimable pages on a node. Returns true if there are any reclaimable
 * pages in the node.
 */
static bool test_mem_cgroup_node_reclaimable(struct mem_cgroup *memcg,
		int nid, bool noswap)
{
	if (mem_cgroup_node_nr_lru_pages(memcg, nid, LRU_ALL_FILE))
		return true;
	if (noswap || !total_swap_pages)
		return false;
	if (mem_cgroup_node_nr_lru_pages(memcg, nid, LRU_ALL_ANON))
		return true;
	return false;
}

/*
 * Always updating the nodemask is not very good - even if we have an empty
 * list or the wrong list here, we can start from some node and traverse all
 * nodes based on the zonelist. So update the list loosely once in a while.
 */
static void mem_cgroup_may_update_nodemask(struct mem_cgroup *memcg)
{
	int nid;
	/*
	 * numainfo_events > 0 means there was at least NUMAINFO_EVENTS_TARGET
	 * pagein/pageout changes since the last update.
	 */
	if (!atomic_read(&memcg->numainfo_events))
		return;
	if (atomic_inc_return(&memcg->numainfo_updating) > 1)
		return;

	/* make a nodemask where this memcg uses memory from */
	memcg->scan_nodes = node_states[N_MEMORY];

	for_each_node_mask(nid, node_states[N_MEMORY]) {

		if (!test_mem_cgroup_node_reclaimable(memcg, nid, false))
			node_clear(nid, memcg->scan_nodes);
	}

	atomic_set(&memcg->numainfo_events, 0);
	atomic_set(&memcg->numainfo_updating, 0);
}

/*
 * Selecting a node where we start reclaim from. Because what we need is just
 * reducing usage counter, starting from anywhere is OK. Reclaiming from the
 * current node has pros and cons: freeing memory from the current node means
 * freeing memory from a node which we'll use or have used, which may hurt
 * the LRU, and several threads hitting their limits will contend on one
 * node; but freeing from a remote node costs more due to memory latency.
 *
 * Now, we use round-robin. A better algorithm is welcomed.
 */
int mem_cgroup_select_victim_node(struct mem_cgroup *memcg)
{
	int node;

	mem_cgroup_may_update_nodemask(memcg);
	node = memcg->last_scanned_node;

	node = next_node(node, memcg->scan_nodes);
	if (node == MAX_NUMNODES)
		node = first_node(memcg->scan_nodes);
	/*
	 * We call this when we hit limit, not when pages are added to LRU.
	 * No LRU may hold pages because all pages are UNEVICTABLE or
	 * memcg is too small and all pages are not on LRU. In that case,
	 * we use the current node.
	 */
	if (unlikely(node == MAX_NUMNODES))
		node = numa_node_id();

	memcg->last_scanned_node = node;
	return node;
}
#else
int mem_cgroup_select_victim_node(struct mem_cgroup *memcg)
{
	return 0;
}
#endif
static int mem_cgroup_soft_reclaim(struct mem_cgroup *root_memcg,
				   struct zone *zone,
				   gfp_t gfp_mask,
				   unsigned long *total_scanned)
{
	struct mem_cgroup *victim = NULL;
	int total = 0;
	int loop = 0;
	unsigned long excess;
	unsigned long nr_scanned;
	struct mem_cgroup_reclaim_cookie reclaim = {
		.zone = zone,
		.priority = 0,
	};

	excess = soft_limit_excess(root_memcg);

	while (1) {
		victim = mem_cgroup_iter(root_memcg, victim, &reclaim);
		if (!victim) {
			loop++;
			if (loop >= 2) {
				/*
				 * If we have not been able to reclaim
				 * anything, it might be because there are
				 * no reclaimable pages under this hierarchy.
				 */
				if (!total)
					break;
				/*
				 * We want to do more targeted reclaim.
				 * excess >> 2 is not excessive: it avoids
				 * reclaiming too much, while not reclaiming
				 * so little that we keep coming back to
				 * reclaim from this cgroup.
				 */
				if (total >= (excess >> 2) ||
					(loop > MEM_CGROUP_MAX_RECLAIM_LOOPS))
					break;
			}
			continue;
		}
		total += mem_cgroup_shrink_node_zone(victim, gfp_mask, false,
						     zone, &nr_scanned);
		*total_scanned += nr_scanned;
		if (!soft_limit_excess(root_memcg))
			break;
	}
	mem_cgroup_iter_break(root_memcg, victim);
	return total;
}

#ifdef CONFIG_LOCKDEP
static struct lockdep_map memcg_oom_lock_dep_map = {
	.name = "memcg_oom_lock",
};
#endif

static DEFINE_SPINLOCK(memcg_oom_lock);
/*
 * Check whether the OOM killer is already running under our hierarchy.
 * If someone is running, return false.
 */
static bool mem_cgroup_oom_trylock(struct mem_cgroup *memcg)
{
	struct mem_cgroup *iter, *failed = NULL;

	spin_lock(&memcg_oom_lock);

	for_each_mem_cgroup_tree(iter, memcg) {
		if (iter->oom_lock) {
			/*
			 * this subtree of our hierarchy is already locked
			 * so we cannot give a lock.
			 */
			failed = iter;
			mem_cgroup_iter_break(memcg, iter);
			break;
		} else
			iter->oom_lock = true;
	}

	if (failed) {
		/*
		 * OK, we failed to lock the whole subtree so we have
		 * to clean up what we set up to the failing subtree
		 */
		for_each_mem_cgroup_tree(iter, memcg) {
			if (iter == failed) {
				mem_cgroup_iter_break(memcg, iter);
				break;
			}
			iter->oom_lock = false;
		}
	} else
		mutex_acquire(&memcg_oom_lock_dep_map, 0, 1, _RET_IP_);

	spin_unlock(&memcg_oom_lock);

	return !failed;
}

static void mem_cgroup_oom_unlock(struct mem_cgroup *memcg)
{
	struct mem_cgroup *iter;

	spin_lock(&memcg_oom_lock);
	mutex_release(&memcg_oom_lock_dep_map, 1, _RET_IP_);
	for_each_mem_cgroup_tree(iter, memcg)
		iter->oom_lock = false;
	spin_unlock(&memcg_oom_lock);
}

static void mem_cgroup_mark_under_oom(struct mem_cgroup *memcg)
{
	struct mem_cgroup *iter;

	for_each_mem_cgroup_tree(iter, memcg)
		atomic_inc(&iter->under_oom);
}

static void mem_cgroup_unmark_under_oom(struct mem_cgroup *memcg)
{
	struct mem_cgroup *iter;

	/*
	 * When a new child is created while the hierarchy is under oom,
	 * mem_cgroup_oom_lock() may not be called. We have to use
	 * atomic_add_unless() here.
	 */
	for_each_mem_cgroup_tree(iter, memcg)
		atomic_add_unless(&iter->under_oom, -1, 0);
}
static DECLARE_WAIT_QUEUE_HEAD(memcg_oom_waitq);

struct oom_wait_info {
	struct mem_cgroup *memcg;
	wait_queue_t	wait;
};

static int memcg_oom_wake_function(wait_queue_t *wait,
	unsigned mode, int sync, void *arg)
{
	struct mem_cgroup *wake_memcg = (struct mem_cgroup *)arg;
	struct mem_cgroup *oom_wait_memcg;
	struct oom_wait_info *oom_wait_info;

	oom_wait_info = container_of(wait, struct oom_wait_info, wait);
	oom_wait_memcg = oom_wait_info->memcg;

	if (!mem_cgroup_is_descendant(wake_memcg, oom_wait_memcg) &&
	    !mem_cgroup_is_descendant(oom_wait_memcg, wake_memcg))
		return 0;
	return autoremove_wake_function(wait, mode, sync, arg);
}

static void memcg_wakeup_oom(struct mem_cgroup *memcg)
{
	atomic_inc(&memcg->oom_wakeups);
	/* for filtering, pass "memcg" as argument. */
	__wake_up(&memcg_oom_waitq, TASK_NORMAL, 0, memcg);
}

static void memcg_oom_recover(struct mem_cgroup *memcg)
{
	if (memcg && atomic_read(&memcg->under_oom))
		memcg_wakeup_oom(memcg);
}

static void mem_cgroup_oom(struct mem_cgroup *memcg, gfp_t mask, int order)
{
	if (!current->memcg_oom.may_oom)
		return;
	/*
	 * We are in the middle of the charge context here, so we
	 * don't want to block when potentially sitting on a callstack
	 * that holds all kinds of filesystem and mm locks.
	 *
	 * Also, the caller may handle a failed allocation gracefully
	 * (like optional page cache readahead) and so an OOM killer
	 * invocation might not even be necessary.
	 *
	 * That's why we don't do anything here except remember the
	 * OOM context and then deal with it at the end of the page
	 * fault when the stack is unwound, the locks are released,
	 * and when we know whether the fault was overall successful.
	 */
	css_get(&memcg->css);
	current->memcg_oom.memcg = memcg;
	current->memcg_oom.gfp_mask = mask;
	current->memcg_oom.order = order;
}
/**
 * mem_cgroup_oom_synchronize - complete memcg OOM handling
 * @handle: actually kill/wait or just clean up the OOM state
 *
 * This has to be called at the end of a page fault if the memcg OOM
 * handler was enabled.
 *
 * Memcg supports userspace OOM handling where failed allocations must
 * sleep on a waitqueue until the userspace task resolves the
 * situation.  Sleeping directly in the charge context with all kinds
 * of locks held is not a good idea, instead we remember an OOM state
 * in the task and mem_cgroup_oom_synchronize() has to be called at
 * the end of the page fault to complete the OOM handling.
 *
 * Returns %true if an ongoing memcg OOM situation was detected and
 * completed, %false otherwise.
 */
bool mem_cgroup_oom_synchronize(bool handle)
{
	struct mem_cgroup *memcg = current->memcg_oom.memcg;
	struct oom_wait_info owait;
	bool locked;

	/* OOM is global, do not handle */
	if (!memcg)
		return false;

	if (!handle || oom_killer_disabled)
		goto cleanup;

	owait.memcg = memcg;
	owait.wait.flags = 0;
	owait.wait.func = memcg_oom_wake_function;
	owait.wait.private = current;
	INIT_LIST_HEAD(&owait.wait.task_list);

	prepare_to_wait(&memcg_oom_waitq, &owait.wait, TASK_KILLABLE);
	mem_cgroup_mark_under_oom(memcg);

	locked = mem_cgroup_oom_trylock(memcg);

	if (locked)
		mem_cgroup_oom_notify(memcg);

	if (locked && !memcg->oom_kill_disable) {
		mem_cgroup_unmark_under_oom(memcg);
		finish_wait(&memcg_oom_waitq, &owait.wait);
		mem_cgroup_out_of_memory(memcg, current->memcg_oom.gfp_mask,
					 current->memcg_oom.order);
	} else {
		schedule();
		mem_cgroup_unmark_under_oom(memcg);
		finish_wait(&memcg_oom_waitq, &owait.wait);
	}

	if (locked) {
		mem_cgroup_oom_unlock(memcg);
		/*
		 * There is no guarantee that an OOM-lock contender
		 * sees the wakeups triggered by the OOM kill
		 * uncharges.  Wake any sleepers explicitly.
		 */
		memcg_oom_recover(memcg);
	}
cleanup:
	current->memcg_oom.memcg = NULL;
	css_put(&memcg->css);
	return true;
}
/**
 * mem_cgroup_begin_page_stat - begin a page state statistics transaction
 * @page: page that is going to change accounted state
 *
 * This function must mark the beginning of an accounted page state
 * change to prevent double accounting when the page is concurrently
 * being moved to another memcg:
 *
 *   memcg = mem_cgroup_begin_page_stat(page);
 *   if (TestClearPageState(page))
 *     mem_cgroup_update_page_stat(memcg, state, -1);
 *   mem_cgroup_end_page_stat(memcg);
 */
struct mem_cgroup *mem_cgroup_begin_page_stat(struct page *page)
{
	struct mem_cgroup *memcg;
	unsigned long flags;

	/*
	 * The RCU lock is held throughout the transaction.  The fast
	 * path can get away without acquiring the memcg->move_lock
	 * because page moving starts with an RCU grace period.
	 *
	 * The RCU lock also protects the memcg from being freed when
	 * the page state that is going to change is the only thing
	 * preventing the page from being uncharged.
	 * E.g. end-writeback clearing PageWriteback(), which allows
	 * migration to go ahead and uncharge the page before the
	 * account transaction might be complete.
	 */
	rcu_read_lock();

	if (mem_cgroup_disabled())
		return NULL;
again:
	memcg = page->mem_cgroup;
	if (unlikely(!memcg))
		return NULL;

	if (atomic_read(&memcg->moving_account) <= 0)
		return memcg;

	spin_lock_irqsave(&memcg->move_lock, flags);
	if (memcg != page->mem_cgroup) {
		spin_unlock_irqrestore(&memcg->move_lock, flags);
		goto again;
	}

	/*
	 * When charge migration first begins, we can have locked and
	 * unlocked page stat updates happening concurrently.  Track
	 * the task who has the lock for mem_cgroup_end_page_stat().
	 */
	memcg->move_lock_task = current;
	memcg->move_lock_flags = flags;

	return memcg;
}

/**
 * mem_cgroup_end_page_stat - finish a page state statistics transaction
 * @memcg: the memcg that was accounted against
 */
void mem_cgroup_end_page_stat(struct mem_cgroup *memcg)
{
	if (memcg && memcg->move_lock_task == current) {
		unsigned long flags = memcg->move_lock_flags;

		memcg->move_lock_task = NULL;
		memcg->move_lock_flags = 0;

		spin_unlock_irqrestore(&memcg->move_lock, flags);
	}

	rcu_read_unlock();
}

/**
 * mem_cgroup_update_page_stat - update page state statistics
 * @memcg: memcg to account against
 * @idx: page state item to account
 * @val: number of pages (positive or negative)
 *
 * See mem_cgroup_begin_page_stat() for locking requirements.
 */
void mem_cgroup_update_page_stat(struct mem_cgroup *memcg,
				 enum mem_cgroup_stat_index idx, int val)
{
	VM_BUG_ON(!rcu_read_lock_held());

	if (memcg)
		this_cpu_add(memcg->stat->count[idx], val);
}
/*
 * size of first charge trial. "32" comes from vmscan.c's magic value.
 * TODO: maybe necessary to use big numbers in big irons.
 */
#define CHARGE_BATCH	32U
struct memcg_stock_pcp {
	struct mem_cgroup *cached; /* this never be root cgroup */
	unsigned int nr_pages;
	struct work_struct work;
	unsigned long flags;
#define FLUSHING_CACHED_CHARGE	0
};
static DEFINE_PER_CPU(struct memcg_stock_pcp, memcg_stock);
static DEFINE_MUTEX(percpu_charge_mutex);
/**
 * consume_stock: Try to consume stocked charge on this cpu.
 * @memcg: memcg to consume from.
 * @nr_pages: how many pages to charge.
 *
 * The charges will only happen if @memcg matches the current cpu's memcg
 * stock, and at least @nr_pages are available in that stock.  Failure to
 * service an allocation will refill the stock.
 *
 * returns true if successful, false otherwise.
 */
static bool consume_stock(struct mem_cgroup *memcg, unsigned int nr_pages)
{
	struct memcg_stock_pcp *stock;
	bool ret = false;

	if (nr_pages > CHARGE_BATCH)
		return ret;

	stock = &get_cpu_var(memcg_stock);
	if (memcg == stock->cached && stock->nr_pages >= nr_pages) {
		stock->nr_pages -= nr_pages;
		ret = true;
	}
	put_cpu_var(memcg_stock);
	return ret;
}

/*
 * Returns stocks cached in percpu and reset cached information.
 */
static void drain_stock(struct memcg_stock_pcp *stock)
{
	struct mem_cgroup *old = stock->cached;

	if (stock->nr_pages) {
		page_counter_uncharge(&old->memory, stock->nr_pages);
		if (do_swap_account)
			page_counter_uncharge(&old->memsw, stock->nr_pages);
		css_put_many(&old->css, stock->nr_pages);
		stock->nr_pages = 0;
	}
	stock->cached = NULL;
}

/*
 * This must be called under preempt disabled or must be called by
 * a thread which is pinned to local cpu.
 */
static void drain_local_stock(struct work_struct *dummy)
{
	struct memcg_stock_pcp *stock = this_cpu_ptr(&memcg_stock);
	drain_stock(stock);
	clear_bit(FLUSHING_CACHED_CHARGE, &stock->flags);
}

/*
 * Cache charges(val) to local per_cpu area.
 * This will be consumed by consume_stock() function, later.
 */
static void refill_stock(struct mem_cgroup *memcg, unsigned int nr_pages)
{
	struct memcg_stock_pcp *stock = &get_cpu_var(memcg_stock);

	if (stock->cached != memcg) {
		drain_stock(stock);
		stock->cached = memcg;
	}
	stock->nr_pages += nr_pages;
	put_cpu_var(memcg_stock);
}
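
/*
 * Illustrative flow (not from the original source): try_charge() below
 * charges the page_counter in CHARGE_BATCH-sized chunks and parks the
 * surplus in the per-cpu stock, so repeated small charges stay off the
 * shared counters:
 *
 *	try_charge(memcg, gfp, 1)
 *	  -> page_counter_try_charge(..., 32, ...)	charge one batch
 *	  -> refill_stock(memcg, 31)			stash the surplus
 *
 *	consume_stock(memcg, 1)				the next 31 single-
 *							page charges are
 *							cpu-local hits
 */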
/*
 * Drains all per-CPU charge caches for given root_memcg resp. subtree
 * of the hierarchy under it.
 */
static void drain_all_stock(struct mem_cgroup *root_memcg)
{
	int cpu, curcpu;

	/* If someone's already draining, avoid adding more workers. */
	if (!mutex_trylock(&percpu_charge_mutex))
		return;
	/* Notify other cpus that system-wide "drain" is running */
	get_online_cpus();
	curcpu = get_cpu();
	for_each_online_cpu(cpu) {
		struct memcg_stock_pcp *stock = &per_cpu(memcg_stock, cpu);
		struct mem_cgroup *memcg;

		memcg = stock->cached;
		if (!memcg || !stock->nr_pages)
			continue;
		if (!mem_cgroup_is_descendant(memcg, root_memcg))
			continue;
		if (!test_and_set_bit(FLUSHING_CACHED_CHARGE, &stock->flags)) {
			if (cpu == curcpu)
				drain_local_stock(&stock->work);
			else
				schedule_work_on(cpu, &stock->work);
		}
	}
	put_cpu();
	put_online_cpus();
	mutex_unlock(&percpu_charge_mutex);
}

/*
 * This function drains percpu counter values from a DEAD cpu and
 * moves them to the local cpu. Note that this function can be preempted.
 */
static void mem_cgroup_drain_pcp_counter(struct mem_cgroup *memcg, int cpu)
{
	int i;

	spin_lock(&memcg->pcp_counter_lock);
	for (i = 0; i < MEM_CGROUP_STAT_NSTATS; i++) {
		long x = per_cpu(memcg->stat->count[i], cpu);

		per_cpu(memcg->stat->count[i], cpu) = 0;
		memcg->nocpu_base.count[i] += x;
	}
	for (i = 0; i < MEM_CGROUP_EVENTS_NSTATS; i++) {
		unsigned long x = per_cpu(memcg->stat->events[i], cpu);

		per_cpu(memcg->stat->events[i], cpu) = 0;
		memcg->nocpu_base.events[i] += x;
	}
	spin_unlock(&memcg->pcp_counter_lock);
}

static int memcg_cpu_hotplug_callback(struct notifier_block *nb,
					unsigned long action,
					void *hcpu)
{
	int cpu = (unsigned long)hcpu;
	struct memcg_stock_pcp *stock;
	struct mem_cgroup *iter;

	if (action == CPU_ONLINE)
		return NOTIFY_OK;

	if (action != CPU_DEAD && action != CPU_DEAD_FROZEN)
		return NOTIFY_OK;

	for_each_mem_cgroup(iter)
		mem_cgroup_drain_pcp_counter(iter, cpu);

	stock = &per_cpu(memcg_stock, cpu);
	drain_stock(stock);
	return NOTIFY_OK;
}
static int try_charge(struct mem_cgroup *memcg, gfp_t gfp_mask,
		      unsigned int nr_pages)
{
	unsigned int batch = max(CHARGE_BATCH, nr_pages);
	int nr_retries = MEM_CGROUP_RECLAIM_RETRIES;
	struct mem_cgroup *mem_over_limit;
	struct page_counter *counter;
	unsigned long nr_reclaimed;
	bool may_swap = true;
	bool drained = false;
	int ret = 0;

	if (mem_cgroup_is_root(memcg))
		goto done;
retry:
	if (consume_stock(memcg, nr_pages))
		goto done;

	if (!do_swap_account ||
	    !page_counter_try_charge(&memcg->memsw, batch, &counter)) {
		if (!page_counter_try_charge(&memcg->memory, batch, &counter))
			goto done_restock;
		if (do_swap_account)
			page_counter_uncharge(&memcg->memsw, batch);
		mem_over_limit = mem_cgroup_from_counter(counter, memory);
	} else {
		mem_over_limit = mem_cgroup_from_counter(counter, memsw);
		may_swap = false;
	}

	if (batch > nr_pages) {
		batch = nr_pages;
		goto retry;
	}

	/*
	 * Unlike in global OOM situations, memcg is not in a physical
	 * memory shortage.  Allow dying and OOM-killed tasks to
	 * bypass the last charges so that they can exit quickly and
	 * free their memory.
	 */
	if (unlikely(test_thread_flag(TIF_MEMDIE) ||
		     fatal_signal_pending(current) ||
		     current->flags & PF_EXITING))
		goto bypass;

	if (unlikely(task_in_memcg_oom(current)))
		goto nomem;

	if (!(gfp_mask & __GFP_WAIT))
		goto nomem;

	mem_cgroup_events(mem_over_limit, MEMCG_MAX, 1);

	nr_reclaimed = try_to_free_mem_cgroup_pages(mem_over_limit, nr_pages,
						    gfp_mask, may_swap);

	if (mem_cgroup_margin(mem_over_limit) >= nr_pages)
		goto retry;

	if (!drained) {
		drain_all_stock(mem_over_limit);
		drained = true;
		goto retry;
	}

	if (gfp_mask & __GFP_NORETRY)
		goto nomem;
	/*
	 * Even though the limit is exceeded at this point, reclaim
	 * may have been able to free some pages.  Retry the charge
	 * before killing the task.
	 *
	 * Only for regular pages, though: huge pages are rather
	 * unlikely to succeed so close to the limit, and we fall back
	 * to regular pages anyway in case of failure.
	 */
	if (nr_reclaimed && nr_pages <= (1 << PAGE_ALLOC_COSTLY_ORDER))
		goto retry;
	/*
	 * At task move, charge accounts can be doubly counted. So, it's
	 * better to wait until the end of task_move if something is going on.
	 */
	if (mem_cgroup_wait_acct_move(mem_over_limit))
		goto retry;

	if (nr_retries--)
		goto retry;

	if (gfp_mask & __GFP_NOFAIL)
		goto bypass;

	if (fatal_signal_pending(current))
		goto bypass;

	mem_cgroup_events(mem_over_limit, MEMCG_OOM, 1);

	mem_cgroup_oom(mem_over_limit, gfp_mask, get_order(nr_pages));
nomem:
	if (!(gfp_mask & __GFP_NOFAIL))
		return -ENOMEM;
bypass:
	return -EINTR;

done_restock:
	css_get_many(&memcg->css, batch);
	if (batch > nr_pages)
		refill_stock(memcg, batch - nr_pages);
	if (!(gfp_mask & __GFP_WAIT))
		goto done;
	/*
	 * If the hierarchy is above the normal consumption range,
	 * make the charging task trim their excess contribution.
	 */
	do {
		if (page_counter_read(&memcg->memory) <= memcg->high)
			continue;
		mem_cgroup_events(memcg, MEMCG_HIGH, 1);
		try_to_free_mem_cgroup_pages(memcg, nr_pages, gfp_mask, true);
	} while ((memcg = parent_mem_cgroup(memcg)));
done:
	return ret;
}

static void cancel_charge(struct mem_cgroup *memcg, unsigned int nr_pages)
{
	if (mem_cgroup_is_root(memcg))
		return;

	page_counter_uncharge(&memcg->memory, nr_pages);
	if (do_swap_account)
		page_counter_uncharge(&memcg->memsw, nr_pages);

	css_put_many(&memcg->css, nr_pages);
}
/*
 * try_get_mem_cgroup_from_page - look up page's memcg association
 * @page: the page
 *
 * Look up, get a css reference, and return the memcg that owns @page.
 *
 * The page must be locked to prevent racing with swap-in and page
 * cache charges.  The swapcache branch consults the swap_cgroup record
 * for pages that have been swapped out but not yet recharged.
 */
struct mem_cgroup *try_get_mem_cgroup_from_page(struct page *page)
{
	struct mem_cgroup *memcg;
	unsigned short id;
	swp_entry_t ent;

	VM_BUG_ON_PAGE(!PageLocked(page), page);

	memcg = page->mem_cgroup;
	if (memcg) {
		if (!css_tryget_online(&memcg->css))
			memcg = NULL;
	} else if (PageSwapCache(page)) {
		ent.val = page_private(page);
		id = lookup_swap_cgroup_id(ent);
		rcu_read_lock();
		memcg = mem_cgroup_from_id(id);
		if (memcg && !css_tryget_online(&memcg->css))
			memcg = NULL;
		rcu_read_unlock();
	}
	return memcg;
}

static void lock_page_lru(struct page *page, int *isolated)
{
	struct zone *zone = page_zone(page);

	spin_lock_irq(&zone->lru_lock);
	if (PageLRU(page)) {
		struct lruvec *lruvec;

		lruvec = mem_cgroup_page_lruvec(page, zone);
		ClearPageLRU(page);
		del_page_from_lru_list(page, lruvec, page_lru(page));
		*isolated = 1;
	} else
		*isolated = 0;
}

static void unlock_page_lru(struct page *page, int isolated)
{
	struct zone *zone = page_zone(page);

	if (isolated) {
		struct lruvec *lruvec;

		lruvec = mem_cgroup_page_lruvec(page, zone);
		VM_BUG_ON_PAGE(PageLRU(page), page);
		SetPageLRU(page);
		add_page_to_lru_list(page, lruvec, page_lru(page));
	}
	spin_unlock_irq(&zone->lru_lock);
}
static void commit_charge(struct page *page, struct mem_cgroup *memcg,
			  bool lrucare)
{
	int isolated;

	VM_BUG_ON_PAGE(page->mem_cgroup, page);

	/*
	 * In some cases, SwapCache and FUSE(splice_buf->radixtree), the page
	 * may already be on some other mem_cgroup's LRU.  Take care of it.
	 */
	if (lrucare)
		lock_page_lru(page, &isolated);

	/*
	 * Nobody should be changing or seriously looking at
	 * page->mem_cgroup at this point:
	 *
	 * - the page is uncharged
	 *
	 * - the page is off-LRU
	 *
	 * - an anonymous fault has exclusive page access, except for
	 *   a locked page table
	 *
	 * - a page cache insertion, a swapin fault, or a migration
	 *   have the page locked
	 */
	page->mem_cgroup = memcg;

	if (lrucare)
		unlock_page_lru(page, isolated);
}
#ifdef CONFIG_MEMCG_KMEM
int memcg_charge_kmem(struct mem_cgroup *memcg, gfp_t gfp,
		      unsigned long nr_pages)
{
	struct page_counter *counter;
	int ret = 0;

	ret = page_counter_try_charge(&memcg->kmem, nr_pages, &counter);
	if (ret < 0)
		return ret;

	ret = try_charge(memcg, gfp, nr_pages);
	if (ret == -EINTR)  {
		/*
		 * try_charge() chose to bypass to root due to OOM kill or
		 * fatal signal.  Since our only options are to either fail
		 * the allocation or charge it to this cgroup, do it as a
		 * temporary condition. But we can't fail. From a kmem/slab
		 * perspective, the cache has already been selected, by
		 * mem_cgroup_kmem_get_cache(), so it is too late to change
		 * our minds.
		 *
		 * This condition will only trigger if the task entered
		 * memcg_charge_kmem in a sane state, but was OOM-killed
		 * during try_charge() above. Tasks that were already dying
		 * when the allocation triggers should have been already
		 * directed to the root cgroup in memcontrol.h
		 */
		page_counter_charge(&memcg->memory, nr_pages);
		if (do_swap_account)
			page_counter_charge(&memcg->memsw, nr_pages);
		css_get_many(&memcg->css, nr_pages);
		ret = 0;
	} else if (ret)
		page_counter_uncharge(&memcg->kmem, nr_pages);

	return ret;
}

void memcg_uncharge_kmem(struct mem_cgroup *memcg, unsigned long nr_pages)
{
	page_counter_uncharge(&memcg->memory, nr_pages);
	if (do_swap_account)
		page_counter_uncharge(&memcg->memsw, nr_pages);

	page_counter_uncharge(&memcg->kmem, nr_pages);

	css_put_many(&memcg->css, nr_pages);
}

/*
 * helper for accessing a memcg's index. It will be used as an index in the
 * child cache array in kmem_cache, and also to derive its name. This function
 * will return -1 when this is not a kmem-limited memcg.
 */
int memcg_cache_id(struct mem_cgroup *memcg)
{
	return memcg ? memcg->kmemcg_id : -1;
}
static int memcg_alloc_cache_id(void)
{
	int id, size;
	int err;

	id = ida_simple_get(&memcg_cache_ida,
			    0, MEMCG_CACHES_MAX_SIZE, GFP_KERNEL);
	if (id < 0)
		return id;

	if (id < memcg_nr_cache_ids)
		return id;

	/*
	 * There's no space for the new id in memcg_caches arrays,
	 * so we have to grow them.
	 */
	down_write(&memcg_cache_ids_sem);

	size = 2 * (id + 1);
	if (size < MEMCG_CACHES_MIN_SIZE)
		size = MEMCG_CACHES_MIN_SIZE;
	else if (size > MEMCG_CACHES_MAX_SIZE)
		size = MEMCG_CACHES_MAX_SIZE;

	err = memcg_update_all_caches(size);
	if (!err)
		err = memcg_update_all_list_lrus(size);
	if (!err)
		memcg_nr_cache_ids = size;

	up_write(&memcg_cache_ids_sem);

	if (err) {
		ida_simple_remove(&memcg_cache_ida, id);
		return err;
	}
	return id;
}
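
/*
 * Illustrative note (not from the original source): the per-memcg cache
 * arrays grow geometrically, clamped to the range
 * [MEMCG_CACHES_MIN_SIZE, MEMCG_CACHES_MAX_SIZE].  Allocating id 0 on a
 * fresh system sizes them to MEMCG_CACHES_MIN_SIZE (2 * 1 = 2, clamped
 * up to 4); the first id past the current size, say id 4, doubles it:
 *
 *	size = 2 * (4 + 1) = 10
 *
 * so only about log2(nr_ids) expensive array resizes happen as memcgs
 * accumulate.
 */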
static void memcg_free_cache_id(int id)
{
	ida_simple_remove(&memcg_cache_ida, id);
}

struct memcg_kmem_cache_create_work {
	struct mem_cgroup *memcg;
	struct kmem_cache *cachep;
	struct work_struct work;
};

static void memcg_kmem_cache_create_func(struct work_struct *w)
{
	struct memcg_kmem_cache_create_work *cw =
		container_of(w, struct memcg_kmem_cache_create_work, work);
	struct mem_cgroup *memcg = cw->memcg;
	struct kmem_cache *cachep = cw->cachep;

	memcg_create_kmem_cache(memcg, cachep);

	css_put(&memcg->css);
	kfree(cw);
}

/*
 * Enqueue the creation of a per-memcg kmem_cache.
 */
static void __memcg_schedule_kmem_cache_create(struct mem_cgroup *memcg,
					       struct kmem_cache *cachep)
{
	struct memcg_kmem_cache_create_work *cw;

	cw = kmalloc(sizeof(*cw), GFP_NOWAIT);
	if (!cw)
		return;

	css_get(&memcg->css);

	cw->memcg = memcg;
	cw->cachep = cachep;
	INIT_WORK(&cw->work, memcg_kmem_cache_create_func);

	schedule_work(&cw->work);
}

static void memcg_schedule_kmem_cache_create(struct mem_cgroup *memcg,
					     struct kmem_cache *cachep)
{
	/*
	 * We need to stop accounting when we kmalloc, because if the
	 * corresponding kmalloc cache is not yet created, the first allocation
	 * in __memcg_schedule_kmem_cache_create will recurse.
	 *
	 * However, it is better to enclose the whole function. Depending on
	 * the debugging options enabled, INIT_WORK(), for instance, can
	 * trigger an allocation. This too, will make us recurse. Because at
	 * this point we can't allow ourselves back into memcg_kmem_get_cache,
	 * the safest choice is to simply defer everything.
	 */
	current->memcg_kmem_skip_account = 1;
	__memcg_schedule_kmem_cache_create(memcg, cachep);
	current->memcg_kmem_skip_account = 0;
}
/*
 * Return the kmem_cache we're supposed to use for a slab allocation.
 * We try to use the current memcg's version of the cache.
 *
 * If the cache does not exist yet, if we are the first user of it,
 * we either create it immediately, if possible, or create it asynchronously
 * in a workqueue.
 * In the latter case, we will let the current allocation go through with
 * the original cache.
 *
 * Can't be called in interrupt context or from kernel threads.
 * This function needs to be called with rcu_read_lock() held.
 */
struct kmem_cache *__memcg_kmem_get_cache(struct kmem_cache *cachep)
{
	struct mem_cgroup *memcg;
	struct kmem_cache *memcg_cachep;
	int kmemcg_id;

	VM_BUG_ON(!is_root_cache(cachep));

	if (current->memcg_kmem_skip_account)
		return cachep;

	memcg = get_mem_cgroup_from_mm(current->mm);
	kmemcg_id = READ_ONCE(memcg->kmemcg_id);
	if (kmemcg_id < 0)
		goto out;

	memcg_cachep = cache_from_memcg_idx(cachep, kmemcg_id);
	if (likely(memcg_cachep))
		return memcg_cachep;

	/*
	 * If we are in a safe context (can wait, and not in interrupt
	 * context), we could be predictable and return right away.
	 * This would guarantee that the allocation being performed
	 * already belongs in the new cache.
	 *
	 * However, there are some clashes that can arrive from locking.
	 * For instance, because we acquire the slab_mutex while doing
	 * memcg_create_kmem_cache, this means no further allocation
	 * could happen with the slab_mutex held. So it's better to
	 * defer everything.
	 */
	memcg_schedule_kmem_cache_create(memcg, cachep);
out:
	css_put(&memcg->css);
	return cachep;
}

void __memcg_kmem_put_cache(struct kmem_cache *cachep)
{
	if (!is_root_cache(cachep))
		css_put(&cachep->memcg_params.memcg->css);
}
/*
 * We need to verify if the allocation against current->mm->owner's memcg is
 * possible for the given order. But the page is not allocated yet, so we'll
 * need a further commit step to do the final arrangements.
 *
 * It is possible for the task to switch cgroups in this mean time, so at
 * commit time, we can't rely on task conversion any longer.  We'll then use
 * the handle argument to return to the caller which cgroup we should commit
 * against. We could also return the memcg directly and avoid the pointer
 * passing, but a boolean return value gives better semantics considering
 * the compiled-out case as well.
 *
 * Returning true means the allocation is possible.
 */
bool
__memcg_kmem_newpage_charge(gfp_t gfp, struct mem_cgroup **_memcg, int order)
{
	struct mem_cgroup *memcg;
	int ret;

	*_memcg = NULL;

	memcg = get_mem_cgroup_from_mm(current->mm);

	if (!memcg_kmem_is_active(memcg)) {
		css_put(&memcg->css);
		return true;
	}

	ret = memcg_charge_kmem(memcg, gfp, 1 << order);
	if (!ret)
		*_memcg = memcg;

	css_put(&memcg->css);
	return (ret == 0);
}
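
/*
 * Illustrative usage sketch (not from the original source): the page
 * allocator glue pairs the three entry points in this block roughly as
 *
 *	struct mem_cgroup *memcg;
 *
 *	if (!__memcg_kmem_newpage_charge(gfp, &memcg, order))
 *		return NULL;				charge refused
 *	page = alloc_pages(gfp, order);
 *	__memcg_kmem_commit_charge(page, memcg, order);	bind, or revert
 *							if page is NULL
 *	...
 *	__memcg_kmem_uncharge_pages(page, order);	on free
 *
 * so the charge exists before the allocation and is either committed to
 * the page or rolled back, never leaked.
 */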
void __memcg_kmem_commit_charge(struct page *page, struct mem_cgroup *memcg,
			      int order)
{
	VM_BUG_ON(mem_cgroup_is_root(memcg));

	/* The page allocation failed. Revert */
	if (!page) {
		memcg_uncharge_kmem(memcg, 1 << order);
		return;
	}
	page->mem_cgroup = memcg;
}

void __memcg_kmem_uncharge_pages(struct page *page, int order)
{
	struct mem_cgroup *memcg = page->mem_cgroup;

	if (!memcg)
		return;

	VM_BUG_ON_PAGE(mem_cgroup_is_root(memcg), page);

	memcg_uncharge_kmem(memcg, 1 << order);
	page->mem_cgroup = NULL;
}

struct mem_cgroup *__mem_cgroup_from_kmem(void *ptr)
{
	struct mem_cgroup *memcg = NULL;
	struct kmem_cache *cachep;
	struct page *page;

	page = virt_to_head_page(ptr);
	if (PageSlab(page)) {
		cachep = page->slab_cache;
		if (!is_root_cache(cachep))
			memcg = cachep->memcg_params.memcg;
	} else
		/* page allocated by alloc_kmem_pages */
		memcg = page->mem_cgroup;

	return memcg;
}
#endif /* CONFIG_MEMCG_KMEM */
#ifdef CONFIG_TRANSPARENT_HUGEPAGE

/*
 * Because tail pages are not marked as "used", set it. We're under
 * zone->lru_lock, 'splitting on pmd' and compound_lock.
 * charge/uncharge will be never happen and move_account() is done under
 * compound_lock(), so we don't have to take care of races.
 */
void mem_cgroup_split_huge_fixup(struct page *head)
{
	int i;

	if (mem_cgroup_disabled())
		return;

	for (i = 1; i < HPAGE_PMD_NR; i++)
		head[i].mem_cgroup = head->mem_cgroup;

	__this_cpu_sub(head->mem_cgroup->stat->count[MEM_CGROUP_STAT_RSS_HUGE],
		       HPAGE_PMD_NR);
}
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */

#ifdef CONFIG_MEMCG_SWAP
static void mem_cgroup_swap_statistics(struct mem_cgroup *memcg,
					 bool charge)
{
	int val = (charge) ? 1 : -1;
	this_cpu_add(memcg->stat->count[MEM_CGROUP_STAT_SWAP], val);
}
/**
 * mem_cgroup_move_swap_account - move swap charge and swap_cgroup's record.
 * @entry: swap entry to be moved
 * @from:  mem_cgroup which the entry is moved from
 * @to:  mem_cgroup which the entry is moved to
 *
 * It succeeds only when the swap_cgroup's record for this entry is the same
 * as the mem_cgroup's id of @from.
 *
 * Returns 0 on success, -EINVAL on failure.
 *
 * The caller must have charged to @to, IOW, called page_counter_charge()
 * for both res and memsw, and called css_get().
 */
static int mem_cgroup_move_swap_account(swp_entry_t entry,
				struct mem_cgroup *from, struct mem_cgroup *to)
{
	unsigned short old_id, new_id;

	old_id = mem_cgroup_id(from);
	new_id = mem_cgroup_id(to);

	if (swap_cgroup_cmpxchg(entry, old_id, new_id) == old_id) {
		mem_cgroup_swap_statistics(from, false);
		mem_cgroup_swap_statistics(to, true);
		return 0;
	}
	return -EINVAL;
}
#else
static inline int mem_cgroup_move_swap_account(swp_entry_t entry,
				struct mem_cgroup *from, struct mem_cgroup *to)
{
	return -EINVAL;
}
#endif

static DEFINE_MUTEX(memcg_limit_mutex);
2825
2826static int mem_cgroup_resize_limit(struct mem_cgroup *memcg,
2827 unsigned long limit)
2828{
2829 unsigned long curusage;
2830 unsigned long oldusage;
2831 bool enlarge = false;
2832 int retry_count;
2833 int ret;
2834
 /*
  * For keeping hierarchical reclaim simple, how long we should retry
  * is up to the caller. We set our retry-count to be a function of
  * the number of children we must visit in this loop.
  */
2840 retry_count = MEM_CGROUP_RECLAIM_RETRIES *
2841 mem_cgroup_count_children(memcg);
2842
2843 oldusage = page_counter_read(&memcg->memory);
2844
2845 do {
2846 if (signal_pending(current)) {
2847 ret = -EINTR;
2848 break;
2849 }
2850
2851 mutex_lock(&memcg_limit_mutex);
2852 if (limit > memcg->memsw.limit) {
2853 mutex_unlock(&memcg_limit_mutex);
2854 ret = -EINVAL;
2855 break;
2856 }
2857 if (limit > memcg->memory.limit)
2858 enlarge = true;
2859 ret = page_counter_limit(&memcg->memory, limit);
2860 mutex_unlock(&memcg_limit_mutex);
2861
2862 if (!ret)
2863 break;
2864
2865 try_to_free_mem_cgroup_pages(memcg, 1, GFP_KERNEL, true);
2866
2867 curusage = page_counter_read(&memcg->memory);
2868
2869 if (curusage >= oldusage)
2870 retry_count--;
2871 else
2872 oldusage = curusage;
2873 } while (retry_count);
2874
2875 if (!ret && enlarge)
2876 memcg_oom_recover(memcg);
2877
2878 return ret;
2879}
2880
2881static int mem_cgroup_resize_memsw_limit(struct mem_cgroup *memcg,
2882 unsigned long limit)
2883{
2884 unsigned long curusage;
2885 unsigned long oldusage;
2886 bool enlarge = false;
2887 int retry_count;
2888 int ret;
2889
 /* see mem_cgroup_resize_limit() */
2891 retry_count = MEM_CGROUP_RECLAIM_RETRIES *
2892 mem_cgroup_count_children(memcg);
2893
2894 oldusage = page_counter_read(&memcg->memsw);
2895
2896 do {
2897 if (signal_pending(current)) {
2898 ret = -EINTR;
2899 break;
2900 }
2901
2902 mutex_lock(&memcg_limit_mutex);
2903 if (limit < memcg->memory.limit) {
2904 mutex_unlock(&memcg_limit_mutex);
2905 ret = -EINVAL;
2906 break;
2907 }
2908 if (limit > memcg->memsw.limit)
2909 enlarge = true;
2910 ret = page_counter_limit(&memcg->memsw, limit);
2911 mutex_unlock(&memcg_limit_mutex);
2912
2913 if (!ret)
2914 break;
2915
2916 try_to_free_mem_cgroup_pages(memcg, 1, GFP_KERNEL, false);
2917
2918 curusage = page_counter_read(&memcg->memsw);
2919
2920 if (curusage >= oldusage)
2921 retry_count--;
2922 else
2923 oldusage = curusage;
2924 } while (retry_count);
2925
2926 if (!ret && enlarge)
2927 memcg_oom_recover(memcg);
2928
2929 return ret;
2930}
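
/*
 * Example (illustrative, legacy cgroup v1 interface): memsw.limit must
 * always be >= memory.limit, which is why the resize helpers above
 * reject inversions with -EINVAL:
 *
 *	# echo 512M > memory.limit_in_bytes
 *	# echo 768M > memory.memsw.limit_in_bytes	(ok, memsw >= mem)
 *	# echo 256M > memory.memsw.limit_in_bytes	(fails, memsw < mem)
 */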
2931
2932unsigned long mem_cgroup_soft_limit_reclaim(struct zone *zone, int order,
2933 gfp_t gfp_mask,
2934 unsigned long *total_scanned)
2935{
2936 unsigned long nr_reclaimed = 0;
2937 struct mem_cgroup_per_zone *mz, *next_mz = NULL;
2938 unsigned long reclaimed;
2939 int loop = 0;
2940 struct mem_cgroup_tree_per_zone *mctz;
2941 unsigned long excess;
2942 unsigned long nr_scanned;
2943
2944 if (order > 0)
2945 return 0;
2946
2947 mctz = soft_limit_tree_node_zone(zone_to_nid(zone), zone_idx(zone));
2948
 /*
  * This loop can run a while, especially if mem_cgroups continuously
  * keep exceeding their soft limit and putting the system under
  * pressure.
  */
2953 do {
2954 if (next_mz)
2955 mz = next_mz;
2956 else
2957 mz = mem_cgroup_largest_soft_limit_node(mctz);
2958 if (!mz)
2959 break;
2960
2961 nr_scanned = 0;
2962 reclaimed = mem_cgroup_soft_reclaim(mz->memcg, zone,
2963 gfp_mask, &nr_scanned);
2964 nr_reclaimed += reclaimed;
2965 *total_scanned += nr_scanned;
2966 spin_lock_irq(&mctz->lock);
2967 __mem_cgroup_remove_exceeded(mz, mctz);
 /*
  * If we failed to reclaim anything from this memory cgroup
  * it is time to move on to the next cgroup.
  */
2973 next_mz = NULL;
2974 if (!reclaimed)
2975 next_mz = __mem_cgroup_largest_soft_limit_node(mctz);
2976
2977 excess = soft_limit_excess(mz->memcg);
 /*
  * One school of thought says that we should not add
  * back the node to the tree if reclaim returns 0.
  * But our reclaim could return 0, simply because due
  * to priority we are exposing a smaller subset of
  * memory to reclaim from. Consider this as a longer
  * term TODO.
  */
 /* If excess == 0, no tree ops */
2987 __mem_cgroup_insert_exceeded(mz, mctz, excess);
2988 spin_unlock_irq(&mctz->lock);
2989 css_put(&mz->memcg->css);
2990 loop++;

 /*
  * Could not reclaim anything and there are no more
  * mem cgroups to try or we seem to be looping without
  * reclaiming anything.
  */
2996 if (!nr_reclaimed &&
2997 (next_mz == NULL ||
2998 loop > MEM_CGROUP_MAX_SOFT_LIMIT_RECLAIM_LOOPS))
2999 break;
3000 } while (!nr_reclaimed);
3001 if (next_mz)
3002 css_put(&next_mz->memcg->css);
3003 return nr_reclaimed;
3004}
3005
/*
 * Test whether @memcg has children, dead or alive.  Note that this
 * function doesn't care whether @memcg has use_hierarchy enabled and
 * returns %true if there are child csses according to the cgroup
 * hierarchy.  Testing use_hierarchy is the caller's responsibility.
 */
3012static inline bool memcg_has_children(struct mem_cgroup *memcg)
3013{
3014 bool ret;
3015
 /*
  * The lock does not prevent addition or deletion of children, but
  * it prevents a new child from being initialized based on this
  * parent in css_online(), so it's enough to decide whether
  * hierarchically inherited attributes can still be changed or not.
  */
3022 lockdep_assert_held(&memcg_create_mutex);
3023
3024 rcu_read_lock();
3025 ret = css_next_child(NULL, &memcg->css);
3026 rcu_read_unlock();
3027 return ret;
3028}
3029
/*
 * Reclaims as many pages from the given memcg as possible.
 *
 * Caller is responsible for holding css reference for memcg.
 */
3036static int mem_cgroup_force_empty(struct mem_cgroup *memcg)
3037{
3038 int nr_retries = MEM_CGROUP_RECLAIM_RETRIES;
3039
 /* drain the per-cpu LRU caches before trying to free pages */
3041 lru_add_drain_all();
 /* try to free all pages in this cgroup */
3043 while (nr_retries && page_counter_read(&memcg->memory)) {
3044 int progress;
3045
3046 if (signal_pending(current))
3047 return -EINTR;
3048
3049 progress = try_to_free_mem_cgroup_pages(memcg, 1,
3050 GFP_KERNEL, true);
3051 if (!progress) {
3052 nr_retries--;
 /* maybe some writeback is necessary */
3054 congestion_wait(BLK_RW_ASYNC, HZ/10);
3055 }
3056
3057 }
3058
3059 return 0;
3060}
3061
3062static ssize_t mem_cgroup_force_empty_write(struct kernfs_open_file *of,
3063 char *buf, size_t nbytes,
3064 loff_t off)
3065{
3066 struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of));
3067
3068 if (mem_cgroup_is_root(memcg))
3069 return -EINVAL;
3070 return mem_cgroup_force_empty(memcg) ?: nbytes;
3071}
3072
3073static u64 mem_cgroup_hierarchy_read(struct cgroup_subsys_state *css,
3074 struct cftype *cft)
3075{
3076 return mem_cgroup_from_css(css)->use_hierarchy;
3077}
3078
3079static int mem_cgroup_hierarchy_write(struct cgroup_subsys_state *css,
3080 struct cftype *cft, u64 val)
3081{
3082 int retval = 0;
3083 struct mem_cgroup *memcg = mem_cgroup_from_css(css);
3084 struct mem_cgroup *parent_memcg = mem_cgroup_from_css(memcg->css.parent);
3085
3086 mutex_lock(&memcg_create_mutex);
3087
3088 if (memcg->use_hierarchy == val)
3089 goto out;
3090
 /*
  * If parent's use_hierarchy is set, we can't make any modifications
  * in the child subtrees. If it is unset, then the change can occur,
  * provided the current cgroup has no children.
  *
  * For the root cgroup, parent_memcg is NULL.
  */
3099 if ((!parent_memcg || !parent_memcg->use_hierarchy) &&
3100 (val == 1 || val == 0)) {
3101 if (!memcg_has_children(memcg))
3102 memcg->use_hierarchy = val;
3103 else
3104 retval = -EBUSY;
3105 } else
3106 retval = -EINVAL;
3107
3108out:
3109 mutex_unlock(&memcg_create_mutex);
3110
3111 return retval;
3112}
3113
3114static unsigned long tree_stat(struct mem_cgroup *memcg,
3115 enum mem_cgroup_stat_index idx)
3116{
3117 struct mem_cgroup *iter;
3118 long val = 0;
3119
 /* Per-cpu values can be negative, use a signed accumulator */
3121 for_each_mem_cgroup_tree(iter, memcg)
3122 val += mem_cgroup_read_stat(iter, idx);
3123
3124 if (val < 0)
3125 val = 0;
3126 return val;
3127}
3128
3129static inline u64 mem_cgroup_usage(struct mem_cgroup *memcg, bool swap)
3130{
3131 u64 val;
3132
3133 if (mem_cgroup_is_root(memcg)) {
3134 val = tree_stat(memcg, MEM_CGROUP_STAT_CACHE);
3135 val += tree_stat(memcg, MEM_CGROUP_STAT_RSS);
3136 if (swap)
3137 val += tree_stat(memcg, MEM_CGROUP_STAT_SWAP);
3138 } else {
3139 if (!swap)
3140 val = page_counter_read(&memcg->memory);
3141 else
3142 val = page_counter_read(&memcg->memsw);
3143 }
3144 return val << PAGE_SHIFT;
3145}
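
/*
 * Example: page counters count pages, while the user interface reports
 * bytes.  With a 4KiB page size (PAGE_SHIFT == 12), a usage of 25600
 * pages is reported as 25600 << 12 == 104857600 bytes (100 MiB).
 */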
3146
3147enum {
3148 RES_USAGE,
3149 RES_LIMIT,
3150 RES_MAX_USAGE,
3151 RES_FAILCNT,
3152 RES_SOFT_LIMIT,
3153};
3154
3155static u64 mem_cgroup_read_u64(struct cgroup_subsys_state *css,
3156 struct cftype *cft)
3157{
3158 struct mem_cgroup *memcg = mem_cgroup_from_css(css);
3159 struct page_counter *counter;
3160
3161 switch (MEMFILE_TYPE(cft->private)) {
3162 case _MEM:
3163 counter = &memcg->memory;
3164 break;
3165 case _MEMSWAP:
3166 counter = &memcg->memsw;
3167 break;
3168 case _KMEM:
3169 counter = &memcg->kmem;
3170 break;
3171 default:
3172 BUG();
3173 }
3174
3175 switch (MEMFILE_ATTR(cft->private)) {
3176 case RES_USAGE:
3177 if (counter == &memcg->memory)
3178 return mem_cgroup_usage(memcg, false);
3179 if (counter == &memcg->memsw)
3180 return mem_cgroup_usage(memcg, true);
3181 return (u64)page_counter_read(counter) * PAGE_SIZE;
3182 case RES_LIMIT:
3183 return (u64)counter->limit * PAGE_SIZE;
3184 case RES_MAX_USAGE:
3185 return (u64)counter->watermark * PAGE_SIZE;
3186 case RES_FAILCNT:
3187 return counter->failcnt;
3188 case RES_SOFT_LIMIT:
3189 return (u64)memcg->soft_limit * PAGE_SIZE;
3190 default:
3191 BUG();
3192 }
3193}
3194
3195#ifdef CONFIG_MEMCG_KMEM
3196static int memcg_activate_kmem(struct mem_cgroup *memcg,
3197 unsigned long nr_pages)
3198{
3199 int err = 0;
3200 int memcg_id;
3201
3202 BUG_ON(memcg->kmemcg_id >= 0);
3203 BUG_ON(memcg->kmem_acct_activated);
3204 BUG_ON(memcg->kmem_acct_active);
3205
 /*
  * For simplicity, we won't allow this to be disabled.  It also can't
  * be changed if the cgroup has children already, or if tasks had
  * already joined.
  *
  * If tasks join before we set the limit, a person looking at
  * kmem.usage_in_bytes will have no way to determine when it took
  * place, which makes the value quite meaningless.
  *
  * After it first became limited, changes in the value of the limit
  * are of course permitted.
  */
3218 mutex_lock(&memcg_create_mutex);
3219 if (cgroup_has_tasks(memcg->css.cgroup) ||
3220 (memcg->use_hierarchy && memcg_has_children(memcg)))
3221 err = -EBUSY;
3222 mutex_unlock(&memcg_create_mutex);
3223 if (err)
3224 goto out;
3225
3226 memcg_id = memcg_alloc_cache_id();
3227 if (memcg_id < 0) {
3228 err = memcg_id;
3229 goto out;
3230 }
3231
 /*
  * We couldn't have accounted to this cgroup, because it hasn't got
  * activated yet, so this should succeed.
  */
3236 err = page_counter_limit(&memcg->kmem, nr_pages);
3237 VM_BUG_ON(err);
3238
3239 static_key_slow_inc(&memcg_kmem_enabled_key);
3240
 /*
  * A memory cgroup is considered kmem-active as soon as it gets
  * kmemcg_id. Setting the id after enabling static branching will
  * guarantee no one starts accounting before all call sites are
  * patched.
  */
3246 memcg->kmemcg_id = memcg_id;
3247 memcg->kmem_acct_activated = true;
3248 memcg->kmem_acct_active = true;
3249out:
3250 return err;
3251}
3252
3253static int memcg_update_kmem_limit(struct mem_cgroup *memcg,
3254 unsigned long limit)
3255{
3256 int ret;
3257
3258 mutex_lock(&memcg_limit_mutex);
3259 if (!memcg_kmem_is_active(memcg))
3260 ret = memcg_activate_kmem(memcg, limit);
3261 else
3262 ret = page_counter_limit(&memcg->kmem, limit);
3263 mutex_unlock(&memcg_limit_mutex);
3264 return ret;
3265}
3266
3267static int memcg_propagate_kmem(struct mem_cgroup *memcg)
3268{
3269 int ret = 0;
3270 struct mem_cgroup *parent = parent_mem_cgroup(memcg);
3271
3272 if (!parent)
3273 return 0;
3274
3275 mutex_lock(&memcg_limit_mutex);
3276
 /*
  * If the parent cgroup is not kmem-active now, it cannot be
  * activated after this point, because it has at least one child
  * already.
  */
3280 if (memcg_kmem_is_active(parent))
3281 ret = memcg_activate_kmem(memcg, PAGE_COUNTER_MAX);
3282 mutex_unlock(&memcg_limit_mutex);
3283 return ret;
3284}
3285#else
3286static int memcg_update_kmem_limit(struct mem_cgroup *memcg,
3287 unsigned long limit)
3288{
3289 return -EINVAL;
3290}
3291#endif
3292
/*
 * Common write handler for the memory, memsw and kmem limit files and
 * for the soft limit; accepts human-readable sizes ("512M") or "-1"
 * for "no limit".
 */
3297static ssize_t mem_cgroup_write(struct kernfs_open_file *of,
3298 char *buf, size_t nbytes, loff_t off)
3299{
3300 struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of));
3301 unsigned long nr_pages;
3302 int ret;
3303
3304 buf = strstrip(buf);
3305 ret = page_counter_memparse(buf, "-1", &nr_pages);
3306 if (ret)
3307 return ret;
3308
3309 switch (MEMFILE_ATTR(of_cft(of)->private)) {
3310 case RES_LIMIT:
3311 if (mem_cgroup_is_root(memcg)) {
3312 ret = -EINVAL;
3313 break;
3314 }
3315 switch (MEMFILE_TYPE(of_cft(of)->private)) {
3316 case _MEM:
3317 ret = mem_cgroup_resize_limit(memcg, nr_pages);
3318 break;
3319 case _MEMSWAP:
3320 ret = mem_cgroup_resize_memsw_limit(memcg, nr_pages);
3321 break;
3322 case _KMEM:
3323 ret = memcg_update_kmem_limit(memcg, nr_pages);
3324 break;
3325 }
3326 break;
3327 case RES_SOFT_LIMIT:
3328 memcg->soft_limit = nr_pages;
3329 ret = 0;
3330 break;
3331 }
3332 return ret ?: nbytes;
3333}
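
/*
 * Example (illustrative): the same handler serves several control
 * files, dispatched on the cftype's private value:
 *
 *	# echo 512M > memory.limit_in_bytes		(_MEM, RES_LIMIT)
 *	# echo 64M  > memory.kmem.limit_in_bytes	(_KMEM, RES_LIMIT)
 *	# echo -1   > memory.limit_in_bytes		(remove the limit)
 *	# echo 100M > memory.soft_limit_in_bytes	(RES_SOFT_LIMIT)
 */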
3334
3335static ssize_t mem_cgroup_reset(struct kernfs_open_file *of, char *buf,
3336 size_t nbytes, loff_t off)
3337{
3338 struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of));
3339 struct page_counter *counter;
3340
3341 switch (MEMFILE_TYPE(of_cft(of)->private)) {
3342 case _MEM:
3343 counter = &memcg->memory;
3344 break;
3345 case _MEMSWAP:
3346 counter = &memcg->memsw;
3347 break;
3348 case _KMEM:
3349 counter = &memcg->kmem;
3350 break;
3351 default:
3352 BUG();
3353 }
3354
3355 switch (MEMFILE_ATTR(of_cft(of)->private)) {
3356 case RES_MAX_USAGE:
3357 page_counter_reset_watermark(counter);
3358 break;
3359 case RES_FAILCNT:
3360 counter->failcnt = 0;
3361 break;
3362 default:
3363 BUG();
3364 }
3365
3366 return nbytes;
3367}
3368
3369static u64 mem_cgroup_move_charge_read(struct cgroup_subsys_state *css,
3370 struct cftype *cft)
3371{
3372 return mem_cgroup_from_css(css)->move_charge_at_immigrate;
3373}
3374
3375#ifdef CONFIG_MMU
3376static int mem_cgroup_move_charge_write(struct cgroup_subsys_state *css,
3377 struct cftype *cft, u64 val)
3378{
3379 struct mem_cgroup *memcg = mem_cgroup_from_css(css);
3380
3381 if (val & ~MOVE_MASK)
3382 return -EINVAL;
3383
3384
 /*
  * No kind of locking is needed in here, because ->can_attach() will
  * check this value once in the beginning of the process, and then
  * carry on with stale data. This means that changes to this value
  * will only affect task migrations starting after the change.
  */
3390 memcg->move_charge_at_immigrate = val;
3391 return 0;
3392}
3393#else
3394static int mem_cgroup_move_charge_write(struct cgroup_subsys_state *css,
3395 struct cftype *cft, u64 val)
3396{
3397 return -ENOSYS;
3398}
3399#endif
3400
3401#ifdef CONFIG_NUMA
3402static int memcg_numa_stat_show(struct seq_file *m, void *v)
3403{
3404 struct numa_stat {
3405 const char *name;
3406 unsigned int lru_mask;
3407 };
3408
3409 static const struct numa_stat stats[] = {
3410 { "total", LRU_ALL },
3411 { "file", LRU_ALL_FILE },
3412 { "anon", LRU_ALL_ANON },
3413 { "unevictable", BIT(LRU_UNEVICTABLE) },
3414 };
3415 const struct numa_stat *stat;
3416 int nid;
3417 unsigned long nr;
3418 struct mem_cgroup *memcg = mem_cgroup_from_css(seq_css(m));
3419
3420 for (stat = stats; stat < stats + ARRAY_SIZE(stats); stat++) {
3421 nr = mem_cgroup_nr_lru_pages(memcg, stat->lru_mask);
3422 seq_printf(m, "%s=%lu", stat->name, nr);
3423 for_each_node_state(nid, N_MEMORY) {
3424 nr = mem_cgroup_node_nr_lru_pages(memcg, nid,
3425 stat->lru_mask);
3426 seq_printf(m, " N%d=%lu", nid, nr);
3427 }
3428 seq_putc(m, '\n');
3429 }
3430
3431 for (stat = stats; stat < stats + ARRAY_SIZE(stats); stat++) {
3432 struct mem_cgroup *iter;
3433
3434 nr = 0;
3435 for_each_mem_cgroup_tree(iter, memcg)
3436 nr += mem_cgroup_nr_lru_pages(iter, stat->lru_mask);
3437 seq_printf(m, "hierarchical_%s=%lu", stat->name, nr);
3438 for_each_node_state(nid, N_MEMORY) {
3439 nr = 0;
3440 for_each_mem_cgroup_tree(iter, memcg)
3441 nr += mem_cgroup_node_nr_lru_pages(
3442 iter, nid, stat->lru_mask);
3443 seq_printf(m, " N%d=%lu", nid, nr);
3444 }
3445 seq_putc(m, '\n');
3446 }
3447
3448 return 0;
3449}
3450#endif
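
/*
 * Sample memory.numa_stat output on a two-node machine (illustrative
 * page counts; the hierarchical_* lines follow the same layout):
 *
 *	total=1240 N0=22 N1=1218
 *	file=1230 N0=12 N1=1218
 *	anon=10 N0=10 N1=0
 *	unevictable=0 N0=0 N1=0
 */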
3451
3452static int memcg_stat_show(struct seq_file *m, void *v)
3453{
3454 struct mem_cgroup *memcg = mem_cgroup_from_css(seq_css(m));
3455 unsigned long memory, memsw;
3456 struct mem_cgroup *mi;
3457 unsigned int i;
3458
3459 BUILD_BUG_ON(ARRAY_SIZE(mem_cgroup_stat_names) !=
3460 MEM_CGROUP_STAT_NSTATS);
3461 BUILD_BUG_ON(ARRAY_SIZE(mem_cgroup_events_names) !=
3462 MEM_CGROUP_EVENTS_NSTATS);
3463 BUILD_BUG_ON(ARRAY_SIZE(mem_cgroup_lru_names) != NR_LRU_LISTS);
3464
3465 for (i = 0; i < MEM_CGROUP_STAT_NSTATS; i++) {
3466 if (i == MEM_CGROUP_STAT_SWAP && !do_swap_account)
3467 continue;
3468 seq_printf(m, "%s %ld\n", mem_cgroup_stat_names[i],
3469 mem_cgroup_read_stat(memcg, i) * PAGE_SIZE);
3470 }
3471
3472 for (i = 0; i < MEM_CGROUP_EVENTS_NSTATS; i++)
3473 seq_printf(m, "%s %lu\n", mem_cgroup_events_names[i],
3474 mem_cgroup_read_events(memcg, i));
3475
3476 for (i = 0; i < NR_LRU_LISTS; i++)
3477 seq_printf(m, "%s %lu\n", mem_cgroup_lru_names[i],
3478 mem_cgroup_nr_lru_pages(memcg, BIT(i)) * PAGE_SIZE);
3479
 /* Hierarchical information */
3481 memory = memsw = PAGE_COUNTER_MAX;
3482 for (mi = memcg; mi; mi = parent_mem_cgroup(mi)) {
3483 memory = min(memory, mi->memory.limit);
3484 memsw = min(memsw, mi->memsw.limit);
3485 }
3486 seq_printf(m, "hierarchical_memory_limit %llu\n",
3487 (u64)memory * PAGE_SIZE);
3488 if (do_swap_account)
3489 seq_printf(m, "hierarchical_memsw_limit %llu\n",
3490 (u64)memsw * PAGE_SIZE);
3491
3492 for (i = 0; i < MEM_CGROUP_STAT_NSTATS; i++) {
3493 long long val = 0;
3494
3495 if (i == MEM_CGROUP_STAT_SWAP && !do_swap_account)
3496 continue;
3497 for_each_mem_cgroup_tree(mi, memcg)
3498 val += mem_cgroup_read_stat(mi, i) * PAGE_SIZE;
3499 seq_printf(m, "total_%s %lld\n", mem_cgroup_stat_names[i], val);
3500 }
3501
3502 for (i = 0; i < MEM_CGROUP_EVENTS_NSTATS; i++) {
3503 unsigned long long val = 0;
3504
3505 for_each_mem_cgroup_tree(mi, memcg)
3506 val += mem_cgroup_read_events(mi, i);
3507 seq_printf(m, "total_%s %llu\n",
3508 mem_cgroup_events_names[i], val);
3509 }
3510
3511 for (i = 0; i < NR_LRU_LISTS; i++) {
3512 unsigned long long val = 0;
3513
3514 for_each_mem_cgroup_tree(mi, memcg)
3515 val += mem_cgroup_nr_lru_pages(mi, BIT(i)) * PAGE_SIZE;
3516 seq_printf(m, "total_%s %llu\n", mem_cgroup_lru_names[i], val);
3517 }
3518
3519#ifdef CONFIG_DEBUG_VM
3520 {
3521 int nid, zid;
3522 struct mem_cgroup_per_zone *mz;
3523 struct zone_reclaim_stat *rstat;
3524 unsigned long recent_rotated[2] = {0, 0};
3525 unsigned long recent_scanned[2] = {0, 0};
3526
3527 for_each_online_node(nid)
3528 for (zid = 0; zid < MAX_NR_ZONES; zid++) {
3529 mz = &memcg->nodeinfo[nid]->zoneinfo[zid];
3530 rstat = &mz->lruvec.reclaim_stat;
3531
3532 recent_rotated[0] += rstat->recent_rotated[0];
3533 recent_rotated[1] += rstat->recent_rotated[1];
3534 recent_scanned[0] += rstat->recent_scanned[0];
3535 recent_scanned[1] += rstat->recent_scanned[1];
3536 }
3537 seq_printf(m, "recent_rotated_anon %lu\n", recent_rotated[0]);
3538 seq_printf(m, "recent_rotated_file %lu\n", recent_rotated[1]);
3539 seq_printf(m, "recent_scanned_anon %lu\n", recent_scanned[0]);
3540 seq_printf(m, "recent_scanned_file %lu\n", recent_scanned[1]);
3541 }
3542#endif
3543
3544 return 0;
3545}
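
/*
 * Sample memory.stat output (illustrative numbers, many lines elided;
 * the "total_*" lines are hierarchical sums over the subtree):
 *
 *	cache 104857600
 *	rss 12582912
 *	pgpgin 28664
 *	pgpgout 1024
 *	inactive_anon 0
 *	active_anon 12582912
 *	hierarchical_memory_limit 536870912
 *	total_cache 209715200
 */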
3546
3547static u64 mem_cgroup_swappiness_read(struct cgroup_subsys_state *css,
3548 struct cftype *cft)
3549{
3550 struct mem_cgroup *memcg = mem_cgroup_from_css(css);
3551
3552 return mem_cgroup_swappiness(memcg);
3553}
3554
3555static int mem_cgroup_swappiness_write(struct cgroup_subsys_state *css,
3556 struct cftype *cft, u64 val)
3557{
3558 struct mem_cgroup *memcg = mem_cgroup_from_css(css);
3559
3560 if (val > 100)
3561 return -EINVAL;
3562
3563 if (css->parent)
3564 memcg->swappiness = val;
3565 else
3566 vm_swappiness = val;
3567
3568 return 0;
3569}
3570
3571static void __mem_cgroup_threshold(struct mem_cgroup *memcg, bool swap)
3572{
3573 struct mem_cgroup_threshold_ary *t;
3574 unsigned long usage;
3575 int i;
3576
3577 rcu_read_lock();
3578 if (!swap)
3579 t = rcu_dereference(memcg->thresholds.primary);
3580 else
3581 t = rcu_dereference(memcg->memsw_thresholds.primary);
3582
3583 if (!t)
3584 goto unlock;
3585
3586 usage = mem_cgroup_usage(memcg, swap);
3587
 /*
  * current_threshold points to the threshold just below or equal to
  * usage. If that's not true, a threshold was crossed after the last
  * call of __mem_cgroup_threshold().
  */
3593 i = t->current_threshold;
3594
 /*
  * Iterate backward over the array of thresholds starting from
  * current_threshold and check if a threshold is crossed.
  * If none of the thresholds below usage is crossed, we read
  * only one element of the array here.
  */
3601 for (; i >= 0 && unlikely(t->entries[i].threshold > usage); i--)
3602 eventfd_signal(t->entries[i].eventfd, 1);
3603
 /* i = current_threshold + 1 */
3605 i++;
3606
 /*
  * Iterate forward over the array of thresholds starting from
  * current_threshold+1 and check if a threshold is crossed.
  * If none of the thresholds above usage is crossed, we read
  * only one element of the array here.
  */
3613 for (; i < t->size && unlikely(t->entries[i].threshold <= usage); i++)
3614 eventfd_signal(t->entries[i].eventfd, 1);
3615
 /* Update current_threshold */
3617 t->current_threshold = i - 1;
3618unlock:
3619 rcu_read_unlock();
3620}
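
/*
 * Worked example (illustrative): thresholds {4M, 8M, 16M} with usage
 * previously between 8M and 16M, so current_threshold == 1 (the 8M
 * entry).  If usage has since dropped to 5M, the backward loop fires
 * the 8M eventfd and stops at the 4M entry; current_threshold becomes
 * 0.  If usage instead grew to 20M, the forward loop fires the 16M
 * eventfd and current_threshold becomes 2.
 */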
3621
3622static void mem_cgroup_threshold(struct mem_cgroup *memcg)
3623{
3624 while (memcg) {
3625 __mem_cgroup_threshold(memcg, false);
3626 if (do_swap_account)
3627 __mem_cgroup_threshold(memcg, true);
3628
3629 memcg = parent_mem_cgroup(memcg);
3630 }
3631}
3632
3633static int compare_thresholds(const void *a, const void *b)
3634{
3635 const struct mem_cgroup_threshold *_a = a;
3636 const struct mem_cgroup_threshold *_b = b;
3637
3638 if (_a->threshold > _b->threshold)
3639 return 1;
3640
3641 if (_a->threshold < _b->threshold)
3642 return -1;
3643
3644 return 0;
3645}
3646
3647static int mem_cgroup_oom_notify_cb(struct mem_cgroup *memcg)
3648{
3649 struct mem_cgroup_eventfd_list *ev;
3650
3651 spin_lock(&memcg_oom_lock);
3652
3653 list_for_each_entry(ev, &memcg->oom_notify, list)
3654 eventfd_signal(ev->eventfd, 1);
3655
3656 spin_unlock(&memcg_oom_lock);
3657 return 0;
3658}
3659
3660static void mem_cgroup_oom_notify(struct mem_cgroup *memcg)
3661{
3662 struct mem_cgroup *iter;
3663
3664 for_each_mem_cgroup_tree(iter, memcg)
3665 mem_cgroup_oom_notify_cb(iter);
3666}
3667
3668static int __mem_cgroup_usage_register_event(struct mem_cgroup *memcg,
3669 struct eventfd_ctx *eventfd, const char *args, enum res_type type)
3670{
3671 struct mem_cgroup_thresholds *thresholds;
3672 struct mem_cgroup_threshold_ary *new;
3673 unsigned long threshold;
3674 unsigned long usage;
3675 int i, size, ret;
3676
3677 ret = page_counter_memparse(args, "-1", &threshold);
3678 if (ret)
3679 return ret;
3680
3681 mutex_lock(&memcg->thresholds_lock);
3682
3683 if (type == _MEM) {
3684 thresholds = &memcg->thresholds;
3685 usage = mem_cgroup_usage(memcg, false);
3686 } else if (type == _MEMSWAP) {
3687 thresholds = &memcg->memsw_thresholds;
3688 usage = mem_cgroup_usage(memcg, true);
3689 } else
3690 BUG();
3691
 /* Check if a threshold crossed before adding a new one */
3693 if (thresholds->primary)
3694 __mem_cgroup_threshold(memcg, type == _MEMSWAP);
3695
3696 size = thresholds->primary ? thresholds->primary->size + 1 : 1;

 /* Allocate memory for new array of thresholds */
3699 new = kmalloc(sizeof(*new) + size * sizeof(struct mem_cgroup_threshold),
3700 GFP_KERNEL);
3701 if (!new) {
3702 ret = -ENOMEM;
3703 goto unlock;
3704 }
3705 new->size = size;

 /* Copy thresholds (if any) to new array */
3708 if (thresholds->primary) {
3709 memcpy(new->entries, thresholds->primary->entries, (size - 1) *
3710 sizeof(struct mem_cgroup_threshold));
3711 }

 /* Add new threshold */
3714 new->entries[size - 1].eventfd = eventfd;
3715 new->entries[size - 1].threshold = threshold;

 /* Sort thresholds. Registering of new threshold isn't time-critical */
3718 sort(new->entries, size, sizeof(struct mem_cgroup_threshold),
3719 compare_thresholds, NULL);

 /* Find current threshold */
3722 new->current_threshold = -1;
3723 for (i = 0; i < size; i++) {
3724 if (new->entries[i].threshold <= usage) {
 /*
  * new->current_threshold will not be used until
  * rcu_assign_pointer(), so it's safe to increment
  * it here.
  */
3730 ++new->current_threshold;
3731 } else
3732 break;
3733 }

 /* Free old spare buffer and save old primary buffer as spare */
3736 kfree(thresholds->spare);
3737 thresholds->spare = thresholds->primary;
3738
3739 rcu_assign_pointer(thresholds->primary, new);

 /* To be sure that nobody uses thresholds */
3742 synchronize_rcu();
3743
3744unlock:
3745 mutex_unlock(&memcg->thresholds_lock);
3746
3747 return ret;
3748}
3749
3750static int mem_cgroup_usage_register_event(struct mem_cgroup *memcg,
3751 struct eventfd_ctx *eventfd, const char *args)
3752{
3753 return __mem_cgroup_usage_register_event(memcg, eventfd, args, _MEM);
3754}
3755
3756static int memsw_cgroup_usage_register_event(struct mem_cgroup *memcg,
3757 struct eventfd_ctx *eventfd, const char *args)
3758{
3759 return __mem_cgroup_usage_register_event(memcg, eventfd, args, _MEMSWAP);
3760}
3761
3762static void __mem_cgroup_usage_unregister_event(struct mem_cgroup *memcg,
3763 struct eventfd_ctx *eventfd, enum res_type type)
3764{
3765 struct mem_cgroup_thresholds *thresholds;
3766 struct mem_cgroup_threshold_ary *new;
3767 unsigned long usage;
3768 int i, j, size;
3769
3770 mutex_lock(&memcg->thresholds_lock);
3771
3772 if (type == _MEM) {
3773 thresholds = &memcg->thresholds;
3774 usage = mem_cgroup_usage(memcg, false);
3775 } else if (type == _MEMSWAP) {
3776 thresholds = &memcg->memsw_thresholds;
3777 usage = mem_cgroup_usage(memcg, true);
3778 } else
3779 BUG();
3780
3781 if (!thresholds->primary)
3782 goto unlock;

 /* Check if a threshold crossed before unregistering */
3785 __mem_cgroup_threshold(memcg, type == _MEMSWAP);

 /* Calculate new number of thresholds */
3788 size = 0;
3789 for (i = 0; i < thresholds->primary->size; i++) {
3790 if (thresholds->primary->entries[i].eventfd != eventfd)
3791 size++;
3792 }
3793
3794 new = thresholds->spare;

 /* Set thresholds array to NULL if we don't have thresholds */
3797 if (!size) {
3798 kfree(new);
3799 new = NULL;
3800 goto swap_buffers;
3801 }
3802
3803 new->size = size;

 /* Copy thresholds and find current threshold */
3806 new->current_threshold = -1;
3807 for (i = 0, j = 0; i < thresholds->primary->size; i++) {
3808 if (thresholds->primary->entries[i].eventfd == eventfd)
3809 continue;
3810
3811 new->entries[j] = thresholds->primary->entries[i];
3812 if (new->entries[j].threshold <= usage) {
 /*
  * new->current_threshold will not be used until
  * rcu_assign_pointer(), so it's safe to increment
  * it here.
  */
3818 ++new->current_threshold;
3819 }
3820 j++;
3821 }
3822
3823swap_buffers:
 /* Swap primary and spare array */
3825 thresholds->spare = thresholds->primary;

 /* If all events are unregistered, free the spare array */
3827 if (!new) {
3828 kfree(thresholds->spare);
3829 thresholds->spare = NULL;
3830 }
3831
3832 rcu_assign_pointer(thresholds->primary, new);

 /* To be sure that nobody uses thresholds */
3835 synchronize_rcu();
3836unlock:
3837 mutex_unlock(&memcg->thresholds_lock);
3838}
3839
3840static void mem_cgroup_usage_unregister_event(struct mem_cgroup *memcg,
3841 struct eventfd_ctx *eventfd)
3842{
3843 return __mem_cgroup_usage_unregister_event(memcg, eventfd, _MEM);
3844}
3845
3846static void memsw_cgroup_usage_unregister_event(struct mem_cgroup *memcg,
3847 struct eventfd_ctx *eventfd)
3848{
3849 return __mem_cgroup_usage_unregister_event(memcg, eventfd, _MEMSWAP);
3850}
3851
3852static int mem_cgroup_oom_register_event(struct mem_cgroup *memcg,
3853 struct eventfd_ctx *eventfd, const char *args)
3854{
3855 struct mem_cgroup_eventfd_list *event;
3856
3857 event = kmalloc(sizeof(*event), GFP_KERNEL);
3858 if (!event)
3859 return -ENOMEM;
3860
3861 spin_lock(&memcg_oom_lock);
3862
3863 event->eventfd = eventfd;
3864 list_add(&event->list, &memcg->oom_notify);

 /* already in OOM ? */
3867 if (atomic_read(&memcg->under_oom))
3868 eventfd_signal(eventfd, 1);
3869 spin_unlock(&memcg_oom_lock);
3870
3871 return 0;
3872}
3873
3874static void mem_cgroup_oom_unregister_event(struct mem_cgroup *memcg,
3875 struct eventfd_ctx *eventfd)
3876{
3877 struct mem_cgroup_eventfd_list *ev, *tmp;
3878
3879 spin_lock(&memcg_oom_lock);
3880
3881 list_for_each_entry_safe(ev, tmp, &memcg->oom_notify, list) {
3882 if (ev->eventfd == eventfd) {
3883 list_del(&ev->list);
3884 kfree(ev);
3885 }
3886 }
3887
3888 spin_unlock(&memcg_oom_lock);
3889}
3890
3891static int mem_cgroup_oom_control_read(struct seq_file *sf, void *v)
3892{
3893 struct mem_cgroup *memcg = mem_cgroup_from_css(seq_css(sf));
3894
3895 seq_printf(sf, "oom_kill_disable %d\n", memcg->oom_kill_disable);
3896 seq_printf(sf, "under_oom %d\n", (bool)atomic_read(&memcg->under_oom));
3897 return 0;
3898}
3899
3900static int mem_cgroup_oom_control_write(struct cgroup_subsys_state *css,
3901 struct cftype *cft, u64 val)
3902{
3903 struct mem_cgroup *memcg = mem_cgroup_from_css(css);

 /* cannot set to root cgroup and only 0 and 1 are allowed */
3906 if (!css->parent || !((val == 0) || (val == 1)))
3907 return -EINVAL;
3908
3909 memcg->oom_kill_disable = val;
3910 if (!val)
3911 memcg_oom_recover(memcg);
3912
3913 return 0;
3914}
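
/*
 * Example (illustrative): disabling the OOM killer for a cgroup and
 * watching its state:
 *
 *	# echo 1 > memory.oom_control		(oom_kill_disable = 1)
 *	# cat memory.oom_control
 *	oom_kill_disable 1
 *	under_oom 0
 */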
3915
3916#ifdef CONFIG_MEMCG_KMEM
3917static int memcg_init_kmem(struct mem_cgroup *memcg, struct cgroup_subsys *ss)
3918{
3919 int ret;
3920
3921 ret = memcg_propagate_kmem(memcg);
3922 if (ret)
3923 return ret;
3924
3925 return mem_cgroup_sockets_init(memcg, ss);
3926}
3927
3928static void memcg_deactivate_kmem(struct mem_cgroup *memcg)
3929{
3930 struct cgroup_subsys_state *css;
3931 struct mem_cgroup *parent, *child;
3932 int kmemcg_id;
3933
3934 if (!memcg->kmem_acct_active)
3935 return;
3936
 /*
  * Clear the 'active' flag before clearing memcg_caches arrays
  * entries. Since we take the slab_mutex in
  * memcg_deactivate_kmem_caches(), it guarantees no cache will be
  * created for this cgroup after we are done (see
  * memcg_create_kmem_cache()).
  */
3943 memcg->kmem_acct_active = false;
3944
3945 memcg_deactivate_kmem_caches(memcg);
3946
3947 kmemcg_id = memcg->kmemcg_id;
3948 BUG_ON(kmemcg_id < 0);
3949
3950 parent = parent_mem_cgroup(memcg);
3951 if (!parent)
3952 parent = root_mem_cgroup;
3953
 /*
  * Change kmemcg_id of this cgroup and all its descendants to the
  * parent's id, and then move all entries from this cgroup's
  * list_lrus to ones of the parent. After we have finished, all
  * list_lrus corresponding to this cgroup are guaranteed to remain
  * empty. The ordering is imposed by list_lru_node->lock taken by
  * memcg_drain_all_list_lrus().
  */
3962 css_for_each_descendant_pre(css, &memcg->css) {
3963 child = mem_cgroup_from_css(css);
3964 BUG_ON(child->kmemcg_id != kmemcg_id);
3965 child->kmemcg_id = parent->kmemcg_id;
3966 if (!memcg->use_hierarchy)
3967 break;
3968 }
3969 memcg_drain_all_list_lrus(kmemcg_id, parent->kmemcg_id);
3970
3971 memcg_free_cache_id(kmemcg_id);
3972}
3973
3974static void memcg_destroy_kmem(struct mem_cgroup *memcg)
3975{
3976 if (memcg->kmem_acct_activated) {
3977 memcg_destroy_kmem_caches(memcg);
3978 static_key_slow_dec(&memcg_kmem_enabled_key);
3979 WARN_ON(page_counter_read(&memcg->kmem));
3980 }
3981 mem_cgroup_sockets_destroy(memcg);
3982}
3983#else
3984static int memcg_init_kmem(struct mem_cgroup *memcg, struct cgroup_subsys *ss)
3985{
3986 return 0;
3987}
3988
3989static void memcg_deactivate_kmem(struct mem_cgroup *memcg)
3990{
3991}
3992
3993static void memcg_destroy_kmem(struct mem_cgroup *memcg)
3994{
3995}
3996#endif
3997

/*
 * DO NOT USE IN NEW FILES.
 *
 * "cgroup.event_control" implementation.
 *
 * This is way over-engineered.  It tries to support fully configurable
 * events for each user.  Such level of flexibility is completely
 * unnecessary especially in the light of the planned unified hierarchy.
 *
 * Please deprecate this and replace with something simpler.  Use
 * "cgroup.event_control" only for backwards compatibility.
 */

/*
 * Unregister event and free resources.
 *
 * Gets called from workqueue.
 */
4016static void memcg_event_remove(struct work_struct *work)
4017{
4018 struct mem_cgroup_event *event =
4019 container_of(work, struct mem_cgroup_event, remove);
4020 struct mem_cgroup *memcg = event->memcg;
4021
4022 remove_wait_queue(event->wqh, &event->wait);
4023
4024 event->unregister_event(memcg, event->eventfd);

 /* Notify userspace the event is going away. */
4027 eventfd_signal(event->eventfd, 1);
4028
4029 eventfd_ctx_put(event->eventfd);
4030 kfree(event);
4031 css_put(&memcg->css);
4032}
4033
/*
 * Gets called on POLLHUP on eventfd when user closes it.
 *
 * Called with wqh->lock held and interrupts disabled.
 */
4039static int memcg_event_wake(wait_queue_t *wait, unsigned mode,
4040 int sync, void *key)
4041{
4042 struct mem_cgroup_event *event =
4043 container_of(wait, struct mem_cgroup_event, wait);
4044 struct mem_cgroup *memcg = event->memcg;
4045 unsigned long flags = (unsigned long)key;
4046
4047 if (flags & POLLHUP) {
 /*
  * If the event has been detached at cgroup removal, we
  * can simply return knowing the other side will cleanup
  * for us.
  *
  * We can't race against event freeing since the other
  * side will require wqh->lock via remove_wait_queue(),
  * which we hold.
  */
4057 spin_lock(&memcg->event_list_lock);
4058 if (!list_empty(&event->list)) {
4059 list_del_init(&event->list);
 /*
  * We are in atomic context, but cgroup_event_remove()
  * may sleep, so we have to call it in workqueue.
  */
4064 schedule_work(&event->remove);
4065 }
4066 spin_unlock(&memcg->event_list_lock);
4067 }
4068
4069 return 0;
4070}
4071
4072static void memcg_event_ptable_queue_proc(struct file *file,
4073 wait_queue_head_t *wqh, poll_table *pt)
4074{
4075 struct mem_cgroup_event *event =
4076 container_of(pt, struct mem_cgroup_event, pt);
4077
4078 event->wqh = wqh;
4079 add_wait_queue(wqh, &event->wait);
4080}
4081
/*
 * DO NOT USE IN NEW FILES.
 *
 * Parse input and register new cgroup event handler.
 *
 * Input must be in format '<event_fd> <control_fd> <args>'.
 * Interpretation of args is defined by control file implementation.
 */
4090static ssize_t memcg_write_event_control(struct kernfs_open_file *of,
4091 char *buf, size_t nbytes, loff_t off)
4092{
4093 struct cgroup_subsys_state *css = of_css(of);
4094 struct mem_cgroup *memcg = mem_cgroup_from_css(css);
4095 struct mem_cgroup_event *event;
4096 struct cgroup_subsys_state *cfile_css;
4097 unsigned int efd, cfd;
4098 struct fd efile;
4099 struct fd cfile;
4100 const char *name;
4101 char *endp;
4102 int ret;
4103
4104 buf = strstrip(buf);
4105
4106 efd = simple_strtoul(buf, &endp, 10);
4107 if (*endp != ' ')
4108 return -EINVAL;
4109 buf = endp + 1;
4110
4111 cfd = simple_strtoul(buf, &endp, 10);
4112 if ((*endp != ' ') && (*endp != '\0'))
4113 return -EINVAL;
4114 buf = endp + 1;
4115
4116 event = kzalloc(sizeof(*event), GFP_KERNEL);
4117 if (!event)
4118 return -ENOMEM;
4119
4120 event->memcg = memcg;
4121 INIT_LIST_HEAD(&event->list);
4122 init_poll_funcptr(&event->pt, memcg_event_ptable_queue_proc);
4123 init_waitqueue_func_entry(&event->wait, memcg_event_wake);
4124 INIT_WORK(&event->remove, memcg_event_remove);
4125
4126 efile = fdget(efd);
4127 if (!efile.file) {
4128 ret = -EBADF;
4129 goto out_kfree;
4130 }
4131
4132 event->eventfd = eventfd_ctx_fileget(efile.file);
4133 if (IS_ERR(event->eventfd)) {
4134 ret = PTR_ERR(event->eventfd);
4135 goto out_put_efile;
4136 }
4137
4138 cfile = fdget(cfd);
4139 if (!cfile.file) {
4140 ret = -EBADF;
4141 goto out_put_eventfd;
4142 }
4143
 /* the process needs read permission on the control file */
 /* AV: shouldn't we check that it's been opened for read instead? */
4146 ret = inode_permission(file_inode(cfile.file), MAY_READ);
4147 if (ret < 0)
4148 goto out_put_cfile;
4149
 /*
  * Determine the event callbacks and set them in @event.  This used
  * to be done via struct cftype but cgroup core no longer knows
  * about these events.  The following is crude but the whole thing
  * is for compatibility anyway.
  *
  * DO NOT ADD NEW FILES.
  */
4158 name = cfile.file->f_path.dentry->d_name.name;
4159
4160 if (!strcmp(name, "memory.usage_in_bytes")) {
4161 event->register_event = mem_cgroup_usage_register_event;
4162 event->unregister_event = mem_cgroup_usage_unregister_event;
4163 } else if (!strcmp(name, "memory.oom_control")) {
4164 event->register_event = mem_cgroup_oom_register_event;
4165 event->unregister_event = mem_cgroup_oom_unregister_event;
4166 } else if (!strcmp(name, "memory.pressure_level")) {
4167 event->register_event = vmpressure_register_event;
4168 event->unregister_event = vmpressure_unregister_event;
4169 } else if (!strcmp(name, "memory.memsw.usage_in_bytes")) {
4170 event->register_event = memsw_cgroup_usage_register_event;
4171 event->unregister_event = memsw_cgroup_usage_unregister_event;
4172 } else {
4173 ret = -EINVAL;
4174 goto out_put_cfile;
4175 }
4176
 /*
  * Verify @cfile should belong to @css.  Also, remaining events are
  * automatically removed on cgroup destruction but the removal is
  * asynchronous, so take an extra ref on @css.
  */
4182 cfile_css = css_tryget_online_from_dir(cfile.file->f_path.dentry->d_parent,
4183 &memory_cgrp_subsys);
4184 ret = -EINVAL;
4185 if (IS_ERR(cfile_css))
4186 goto out_put_cfile;
4187 if (cfile_css != css) {
4188 css_put(cfile_css);
4189 goto out_put_cfile;
4190 }
4191
4192 ret = event->register_event(memcg, event->eventfd, buf);
4193 if (ret)
4194 goto out_put_css;
4195
4196 efile.file->f_op->poll(efile.file, &event->pt);
4197
4198 spin_lock(&memcg->event_list_lock);
4199 list_add(&event->list, &memcg->event_list);
4200 spin_unlock(&memcg->event_list_lock);
4201
4202 fdput(cfile);
4203 fdput(efile);
4204
4205 return nbytes;
4206
4207out_put_css:
4208 css_put(css);
4209out_put_cfile:
4210 fdput(cfile);
4211out_put_eventfd:
4212 eventfd_ctx_put(event->eventfd);
4213out_put_efile:
4214 fdput(efile);
4215out_kfree:
4216 kfree(event);
4217
4218 return ret;
4219}
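
/*
 * Example (illustrative) of registering a 100M usage threshold from
 * userspace via this legacy interface; eventfd(2), open(2) and
 * dprintf(3) are ordinary libc calls:
 *
 *	int efd = eventfd(0, 0);
 *	int cfd = open("memory.usage_in_bytes", O_RDONLY);
 *	int ecfd = open("cgroup.event_control", O_WRONLY);
 *	dprintf(ecfd, "%d %d 104857600", efd, cfd);
 *	read(efd, &cnt, sizeof(cnt));	(wakes when usage crosses 100M)
 */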
4220
4221static struct cftype mem_cgroup_legacy_files[] = {
4222 {
4223 .name = "usage_in_bytes",
4224 .private = MEMFILE_PRIVATE(_MEM, RES_USAGE),
4225 .read_u64 = mem_cgroup_read_u64,
4226 },
4227 {
4228 .name = "max_usage_in_bytes",
4229 .private = MEMFILE_PRIVATE(_MEM, RES_MAX_USAGE),
4230 .write = mem_cgroup_reset,
4231 .read_u64 = mem_cgroup_read_u64,
4232 },
4233 {
4234 .name = "limit_in_bytes",
4235 .private = MEMFILE_PRIVATE(_MEM, RES_LIMIT),
4236 .write = mem_cgroup_write,
4237 .read_u64 = mem_cgroup_read_u64,
4238 },
4239 {
4240 .name = "soft_limit_in_bytes",
4241 .private = MEMFILE_PRIVATE(_MEM, RES_SOFT_LIMIT),
4242 .write = mem_cgroup_write,
4243 .read_u64 = mem_cgroup_read_u64,
4244 },
4245 {
4246 .name = "failcnt",
4247 .private = MEMFILE_PRIVATE(_MEM, RES_FAILCNT),
4248 .write = mem_cgroup_reset,
4249 .read_u64 = mem_cgroup_read_u64,
4250 },
4251 {
4252 .name = "stat",
4253 .seq_show = memcg_stat_show,
4254 },
4255 {
4256 .name = "force_empty",
4257 .write = mem_cgroup_force_empty_write,
4258 },
4259 {
4260 .name = "use_hierarchy",
4261 .write_u64 = mem_cgroup_hierarchy_write,
4262 .read_u64 = mem_cgroup_hierarchy_read,
4263 },
4264 {
4265 .name = "cgroup.event_control",
4266 .write = memcg_write_event_control,
4267 .flags = CFTYPE_NO_PREFIX,
4268 .mode = S_IWUGO,
4269 },
4270 {
4271 .name = "swappiness",
4272 .read_u64 = mem_cgroup_swappiness_read,
4273 .write_u64 = mem_cgroup_swappiness_write,
4274 },
4275 {
4276 .name = "move_charge_at_immigrate",
4277 .read_u64 = mem_cgroup_move_charge_read,
4278 .write_u64 = mem_cgroup_move_charge_write,
4279 },
4280 {
4281 .name = "oom_control",
4282 .seq_show = mem_cgroup_oom_control_read,
4283 .write_u64 = mem_cgroup_oom_control_write,
4284 .private = MEMFILE_PRIVATE(_OOM_TYPE, OOM_CONTROL),
4285 },
4286 {
4287 .name = "pressure_level",
4288 },
4289#ifdef CONFIG_NUMA
4290 {
4291 .name = "numa_stat",
4292 .seq_show = memcg_numa_stat_show,
4293 },
4294#endif
4295#ifdef CONFIG_MEMCG_KMEM
4296 {
4297 .name = "kmem.limit_in_bytes",
4298 .private = MEMFILE_PRIVATE(_KMEM, RES_LIMIT),
4299 .write = mem_cgroup_write,
4300 .read_u64 = mem_cgroup_read_u64,
4301 },
4302 {
4303 .name = "kmem.usage_in_bytes",
4304 .private = MEMFILE_PRIVATE(_KMEM, RES_USAGE),
4305 .read_u64 = mem_cgroup_read_u64,
4306 },
4307 {
4308 .name = "kmem.failcnt",
4309 .private = MEMFILE_PRIVATE(_KMEM, RES_FAILCNT),
4310 .write = mem_cgroup_reset,
4311 .read_u64 = mem_cgroup_read_u64,
4312 },
4313 {
4314 .name = "kmem.max_usage_in_bytes",
4315 .private = MEMFILE_PRIVATE(_KMEM, RES_MAX_USAGE),
4316 .write = mem_cgroup_reset,
4317 .read_u64 = mem_cgroup_read_u64,
4318 },
4319#ifdef CONFIG_SLABINFO
4320 {
4321 .name = "kmem.slabinfo",
4322 .seq_start = slab_start,
4323 .seq_next = slab_next,
4324 .seq_stop = slab_stop,
4325 .seq_show = memcg_slab_show,
4326 },
4327#endif
4328#endif
4329 { },
4330};
4331
4332static int alloc_mem_cgroup_per_zone_info(struct mem_cgroup *memcg, int node)
4333{
4334 struct mem_cgroup_per_node *pn;
4335 struct mem_cgroup_per_zone *mz;
4336 int zone, tmp = node;
4337
 /*
  * This routine is called against possible nodes.
  * But it's BUG to call kmalloc() against offline node.
  *
  * TODO: this routine can waste much memory for nodes which will
  *       never be onlined. It's better to use memory hotplug callback
  *       function.
  */
4345 if (!node_state(node, N_NORMAL_MEMORY))
4346 tmp = -1;
4347 pn = kzalloc_node(sizeof(*pn), GFP_KERNEL, tmp);
4348 if (!pn)
4349 return 1;
4350
4351 for (zone = 0; zone < MAX_NR_ZONES; zone++) {
4352 mz = &pn->zoneinfo[zone];
4353 lruvec_init(&mz->lruvec);
4354 mz->usage_in_excess = 0;
4355 mz->on_tree = false;
4356 mz->memcg = memcg;
4357 }
4358 memcg->nodeinfo[node] = pn;
4359 return 0;
4360}
4361
4362static void free_mem_cgroup_per_zone_info(struct mem_cgroup *memcg, int node)
4363{
4364 kfree(memcg->nodeinfo[node]);
4365}
4366
4367static struct mem_cgroup *mem_cgroup_alloc(void)
4368{
4369 struct mem_cgroup *memcg;
4370 size_t size;
4371
4372 size = sizeof(struct mem_cgroup);
4373 size += nr_node_ids * sizeof(struct mem_cgroup_per_node *);
4374
4375 memcg = kzalloc(size, GFP_KERNEL);
4376 if (!memcg)
4377 return NULL;
4378
4379 memcg->stat = alloc_percpu(struct mem_cgroup_stat_cpu);
4380 if (!memcg->stat)
4381 goto out_free;
4382 spin_lock_init(&memcg->pcp_counter_lock);
4383 return memcg;
4384
4385out_free:
4386 kfree(memcg);
4387 return NULL;
4388}
4389
/*
 * At destroying mem_cgroup, references from swap_cgroup can remain.
 * (scanning all at force_empty is too costly...)
 *
 * Instead of clearing all references at force_empty, we remember
 * the number of references from swap_cgroup and free mem_cgroup when
 * it goes down to 0.
 *
 * Removal of cgroup itself succeeds regardless of refs from swap.
 */
4401static void __mem_cgroup_free(struct mem_cgroup *memcg)
4402{
4403 int node;
4404
4405 mem_cgroup_remove_from_trees(memcg);
4406
4407 for_each_node(node)
4408 free_mem_cgroup_per_zone_info(memcg, node);
4409
4410 free_percpu(memcg->stat);
4411 kfree(memcg);
4412}
4413
/*
 * Returns the parent mem_cgroup in memcgroup hierarchy with hierarchy
 * enabled.
 */
4417struct mem_cgroup *parent_mem_cgroup(struct mem_cgroup *memcg)
4418{
4419 if (!memcg->memory.parent)
4420 return NULL;
4421 return mem_cgroup_from_counter(memcg->memory.parent, memory);
4422}
4423EXPORT_SYMBOL(parent_mem_cgroup);
4424
4425static struct cgroup_subsys_state * __ref
4426mem_cgroup_css_alloc(struct cgroup_subsys_state *parent_css)
4427{
4428 struct mem_cgroup *memcg;
4429 long error = -ENOMEM;
4430 int node;
4431
4432 memcg = mem_cgroup_alloc();
4433 if (!memcg)
4434 return ERR_PTR(error);
4435
4436 for_each_node(node)
4437 if (alloc_mem_cgroup_per_zone_info(memcg, node))
4438 goto free_out;
4439
 /* root ? */
4441 if (parent_css == NULL) {
4442 root_mem_cgroup = memcg;
4443 page_counter_init(&memcg->memory, NULL);
4444 memcg->high = PAGE_COUNTER_MAX;
4445 memcg->soft_limit = PAGE_COUNTER_MAX;
4446 page_counter_init(&memcg->memsw, NULL);
4447 page_counter_init(&memcg->kmem, NULL);
4448 }
4449
4450 memcg->last_scanned_node = MAX_NUMNODES;
4451 INIT_LIST_HEAD(&memcg->oom_notify);
4452 memcg->move_charge_at_immigrate = 0;
4453 mutex_init(&memcg->thresholds_lock);
4454 spin_lock_init(&memcg->move_lock);
4455 vmpressure_init(&memcg->vmpressure);
4456 INIT_LIST_HEAD(&memcg->event_list);
4457 spin_lock_init(&memcg->event_list_lock);
4458#ifdef CONFIG_MEMCG_KMEM
4459 memcg->kmemcg_id = -1;
4460#endif
4461
4462 return &memcg->css;
4463
4464free_out:
4465 __mem_cgroup_free(memcg);
4466 return ERR_PTR(error);
4467}
4468
4469static int
4470mem_cgroup_css_online(struct cgroup_subsys_state *css)
4471{
4472 struct mem_cgroup *memcg = mem_cgroup_from_css(css);
4473 struct mem_cgroup *parent = mem_cgroup_from_css(css->parent);
4474 int ret;
4475
4476 if (css->id > MEM_CGROUP_ID_MAX)
4477 return -ENOSPC;
4478
4479 if (!parent)
4480 return 0;
4481
4482 mutex_lock(&memcg_create_mutex);
4483
4484 memcg->use_hierarchy = parent->use_hierarchy;
4485 memcg->oom_kill_disable = parent->oom_kill_disable;
4486 memcg->swappiness = mem_cgroup_swappiness(parent);
4487
4488 if (parent->use_hierarchy) {
4489 page_counter_init(&memcg->memory, &parent->memory);
4490 memcg->high = PAGE_COUNTER_MAX;
4491 memcg->soft_limit = PAGE_COUNTER_MAX;
4492 page_counter_init(&memcg->memsw, &parent->memsw);
4493 page_counter_init(&memcg->kmem, &parent->kmem);
4494
 /*
  * No need to take a reference to the parent because cgroup
  * core guarantees its existence.
  */
4499 } else {
4500 page_counter_init(&memcg->memory, NULL);
4501 memcg->high = PAGE_COUNTER_MAX;
4502 memcg->soft_limit = PAGE_COUNTER_MAX;
4503 page_counter_init(&memcg->memsw, NULL);
4504 page_counter_init(&memcg->kmem, NULL);
4505
 /*
  * Deeper hierarchy with use_hierarchy == false doesn't make
  * much sense, so flag the hierarchy as broken to let cgroup
  * core warn about this unexpected configuration.
  */
4510 if (parent != root_mem_cgroup)
4511 memory_cgrp_subsys.broken_hierarchy = true;
4512 }
4513 mutex_unlock(&memcg_create_mutex);
4514
4515 ret = memcg_init_kmem(memcg, &memory_cgrp_subsys);
4516 if (ret)
4517 return ret;
4518
 /*
  * Make sure the memcg is initialized: mem_cgroup_iter()
  * orders reading memcg->initialized against its callers
  * reading the memcg members.
  */
4524 smp_store_release(&memcg->initialized, 1);
4525
4526 return 0;
4527}
4528
4529static void mem_cgroup_css_offline(struct cgroup_subsys_state *css)
4530{
4531 struct mem_cgroup *memcg = mem_cgroup_from_css(css);
4532 struct mem_cgroup_event *event, *tmp;
4533
 /*
  * Unregister events and notify userspace.
  * Notify userspace about cgroup removing only after rmdir of cgroup
  * directory to avoid race between userspace and kernelspace.
  */
4539 spin_lock(&memcg->event_list_lock);
4540 list_for_each_entry_safe(event, tmp, &memcg->event_list, list) {
4541 list_del_init(&event->list);
4542 schedule_work(&event->remove);
4543 }
4544 spin_unlock(&memcg->event_list_lock);
4545
4546 vmpressure_cleanup(&memcg->vmpressure);
4547
4548 memcg_deactivate_kmem(memcg);
4549}
4550
4551static void mem_cgroup_css_free(struct cgroup_subsys_state *css)
4552{
4553 struct mem_cgroup *memcg = mem_cgroup_from_css(css);
4554
4555 memcg_destroy_kmem(memcg);
4556 __mem_cgroup_free(memcg);
4557}
4558
/**
 * mem_cgroup_css_reset - reset the states of a mem_cgroup
 * @css: the target css
 *
 * Reset the states of the mem_cgroup associated with @css.  This is
 * invoked when the userland requests disabling on the default hierarchy
 * but the memcg is pinned through dependency.  The memcg should stop
 * applying policies and should revert to the vanilla state as it may be
 * made visible again.
 *
 * The current implementation only resets the essential configurations.
 * This needs to be expanded to cover all the visible parts.
 */
4572static void mem_cgroup_css_reset(struct cgroup_subsys_state *css)
4573{
4574 struct mem_cgroup *memcg = mem_cgroup_from_css(css);
4575
4576 mem_cgroup_resize_limit(memcg, PAGE_COUNTER_MAX);
4577 mem_cgroup_resize_memsw_limit(memcg, PAGE_COUNTER_MAX);
4578 memcg_update_kmem_limit(memcg, PAGE_COUNTER_MAX);
4579 memcg->low = 0;
4580 memcg->high = PAGE_COUNTER_MAX;
4581 memcg->soft_limit = PAGE_COUNTER_MAX;
4582}
4583
4584#ifdef CONFIG_MMU
/* Handlers for move charge at task migration. */
4586static int mem_cgroup_do_precharge(unsigned long count)
4587{
4588 int ret;
4589
 /* Try a single bulk charge without reclaim first */
4591 ret = try_charge(mc.to, GFP_KERNEL & ~__GFP_WAIT, count);
4592 if (!ret) {
4593 mc.precharge += count;
4594 return ret;
4595 }
4596 if (ret == -EINTR) {
4597 cancel_charge(root_mem_cgroup, count);
4598 return ret;
4599 }

 /* Try charges one by one with reclaim */
4602 while (count--) {
4603 ret = try_charge(mc.to, GFP_KERNEL & ~__GFP_NORETRY, 1);
 /*
  * In case of failure, any residual charges against
  * mc.to will be dropped by mem_cgroup_clear_mc()
  * later on.  However, cancel any charges that are
  * bypassed to root right away or they'll be lost.
  */
4610 if (ret == -EINTR)
4611 cancel_charge(root_mem_cgroup, 1);
4612 if (ret)
4613 return ret;
4614 mc.precharge++;
4615 cond_resched();
4616 }
4617 return 0;
4618}
4619
/**
 * get_mctgt_type - get target type of moving charge
 * @vma: the vma the pte to be checked belongs
 * @addr: the address corresponding to the pte to be checked
 * @ptent: the pte to be checked
 * @target: the pointer the target page or swap ent will be stored (can be NULL)
 *
 * Returns
 *   0(MC_TARGET_NONE): if the pte is not a target for move charge.
 *   1(MC_TARGET_PAGE): if the page corresponding to this pte is a target for
 *     move charge. if @target is not NULL, the page is stored in target->page
 *     with an extra refcount taken (callers should handle it).
 *   2(MC_TARGET_SWAP): if the swap entry corresponding to this pte is a
 *     target for charge migration. if @target is not NULL, the entry is
 *     stored in target->ent.
 *
 * Called with pte lock held.
 */
4638union mc_target {
4639 struct page *page;
4640 swp_entry_t ent;
4641};
4642
4643enum mc_target_type {
4644 MC_TARGET_NONE = 0,
4645 MC_TARGET_PAGE,
4646 MC_TARGET_SWAP,
4647};
4648
4649static struct page *mc_handle_present_pte(struct vm_area_struct *vma,
4650 unsigned long addr, pte_t ptent)
4651{
4652 struct page *page = vm_normal_page(vma, addr, ptent);
4653
4654 if (!page || !page_mapped(page))
4655 return NULL;
4656 if (PageAnon(page)) {
4657 if (!(mc.flags & MOVE_ANON))
4658 return NULL;
4659 } else {
4660 if (!(mc.flags & MOVE_FILE))
4661 return NULL;
4662 }
4663 if (!get_page_unless_zero(page))
4664 return NULL;
4665
4666 return page;
4667}
4668
4669#ifdef CONFIG_SWAP
4670static struct page *mc_handle_swap_pte(struct vm_area_struct *vma,
4671 unsigned long addr, pte_t ptent, swp_entry_t *entry)
4672{
4673 struct page *page = NULL;
4674 swp_entry_t ent = pte_to_swp_entry(ptent);
4675
4676 if (!(mc.flags & MOVE_ANON) || non_swap_entry(ent))
4677 return NULL;
4678
 /*
  * Because lookup_swap_cache() updates some statistics counter,
  * we call find_get_page() with swapper_space directly.
  */
4682 page = find_get_page(swap_address_space(ent), ent.val);
4683 if (do_swap_account)
4684 entry->val = ent.val;
4685
4686 return page;
4687}
4688#else
4689static struct page *mc_handle_swap_pte(struct vm_area_struct *vma,
4690 unsigned long addr, pte_t ptent, swp_entry_t *entry)
4691{
4692 return NULL;
4693}
4694#endif
4695
4696static struct page *mc_handle_file_pte(struct vm_area_struct *vma,
4697 unsigned long addr, pte_t ptent, swp_entry_t *entry)
4698{
4699 struct page *page = NULL;
4700 struct address_space *mapping;
4701 pgoff_t pgoff;
4702
4703 if (!vma->vm_file)
4704 return NULL;
4705 if (!(mc.flags & MOVE_FILE))
4706 return NULL;
4707
4708 mapping = vma->vm_file->f_mapping;
4709 pgoff = linear_page_index(vma, addr);

 /* page is moved even if it's not RSS of this task (page-faulted). */
4712#ifdef CONFIG_SWAP
 /* shmem/tmpfs may report page out on swap: account for that too. */
4714 if (shmem_mapping(mapping)) {
4715 page = find_get_entry(mapping, pgoff);
4716 if (radix_tree_exceptional_entry(page)) {
4717 swp_entry_t swp = radix_to_swp_entry(page);
4718 if (do_swap_account)
4719 *entry = swp;
4720 page = find_get_page(swap_address_space(swp), swp.val);
4721 }
4722 } else
4723 page = find_get_page(mapping, pgoff);
4724#else
4725 page = find_get_page(mapping, pgoff);
4726#endif
4727 return page;
4728}
4729
/**
 * mem_cgroup_move_account - move account of the page
 * @page: the page
 * @nr_pages: number of regular pages (>1 for huge pages)
 * @from: mem_cgroup which the page is moved from.
 * @to: mem_cgroup which the page is moved to. @from != @to.
 *
 * The caller must confirm the following:
 * - page is not on LRU (isolate_page() is useful.)
 *
 * This function doesn't do "charge" to the new cgroup and doesn't do
 * "uncharge" from the old cgroup.
 */
4744static int mem_cgroup_move_account(struct page *page,
4745 unsigned int nr_pages,
4746 struct mem_cgroup *from,
4747 struct mem_cgroup *to)
4748{
4749 unsigned long flags;
4750 int ret;
4751
4752 VM_BUG_ON(from == to);
4753 VM_BUG_ON_PAGE(PageLRU(page), page);
4754
 /*
  * A transhuge page must still be huge when we get here: the caller
  * accounted for nr_pages, so bail out if the page was split
  * underneath us.
  */
4760 ret = -EBUSY;
4761 if (nr_pages > 1 && !PageTransHuge(page))
4762 goto out;
4763
 /*
  * Prevent mem_cgroup_migrate() from looking at page->mem_cgroup
  * of its source page while we change it: page migration takes
  * both pages off the LRU, but page cache replacement doesn't.
  */
4769 if (!trylock_page(page))
4770 goto out;
4771
4772 ret = -EINVAL;
4773 if (page->mem_cgroup != from)
4774 goto out_unlock;
4775
4776 spin_lock_irqsave(&from->move_lock, flags);
4777
4778 if (!PageAnon(page) && page_mapped(page)) {
4779 __this_cpu_sub(from->stat->count[MEM_CGROUP_STAT_FILE_MAPPED],
4780 nr_pages);
4781 __this_cpu_add(to->stat->count[MEM_CGROUP_STAT_FILE_MAPPED],
4782 nr_pages);
4783 }
4784
4785 if (PageWriteback(page)) {
4786 __this_cpu_sub(from->stat->count[MEM_CGROUP_STAT_WRITEBACK],
4787 nr_pages);
4788 __this_cpu_add(to->stat->count[MEM_CGROUP_STAT_WRITEBACK],
4789 nr_pages);
4790 }
4791
 /*
  * It is safe to change page->mem_cgroup here because the page
  * is referenced, charged, and isolated - we can't race with
  * uncharging, charging, migration, or LRU putback.
  */

 /* caller should have done css_get */
4799 page->mem_cgroup = to;
4800 spin_unlock_irqrestore(&from->move_lock, flags);
4801
4802 ret = 0;
4803
4804 local_irq_disable();
4805 mem_cgroup_charge_statistics(to, page, nr_pages);
4806 memcg_check_events(to, page);
4807 mem_cgroup_charge_statistics(from, page, -nr_pages);
4808 memcg_check_events(from, page);
4809 local_irq_enable();
4810out_unlock:
4811 unlock_page(page);
4812out:
4813 return ret;
4814}
4815
4816static enum mc_target_type get_mctgt_type(struct vm_area_struct *vma,
4817 unsigned long addr, pte_t ptent, union mc_target *target)
4818{
4819 struct page *page = NULL;
4820 enum mc_target_type ret = MC_TARGET_NONE;
4821 swp_entry_t ent = { .val = 0 };
4822
4823 if (pte_present(ptent))
4824 page = mc_handle_present_pte(vma, addr, ptent);
4825 else if (is_swap_pte(ptent))
4826 page = mc_handle_swap_pte(vma, addr, ptent, &ent);
4827 else if (pte_none(ptent))
4828 page = mc_handle_file_pte(vma, addr, ptent, &ent);
4829
4830 if (!page && !ent.val)
4831 return ret;
4832 if (page) {
 /*
  * Do only a loose check w/o serialization.
  * mem_cgroup_move_account() checks whether the page is valid
  * under LRU exclusion.
  */
4838 if (page->mem_cgroup == mc.from) {
4839 ret = MC_TARGET_PAGE;
4840 if (target)
4841 target->page = page;
4842 }
4843 if (!ret || !target)
4844 put_page(page);
4845 }

 /* There is a swap entry and a page doesn't exist or isn't charged */
4847 if (ent.val && !ret &&
4848 mem_cgroup_id(mc.from) == lookup_swap_cgroup_id(ent)) {
4849 ret = MC_TARGET_SWAP;
4850 if (target)
4851 target->ent = ent;
4852 }
4853 return ret;
4854}
4855
4856#ifdef CONFIG_TRANSPARENT_HUGEPAGE
/*
 * We don't consider swapping or file mapped pages because THP does not
 * support them for now.
 * Caller should make sure that pmd_trans_huge(pmd) is true.
 */
4862static enum mc_target_type get_mctgt_type_thp(struct vm_area_struct *vma,
4863 unsigned long addr, pmd_t pmd, union mc_target *target)
4864{
4865 struct page *page = NULL;
4866 enum mc_target_type ret = MC_TARGET_NONE;
4867
4868 page = pmd_page(pmd);
4869 VM_BUG_ON_PAGE(!page || !PageHead(page), page);
4870 if (!(mc.flags & MOVE_ANON))
4871 return ret;
4872 if (page->mem_cgroup == mc.from) {
4873 ret = MC_TARGET_PAGE;
4874 if (target) {
4875 get_page(page);
4876 target->page = page;
4877 }
4878 }
4879 return ret;
4880}
4881#else
4882static inline enum mc_target_type get_mctgt_type_thp(struct vm_area_struct *vma,
4883 unsigned long addr, pmd_t pmd, union mc_target *target)
4884{
4885 return MC_TARGET_NONE;
4886}
4887#endif
4888
4889static int mem_cgroup_count_precharge_pte_range(pmd_t *pmd,
4890 unsigned long addr, unsigned long end,
4891 struct mm_walk *walk)
4892{
4893 struct vm_area_struct *vma = walk->vma;
4894 pte_t *pte;
4895 spinlock_t *ptl;
4896
4897 if (pmd_trans_huge_lock(pmd, vma, &ptl) == 1) {
4898 if (get_mctgt_type_thp(vma, addr, *pmd, NULL) == MC_TARGET_PAGE)
4899 mc.precharge += HPAGE_PMD_NR;
4900 spin_unlock(ptl);
4901 return 0;
4902 }
4903
4904 if (pmd_trans_unstable(pmd))
4905 return 0;
4906 pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl);
4907 for (; addr != end; pte++, addr += PAGE_SIZE)
4908 if (get_mctgt_type(vma, addr, *pte, NULL))
4909 mc.precharge++;
4910 pte_unmap_unlock(pte - 1, ptl);
4911 cond_resched();
4912
4913 return 0;
4914}
4915
4916static unsigned long mem_cgroup_count_precharge(struct mm_struct *mm)
4917{
4918 unsigned long precharge;
4919
4920 struct mm_walk mem_cgroup_count_precharge_walk = {
4921 .pmd_entry = mem_cgroup_count_precharge_pte_range,
4922 .mm = mm,
4923 };
4924 down_read(&mm->mmap_sem);
4925 walk_page_range(0, ~0UL, &mem_cgroup_count_precharge_walk);
4926 up_read(&mm->mmap_sem);
4927
4928 precharge = mc.precharge;
4929 mc.precharge = 0;
4930
4931 return precharge;
4932}
4933
4934static int mem_cgroup_precharge_mc(struct mm_struct *mm)
4935{
4936 unsigned long precharge = mem_cgroup_count_precharge(mm);
4937
4938 VM_BUG_ON(mc.moving_task);
4939 mc.moving_task = current;
4940 return mem_cgroup_do_precharge(precharge);
4941}
4942
4943
4944static void __mem_cgroup_clear_mc(void)
4945{
4946 struct mem_cgroup *from = mc.from;
4947 struct mem_cgroup *to = mc.to;
4948
 /* we must uncharge all the leftover precharges from mc.to */
4950 if (mc.precharge) {
4951 cancel_charge(mc.to, mc.precharge);
4952 mc.precharge = 0;
4953 }

 /*
  * we didn't uncharge from mc.from at mem_cgroup_move_account(), so
  * we must uncharge here.
  */
4958 if (mc.moved_charge) {
4959 cancel_charge(mc.from, mc.moved_charge);
4960 mc.moved_charge = 0;
4961 }

 /* we must fixup refcnts and charges */
4963 if (mc.moved_swap) {
 /* uncharge swap account from the old cgroup */
4965 if (!mem_cgroup_is_root(mc.from))
4966 page_counter_uncharge(&mc.from->memsw, mc.moved_swap);
4967
 /*
  * we charged both to->memory and to->memsw, so we
  * should uncharge to->memory.
  */
4972 if (!mem_cgroup_is_root(mc.to))
4973 page_counter_uncharge(&mc.to->memory, mc.moved_swap);
4974
4975 css_put_many(&mc.from->css, mc.moved_swap);

 /* we've already done css_get(mc.to) */
4978 mc.moved_swap = 0;
4979 }
4980 memcg_oom_recover(from);
4981 memcg_oom_recover(to);
4982 wake_up_all(&mc.waitq);
4983}
4984
4985static void mem_cgroup_clear_mc(void)
4986{
 /*
  * we must clear moving_task before waking up waiters at the end of
  * task migration.
  */
4991 mc.moving_task = NULL;
4992 __mem_cgroup_clear_mc();
4993 spin_lock(&mc.lock);
4994 mc.from = NULL;
4995 mc.to = NULL;
4996 spin_unlock(&mc.lock);
4997}
4998
4999static int mem_cgroup_can_attach(struct cgroup_subsys_state *css,
5000 struct cgroup_taskset *tset)
5001{
5002 struct task_struct *p = cgroup_taskset_first(tset);
5003 int ret = 0;
5004 struct mem_cgroup *memcg = mem_cgroup_from_css(css);
5005 unsigned long move_flags;
5006
 /*
  * We are now committed to this value whatever it is. Changes in
  * this tunable will only affect upcoming migrations, not the
  * current one. So we need to save it, and keep it going.
  */
5012 move_flags = READ_ONCE(memcg->move_charge_at_immigrate);
5013 if (move_flags) {
5014 struct mm_struct *mm;
5015 struct mem_cgroup *from = mem_cgroup_from_task(p);
5016
5017 VM_BUG_ON(from == memcg);
5018
5019 mm = get_task_mm(p);
5020 if (!mm)
5021 return 0;
5022
5023 if (mm->owner == p) {
5024 VM_BUG_ON(mc.from);
5025 VM_BUG_ON(mc.to);
5026 VM_BUG_ON(mc.precharge);
5027 VM_BUG_ON(mc.moved_charge);
5028 VM_BUG_ON(mc.moved_swap);
5029
5030 spin_lock(&mc.lock);
5031 mc.from = from;
5032 mc.to = memcg;
5033 mc.flags = move_flags;
5034 spin_unlock(&mc.lock);
 /* We set mc.moving_task later */

5037 ret = mem_cgroup_precharge_mc(mm);
5038 if (ret)
5039 mem_cgroup_clear_mc();
5040 }
5041 mmput(mm);
5042 }
5043 return ret;
5044}
5045
5046static void mem_cgroup_cancel_attach(struct cgroup_subsys_state *css,
5047 struct cgroup_taskset *tset)
5048{
5049 if (mc.to)
5050 mem_cgroup_clear_mc();
5051}
5052
5053static int mem_cgroup_move_charge_pte_range(pmd_t *pmd,
5054 unsigned long addr, unsigned long end,
5055 struct mm_walk *walk)
5056{
5057 int ret = 0;
5058 struct vm_area_struct *vma = walk->vma;
5059 pte_t *pte;
5060 spinlock_t *ptl;
5061 enum mc_target_type target_type;
5062 union mc_target target;
5063 struct page *page;
5064
 /*
  * We don't take compound_lock() here but no race with splitting thp
  * happens because:
  *  - if pmd_trans_huge_lock() returns 1, the relevant thp is not
  *    under splitting, which means there's no concurrent thp split,
  *  - if another thread runs into split_huge_page() just after we
  *    entered this if-block, the thread must wait for page table lock
  *    to be unlocked in __split_huge_page_splitting(), where the main
  *    part of thp split is not executed yet.
  */
5075 if (pmd_trans_huge_lock(pmd, vma, &ptl) == 1) {
5076 if (mc.precharge < HPAGE_PMD_NR) {
5077 spin_unlock(ptl);
5078 return 0;
5079 }
5080 target_type = get_mctgt_type_thp(vma, addr, *pmd, &target);
5081 if (target_type == MC_TARGET_PAGE) {
5082 page = target.page;
5083 if (!isolate_lru_page(page)) {
5084 if (!mem_cgroup_move_account(page, HPAGE_PMD_NR,
5085 mc.from, mc.to)) {
5086 mc.precharge -= HPAGE_PMD_NR;
5087 mc.moved_charge += HPAGE_PMD_NR;
5088 }
5089 putback_lru_page(page);
5090 }
5091 put_page(page);
5092 }
5093 spin_unlock(ptl);
5094 return 0;
5095 }
5096
5097 if (pmd_trans_unstable(pmd))
5098 return 0;
5099retry:
5100 pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl);
5101 for (; addr != end; addr += PAGE_SIZE) {
5102 pte_t ptent = *(pte++);
5103 swp_entry_t ent;
5104
5105 if (!mc.precharge)
5106 break;
5107
5108 switch (get_mctgt_type(vma, addr, ptent, &target)) {
5109 case MC_TARGET_PAGE:
5110 page = target.page;
5111 if (isolate_lru_page(page))
5112 goto put;
5113 if (!mem_cgroup_move_account(page, 1, mc.from, mc.to)) {
5114 mc.precharge--;
 /* we uncharge from mc.from later. */
5116 mc.moved_charge++;
5117 }
5118 putback_lru_page(page);
5119put:
5120 put_page(page);
5121 break;
5122 case MC_TARGET_SWAP:
5123 ent = target.ent;
5124 if (!mem_cgroup_move_swap_account(ent, mc.from, mc.to)) {
5125 mc.precharge--;
 /* we fixup refcnts and charges later. */
5127 mc.moved_swap++;
5128 }
5129 break;
5130 default:
5131 break;
5132 }
5133 }
5134 pte_unmap_unlock(pte - 1, ptl);
5135 cond_resched();
5136
5137 if (addr != end) {
 /*
  * We have consumed all precharges we got in can_attach().
  * We try charge one by one, but don't do any additional
  * charges to mc.to if we have failed in charge once in
  * attach() phase.
  */
5144 ret = mem_cgroup_do_precharge(1);
5145 if (!ret)
5146 goto retry;
5147 }
5148
5149 return ret;
5150}
5151
5152static void mem_cgroup_move_charge(struct mm_struct *mm)
5153{
5154 struct mm_walk mem_cgroup_move_charge_walk = {
5155 .pmd_entry = mem_cgroup_move_charge_pte_range,
5156 .mm = mm,
5157 };
5158
5159 lru_add_drain_all();
5160
 /*
  * Signal mem_cgroup_begin_page_stat() to take the memcg's
  * move_lock while we're moving its pages to another memcg.
  * Then wait for already started RCU-only updates to finish.
  */
5165 atomic_inc(&mc.from->moving_account);
5166 synchronize_rcu();
5167retry:
5168 if (unlikely(!down_read_trylock(&mm->mmap_sem))) {
 /*
  * Someone who is holding the mmap_sem might be waiting in
  * waitq. So we cancel all extra charges, wake up all waiters,
  * and retry. Because we cancel precharges, we might not be
  * able to move enough charges, but moving charge is a
  * best-effort feature anyway, so it wouldn't be a big problem.
  */
5176 __mem_cgroup_clear_mc();
5177 cond_resched();
5178 goto retry;
5179 }
5180
 /*
  * When we have consumed all precharges and failed in doing
  * additional charge, the page walk just aborts.
  */
5184 walk_page_range(0, ~0UL, &mem_cgroup_move_charge_walk);
5185 up_read(&mm->mmap_sem);
5186 atomic_dec(&mc.from->moving_account);
5187}
5188
5189static void mem_cgroup_move_task(struct cgroup_subsys_state *css,
5190 struct cgroup_taskset *tset)
5191{
5192 struct task_struct *p = cgroup_taskset_first(tset);
5193 struct mm_struct *mm = get_task_mm(p);
5194
5195 if (mm) {
5196 if (mc.to)
5197 mem_cgroup_move_charge(mm);
5198 mmput(mm);
5199 }
5200 if (mc.to)
5201 mem_cgroup_clear_mc();
5202}
5203#else
5204static int mem_cgroup_can_attach(struct cgroup_subsys_state *css,
5205 struct cgroup_taskset *tset)
5206{
5207 return 0;
5208}
5209static void mem_cgroup_cancel_attach(struct cgroup_subsys_state *css,
5210 struct cgroup_taskset *tset)
5211{
5212}
5213static void mem_cgroup_move_task(struct cgroup_subsys_state *css,
5214 struct cgroup_taskset *tset)
5215{
5216}
5217#endif
5218
/*
 * Cgroup retains root cgroups across [un]mount cycles making it necessary
 * to verify whether we're attached to the default hierarchy on each mount
 * attempt.
 */
5224static void mem_cgroup_bind(struct cgroup_subsys_state *root_css)
5225{
 /*
  * use_hierarchy is forced on the default hierarchy.  cgroup core
  * guarantees that @root doesn't have any children, so turning it
  * on for the root memcg is enough.
  */
5231 if (cgroup_on_dfl(root_css->cgroup))
5232 root_mem_cgroup->use_hierarchy = true;
5233 else
5234 root_mem_cgroup->use_hierarchy = false;
5235}

static u64 memory_current_read(struct cgroup_subsys_state *css,
			       struct cftype *cft)
{
	return mem_cgroup_usage(mem_cgroup_from_css(css), false);
}

static int memory_low_show(struct seq_file *m, void *v)
{
	struct mem_cgroup *memcg = mem_cgroup_from_css(seq_css(m));
	unsigned long low = READ_ONCE(memcg->low);

	if (low == PAGE_COUNTER_MAX)
		seq_puts(m, "max\n");
	else
		seq_printf(m, "%llu\n", (u64)low * PAGE_SIZE);

	return 0;
}

static ssize_t memory_low_write(struct kernfs_open_file *of,
				char *buf, size_t nbytes, loff_t off)
{
	struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of));
	unsigned long low;
	int err;

	buf = strstrip(buf);
	err = page_counter_memparse(buf, "max", &low);
	if (err)
		return err;

	memcg->low = low;

	return nbytes;
}

static int memory_high_show(struct seq_file *m, void *v)
{
	struct mem_cgroup *memcg = mem_cgroup_from_css(seq_css(m));
	unsigned long high = READ_ONCE(memcg->high);

	if (high == PAGE_COUNTER_MAX)
		seq_puts(m, "max\n");
	else
		seq_printf(m, "%llu\n", (u64)high * PAGE_SIZE);

	return 0;
}

static ssize_t memory_high_write(struct kernfs_open_file *of,
				 char *buf, size_t nbytes, loff_t off)
{
	struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of));
	unsigned long high;
	int err;

	buf = strstrip(buf);
	err = page_counter_memparse(buf, "max", &high);
	if (err)
		return err;

	memcg->high = high;

	return nbytes;
}

static int memory_max_show(struct seq_file *m, void *v)
{
	struct mem_cgroup *memcg = mem_cgroup_from_css(seq_css(m));
	unsigned long max = READ_ONCE(memcg->memory.limit);

	if (max == PAGE_COUNTER_MAX)
		seq_puts(m, "max\n");
	else
		seq_printf(m, "%llu\n", (u64)max * PAGE_SIZE);

	return 0;
}

static ssize_t memory_max_write(struct kernfs_open_file *of,
				char *buf, size_t nbytes, loff_t off)
{
	struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of));
	unsigned long max;
	int err;

	buf = strstrip(buf);
	err = page_counter_memparse(buf, "max", &max);
	if (err)
		return err;

	err = mem_cgroup_resize_limit(memcg, max);
	if (err)
		return err;

	return nbytes;
}
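
/*
 * Example (editor's sketch, not part of the original source): the
 * handlers above back the memory.low/high/max interface files on the
 * unified cgroup hierarchy.  page_counter_memparse() accepts a byte
 * count with an optional K/M/G suffix, or the literal string "max":
 *
 *	# echo 512M > memory.high
 *	# echo max > memory.max
 *	# cat memory.current
 */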

static int memory_events_show(struct seq_file *m, void *v)
{
	struct mem_cgroup *memcg = mem_cgroup_from_css(seq_css(m));

	seq_printf(m, "low %lu\n", mem_cgroup_read_events(memcg, MEMCG_LOW));
	seq_printf(m, "high %lu\n", mem_cgroup_read_events(memcg, MEMCG_HIGH));
	seq_printf(m, "max %lu\n", mem_cgroup_read_events(memcg, MEMCG_MAX));
	seq_printf(m, "oom %lu\n", mem_cgroup_read_events(memcg, MEMCG_OOM));

	return 0;
}

static struct cftype memory_files[] = {
	{
		.name = "current",
		.read_u64 = memory_current_read,
	},
	{
		.name = "low",
		.flags = CFTYPE_NOT_ON_ROOT,
		.seq_show = memory_low_show,
		.write = memory_low_write,
	},
	{
		.name = "high",
		.flags = CFTYPE_NOT_ON_ROOT,
		.seq_show = memory_high_show,
		.write = memory_high_write,
	},
	{
		.name = "max",
		.flags = CFTYPE_NOT_ON_ROOT,
		.seq_show = memory_max_show,
		.write = memory_max_write,
	},
	{
		.name = "events",
		.flags = CFTYPE_NOT_ON_ROOT,
		.seq_show = memory_events_show,
	},
	{ }
};

struct cgroup_subsys memory_cgrp_subsys = {
	.css_alloc = mem_cgroup_css_alloc,
	.css_online = mem_cgroup_css_online,
	.css_offline = mem_cgroup_css_offline,
	.css_free = mem_cgroup_css_free,
	.css_reset = mem_cgroup_css_reset,
	.can_attach = mem_cgroup_can_attach,
	.cancel_attach = mem_cgroup_cancel_attach,
	.attach = mem_cgroup_move_task,
	.bind = mem_cgroup_bind,
	.dfl_cftypes = memory_files,
	.legacy_cftypes = mem_cgroup_legacy_files,
	.early_init = 0,
};

/**
 * mem_cgroup_events - count memory events against a cgroup
 * @memcg: the memory cgroup
 * @idx: the event index
 * @nr: the number of events to account for
 */
void mem_cgroup_events(struct mem_cgroup *memcg,
		       enum mem_cgroup_events_index idx,
		       unsigned int nr)
{
	this_cpu_add(memcg->stat->events[idx], nr);
}
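
/*
 * Example (editor's sketch, not part of the original source): this is
 * how the MEMCG_* counters reported by memory_events_show() above get
 * bumped, e.g. when reclaim breaches a group's low boundary:
 *
 *	mem_cgroup_events(memcg, MEMCG_LOW, 1);
 */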

/**
 * mem_cgroup_low - check if memory consumption is below the normal range
 * @root: the highest ancestor to consider
 * @memcg: the memory cgroup to check
 *
 * Returns %true if memory consumption of @memcg, and that of all
 * ancestors up to (but not including) @root, is below the normal range.
 */
bool mem_cgroup_low(struct mem_cgroup *root, struct mem_cgroup *memcg)
{
	if (mem_cgroup_disabled())
		return false;

	/*
	 * The toplevel group doesn't have a configurable range, so
	 * it's never low when looked at directly, and it is not
	 * considered an ancestor when assessing the hierarchy.
	 */
	if (memcg == root_mem_cgroup)
		return false;

	if (page_counter_read(&memcg->memory) >= memcg->low)
		return false;

	while (memcg != root) {
		memcg = parent_mem_cgroup(memcg);

		if (memcg == root_mem_cgroup)
			break;

		if (page_counter_read(&memcg->memory) >= memcg->low)
			return false;
	}
	return true;
}
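
/*
 * Example (editor's sketch, not part of the original source): a
 * reclaim loop can use this to skip cgroups that are below their
 * memory.low boundary unless reclaim is struggling elsewhere, along
 * the lines of:
 *
 *	if (mem_cgroup_low(root, memcg)) {
 *		if (!scan_control_allows_thrashing)
 *			continue;
 *		mem_cgroup_events(memcg, MEMCG_LOW, 1);
 *	}
 *
 * "scan_control_allows_thrashing" is a stand-in for the caller's
 * reclaim-pressure state, not a kernel symbol.
 */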

/**
 * mem_cgroup_try_charge - try charging a page
 * @page: page to charge
 * @mm: mm context of the victim
 * @gfp_mask: reclaim mode
 * @memcgp: charged memcg return
 *
 * Try to charge @page to the memcg that @mm belongs to, reclaiming
 * pages according to @gfp_mask if necessary.
 *
 * Returns 0 on success, with *@memcgp pointing to the charged memcg.
 * Otherwise, an error code is returned.
 *
 * After page->mapping has been set up, the caller must finalize the
 * charge with mem_cgroup_commit_charge().  Or abort the transaction
 * with mem_cgroup_cancel_charge() in case page instantiation fails.
 */
int mem_cgroup_try_charge(struct page *page, struct mm_struct *mm,
			  gfp_t gfp_mask, struct mem_cgroup **memcgp)
{
	struct mem_cgroup *memcg = NULL;
	unsigned int nr_pages = 1;
	int ret = 0;

	if (mem_cgroup_disabled())
		goto out;

	if (PageSwapCache(page)) {
		/*
		 * Every swap fault against a single page tries to charge the
		 * page, bail as early as possible.  shmem_unuse() encounters
		 * already charged pages, too.  page->mem_cgroup is protected
		 * by the page lock, which serializes swap cache removal,
		 * which in turn serializes uncharging.
		 */
		if (page->mem_cgroup)
			goto out;
	}

	if (PageTransHuge(page)) {
		nr_pages <<= compound_order(page);
		VM_BUG_ON_PAGE(!PageTransHuge(page), page);
	}

	if (do_swap_account && PageSwapCache(page))
		memcg = try_get_mem_cgroup_from_page(page);
	if (!memcg)
		memcg = get_mem_cgroup_from_mm(mm);

	ret = try_charge(memcg, gfp_mask, nr_pages);

	css_put(&memcg->css);

	if (ret == -EINTR) {
		memcg = root_mem_cgroup;
		ret = 0;
	}
out:
	*memcgp = memcg;
	return ret;
}
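
/*
 * Example (editor's sketch, not part of the original source): the
 * charge transaction as seen from a caller such as the anonymous
 * fault path:
 *
 *	struct mem_cgroup *memcg;
 *
 *	if (mem_cgroup_try_charge(page, mm, GFP_KERNEL, &memcg))
 *		goto oom;
 *	if (map_page_fails)
 *		mem_cgroup_cancel_charge(page, memcg);
 *	else
 *		mem_cgroup_commit_charge(page, memcg, false);
 *
 * "map_page_fails" stands in for whatever step publishes the page
 * (setting the PTE, inserting into the page cache); it is
 * hypothetical, not a kernel symbol.
 */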

/**
 * mem_cgroup_commit_charge - commit a page charge
 * @page: page to charge
 * @memcg: memcg to charge the page to
 * @lrucare: page might be on LRU already
 *
 * Finalize a charge transaction started by mem_cgroup_try_charge(),
 * after page->mapping has been set up.  This must happen atomically
 * as part of the page instantiation, i.e. under the page table lock
 * for anonymous pages, under the page lock for page and swap cache.
 *
 * In addition, the page must not be on the LRU during the commit, to
 * prevent racing with task migration.  If it might be, use @lrucare,
 * which only works on pages that are already on the LRU.
 *
 * Use mem_cgroup_cancel_charge() to cancel the transaction instead.
 */
void mem_cgroup_commit_charge(struct page *page, struct mem_cgroup *memcg,
			      bool lrucare)
{
	unsigned int nr_pages = 1;

	VM_BUG_ON_PAGE(!page->mapping, page);
	VM_BUG_ON_PAGE(PageLRU(page) && !lrucare, page);

	if (mem_cgroup_disabled())
		return;
	/*
	 * Swap faults will attempt to charge the same page multiple
	 * times.  But reuse_swap_page() might have removed the page
	 * from swapcache already, so we can't check PageSwapCache().
	 */
	if (!memcg)
		return;

	commit_charge(page, memcg, lrucare);

	if (PageTransHuge(page)) {
		nr_pages <<= compound_order(page);
		VM_BUG_ON_PAGE(!PageTransHuge(page), page);
	}

	local_irq_disable();
	mem_cgroup_charge_statistics(memcg, page, nr_pages);
	memcg_check_events(memcg, page);
	local_irq_enable();

	if (do_swap_account && PageSwapCache(page)) {
		swp_entry_t entry = { .val = page_private(page) };
		/*
		 * The swap entry might not get freed for a long time,
		 * let's not wait for it.  The page already received a
		 * memory+swap charge, drop the swap entry duplicate.
		 */
		mem_cgroup_uncharge_swap(entry);
	}
}

/**
 * mem_cgroup_cancel_charge - cancel a page charge
 * @page: page to charge
 * @memcg: memcg to charge the page to
 *
 * Cancel a charge transaction started by mem_cgroup_try_charge().
 */
void mem_cgroup_cancel_charge(struct page *page, struct mem_cgroup *memcg)
{
	unsigned int nr_pages = 1;

	if (mem_cgroup_disabled())
		return;
	/*
	 * Swap faults will attempt to charge the same page multiple
	 * times.  But reuse_swap_page() might have removed the page
	 * from swapcache already, so we can't check PageSwapCache().
	 */
	if (!memcg)
		return;

	if (PageTransHuge(page)) {
		nr_pages <<= compound_order(page);
		VM_BUG_ON_PAGE(!PageTransHuge(page), page);
	}

	cancel_charge(memcg, nr_pages);
}

static void uncharge_batch(struct mem_cgroup *memcg, unsigned long pgpgout,
			   unsigned long nr_anon, unsigned long nr_file,
			   unsigned long nr_huge, struct page *dummy_page)
{
	unsigned long nr_pages = nr_anon + nr_file;
	unsigned long flags;

	if (!mem_cgroup_is_root(memcg)) {
		page_counter_uncharge(&memcg->memory, nr_pages);
		if (do_swap_account)
			page_counter_uncharge(&memcg->memsw, nr_pages);
		memcg_oom_recover(memcg);
	}

	local_irq_save(flags);
	__this_cpu_sub(memcg->stat->count[MEM_CGROUP_STAT_RSS], nr_anon);
	__this_cpu_sub(memcg->stat->count[MEM_CGROUP_STAT_CACHE], nr_file);
	__this_cpu_sub(memcg->stat->count[MEM_CGROUP_STAT_RSS_HUGE], nr_huge);
	__this_cpu_add(memcg->stat->events[MEM_CGROUP_EVENTS_PGPGOUT], pgpgout);
	__this_cpu_add(memcg->stat->nr_page_events, nr_pages);
	memcg_check_events(memcg, dummy_page);
	local_irq_restore(flags);

	if (!mem_cgroup_is_root(memcg))
		css_put_many(&memcg->css, nr_pages);
}

static void uncharge_list(struct list_head *page_list)
{
	struct mem_cgroup *memcg = NULL;
	unsigned long nr_anon = 0;
	unsigned long nr_file = 0;
	unsigned long nr_huge = 0;
	unsigned long pgpgout = 0;
	struct list_head *next;
	struct page *page;

	next = page_list->next;
	do {
		unsigned int nr_pages = 1;

		page = list_entry(next, struct page, lru);
		next = page->lru.next;

		VM_BUG_ON_PAGE(PageLRU(page), page);
		VM_BUG_ON_PAGE(page_count(page), page);

		if (!page->mem_cgroup)
			continue;

		/*
		 * Nobody should be changing or seriously looking at
		 * page->mem_cgroup at this point, we have fully
		 * exclusive access to the page.  Batch pages belonging
		 * to the same memcg and flush the accumulated counts
		 * whenever the owner changes.
		 */
		if (memcg != page->mem_cgroup) {
			if (memcg) {
				uncharge_batch(memcg, pgpgout, nr_anon, nr_file,
					       nr_huge, page);
				pgpgout = nr_anon = nr_file = nr_huge = 0;
			}
			memcg = page->mem_cgroup;
		}

		if (PageTransHuge(page)) {
			nr_pages <<= compound_order(page);
			VM_BUG_ON_PAGE(!PageTransHuge(page), page);
			nr_huge += nr_pages;
		}

		if (PageAnon(page))
			nr_anon += nr_pages;
		else
			nr_file += nr_pages;

		page->mem_cgroup = NULL;

		pgpgout++;
	} while (next != page_list);

	if (memcg)
		uncharge_batch(memcg, pgpgout, nr_anon, nr_file,
			       nr_huge, page);
}

/**
 * mem_cgroup_uncharge - uncharge a page
 * @page: page to uncharge
 *
 * Uncharge a page previously charged with mem_cgroup_try_charge() and
 * mem_cgroup_commit_charge().
 */
void mem_cgroup_uncharge(struct page *page)
{
	if (mem_cgroup_disabled())
		return;

	/* Don't touch page->lru of any random page, pre-check: */
	if (!page->mem_cgroup)
		return;

	INIT_LIST_HEAD(&page->lru);
	uncharge_list(&page->lru);
}

/**
 * mem_cgroup_uncharge_list - uncharge a list of page
 * @page_list: list of pages to uncharge
 *
 * Uncharge a list of pages previously charged with
 * mem_cgroup_try_charge() and mem_cgroup_commit_charge().
 */
void mem_cgroup_uncharge_list(struct list_head *page_list)
{
	if (mem_cgroup_disabled())
		return;

	if (!list_empty(page_list))
		uncharge_list(page_list);
}
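
/*
 * Example (editor's sketch, not part of the original source): batch
 * freeing in page reclaim, where pages are collected on a local list
 * and uncharged in one pass before going back to the allocator:
 *
 *	mem_cgroup_uncharge_list(&free_pages);
 *	free_hot_cold_page_list(&free_pages, true);
 */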

/**
 * mem_cgroup_migrate - migrate a charge to another page
 * @oldpage: currently charged page
 * @newpage: page to transfer the charge to
 * @lrucare: either or both pages might be on the LRU already
 *
 * Migrate the charge from @oldpage to @newpage.
 *
 * Both pages must be locked, @newpage->mapping must be set up.
 */
void mem_cgroup_migrate(struct page *oldpage, struct page *newpage,
			bool lrucare)
{
	struct mem_cgroup *memcg;
	int isolated;

	VM_BUG_ON_PAGE(!PageLocked(oldpage), oldpage);
	VM_BUG_ON_PAGE(!PageLocked(newpage), newpage);
	VM_BUG_ON_PAGE(!lrucare && PageLRU(oldpage), oldpage);
	VM_BUG_ON_PAGE(!lrucare && PageLRU(newpage), newpage);
	VM_BUG_ON_PAGE(PageAnon(oldpage) != PageAnon(newpage), newpage);
	VM_BUG_ON_PAGE(PageTransHuge(oldpage) != PageTransHuge(newpage),
		       newpage);

	if (mem_cgroup_disabled())
		return;

	/* Page cache replacement: new page already charged? */
	if (newpage->mem_cgroup)
		return;

	/*
	 * Swapcache readahead pages can get migrated before being
	 * charged, and migration from compaction can happen to an
	 * uncharged page when the PFN walker finds a page that
	 * reclaim just put back on the LRU but has not released yet.
	 */
	memcg = oldpage->mem_cgroup;
	if (!memcg)
		return;

	if (lrucare)
		lock_page_lru(oldpage, &isolated);

	oldpage->mem_cgroup = NULL;

	if (lrucare)
		unlock_page_lru(oldpage, isolated);

	commit_charge(newpage, memcg, lrucare);
}
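
/*
 * Example (editor's sketch, not part of the original source): a page
 * cache replacement along the lines of replace_page_cache_page(),
 * with both pages locked and possibly on the LRU:
 *
 *	mem_cgroup_migrate(old, new, true);
 */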

/*
 * subsys_initcall() for memory controller.
 *
 * Some parts like hotcpu_notifier() have to be initialized from this context
 * because of lock dependencies (cgroup_lock -> cpu hotplug) but basically
 * everything that doesn't depend on a specific mem_cgroup structure should
 * be initialized from here.
 */
static int __init mem_cgroup_init(void)
{
	int cpu, node;

	hotcpu_notifier(memcg_cpu_hotplug_callback, 0);

	for_each_possible_cpu(cpu)
		INIT_WORK(&per_cpu_ptr(&memcg_stock, cpu)->work,
			  drain_local_stock);

	for_each_node(node) {
		struct mem_cgroup_tree_per_node *rtpn;
		int zone;

		rtpn = kzalloc_node(sizeof(*rtpn), GFP_KERNEL,
				    node_online(node) ? node : NUMA_NO_NODE);

		for (zone = 0; zone < MAX_NR_ZONES; zone++) {
			struct mem_cgroup_tree_per_zone *rtpz;

			rtpz = &rtpn->rb_tree_per_zone[zone];
			rtpz->rb_root = RB_ROOT;
			spin_lock_init(&rtpz->lock);
		}
		soft_limit_tree.rb_tree_per_node[node] = rtpn;
	}

	return 0;
}
subsys_initcall(mem_cgroup_init);

#ifdef CONFIG_MEMCG_SWAP
/**
 * mem_cgroup_swapout - transfer a memsw charge to swap
 * @page: page whose memsw charge to transfer
 * @entry: swap entry to move the charge to
 *
 * Transfer the memsw charge of @page to @entry.
 */
void mem_cgroup_swapout(struct page *page, swp_entry_t entry)
{
	struct mem_cgroup *memcg;
	unsigned short oldid;

	VM_BUG_ON_PAGE(PageLRU(page), page);
	VM_BUG_ON_PAGE(page_count(page), page);

	if (!do_swap_account)
		return;

	memcg = page->mem_cgroup;

	/* Readahead page, never charged */
	if (!memcg)
		return;

	oldid = swap_cgroup_record(entry, mem_cgroup_id(memcg));
	VM_BUG_ON_PAGE(oldid, page);
	mem_cgroup_swap_statistics(memcg, true);

	page->mem_cgroup = NULL;

	if (!mem_cgroup_is_root(memcg))
		page_counter_uncharge(&memcg->memory, 1);

	/* Statistics are safe here: the caller holds the IRQ-safe
	 * mapping->tree_lock, so interrupts are already disabled. */
	mem_cgroup_charge_statistics(memcg, page, -1);
	memcg_check_events(memcg, page);
}
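
/*
 * Example (editor's sketch, not part of the original source): reclaim
 * calls this just before deleting a page from the swap cache, under
 * the IRQ-safe mapping->tree_lock:
 *
 *	swp_entry_t swap = { .val = page_private(page) };
 *	mem_cgroup_swapout(page, swap);
 *	__delete_from_swap_cache(page);
 */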

/**
 * mem_cgroup_uncharge_swap - uncharge a swap entry
 * @entry: swap entry to uncharge
 *
 * Drop the memsw charge associated with @entry.
 */
void mem_cgroup_uncharge_swap(swp_entry_t entry)
{
	struct mem_cgroup *memcg;
	unsigned short id;

	if (!do_swap_account)
		return;

	id = swap_cgroup_record(entry, 0);
	rcu_read_lock();
	memcg = mem_cgroup_from_id(id);
	if (memcg) {
		if (!mem_cgroup_is_root(memcg))
			page_counter_uncharge(&memcg->memsw, 1);
		mem_cgroup_swap_statistics(memcg, false);
		css_put(&memcg->css);
	}
	rcu_read_unlock();
}

/* remembers the "swapaccount=" boot option */
#ifdef CONFIG_MEMCG_SWAP_ENABLED
static int really_do_swap_account __initdata = 1;
#else
static int really_do_swap_account __initdata;
#endif

static int __init enable_swap_account(char *s)
{
	if (!strcmp(s, "1"))
		really_do_swap_account = 1;
	else if (!strcmp(s, "0"))
		really_do_swap_account = 0;
	return 1;
}
__setup("swapaccount=", enable_swap_account);
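
/*
 * Usage note (editor's addition): swap accounting costs memory and
 * cycles, so it can be toggled on the kernel command line regardless
 * of the CONFIG_MEMCG_SWAP_ENABLED default, e.g.:
 *
 *	swapaccount=0
 */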

static struct cftype memsw_cgroup_files[] = {
	{
		.name = "memsw.usage_in_bytes",
		.private = MEMFILE_PRIVATE(_MEMSWAP, RES_USAGE),
		.read_u64 = mem_cgroup_read_u64,
	},
	{
		.name = "memsw.max_usage_in_bytes",
		.private = MEMFILE_PRIVATE(_MEMSWAP, RES_MAX_USAGE),
		.write = mem_cgroup_reset,
		.read_u64 = mem_cgroup_read_u64,
	},
	{
		.name = "memsw.limit_in_bytes",
		.private = MEMFILE_PRIVATE(_MEMSWAP, RES_LIMIT),
		.write = mem_cgroup_write,
		.read_u64 = mem_cgroup_read_u64,
	},
	{
		.name = "memsw.failcnt",
		.private = MEMFILE_PRIVATE(_MEMSWAP, RES_FAILCNT),
		.write = mem_cgroup_reset,
		.read_u64 = mem_cgroup_read_u64,
	},
	{ },
};

static int __init mem_cgroup_swap_init(void)
{
	if (!mem_cgroup_disabled() && really_do_swap_account) {
		do_swap_account = 1;
		WARN_ON(cgroup_add_legacy_cftypes(&memory_cgrp_subsys,
						  memsw_cgroup_files));
	}
	return 0;
}
subsys_initcall(mem_cgroup_swap_init);

#endif /* CONFIG_MEMCG_SWAP */