linux/mm/memcontrol.c
   1/* memcontrol.c - Memory Controller
   2 *
   3 * Copyright IBM Corporation, 2007
   4 * Author Balbir Singh <balbir@linux.vnet.ibm.com>
   5 *
   6 * Copyright 2007 OpenVZ SWsoft Inc
   7 * Author: Pavel Emelianov <xemul@openvz.org>
   8 *
   9 * Memory thresholds
  10 * Copyright (C) 2009 Nokia Corporation
  11 * Author: Kirill A. Shutemov
  12 *
  13 * Kernel Memory Controller
  14 * Copyright (C) 2012 Parallels Inc. and Google Inc.
  15 * Authors: Glauber Costa and Suleiman Souhlal
  16 *
  17 * This program is free software; you can redistribute it and/or modify
  18 * it under the terms of the GNU General Public License as published by
  19 * the Free Software Foundation; either version 2 of the License, or
  20 * (at your option) any later version.
  21 *
  22 * This program is distributed in the hope that it will be useful,
  23 * but WITHOUT ANY WARRANTY; without even the implied warranty of
  24 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
  25 * GNU General Public License for more details.
  26 */
  27
  28#include <linux/res_counter.h>
  29#include <linux/memcontrol.h>
  30#include <linux/cgroup.h>
  31#include <linux/mm.h>
  32#include <linux/hugetlb.h>
  33#include <linux/pagemap.h>
  34#include <linux/smp.h>
  35#include <linux/page-flags.h>
  36#include <linux/backing-dev.h>
  37#include <linux/bit_spinlock.h>
  38#include <linux/rcupdate.h>
  39#include <linux/limits.h>
  40#include <linux/export.h>
  41#include <linux/mutex.h>
  42#include <linux/rbtree.h>
  43#include <linux/slab.h>
  44#include <linux/swap.h>
  45#include <linux/swapops.h>
  46#include <linux/spinlock.h>
  47#include <linux/eventfd.h>
  48#include <linux/poll.h>
  49#include <linux/sort.h>
  50#include <linux/fs.h>
  51#include <linux/seq_file.h>
  52#include <linux/vmpressure.h>
  53#include <linux/mm_inline.h>
  54#include <linux/page_cgroup.h>
  55#include <linux/cpu.h>
  56#include <linux/oom.h>
  57#include <linux/lockdep.h>
  58#include <linux/file.h>
  59#include "internal.h"
  60#include <net/sock.h>
  61#include <net/ip.h>
  62#include <net/tcp_memcontrol.h>
  63#include "slab.h"
  64
  65#include <asm/uaccess.h>
  66
  67#include <trace/events/vmscan.h>
  68
  69struct cgroup_subsys memory_cgrp_subsys __read_mostly;
  70EXPORT_SYMBOL(memory_cgrp_subsys);
  71
  72#define MEM_CGROUP_RECLAIM_RETRIES      5
  73static struct mem_cgroup *root_mem_cgroup __read_mostly;
  74
  75#ifdef CONFIG_MEMCG_SWAP
  76/* Turned on only when memory cgroup is enabled && really_do_swap_account = 1 */
  77int do_swap_account __read_mostly;
  78
   79/* for remembering the boot option */
  80#ifdef CONFIG_MEMCG_SWAP_ENABLED
  81static int really_do_swap_account __initdata = 1;
  82#else
  83static int really_do_swap_account __initdata;
  84#endif
  85
  86#else
  87#define do_swap_account         0
  88#endif
  89
  90
  91static const char * const mem_cgroup_stat_names[] = {
  92        "cache",
  93        "rss",
  94        "rss_huge",
  95        "mapped_file",
  96        "writeback",
  97        "swap",
  98};
  99
 100enum mem_cgroup_events_index {
 101        MEM_CGROUP_EVENTS_PGPGIN,       /* # of pages paged in */
 102        MEM_CGROUP_EVENTS_PGPGOUT,      /* # of pages paged out */
 103        MEM_CGROUP_EVENTS_PGFAULT,      /* # of page-faults */
 104        MEM_CGROUP_EVENTS_PGMAJFAULT,   /* # of major page-faults */
 105        MEM_CGROUP_EVENTS_NSTATS,
 106};
 107
 108static const char * const mem_cgroup_events_names[] = {
 109        "pgpgin",
 110        "pgpgout",
 111        "pgfault",
 112        "pgmajfault",
 113};
 114
 115static const char * const mem_cgroup_lru_names[] = {
 116        "inactive_anon",
 117        "active_anon",
 118        "inactive_file",
 119        "active_file",
 120        "unevictable",
 121};
 122
 123/*
 124 * Per memcg event counter is incremented at every pagein/pageout. With THP,
  125 * it will be incremented by the number of pages. This counter is used
  126 * to trigger some periodic events. This is straightforward and better
  127 * than using jiffies etc. to handle periodic memcg events.
 128 */
 129enum mem_cgroup_events_target {
 130        MEM_CGROUP_TARGET_THRESH,
 131        MEM_CGROUP_TARGET_SOFTLIMIT,
 132        MEM_CGROUP_TARGET_NUMAINFO,
 133        MEM_CGROUP_NTARGETS,
 134};
 135#define THRESHOLDS_EVENTS_TARGET 128
 136#define SOFTLIMIT_EVENTS_TARGET 1024
 137#define NUMAINFO_EVENTS_TARGET  1024
 138
 139struct mem_cgroup_stat_cpu {
 140        long count[MEM_CGROUP_STAT_NSTATS];
 141        unsigned long events[MEM_CGROUP_EVENTS_NSTATS];
 142        unsigned long nr_page_events;
 143        unsigned long targets[MEM_CGROUP_NTARGETS];
 144};
 145
 146struct mem_cgroup_reclaim_iter {
 147        /*
 148         * last scanned hierarchy member. Valid only if last_dead_count
 149         * matches memcg->dead_count of the hierarchy root group.
 150         */
 151        struct mem_cgroup *last_visited;
 152        int last_dead_count;
 153
 154        /* scan generation, increased every round-trip */
 155        unsigned int generation;
 156};
 157
 158/*
 159 * per-zone information in memory controller.
 160 */
 161struct mem_cgroup_per_zone {
 162        struct lruvec           lruvec;
 163        unsigned long           lru_size[NR_LRU_LISTS];
 164
 165        struct mem_cgroup_reclaim_iter reclaim_iter[DEF_PRIORITY + 1];
 166
 167        struct rb_node          tree_node;      /* RB tree node */
 168        unsigned long long      usage_in_excess;/* Set to the value by which */
 169                                                /* the soft limit is exceeded*/
 170        bool                    on_tree;
 171        struct mem_cgroup       *memcg;         /* Back pointer, we cannot */
 172                                                /* use container_of        */
 173};
 174
 175struct mem_cgroup_per_node {
 176        struct mem_cgroup_per_zone zoneinfo[MAX_NR_ZONES];
 177};
 178
 179/*
 180 * Cgroups above their limits are maintained in a RB-Tree, independent of
 181 * their hierarchy representation
 182 */
 183
 184struct mem_cgroup_tree_per_zone {
 185        struct rb_root rb_root;
 186        spinlock_t lock;
 187};
 188
 189struct mem_cgroup_tree_per_node {
 190        struct mem_cgroup_tree_per_zone rb_tree_per_zone[MAX_NR_ZONES];
 191};
 192
 193struct mem_cgroup_tree {
 194        struct mem_cgroup_tree_per_node *rb_tree_per_node[MAX_NUMNODES];
 195};
 196
 197static struct mem_cgroup_tree soft_limit_tree __read_mostly;
 198
 199struct mem_cgroup_threshold {
 200        struct eventfd_ctx *eventfd;
 201        u64 threshold;
 202};
 203
 204/* For threshold */
 205struct mem_cgroup_threshold_ary {
 206        /* An array index points to threshold just below or equal to usage. */
 207        int current_threshold;
 208        /* Size of entries[] */
 209        unsigned int size;
 210        /* Array of thresholds */
 211        struct mem_cgroup_threshold entries[0];
 212};
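    /*
     * Illustrative sketch (not part of the original file): entries[] is kept
     * sorted by ->threshold in ascending order, so the "just below or equal
     * to usage" index described above can be found by scanning backwards.
     * The helper name is hypothetical; it returns -1 when usage is below the
     * smallest threshold.
     *
     *	static int example_current_threshold(struct mem_cgroup_threshold_ary *t,
     *					     u64 usage)
     *	{
     *		int i;
     *
     *		for (i = t->size - 1; i >= 0; i--)
     *			if (t->entries[i].threshold <= usage)
     *				return i;
     *		return -1;
     *	}
     */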
 213
 214struct mem_cgroup_thresholds {
 215        /* Primary thresholds array */
 216        struct mem_cgroup_threshold_ary *primary;
 217        /*
 218         * Spare threshold array.
 219         * This is needed to make mem_cgroup_unregister_event() "never fail".
 220         * It must be able to store at least primary->size - 1 entries.
 221         */
 222        struct mem_cgroup_threshold_ary *spare;
 223};
 224
 225/* for OOM */
 226struct mem_cgroup_eventfd_list {
 227        struct list_head list;
 228        struct eventfd_ctx *eventfd;
 229};
 230
 231/*
  232 * cgroup_event represents events which userspace wants to receive.
 233 */
 234struct mem_cgroup_event {
 235        /*
 236         * memcg which the event belongs to.
 237         */
 238        struct mem_cgroup *memcg;
 239        /*
 240         * eventfd to signal userspace about the event.
 241         */
 242        struct eventfd_ctx *eventfd;
 243        /*
 244         * Each of these stored in a list by the cgroup.
 245         */
 246        struct list_head list;
 247        /*
 248         * register_event() callback will be used to add new userspace
 249         * waiter for changes related to this event.  Use eventfd_signal()
 250         * on eventfd to send notification to userspace.
 251         */
 252        int (*register_event)(struct mem_cgroup *memcg,
 253                              struct eventfd_ctx *eventfd, const char *args);
 254        /*
 255         * unregister_event() callback will be called when userspace closes
  256         * the eventfd or on cgroup removal.  This callback must be set
  257         * if you want to provide notification functionality.
 258         */
 259        void (*unregister_event)(struct mem_cgroup *memcg,
 260                                 struct eventfd_ctx *eventfd);
 261        /*
 262         * All fields below needed to unregister event when
 263         * userspace closes eventfd.
 264         */
 265        poll_table pt;
 266        wait_queue_head_t *wqh;
 267        wait_queue_t wait;
 268        struct work_struct remove;
 269};
 270
 271static void mem_cgroup_threshold(struct mem_cgroup *memcg);
 272static void mem_cgroup_oom_notify(struct mem_cgroup *memcg);
 273
 274/*
 275 * The memory controller data structure. The memory controller controls both
 276 * page cache and RSS per cgroup. We would eventually like to provide
 277 * statistics based on the statistics developed by Rik Van Riel for clock-pro,
 278 * to help the administrator determine what knobs to tune.
 279 *
 280 * TODO: Add a water mark for the memory controller. Reclaim will begin when
  281 * we hit the water mark. Maybe even add a low water mark, such that
  282 * no reclaim occurs from a cgroup at its low water mark; this is
 283 * a feature that will be implemented much later in the future.
 284 */
 285struct mem_cgroup {
 286        struct cgroup_subsys_state css;
 287        /*
 288         * the counter to account for memory usage
 289         */
 290        struct res_counter res;
 291
 292        /* vmpressure notifications */
 293        struct vmpressure vmpressure;
 294
 295        /* css_online() has been completed */
 296        int initialized;
 297
 298        /*
 299         * the counter to account for mem+swap usage.
 300         */
 301        struct res_counter memsw;
 302
 303        /*
 304         * the counter to account for kernel memory usage.
 305         */
 306        struct res_counter kmem;
 307        /*
 308         * Should the accounting and control be hierarchical, per subtree?
 309         */
 310        bool use_hierarchy;
 311        unsigned long kmem_account_flags; /* See KMEM_ACCOUNTED_*, below */
 312
 313        bool            oom_lock;
 314        atomic_t        under_oom;
 315        atomic_t        oom_wakeups;
 316
 317        int     swappiness;
 318        /* OOM-Killer disable */
 319        int             oom_kill_disable;
 320
 321        /* protect arrays of thresholds */
 322        struct mutex thresholds_lock;
 323
 324        /* thresholds for memory usage. RCU-protected */
 325        struct mem_cgroup_thresholds thresholds;
 326
 327        /* thresholds for mem+swap usage. RCU-protected */
 328        struct mem_cgroup_thresholds memsw_thresholds;
 329
 330        /* For oom notifier event fd */
 331        struct list_head oom_notify;
 332
 333        /*
 334         * Should we move charges of a task when a task is moved into this
 335         * mem_cgroup ? And what type of charges should we move ?
 336         */
 337        unsigned long move_charge_at_immigrate;
 338        /*
 339         * set > 0 if pages under this cgroup are moving to other cgroup.
 340         */
 341        atomic_t        moving_account;
 342        /* taken only while moving_account > 0 */
 343        spinlock_t      move_lock;
 344        /*
 345         * percpu counter.
 346         */
 347        struct mem_cgroup_stat_cpu __percpu *stat;
 348        /*
 349         * used when a cpu is offlined or other synchronizations
 350         * See mem_cgroup_read_stat().
 351         */
 352        struct mem_cgroup_stat_cpu nocpu_base;
 353        spinlock_t pcp_counter_lock;
 354
 355        atomic_t        dead_count;
 356#if defined(CONFIG_MEMCG_KMEM) && defined(CONFIG_INET)
 357        struct cg_proto tcp_mem;
 358#endif
 359#if defined(CONFIG_MEMCG_KMEM)
 360        /* analogous to slab_common's slab_caches list, but per-memcg;
 361         * protected by memcg_slab_mutex */
 362        struct list_head memcg_slab_caches;
 363        /* Index in the kmem_cache->memcg_params->memcg_caches array */
 364        int kmemcg_id;
 365#endif
 366
 367        int last_scanned_node;
 368#if MAX_NUMNODES > 1
 369        nodemask_t      scan_nodes;
 370        atomic_t        numainfo_events;
 371        atomic_t        numainfo_updating;
 372#endif
 373
  374        /* List of events which userspace wants to receive */
 375        struct list_head event_list;
 376        spinlock_t event_list_lock;
 377
 378        struct mem_cgroup_per_node *nodeinfo[0];
 379        /* WARNING: nodeinfo must be the last member here */
 380};
 381
 382/* internal only representation about the status of kmem accounting. */
 383enum {
 384        KMEM_ACCOUNTED_ACTIVE, /* accounted by this cgroup itself */
 385        KMEM_ACCOUNTED_DEAD, /* dead memcg with pending kmem charges */
 386};
 387
 388#ifdef CONFIG_MEMCG_KMEM
 389static inline void memcg_kmem_set_active(struct mem_cgroup *memcg)
 390{
 391        set_bit(KMEM_ACCOUNTED_ACTIVE, &memcg->kmem_account_flags);
 392}
 393
 394static bool memcg_kmem_is_active(struct mem_cgroup *memcg)
 395{
 396        return test_bit(KMEM_ACCOUNTED_ACTIVE, &memcg->kmem_account_flags);
 397}
 398
 399static void memcg_kmem_mark_dead(struct mem_cgroup *memcg)
 400{
 401        /*
 402         * Our caller must use css_get() first, because memcg_uncharge_kmem()
 403         * will call css_put() if it sees the memcg is dead.
 404         */
 405        smp_wmb();
 406        if (test_bit(KMEM_ACCOUNTED_ACTIVE, &memcg->kmem_account_flags))
 407                set_bit(KMEM_ACCOUNTED_DEAD, &memcg->kmem_account_flags);
 408}
 409
 410static bool memcg_kmem_test_and_clear_dead(struct mem_cgroup *memcg)
 411{
 412        return test_and_clear_bit(KMEM_ACCOUNTED_DEAD,
 413                                  &memcg->kmem_account_flags);
 414}
 415#endif
 416
  417/* Stuff for moving charges at task migration. */
 418/*
  419 * Types of charges to be moved. "move_charge_at_immigrate" and
 420 * "immigrate_flags" are treated as a left-shifted bitmap of these types.
 421 */
 422enum move_type {
 423        MOVE_CHARGE_TYPE_ANON,  /* private anonymous page and swap of it */
 424        MOVE_CHARGE_TYPE_FILE,  /* file page(including tmpfs) and swap of it */
 425        NR_MOVE_TYPE,
 426};
 427
 428/* "mc" and its members are protected by cgroup_mutex */
 429static struct move_charge_struct {
 430        spinlock_t        lock; /* for from, to */
 431        struct mem_cgroup *from;
 432        struct mem_cgroup *to;
 433        unsigned long immigrate_flags;
 434        unsigned long precharge;
 435        unsigned long moved_charge;
 436        unsigned long moved_swap;
 437        struct task_struct *moving_task;        /* a task moving charges */
 438        wait_queue_head_t waitq;                /* a waitq for other context */
 439} mc = {
 440        .lock = __SPIN_LOCK_UNLOCKED(mc.lock),
 441        .waitq = __WAIT_QUEUE_HEAD_INITIALIZER(mc.waitq),
 442};
 443
 444static bool move_anon(void)
 445{
 446        return test_bit(MOVE_CHARGE_TYPE_ANON, &mc.immigrate_flags);
 447}
 448
 449static bool move_file(void)
 450{
 451        return test_bit(MOVE_CHARGE_TYPE_FILE, &mc.immigrate_flags);
 452}
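    /*
     * Example of the bitmap described above (illustrative note, not from the
     * original file): writing "1" to memory.move_charge_at_immigrate sets bit
     * MOVE_CHARGE_TYPE_ANON (move anonymous pages and their swap), "2" sets
     * bit MOVE_CHARGE_TYPE_FILE (move file pages, including tmpfs), and "3"
     * sets both; move_anon() and move_file() above test exactly these bits in
     * mc.immigrate_flags.
     */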
 453
 454/*
 455 * Maximum loops in mem_cgroup_hierarchical_reclaim(), used for soft
 456 * limit reclaim to prevent infinite loops, if they ever occur.
 457 */
 458#define MEM_CGROUP_MAX_RECLAIM_LOOPS            100
 459#define MEM_CGROUP_MAX_SOFT_LIMIT_RECLAIM_LOOPS 2
 460
 461enum charge_type {
 462        MEM_CGROUP_CHARGE_TYPE_CACHE = 0,
 463        MEM_CGROUP_CHARGE_TYPE_ANON,
 464        MEM_CGROUP_CHARGE_TYPE_SWAPOUT, /* for accounting swapcache */
 465        MEM_CGROUP_CHARGE_TYPE_DROP,    /* a page was unused swap cache */
 466        NR_CHARGE_TYPE,
 467};
 468
 469/* for encoding cft->private value on file */
 470enum res_type {
 471        _MEM,
 472        _MEMSWAP,
 473        _OOM_TYPE,
 474        _KMEM,
 475};
 476
 477#define MEMFILE_PRIVATE(x, val) ((x) << 16 | (val))
 478#define MEMFILE_TYPE(val)       ((val) >> 16 & 0xffff)
 479#define MEMFILE_ATTR(val)       ((val) & 0xffff)
  480/* Used for OOM notifier */
 481#define OOM_CONTROL             (0)
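    /*
     * Example of the encoding above (illustrative note): a cft->private value
     * built as MEMFILE_PRIVATE(_MEMSWAP, RES_LIMIT) carries the resource type
     * in the upper 16 bits and the attribute in the lower 16 bits, so the file
     * handler later recovers _MEMSWAP via MEMFILE_TYPE(val) and RES_LIMIT via
     * MEMFILE_ATTR(val).
     */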
 482
 483/*
 484 * The memcg_create_mutex will be held whenever a new cgroup is created.
 485 * As a consequence, any change that needs to protect against new child cgroups
 486 * appearing has to hold it as well.
 487 */
 488static DEFINE_MUTEX(memcg_create_mutex);
 489
 490struct mem_cgroup *mem_cgroup_from_css(struct cgroup_subsys_state *s)
 491{
 492        return s ? container_of(s, struct mem_cgroup, css) : NULL;
 493}
 494
 495/* Some nice accessors for the vmpressure. */
 496struct vmpressure *memcg_to_vmpressure(struct mem_cgroup *memcg)
 497{
 498        if (!memcg)
 499                memcg = root_mem_cgroup;
 500        return &memcg->vmpressure;
 501}
 502
 503struct cgroup_subsys_state *vmpressure_to_css(struct vmpressure *vmpr)
 504{
 505        return &container_of(vmpr, struct mem_cgroup, vmpressure)->css;
 506}
 507
 508static inline bool mem_cgroup_is_root(struct mem_cgroup *memcg)
 509{
 510        return (memcg == root_mem_cgroup);
 511}
 512
 513/*
 514 * We restrict the id in the range of [1, 65535], so it can fit into
 515 * an unsigned short.
 516 */
 517#define MEM_CGROUP_ID_MAX       USHRT_MAX
 518
 519static inline unsigned short mem_cgroup_id(struct mem_cgroup *memcg)
 520{
 521        return memcg->css.id;
 522}
 523
 524static inline struct mem_cgroup *mem_cgroup_from_id(unsigned short id)
 525{
 526        struct cgroup_subsys_state *css;
 527
 528        css = css_from_id(id, &memory_cgrp_subsys);
 529        return mem_cgroup_from_css(css);
 530}
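    /*
     * Illustrative usage sketch (assumption, not a caller from this file): a
     * caller that recorded mem_cgroup_id() (e.g. in a swap record) can map the
     * id back to a memcg under RCU, taking a reference before using it outside
     * the RCU section:
     *
     *	rcu_read_lock();
     *	memcg = mem_cgroup_from_id(id);
     *	if (memcg && !css_tryget_online(&memcg->css))
     *		memcg = NULL;
     *	rcu_read_unlock();
     */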
 531
 532/* Writing them here to avoid exposing memcg's inner layout */
 533#if defined(CONFIG_INET) && defined(CONFIG_MEMCG_KMEM)
 534
 535void sock_update_memcg(struct sock *sk)
 536{
 537        if (mem_cgroup_sockets_enabled) {
 538                struct mem_cgroup *memcg;
 539                struct cg_proto *cg_proto;
 540
 541                BUG_ON(!sk->sk_prot->proto_cgroup);
 542
 543                /* Socket cloning can throw us here with sk_cgrp already
  544                 * filled. It won't, however, necessarily happen from
 545                 * process context. So the test for root memcg given
 546                 * the current task's memcg won't help us in this case.
 547                 *
 548                 * Respecting the original socket's memcg is a better
 549                 * decision in this case.
 550                 */
 551                if (sk->sk_cgrp) {
 552                        BUG_ON(mem_cgroup_is_root(sk->sk_cgrp->memcg));
 553                        css_get(&sk->sk_cgrp->memcg->css);
 554                        return;
 555                }
 556
 557                rcu_read_lock();
 558                memcg = mem_cgroup_from_task(current);
 559                cg_proto = sk->sk_prot->proto_cgroup(memcg);
 560                if (!mem_cgroup_is_root(memcg) &&
 561                    memcg_proto_active(cg_proto) &&
 562                    css_tryget_online(&memcg->css)) {
 563                        sk->sk_cgrp = cg_proto;
 564                }
 565                rcu_read_unlock();
 566        }
 567}
 568EXPORT_SYMBOL(sock_update_memcg);
 569
 570void sock_release_memcg(struct sock *sk)
 571{
 572        if (mem_cgroup_sockets_enabled && sk->sk_cgrp) {
 573                struct mem_cgroup *memcg;
 574                WARN_ON(!sk->sk_cgrp->memcg);
 575                memcg = sk->sk_cgrp->memcg;
 576                css_put(&sk->sk_cgrp->memcg->css);
 577        }
 578}
 579
 580struct cg_proto *tcp_proto_cgroup(struct mem_cgroup *memcg)
 581{
 582        if (!memcg || mem_cgroup_is_root(memcg))
 583                return NULL;
 584
 585        return &memcg->tcp_mem;
 586}
 587EXPORT_SYMBOL(tcp_proto_cgroup);
 588
 589static void disarm_sock_keys(struct mem_cgroup *memcg)
 590{
 591        if (!memcg_proto_activated(&memcg->tcp_mem))
 592                return;
 593        static_key_slow_dec(&memcg_socket_limit_enabled);
 594}
 595#else
 596static void disarm_sock_keys(struct mem_cgroup *memcg)
 597{
 598}
 599#endif
 600
 601#ifdef CONFIG_MEMCG_KMEM
 602/*
 603 * This will be the memcg's index in each cache's ->memcg_params->memcg_caches.
 604 * The main reason for not using cgroup id for this:
 605 *  this works better in sparse environments, where we have a lot of memcgs,
  606 *  but only a few kmem-limited. For instance, if we have 200 memcgs
  607 *  and none but the 200th is kmem-limited, we'd have to have a
  608 *  200-entry array for that.
 609 *
 610 * The current size of the caches array is stored in
 611 * memcg_limited_groups_array_size.  It will double each time we have to
 612 * increase it.
 613 */
 614static DEFINE_IDA(kmem_limited_groups);
 615int memcg_limited_groups_array_size;
 616
 617/*
  618 * MIN_SIZE is different from 1, because we would like to avoid going through
 619 * the alloc/free process all the time. In a small machine, 4 kmem-limited
 620 * cgroups is a reasonable guess. In the future, it could be a parameter or
 621 * tunable, but that is strictly not necessary.
 622 *
 623 * MAX_SIZE should be as large as the number of cgrp_ids. Ideally, we could get
 624 * this constant directly from cgroup, but it is understandable that this is
 625 * better kept as an internal representation in cgroup.c. In any case, the
 626 * cgrp_id space is not getting any smaller, and we don't have to necessarily
 627 * increase ours as well if it increases.
 628 */
 629#define MEMCG_CACHES_MIN_SIZE 4
 630#define MEMCG_CACHES_MAX_SIZE MEM_CGROUP_ID_MAX
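    /*
     * Illustrative sketch of the growth policy described above (the helper
     * name is hypothetical): the caches array starts at MEMCG_CACHES_MIN_SIZE
     * and doubles, capped at MEMCG_CACHES_MAX_SIZE, until the new index fits.
     *
     *	static int example_next_array_size(int cur_size, int needed)
     *	{
     *		int size = max(cur_size, MEMCG_CACHES_MIN_SIZE);
     *
     *		while (size < needed)
     *			size <<= 1;
     *		return min(size, MEMCG_CACHES_MAX_SIZE);
     *	}
     */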
 631
 632/*
 633 * A lot of the calls to the cache allocation functions are expected to be
 634 * inlined by the compiler. Since the calls to memcg_kmem_get_cache are
  635 * conditional on this static branch, we'll have to allow modules that do
  636 * kmem_cache_alloc and the like to see this symbol as well.
 637 */
 638struct static_key memcg_kmem_enabled_key;
 639EXPORT_SYMBOL(memcg_kmem_enabled_key);
 640
 641static void memcg_free_cache_id(int id);
 642
 643static void disarm_kmem_keys(struct mem_cgroup *memcg)
 644{
 645        if (memcg_kmem_is_active(memcg)) {
 646                static_key_slow_dec(&memcg_kmem_enabled_key);
 647                memcg_free_cache_id(memcg->kmemcg_id);
 648        }
 649        /*
  650         * This check can't live in the kmem destruction function,
  651         * since the charges will outlive the cgroup.
 652         */
 653        WARN_ON(res_counter_read_u64(&memcg->kmem, RES_USAGE) != 0);
 654}
 655#else
 656static void disarm_kmem_keys(struct mem_cgroup *memcg)
 657{
 658}
 659#endif /* CONFIG_MEMCG_KMEM */
 660
 661static void disarm_static_keys(struct mem_cgroup *memcg)
 662{
 663        disarm_sock_keys(memcg);
 664        disarm_kmem_keys(memcg);
 665}
 666
 667static void drain_all_stock_async(struct mem_cgroup *memcg);
 668
 669static struct mem_cgroup_per_zone *
 670mem_cgroup_zone_zoneinfo(struct mem_cgroup *memcg, struct zone *zone)
 671{
 672        int nid = zone_to_nid(zone);
 673        int zid = zone_idx(zone);
 674
 675        return &memcg->nodeinfo[nid]->zoneinfo[zid];
 676}
 677
 678struct cgroup_subsys_state *mem_cgroup_css(struct mem_cgroup *memcg)
 679{
 680        return &memcg->css;
 681}
 682
 683static struct mem_cgroup_per_zone *
 684mem_cgroup_page_zoneinfo(struct mem_cgroup *memcg, struct page *page)
 685{
 686        int nid = page_to_nid(page);
 687        int zid = page_zonenum(page);
 688
 689        return &memcg->nodeinfo[nid]->zoneinfo[zid];
 690}
 691
 692static struct mem_cgroup_tree_per_zone *
 693soft_limit_tree_node_zone(int nid, int zid)
 694{
 695        return &soft_limit_tree.rb_tree_per_node[nid]->rb_tree_per_zone[zid];
 696}
 697
 698static struct mem_cgroup_tree_per_zone *
 699soft_limit_tree_from_page(struct page *page)
 700{
 701        int nid = page_to_nid(page);
 702        int zid = page_zonenum(page);
 703
 704        return &soft_limit_tree.rb_tree_per_node[nid]->rb_tree_per_zone[zid];
 705}
 706
 707static void __mem_cgroup_insert_exceeded(struct mem_cgroup_per_zone *mz,
 708                                         struct mem_cgroup_tree_per_zone *mctz,
 709                                         unsigned long long new_usage_in_excess)
 710{
 711        struct rb_node **p = &mctz->rb_root.rb_node;
 712        struct rb_node *parent = NULL;
 713        struct mem_cgroup_per_zone *mz_node;
 714
 715        if (mz->on_tree)
 716                return;
 717
 718        mz->usage_in_excess = new_usage_in_excess;
 719        if (!mz->usage_in_excess)
 720                return;
 721        while (*p) {
 722                parent = *p;
 723                mz_node = rb_entry(parent, struct mem_cgroup_per_zone,
 724                                        tree_node);
 725                if (mz->usage_in_excess < mz_node->usage_in_excess)
 726                        p = &(*p)->rb_left;
 727                /*
 728                 * We can't avoid mem cgroups that are over their soft
 729                 * limit by the same amount
 730                 */
 731                else if (mz->usage_in_excess >= mz_node->usage_in_excess)
 732                        p = &(*p)->rb_right;
 733        }
 734        rb_link_node(&mz->tree_node, parent, p);
 735        rb_insert_color(&mz->tree_node, &mctz->rb_root);
 736        mz->on_tree = true;
 737}
 738
 739static void __mem_cgroup_remove_exceeded(struct mem_cgroup_per_zone *mz,
 740                                         struct mem_cgroup_tree_per_zone *mctz)
 741{
 742        if (!mz->on_tree)
 743                return;
 744        rb_erase(&mz->tree_node, &mctz->rb_root);
 745        mz->on_tree = false;
 746}
 747
 748static void mem_cgroup_remove_exceeded(struct mem_cgroup_per_zone *mz,
 749                                       struct mem_cgroup_tree_per_zone *mctz)
 750{
 751        unsigned long flags;
 752
 753        spin_lock_irqsave(&mctz->lock, flags);
 754        __mem_cgroup_remove_exceeded(mz, mctz);
 755        spin_unlock_irqrestore(&mctz->lock, flags);
 756}
 757
 758
 759static void mem_cgroup_update_tree(struct mem_cgroup *memcg, struct page *page)
 760{
 761        unsigned long long excess;
 762        struct mem_cgroup_per_zone *mz;
 763        struct mem_cgroup_tree_per_zone *mctz;
 764
 765        mctz = soft_limit_tree_from_page(page);
 766        /*
  767         * Necessary to update all ancestors when hierarchy is used,
  768         * because their event counter is not touched.
 769         */
 770        for (; memcg; memcg = parent_mem_cgroup(memcg)) {
 771                mz = mem_cgroup_page_zoneinfo(memcg, page);
 772                excess = res_counter_soft_limit_excess(&memcg->res);
 773                /*
 774                 * We have to update the tree if mz is on RB-tree or
 775                 * mem is over its softlimit.
 776                 */
 777                if (excess || mz->on_tree) {
 778                        unsigned long flags;
 779
 780                        spin_lock_irqsave(&mctz->lock, flags);
 781                        /* if on-tree, remove it */
 782                        if (mz->on_tree)
 783                                __mem_cgroup_remove_exceeded(mz, mctz);
 784                        /*
 785                         * Insert again. mz->usage_in_excess will be updated.
 786                         * If excess is 0, no tree ops.
 787                         */
 788                        __mem_cgroup_insert_exceeded(mz, mctz, excess);
 789                        spin_unlock_irqrestore(&mctz->lock, flags);
 790                }
 791        }
 792}
 793
 794static void mem_cgroup_remove_from_trees(struct mem_cgroup *memcg)
 795{
 796        struct mem_cgroup_tree_per_zone *mctz;
 797        struct mem_cgroup_per_zone *mz;
 798        int nid, zid;
 799
 800        for_each_node(nid) {
 801                for (zid = 0; zid < MAX_NR_ZONES; zid++) {
 802                        mz = &memcg->nodeinfo[nid]->zoneinfo[zid];
 803                        mctz = soft_limit_tree_node_zone(nid, zid);
 804                        mem_cgroup_remove_exceeded(mz, mctz);
 805                }
 806        }
 807}
 808
 809static struct mem_cgroup_per_zone *
 810__mem_cgroup_largest_soft_limit_node(struct mem_cgroup_tree_per_zone *mctz)
 811{
 812        struct rb_node *rightmost = NULL;
 813        struct mem_cgroup_per_zone *mz;
 814
 815retry:
 816        mz = NULL;
 817        rightmost = rb_last(&mctz->rb_root);
 818        if (!rightmost)
 819                goto done;              /* Nothing to reclaim from */
 820
 821        mz = rb_entry(rightmost, struct mem_cgroup_per_zone, tree_node);
 822        /*
  823         * Remove the node now but someone else can add it back;
  824         * we will add it back at the end of reclaim to its correct
 825         * position in the tree.
 826         */
 827        __mem_cgroup_remove_exceeded(mz, mctz);
 828        if (!res_counter_soft_limit_excess(&mz->memcg->res) ||
 829            !css_tryget_online(&mz->memcg->css))
 830                goto retry;
 831done:
 832        return mz;
 833}
 834
 835static struct mem_cgroup_per_zone *
 836mem_cgroup_largest_soft_limit_node(struct mem_cgroup_tree_per_zone *mctz)
 837{
 838        struct mem_cgroup_per_zone *mz;
 839
 840        spin_lock_irq(&mctz->lock);
 841        mz = __mem_cgroup_largest_soft_limit_node(mctz);
 842        spin_unlock_irq(&mctz->lock);
 843        return mz;
 844}
 845
 846/*
 847 * Implementation Note: reading percpu statistics for memcg.
 848 *
  849 * Both vmstat[] and percpu_counter use a threshold and periodic
  850 * synchronization to implement a "quick" read. This is a trade-off between
  851 * reading cost and precision of the value, so we could likewise implement
  852 * periodic synchronization of the counters in memcg.
  853 *
  854 * But this _read() function is currently used for the user interface. The
  855 * user accounts memory usage by memory cgroup and _always_ requires an
  856 * exact value because of that accounting. Even with a quick-and-fuzzy read
  857 * we would still have to visit all online cpus and compute the sum, so for
  858 * now the extra synchronization is not implemented (it exists only for cpu
  859 * hotplug).
  860 *
  861 * If kernel-internal users appear that can make use of an inexact value,
  862 * and reading all per-cpu values becomes a performance bottleneck in some
  863 * common workload, thresholds and synchronization as in vmstat[] can be added.
 864 */
 865static long mem_cgroup_read_stat(struct mem_cgroup *memcg,
 866                                 enum mem_cgroup_stat_index idx)
 867{
 868        long val = 0;
 869        int cpu;
 870
 871        get_online_cpus();
 872        for_each_online_cpu(cpu)
 873                val += per_cpu(memcg->stat->count[idx], cpu);
 874#ifdef CONFIG_HOTPLUG_CPU
 875        spin_lock(&memcg->pcp_counter_lock);
 876        val += memcg->nocpu_base.count[idx];
 877        spin_unlock(&memcg->pcp_counter_lock);
 878#endif
 879        put_online_cpus();
 880        return val;
 881}
 882
 883static unsigned long mem_cgroup_read_events(struct mem_cgroup *memcg,
 884                                            enum mem_cgroup_events_index idx)
 885{
 886        unsigned long val = 0;
 887        int cpu;
 888
 889        get_online_cpus();
 890        for_each_online_cpu(cpu)
 891                val += per_cpu(memcg->stat->events[idx], cpu);
 892#ifdef CONFIG_HOTPLUG_CPU
 893        spin_lock(&memcg->pcp_counter_lock);
 894        val += memcg->nocpu_base.events[idx];
 895        spin_unlock(&memcg->pcp_counter_lock);
 896#endif
 897        put_online_cpus();
 898        return val;
 899}
 900
 901static void mem_cgroup_charge_statistics(struct mem_cgroup *memcg,
 902                                         struct page *page,
 903                                         int nr_pages)
 904{
 905        /*
 906         * Here, RSS means 'mapped anon' and anon's SwapCache. Shmem/tmpfs is
 907         * counted as CACHE even if it's on ANON LRU.
 908         */
 909        if (PageAnon(page))
 910                __this_cpu_add(memcg->stat->count[MEM_CGROUP_STAT_RSS],
 911                                nr_pages);
 912        else
 913                __this_cpu_add(memcg->stat->count[MEM_CGROUP_STAT_CACHE],
 914                                nr_pages);
 915
 916        if (PageTransHuge(page))
 917                __this_cpu_add(memcg->stat->count[MEM_CGROUP_STAT_RSS_HUGE],
 918                                nr_pages);
 919
 920        /* pagein of a big page is an event. So, ignore page size */
 921        if (nr_pages > 0)
 922                __this_cpu_inc(memcg->stat->events[MEM_CGROUP_EVENTS_PGPGIN]);
 923        else {
 924                __this_cpu_inc(memcg->stat->events[MEM_CGROUP_EVENTS_PGPGOUT]);
 925                nr_pages = -nr_pages; /* for event */
 926        }
 927
 928        __this_cpu_add(memcg->stat->nr_page_events, nr_pages);
 929}
 930
 931unsigned long mem_cgroup_get_lru_size(struct lruvec *lruvec, enum lru_list lru)
 932{
 933        struct mem_cgroup_per_zone *mz;
 934
 935        mz = container_of(lruvec, struct mem_cgroup_per_zone, lruvec);
 936        return mz->lru_size[lru];
 937}
 938
 939static unsigned long mem_cgroup_node_nr_lru_pages(struct mem_cgroup *memcg,
 940                                                  int nid,
 941                                                  unsigned int lru_mask)
 942{
 943        unsigned long nr = 0;
 944        int zid;
 945
 946        VM_BUG_ON((unsigned)nid >= nr_node_ids);
 947
 948        for (zid = 0; zid < MAX_NR_ZONES; zid++) {
 949                struct mem_cgroup_per_zone *mz;
 950                enum lru_list lru;
 951
 952                for_each_lru(lru) {
 953                        if (!(BIT(lru) & lru_mask))
 954                                continue;
 955                        mz = &memcg->nodeinfo[nid]->zoneinfo[zid];
 956                        nr += mz->lru_size[lru];
 957                }
 958        }
 959        return nr;
 960}
 961
 962static unsigned long mem_cgroup_nr_lru_pages(struct mem_cgroup *memcg,
 963                        unsigned int lru_mask)
 964{
 965        unsigned long nr = 0;
 966        int nid;
 967
 968        for_each_node_state(nid, N_MEMORY)
 969                nr += mem_cgroup_node_nr_lru_pages(memcg, nid, lru_mask);
 970        return nr;
 971}
 972
 973static bool mem_cgroup_event_ratelimit(struct mem_cgroup *memcg,
 974                                       enum mem_cgroup_events_target target)
 975{
 976        unsigned long val, next;
 977
 978        val = __this_cpu_read(memcg->stat->nr_page_events);
 979        next = __this_cpu_read(memcg->stat->targets[target]);
 980        /* from time_after() in jiffies.h */
 981        if ((long)next - (long)val < 0) {
 982                switch (target) {
 983                case MEM_CGROUP_TARGET_THRESH:
 984                        next = val + THRESHOLDS_EVENTS_TARGET;
 985                        break;
 986                case MEM_CGROUP_TARGET_SOFTLIMIT:
 987                        next = val + SOFTLIMIT_EVENTS_TARGET;
 988                        break;
 989                case MEM_CGROUP_TARGET_NUMAINFO:
 990                        next = val + NUMAINFO_EVENTS_TARGET;
 991                        break;
 992                default:
 993                        break;
 994                }
 995                __this_cpu_write(memcg->stat->targets[target], next);
 996                return true;
 997        }
 998        return false;
 999}
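    /*
     * Worked example for the wraparound-safe comparison above: if
     * nr_page_events has wrapped so that val == 5 while the stored target is
     * next == ULONG_MAX - 10, then (long)next - (long)val is still negative,
     * so the target counts as reached, exactly like time_after() with jiffies.
     */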
1000
1001/*
1002 * Check events in order.
1003 *
1004 */
1005static void memcg_check_events(struct mem_cgroup *memcg, struct page *page)
1006{
1007        /* threshold event is triggered in finer grain than soft limit */
1008        if (unlikely(mem_cgroup_event_ratelimit(memcg,
1009                                                MEM_CGROUP_TARGET_THRESH))) {
1010                bool do_softlimit;
1011                bool do_numainfo __maybe_unused;
1012
1013                do_softlimit = mem_cgroup_event_ratelimit(memcg,
1014                                                MEM_CGROUP_TARGET_SOFTLIMIT);
1015#if MAX_NUMNODES > 1
1016                do_numainfo = mem_cgroup_event_ratelimit(memcg,
1017                                                MEM_CGROUP_TARGET_NUMAINFO);
1018#endif
1019                mem_cgroup_threshold(memcg);
1020                if (unlikely(do_softlimit))
1021                        mem_cgroup_update_tree(memcg, page);
1022#if MAX_NUMNODES > 1
1023                if (unlikely(do_numainfo))
1024                        atomic_inc(&memcg->numainfo_events);
1025#endif
1026        }
1027}
1028
1029struct mem_cgroup *mem_cgroup_from_task(struct task_struct *p)
1030{
1031        /*
1032         * mm_update_next_owner() may clear mm->owner to NULL
1033         * if it races with swapoff, page migration, etc.
1034         * So this can be called with p == NULL.
1035         */
1036        if (unlikely(!p))
1037                return NULL;
1038
1039        return mem_cgroup_from_css(task_css(p, memory_cgrp_id));
1040}
1041
1042static struct mem_cgroup *get_mem_cgroup_from_mm(struct mm_struct *mm)
1043{
1044        struct mem_cgroup *memcg = NULL;
1045
1046        rcu_read_lock();
1047        do {
1048                /*
1049                 * Page cache insertions can happen without an
1050                 * actual mm context, e.g. during disk probing
1051                 * on boot, loopback IO, acct() writes etc.
1052                 */
1053                if (unlikely(!mm))
1054                        memcg = root_mem_cgroup;
1055                else {
1056                        memcg = mem_cgroup_from_task(rcu_dereference(mm->owner));
1057                        if (unlikely(!memcg))
1058                                memcg = root_mem_cgroup;
1059                }
1060        } while (!css_tryget_online(&memcg->css));
1061        rcu_read_unlock();
1062        return memcg;
1063}
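    /*
     * Illustrative usage sketch (assumption, not a caller from this file): the
     * returned memcg (root_mem_cgroup included) carries a css reference from
     * css_tryget_online(), so callers pair the call with css_put() when done:
     *
     *	memcg = get_mem_cgroup_from_mm(mm);
     *	... charge against or inspect memcg ...
     *	css_put(&memcg->css);
     */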
1064
1065/*
1066 * Returns a next (in a pre-order walk) alive memcg (with elevated css
1067 * ref. count) or NULL if the whole root's subtree has been visited.
1068 *
1069 * helper function to be used by mem_cgroup_iter
1070 */
1071static struct mem_cgroup *__mem_cgroup_iter_next(struct mem_cgroup *root,
1072                struct mem_cgroup *last_visited)
1073{
1074        struct cgroup_subsys_state *prev_css, *next_css;
1075
1076        prev_css = last_visited ? &last_visited->css : NULL;
1077skip_node:
1078        next_css = css_next_descendant_pre(prev_css, &root->css);
1079
1080        /*
1081         * Even if we found a group we have to make sure it is
1082         * alive. css && !memcg means that the groups should be
1083         * skipped and we should continue the tree walk.
1084         * last_visited css is safe to use because it is
1085         * protected by css_get and the tree walk is rcu safe.
1086         *
1087         * We do not take a reference on the root of the tree walk
1088         * because we might race with the root removal when it would
1089         * be the only node in the iterated hierarchy and mem_cgroup_iter
1090         * would end up in an endless loop because it expects that at
1091         * least one valid node will be returned. Root cannot disappear
1092         * because caller of the iterator should hold it already so
1093         * skipping css reference should be safe.
1094         */
1095        if (next_css) {
1096                struct mem_cgroup *memcg = mem_cgroup_from_css(next_css);
1097
1098                if (next_css == &root->css)
1099                        return memcg;
1100
1101                if (css_tryget_online(next_css)) {
1102                        /*
1103                         * Make sure the memcg is initialized:
1104                 * mem_cgroup_css_online() orders the
1105                         * initialization against setting the flag.
1106                         */
1107                        if (smp_load_acquire(&memcg->initialized))
1108                                return memcg;
1109                        css_put(next_css);
1110                }
1111
1112                prev_css = next_css;
1113                goto skip_node;
1114        }
1115
1116        return NULL;
1117}
1118
1119static void mem_cgroup_iter_invalidate(struct mem_cgroup *root)
1120{
1121        /*
1122         * When a group in the hierarchy below root is destroyed, the
1123         * hierarchy iterator can no longer be trusted since it might
1124         * have pointed to the destroyed group.  Invalidate it.
1125         */
1126        atomic_inc(&root->dead_count);
1127}
1128
1129static struct mem_cgroup *
1130mem_cgroup_iter_load(struct mem_cgroup_reclaim_iter *iter,
1131                     struct mem_cgroup *root,
1132                     int *sequence)
1133{
1134        struct mem_cgroup *position = NULL;
1135        /*
1136         * A cgroup destruction happens in two stages: offlining and
1137         * release.  They are separated by an RCU grace period.
1138         *
1139         * If the iterator is valid, we may still race with an
1140         * offlining.  The RCU lock ensures the object won't be
1141         * released, tryget will fail if we lost the race.
1142         * released; tryget will fail if we lost the race.
1143        *sequence = atomic_read(&root->dead_count);
1144        if (iter->last_dead_count == *sequence) {
1145                smp_rmb();
1146                position = iter->last_visited;
1147
1148                /*
1149                 * We cannot take a reference to root because we might race
1150                 * with root removal and returning NULL would end up in
1151                 * an endless loop on the iterator user level when root
1152                 * would be returned all the time.
1153                 */
1154                if (position && position != root &&
1155                    !css_tryget_online(&position->css))
1156                        position = NULL;
1157        }
1158        return position;
1159}
1160
1161static void mem_cgroup_iter_update(struct mem_cgroup_reclaim_iter *iter,
1162                                   struct mem_cgroup *last_visited,
1163                                   struct mem_cgroup *new_position,
1164                                   struct mem_cgroup *root,
1165                                   int sequence)
1166{
1167        /* root reference counting symmetric to mem_cgroup_iter_load */
1168        if (last_visited && last_visited != root)
1169                css_put(&last_visited->css);
1170        /*
1171         * We store the sequence count from the time @last_visited was
1172         * loaded successfully instead of rereading it here so that we
1173         * don't lose destruction events in between.  We could have
1174         * raced with the destruction of @new_position after all.
1175         */
1176        iter->last_visited = new_position;
1177        smp_wmb();
1178        iter->last_dead_count = sequence;
1179}
1180
1181/**
1182 * mem_cgroup_iter - iterate over memory cgroup hierarchy
1183 * @root: hierarchy root
1184 * @prev: previously returned memcg, NULL on first invocation
1185 * @reclaim: cookie for shared reclaim walks, NULL for full walks
1186 *
1187 * Returns references to children of the hierarchy below @root, or
1188 * @root itself, or %NULL after a full round-trip.
1189 *
1190 * Caller must pass the return value in @prev on subsequent
1191 * invocations for reference counting, or use mem_cgroup_iter_break()
1192 * to cancel a hierarchy walk before the round-trip is complete.
1193 *
1194 * Reclaimers can specify a zone and a priority level in @reclaim to
1195 * divide up the memcgs in the hierarchy among all concurrent
1196 * reclaimers operating on the same zone and priority.
1197 */
1198struct mem_cgroup *mem_cgroup_iter(struct mem_cgroup *root,
1199                                   struct mem_cgroup *prev,
1200                                   struct mem_cgroup_reclaim_cookie *reclaim)
1201{
1202        struct mem_cgroup *memcg = NULL;
1203        struct mem_cgroup *last_visited = NULL;
1204
1205        if (mem_cgroup_disabled())
1206                return NULL;
1207
1208        if (!root)
1209                root = root_mem_cgroup;
1210
1211        if (prev && !reclaim)
1212                last_visited = prev;
1213
1214        if (!root->use_hierarchy && root != root_mem_cgroup) {
1215                if (prev)
1216                        goto out_css_put;
1217                return root;
1218        }
1219
1220        rcu_read_lock();
1221        while (!memcg) {
1222                struct mem_cgroup_reclaim_iter *uninitialized_var(iter);
1223                int uninitialized_var(seq);
1224
1225                if (reclaim) {
1226                        struct mem_cgroup_per_zone *mz;
1227
1228                        mz = mem_cgroup_zone_zoneinfo(root, reclaim->zone);
1229                        iter = &mz->reclaim_iter[reclaim->priority];
1230                        if (prev && reclaim->generation != iter->generation) {
1231                                iter->last_visited = NULL;
1232                                goto out_unlock;
1233                        }
1234
1235                        last_visited = mem_cgroup_iter_load(iter, root, &seq);
1236                }
1237
1238                memcg = __mem_cgroup_iter_next(root, last_visited);
1239
1240                if (reclaim) {
1241                        mem_cgroup_iter_update(iter, last_visited, memcg, root,
1242                                        seq);
1243
1244                        if (!memcg)
1245                                iter->generation++;
1246                        else if (!prev && memcg)
1247                                reclaim->generation = iter->generation;
1248                }
1249
1250                if (prev && !memcg)
1251                        goto out_unlock;
1252        }
1253out_unlock:
1254        rcu_read_unlock();
1255out_css_put:
1256        if (prev && prev != root)
1257                css_put(&prev->css);
1258
1259        return memcg;
1260}
1261
1262/**
1263 * mem_cgroup_iter_break - abort a hierarchy walk prematurely
1264 * @root: hierarchy root
1265 * @prev: last visited hierarchy member as returned by mem_cgroup_iter()
1266 */
1267void mem_cgroup_iter_break(struct mem_cgroup *root,
1268                           struct mem_cgroup *prev)
1269{
1270        if (!root)
1271                root = root_mem_cgroup;
1272        if (prev && prev != root)
1273                css_put(&prev->css);
1274}
1275
1276/*
1277 * Iteration constructs for visiting all cgroups (under a tree).  If
1278 * loops are exited prematurely (break), mem_cgroup_iter_break() must
1279 * be used for reference counting.
1280 */
1281#define for_each_mem_cgroup_tree(iter, root)            \
1282        for (iter = mem_cgroup_iter(root, NULL, NULL);  \
1283             iter != NULL;                              \
1284             iter = mem_cgroup_iter(root, iter, NULL))
1285
1286#define for_each_mem_cgroup(iter)                       \
1287        for (iter = mem_cgroup_iter(NULL, NULL, NULL);  \
1288             iter != NULL;                              \
1289             iter = mem_cgroup_iter(NULL, iter, NULL))
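    /*
     * Illustrative usage sketch (hypothetical caller; should_stop() stands in
     * for any early-exit condition): breaking out of the loop must drop the
     * reference held on the current position via mem_cgroup_iter_break(), as
     * noted in the comment above.
     *
     *	struct mem_cgroup *iter;
     *
     *	for_each_mem_cgroup_tree(iter, root) {
     *		if (should_stop(iter)) {
     *			mem_cgroup_iter_break(root, iter);
     *			break;
     *		}
     *	}
     */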
1290
1291void __mem_cgroup_count_vm_event(struct mm_struct *mm, enum vm_event_item idx)
1292{
1293        struct mem_cgroup *memcg;
1294
1295        rcu_read_lock();
1296        memcg = mem_cgroup_from_task(rcu_dereference(mm->owner));
1297        if (unlikely(!memcg))
1298                goto out;
1299
1300        switch (idx) {
1301        case PGFAULT:
1302                this_cpu_inc(memcg->stat->events[MEM_CGROUP_EVENTS_PGFAULT]);
1303                break;
1304        case PGMAJFAULT:
1305                this_cpu_inc(memcg->stat->events[MEM_CGROUP_EVENTS_PGMAJFAULT]);
1306                break;
1307        default:
1308                BUG();
1309        }
1310out:
1311        rcu_read_unlock();
1312}
1313EXPORT_SYMBOL(__mem_cgroup_count_vm_event);
1314
1315/**
1316 * mem_cgroup_zone_lruvec - get the lru list vector for a zone and memcg
1317 * @zone: zone of the wanted lruvec
1318 * @memcg: memcg of the wanted lruvec
1319 *
1320 * Returns the lru list vector holding pages for the given @zone and
1321 * @memcg.  This can be the global zone lruvec, if the memory controller
1322 * is disabled.
1323 */
1324struct lruvec *mem_cgroup_zone_lruvec(struct zone *zone,
1325                                      struct mem_cgroup *memcg)
1326{
1327        struct mem_cgroup_per_zone *mz;
1328        struct lruvec *lruvec;
1329
1330        if (mem_cgroup_disabled()) {
1331                lruvec = &zone->lruvec;
1332                goto out;
1333        }
1334
1335        mz = mem_cgroup_zone_zoneinfo(memcg, zone);
1336        lruvec = &mz->lruvec;
1337out:
1338        /*
1339         * Since a node can be onlined after the mem_cgroup was created,
1340         * we have to be prepared to initialize lruvec->zone here;
1341         * and if offlined then reonlined, we need to reinitialize it.
1342         */
1343        if (unlikely(lruvec->zone != zone))
1344                lruvec->zone = zone;
1345        return lruvec;
1346}
1347
1348/**
1349 * mem_cgroup_page_lruvec - return lruvec for adding an lru page
1350 * @page: the page
1351 * @zone: zone of the page
1352 */
1353struct lruvec *mem_cgroup_page_lruvec(struct page *page, struct zone *zone)
1354{
1355        struct mem_cgroup_per_zone *mz;
1356        struct mem_cgroup *memcg;
1357        struct page_cgroup *pc;
1358        struct lruvec *lruvec;
1359
1360        if (mem_cgroup_disabled()) {
1361                lruvec = &zone->lruvec;
1362                goto out;
1363        }
1364
1365        pc = lookup_page_cgroup(page);
1366        memcg = pc->mem_cgroup;
1367
1368        /*
1369         * Surreptitiously switch any uncharged offlist page to root:
1370         * an uncharged page off lru does nothing to secure
1371         * its former mem_cgroup from sudden removal.
1372         *
1373         * Our caller holds lru_lock, and PageCgroupUsed is updated
1374         * under page_cgroup lock: between them, they make all uses
1375         * of pc->mem_cgroup safe.
1376         */
1377        if (!PageLRU(page) && !PageCgroupUsed(pc) && memcg != root_mem_cgroup)
1378                pc->mem_cgroup = memcg = root_mem_cgroup;
1379
1380        mz = mem_cgroup_page_zoneinfo(memcg, page);
1381        lruvec = &mz->lruvec;
1382out:
1383        /*
1384         * Since a node can be onlined after the mem_cgroup was created,
1385         * we have to be prepared to initialize lruvec->zone here;
1386         * and if offlined then reonlined, we need to reinitialize it.
1387         */
1388        if (unlikely(lruvec->zone != zone))
1389                lruvec->zone = zone;
1390        return lruvec;
1391}
1392
1393/**
1394 * mem_cgroup_update_lru_size - account for adding or removing an lru page
1395 * @lruvec: mem_cgroup per zone lru vector
1396 * @lru: index of lru list the page is sitting on
1397 * @nr_pages: positive when adding or negative when removing
1398 *
1399 * This function must be called when a page is added to or removed from an
1400 * lru list.
1401 */
1402void mem_cgroup_update_lru_size(struct lruvec *lruvec, enum lru_list lru,
1403                                int nr_pages)
1404{
1405        struct mem_cgroup_per_zone *mz;
1406        unsigned long *lru_size;
1407
1408        if (mem_cgroup_disabled())
1409                return;
1410
1411        mz = container_of(lruvec, struct mem_cgroup_per_zone, lruvec);
1412        lru_size = mz->lru_size + lru;
1413        *lru_size += nr_pages;
1414        VM_BUG_ON((long)(*lru_size) < 0);
1415}
1416
1417/*
1418 * Checks whether the given memcg is root_memcg itself or lies within
1419 * root_memcg's hierarchy subtree.
1420 */
1421bool __mem_cgroup_same_or_subtree(const struct mem_cgroup *root_memcg,
1422                                  struct mem_cgroup *memcg)
1423{
1424        if (root_memcg == memcg)
1425                return true;
1426        if (!root_memcg->use_hierarchy || !memcg)
1427                return false;
1428        return cgroup_is_descendant(memcg->css.cgroup, root_memcg->css.cgroup);
1429}
1430
1431static bool mem_cgroup_same_or_subtree(const struct mem_cgroup *root_memcg,
1432                                       struct mem_cgroup *memcg)
1433{
1434        bool ret;
1435
1436        rcu_read_lock();
1437        ret = __mem_cgroup_same_or_subtree(root_memcg, memcg);
1438        rcu_read_unlock();
1439        return ret;
1440}
1441
1442bool task_in_mem_cgroup(struct task_struct *task,
1443                        const struct mem_cgroup *memcg)
1444{
1445        struct mem_cgroup *curr = NULL;
1446        struct task_struct *p;
1447        bool ret;
1448
1449        p = find_lock_task_mm(task);
1450        if (p) {
1451                curr = get_mem_cgroup_from_mm(p->mm);
1452                task_unlock(p);
1453        } else {
1454                /*
1455                 * All threads may have already detached their mm's, but the oom
1456                 * killer still needs to detect if they have already been oom
1457                 * killed to prevent needlessly killing additional tasks.
1458                 */
1459                rcu_read_lock();
1460                curr = mem_cgroup_from_task(task);
1461                if (curr)
1462                        css_get(&curr->css);
1463                rcu_read_unlock();
1464        }
1465        /*
1466         * We should check use_hierarchy of "memcg", not "curr". Checking
1467         * use_hierarchy of "curr" here would make this function return true if
1468         * hierarchy is enabled in "curr" and "curr" is a child of "memcg" in the
1469         * *cgroup* hierarchy (even if use_hierarchy is disabled in "memcg").
1470         */
1471        ret = mem_cgroup_same_or_subtree(memcg, curr);
1472        css_put(&curr->css);
1473        return ret;
1474}
1475
1476int mem_cgroup_inactive_anon_is_low(struct lruvec *lruvec)
1477{
1478        unsigned long inactive_ratio;
1479        unsigned long inactive;
1480        unsigned long active;
1481        unsigned long gb;
1482
1483        inactive = mem_cgroup_get_lru_size(lruvec, LRU_INACTIVE_ANON);
1484        active = mem_cgroup_get_lru_size(lruvec, LRU_ACTIVE_ANON);
1485
1486        gb = (inactive + active) >> (30 - PAGE_SHIFT);
1487        if (gb)
1488                inactive_ratio = int_sqrt(10 * gb);
1489        else
1490                inactive_ratio = 1;
1491
1492        return inactive * inactive_ratio < active;
1493}
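    /*
     * Worked example for the ratio above: with 4GB of anon pages on this
     * lruvec, gb == 4 and inactive_ratio == int_sqrt(40) == 6, so the inactive
     * anon list is considered low while it holds less than roughly 1/7th of
     * the anon pages (i.e. while inactive * 6 < active).
     */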
1494
1495#define mem_cgroup_from_res_counter(counter, member)    \
1496        container_of(counter, struct mem_cgroup, member)
1497
1498/**
1499 * mem_cgroup_margin - calculate chargeable space of a memory cgroup
1500 * @memcg: the memory cgroup
1501 *
1502 * Returns the maximum amount of memory @mem can be charged with, in
1503 * pages.
1504 */
1505static unsigned long mem_cgroup_margin(struct mem_cgroup *memcg)
1506{
1507        unsigned long long margin;
1508
1509        margin = res_counter_margin(&memcg->res);
1510        if (do_swap_account)
1511                margin = min(margin, res_counter_margin(&memcg->memsw));
1512        return margin >> PAGE_SHIFT;
1513}
1514
1515int mem_cgroup_swappiness(struct mem_cgroup *memcg)
1516{
1517        /* root ? */
1518        if (mem_cgroup_disabled() || !memcg->css.parent)
1519                return vm_swappiness;
1520
1521        return memcg->swappiness;
1522}
1523
1524/*
1525 * memcg->moving_account is used for checking possibility that some thread is
1526 * calling move_account(). When a thread on CPU-A starts moving pages under
1527 * a memcg, other threads should check memcg->moving_account under
1528 * rcu_read_lock(), like this:
1529 *
1530 *         CPU-A                                    CPU-B
1531 *                                              rcu_read_lock()
1532 *         memcg->moving_account+1              if (memcg->moving_account)
1533 *                                                   take heavy locks.
1534 *         synchronize_rcu()                    update something.
1535 *                                              rcu_read_unlock()
1536 *         start move here.
1537 */
1538
1539static void mem_cgroup_start_move(struct mem_cgroup *memcg)
1540{
1541        atomic_inc(&memcg->moving_account);
1542        synchronize_rcu();
1543}
1544
1545static void mem_cgroup_end_move(struct mem_cgroup *memcg)
1546{
1547        /*
1548         * Now, mem_cgroup_clear_mc() may call this function with NULL.
1549         * We check for NULL in the callee rather than in the caller.
1550         */
1551        if (memcg)
1552                atomic_dec(&memcg->moving_account);
1553}
1554
1555/*
1556 * A routine for checking whether "mem" is under move_account() or not.
1557 *
1558 * Checks whether a cgroup is mc.from, mc.to, or under the hierarchy of the
1559 * moving cgroups. This is used for waiting out the high memory pressure
1560 * caused by "move".
1561 */
1562static bool mem_cgroup_under_move(struct mem_cgroup *memcg)
1563{
1564        struct mem_cgroup *from;
1565        struct mem_cgroup *to;
1566        bool ret = false;
1567        /*
1568         * Unlike task_move routines, we access mc.to, mc.from not under
1569         * mutual exclusion by cgroup_mutex. Here, we take spinlock instead.
1570         */
1571        spin_lock(&mc.lock);
1572        from = mc.from;
1573        to = mc.to;
1574        if (!from)
1575                goto unlock;
1576
1577        ret = mem_cgroup_same_or_subtree(memcg, from)
1578                || mem_cgroup_same_or_subtree(memcg, to);
1579unlock:
1580        spin_unlock(&mc.lock);
1581        return ret;
1582}
1583
1584static bool mem_cgroup_wait_acct_move(struct mem_cgroup *memcg)
1585{
1586        if (mc.moving_task && current != mc.moving_task) {
1587                if (mem_cgroup_under_move(memcg)) {
1588                        DEFINE_WAIT(wait);
1589                        prepare_to_wait(&mc.waitq, &wait, TASK_INTERRUPTIBLE);
1590                        /* moving charge context might have finished. */
1591                        if (mc.moving_task)
1592                                schedule();
1593                        finish_wait(&mc.waitq, &wait);
1594                        return true;
1595                }
1596        }
1597        return false;
1598}
1599
1600/*
1601 * Take this lock when
1602 * - code tries to modify a page's memcg while it's USED.
1603 * - code tries to modify page state accounting in a memcg.
1604 */
1605static void move_lock_mem_cgroup(struct mem_cgroup *memcg,
1606                                  unsigned long *flags)
1607{
1608        spin_lock_irqsave(&memcg->move_lock, *flags);
1609}
1610
1611static void move_unlock_mem_cgroup(struct mem_cgroup *memcg,
1612                                unsigned long *flags)
1613{
1614        spin_unlock_irqrestore(&memcg->move_lock, *flags);
1615}
1616
1617#define K(x) ((x) << (PAGE_SHIFT-10))
1618/**
1619 * mem_cgroup_print_oom_info: Print OOM information relevant to memory controller.
1620 * @memcg: The memory cgroup that went over limit
1621 * @p: Task that is going to be killed
1622 *
1623 * NOTE: @memcg and @p's mem_cgroup can be different when hierarchy is
1624 * enabled
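     *
     * The output is roughly of the following form (illustrative values only):
     *
     *   Task in /foo killed as a result of limit of /foo
     *   memory: usage 524288kB, limit 524288kB, failcnt 42
     *   Memory cgroup stats for /foo: cache:0KB rss:524288KB ...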
1625 */
1626void mem_cgroup_print_oom_info(struct mem_cgroup *memcg, struct task_struct *p)
1627{
1628        /* oom_info_lock ensures that parallel ooms do not interleave */
1629        static DEFINE_MUTEX(oom_info_lock);
1630        struct mem_cgroup *iter;
1631        unsigned int i;
1632
1633        if (!p)
1634                return;
1635
1636        mutex_lock(&oom_info_lock);
1637        rcu_read_lock();
1638
1639        pr_info("Task in ");
1640        pr_cont_cgroup_path(task_cgroup(p, memory_cgrp_id));
1641        pr_cont(" killed as a result of limit of ");
1642        pr_cont_cgroup_path(memcg->css.cgroup);
1643        pr_cont("\n");
1644
1645        rcu_read_unlock();
1646
1647        pr_info("memory: usage %llukB, limit %llukB, failcnt %llu\n",
1648                res_counter_read_u64(&memcg->res, RES_USAGE) >> 10,
1649                res_counter_read_u64(&memcg->res, RES_LIMIT) >> 10,
1650                res_counter_read_u64(&memcg->res, RES_FAILCNT));
1651        pr_info("memory+swap: usage %llukB, limit %llukB, failcnt %llu\n",
1652                res_counter_read_u64(&memcg->memsw, RES_USAGE) >> 10,
1653                res_counter_read_u64(&memcg->memsw, RES_LIMIT) >> 10,
1654                res_counter_read_u64(&memcg->memsw, RES_FAILCNT));
1655        pr_info("kmem: usage %llukB, limit %llukB, failcnt %llu\n",
1656                res_counter_read_u64(&memcg->kmem, RES_USAGE) >> 10,
1657                res_counter_read_u64(&memcg->kmem, RES_LIMIT) >> 10,
1658                res_counter_read_u64(&memcg->kmem, RES_FAILCNT));
1659
1660        for_each_mem_cgroup_tree(iter, memcg) {
1661                pr_info("Memory cgroup stats for ");
1662                pr_cont_cgroup_path(iter->css.cgroup);
1663                pr_cont(":");
1664
1665                for (i = 0; i < MEM_CGROUP_STAT_NSTATS; i++) {
1666                        if (i == MEM_CGROUP_STAT_SWAP && !do_swap_account)
1667                                continue;
1668                        pr_cont(" %s:%ldKB", mem_cgroup_stat_names[i],
1669                                K(mem_cgroup_read_stat(iter, i)));
1670                }
1671
1672                for (i = 0; i < NR_LRU_LISTS; i++)
1673                        pr_cont(" %s:%luKB", mem_cgroup_lru_names[i],
1674                                K(mem_cgroup_nr_lru_pages(iter, BIT(i))));
1675
1676                pr_cont("\n");
1677        }
1678        mutex_unlock(&oom_info_lock);
1679}
1680
1681/*
1682 * This function returns the number of memcgs under the hierarchy tree.
1683 * Returns 1 (self count) if there are no children.
1684 */
1685static int mem_cgroup_count_children(struct mem_cgroup *memcg)
1686{
1687        int num = 0;
1688        struct mem_cgroup *iter;
1689
1690        for_each_mem_cgroup_tree(iter, memcg)
1691                num++;
1692        return num;
1693}
1694
1695/*
1696 * Return the memory (and swap, if configured) limit for a memcg.
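     *
     * For example (a sketch): with a 1GiB memory limit, 512MiB of total swap,
     * non-zero swappiness and a 1.2GiB memsw limit, this returns
     * min(1GiB + 512MiB, 1.2GiB) = 1.2GiB.  With swappiness == 0, swap is
     * ignored and the plain 1GiB memory limit is returned.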
1697 */
1698static u64 mem_cgroup_get_limit(struct mem_cgroup *memcg)
1699{
1700        u64 limit;
1701
1702        limit = res_counter_read_u64(&memcg->res, RES_LIMIT);
1703
1704        /*
1705         * Do not consider swap space if we cannot swap due to swappiness
1706         */
1707        if (mem_cgroup_swappiness(memcg)) {
1708                u64 memsw;
1709
1710                limit += total_swap_pages << PAGE_SHIFT;
1711                memsw = res_counter_read_u64(&memcg->memsw, RES_LIMIT);
1712
1713                /*
1714                 * If memsw is finite and limits the amount of swap space
1715                 * available to this memcg, return that limit.
1716                 */
1717                limit = min(limit, memsw);
1718        }
1719
1720        return limit;
1721}
1722
1723static void mem_cgroup_out_of_memory(struct mem_cgroup *memcg, gfp_t gfp_mask,
1724                                     int order)
1725{
1726        struct mem_cgroup *iter;
1727        unsigned long chosen_points = 0;
1728        unsigned long totalpages;
1729        unsigned int points = 0;
1730        struct task_struct *chosen = NULL;
1731
1732        /*
1733         * If current has a pending SIGKILL or is exiting, then automatically
1734         * select it.  The goal is to allow it to allocate so that it may
1735         * quickly exit and free its memory.
1736         */
1737        if (fatal_signal_pending(current) || current->flags & PF_EXITING) {
1738                set_thread_flag(TIF_MEMDIE);
1739                return;
1740        }
1741
1742        check_panic_on_oom(CONSTRAINT_MEMCG, gfp_mask, order, NULL);
1743        totalpages = mem_cgroup_get_limit(memcg) >> PAGE_SHIFT ? : 1;
1744        for_each_mem_cgroup_tree(iter, memcg) {
1745                struct css_task_iter it;
1746                struct task_struct *task;
1747
1748                css_task_iter_start(&iter->css, &it);
1749                while ((task = css_task_iter_next(&it))) {
1750                        switch (oom_scan_process_thread(task, totalpages, NULL,
1751                                                        false)) {
1752                        case OOM_SCAN_SELECT:
1753                                if (chosen)
1754                                        put_task_struct(chosen);
1755                                chosen = task;
1756                                chosen_points = ULONG_MAX;
1757                                get_task_struct(chosen);
1758                                /* fall through */
1759                        case OOM_SCAN_CONTINUE:
1760                                continue;
1761                        case OOM_SCAN_ABORT:
1762                                css_task_iter_end(&it);
1763                                mem_cgroup_iter_break(memcg, iter);
1764                                if (chosen)
1765                                        put_task_struct(chosen);
1766                                return;
1767                        case OOM_SCAN_OK:
1768                                break;
1769                        };
1770                        points = oom_badness(task, memcg, NULL, totalpages);
1771                        if (!points || points < chosen_points)
1772                                continue;
1773                        /* Prefer thread group leaders for display purposes */
1774                        if (points == chosen_points &&
1775                            thread_group_leader(chosen))
1776                                continue;
1777
1778                        if (chosen)
1779                                put_task_struct(chosen);
1780                        chosen = task;
1781                        chosen_points = points;
1782                        get_task_struct(chosen);
1783                }
1784                css_task_iter_end(&it);
1785        }
1786
1787        if (!chosen)
1788                return;
1789        points = chosen_points * 1000 / totalpages;
1790        oom_kill_process(chosen, gfp_mask, order, points, totalpages, memcg,
1791                         NULL, "Memory cgroup out of memory");
1792}
1793
1794/**
1795 * test_mem_cgroup_node_reclaimable
1796 * @memcg: the target memcg
1797 * @nid: the node ID to be checked.
1798 * @noswap: specify true here if the user wants file-only information.
1799 *
1800 * This function returns whether the specified memcg contains any
1801 * reclaimable pages on a node. Returns true if there are any reclaimable
1802 * pages in the node.
1803 */
1804static bool test_mem_cgroup_node_reclaimable(struct mem_cgroup *memcg,
1805                int nid, bool noswap)
1806{
1807        if (mem_cgroup_node_nr_lru_pages(memcg, nid, LRU_ALL_FILE))
1808                return true;
1809        if (noswap || !total_swap_pages)
1810                return false;
1811        if (mem_cgroup_node_nr_lru_pages(memcg, nid, LRU_ALL_ANON))
1812                return true;
1813        return false;
1814
1815}
1816#if MAX_NUMNODES > 1
1817
1818/*
1819 * Always updating the nodemask is not very good - even if we have an empty
1820 * list or the wrong list here, we can start from some node and traverse all
1821 * nodes based on the zonelist. So update the list loosely once per 10 secs.
1822 *
1823 */
1824static void mem_cgroup_may_update_nodemask(struct mem_cgroup *memcg)
1825{
1826        int nid;
1827        /*
1828         * numainfo_events > 0 means there was at least NUMAINFO_EVENTS_TARGET
1829         * pagein/pageout changes since the last update.
1830         */
1831        if (!atomic_read(&memcg->numainfo_events))
1832                return;
1833        if (atomic_inc_return(&memcg->numainfo_updating) > 1)
1834                return;
1835
1836        /* make a nodemask where this memcg uses memory from */
1837        memcg->scan_nodes = node_states[N_MEMORY];
1838
1839        for_each_node_mask(nid, node_states[N_MEMORY]) {
1840
1841                if (!test_mem_cgroup_node_reclaimable(memcg, nid, false))
1842                        node_clear(nid, memcg->scan_nodes);
1843        }
1844
1845        atomic_set(&memcg->numainfo_events, 0);
1846        atomic_set(&memcg->numainfo_updating, 0);
1847}
1848
1849/*
1850 * Select a node to start reclaim from. Because all we need is to reduce the
1851 * usage counter, starting from anywhere is OK. Reclaiming memory from the
1852 * current node has both pros and cons.
1853 *
1854 * Freeing memory from the current node means freeing memory from a node which
1855 * we'll use or have used, so it may hurt the LRU; and if several threads hit
1856 * their limits, they will contend on that node. But freeing from a remote
1857 * node costs more for memory reclaim because of memory latency.
1858 *
1859 * Now, we use round-robin. Better algorithm is welcomed.
1860 * Now, we use round-robin. A better algorithm is welcome.
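     *
     * For example (a sketch): with scan_nodes = {0, 2, 5} and
     * last_scanned_node == 2, next_node() picks node 5; on the following
     * call it returns MAX_NUMNODES, so we wrap around via first_node() to
     * node 0.
     */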
1861int mem_cgroup_select_victim_node(struct mem_cgroup *memcg)
1862{
1863        int node;
1864
1865        mem_cgroup_may_update_nodemask(memcg);
1866        node = memcg->last_scanned_node;
1867
1868        node = next_node(node, memcg->scan_nodes);
1869        if (node == MAX_NUMNODES)
1870                node = first_node(memcg->scan_nodes);
1871        /*
1872         * We call this when we hit limit, not when pages are added to LRU.
1873         * No LRU may hold pages because all pages are UNEVICTABLE, or the
1874         * memcg is too small and no pages are on the LRU. In that case,
1875         * we use the current node.
1876         */
1877        if (unlikely(node == MAX_NUMNODES))
1878                node = numa_node_id();
1879
1880        memcg->last_scanned_node = node;
1881        return node;
1882}
1883
1884/*
1885 * Check all nodes for whether they contain reclaimable pages or not.
1886 * For a quick scan, we make use of scan_nodes. This allows us to skip
1887 * unused nodes. But scan_nodes is lazily updated and may not contain
1888 * enough new information, so we need to double check.
1889 */
1890static bool mem_cgroup_reclaimable(struct mem_cgroup *memcg, bool noswap)
1891{
1892        int nid;
1893
1894        /*
1895         * Quick check, making use of scan_nodes.
1896         * We can skip unused nodes.
1897         */
1898        if (!nodes_empty(memcg->scan_nodes)) {
1899                for (nid = first_node(memcg->scan_nodes);
1900                     nid < MAX_NUMNODES;
1901                     nid = next_node(nid, memcg->scan_nodes)) {
1902
1903                        if (test_mem_cgroup_node_reclaimable(memcg, nid, noswap))
1904                                return true;
1905                }
1906        }
1907        /*
1908         * Check rest of nodes.
1909         */
1910        for_each_node_state(nid, N_MEMORY) {
1911                if (node_isset(nid, memcg->scan_nodes))
1912                        continue;
1913                if (test_mem_cgroup_node_reclaimable(memcg, nid, noswap))
1914                        return true;
1915        }
1916        return false;
1917}
1918
1919#else
1920int mem_cgroup_select_victim_node(struct mem_cgroup *memcg)
1921{
1922        return 0;
1923}
1924
1925static bool mem_cgroup_reclaimable(struct mem_cgroup *memcg, bool noswap)
1926{
1927        return test_mem_cgroup_node_reclaimable(memcg, 0, noswap);
1928}
1929#endif
1930
1931static int mem_cgroup_soft_reclaim(struct mem_cgroup *root_memcg,
1932                                   struct zone *zone,
1933                                   gfp_t gfp_mask,
1934                                   unsigned long *total_scanned)
1935{
1936        struct mem_cgroup *victim = NULL;
1937        int total = 0;
1938        int loop = 0;
1939        unsigned long excess;
1940        unsigned long nr_scanned;
1941        struct mem_cgroup_reclaim_cookie reclaim = {
1942                .zone = zone,
1943                .priority = 0,
1944        };
1945
1946        excess = res_counter_soft_limit_excess(&root_memcg->res) >> PAGE_SHIFT;
1947
1948        while (1) {
1949                victim = mem_cgroup_iter(root_memcg, victim, &reclaim);
1950                if (!victim) {
1951                        loop++;
1952                        if (loop >= 2) {
1953                                /*
1954                                 * If we have not been able to reclaim
1955                                 * anything, it might be because there are
1956                                 * no reclaimable pages under this hierarchy
1957                                 */
1958                                if (!total)
1959                                        break;
1960                                /*
1961                                 * We want to do more targeted reclaim.
1962                                 * excess >> 2 is not too excessive, so we do
1963                                 * not reclaim too much, nor so little that we
1964                                 * keep coming back to reclaim from this cgroup.
1965                                 */
1966                                if (total >= (excess >> 2) ||
1967                                        (loop > MEM_CGROUP_MAX_RECLAIM_LOOPS))
1968                                        break;
1969                        }
1970                        continue;
1971                }
1972                if (!mem_cgroup_reclaimable(victim, false))
1973                        continue;
1974                total += mem_cgroup_shrink_node_zone(victim, gfp_mask, false,
1975                                                     zone, &nr_scanned);
1976                *total_scanned += nr_scanned;
1977                if (!res_counter_soft_limit_excess(&root_memcg->res))
1978                        break;
1979        }
1980        mem_cgroup_iter_break(root_memcg, victim);
1981        return total;
1982}
1983
1984#ifdef CONFIG_LOCKDEP
1985static struct lockdep_map memcg_oom_lock_dep_map = {
1986        .name = "memcg_oom_lock",
1987};
1988#endif
1989
1990static DEFINE_SPINLOCK(memcg_oom_lock);
1991
1992/*
1993 * Check whether the OOM killer is already running under our hierarchy.
1994 * If someone else is running it, return false.
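     *
     * A sketch of the protocol below: walk the hierarchy setting
     * iter->oom_lock until we hit a memcg that is already locked; if that
     * happens, walk the hierarchy again in the same order and clear the
     * flags we just set, stopping at the memcg that failed.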
1995 */
1996static bool mem_cgroup_oom_trylock(struct mem_cgroup *memcg)
1997{
1998        struct mem_cgroup *iter, *failed = NULL;
1999
2000        spin_lock(&memcg_oom_lock);
2001
2002        for_each_mem_cgroup_tree(iter, memcg) {
2003                if (iter->oom_lock) {
2004                        /*
2005                         * this subtree of our hierarchy is already locked,
2006                         * so we cannot grant the lock.
2007                         */
2008                        failed = iter;
2009                        mem_cgroup_iter_break(memcg, iter);
2010                        break;
2011                } else
2012                        iter->oom_lock = true;
2013        }
2014
2015        if (failed) {
2016                /*
2017                 * OK, we failed to lock the whole subtree, so we have to
2018                 * clean up what we already set up, up to the failing memcg
2019                 */
2020                for_each_mem_cgroup_tree(iter, memcg) {
2021                        if (iter == failed) {
2022                                mem_cgroup_iter_break(memcg, iter);
2023                                break;
2024                        }
2025                        iter->oom_lock = false;
2026                }
2027        } else
2028                mutex_acquire(&memcg_oom_lock_dep_map, 0, 1, _RET_IP_);
2029
2030        spin_unlock(&memcg_oom_lock);
2031
2032        return !failed;
2033}
2034
2035static void mem_cgroup_oom_unlock(struct mem_cgroup *memcg)
2036{
2037        struct mem_cgroup *iter;
2038
2039        spin_lock(&memcg_oom_lock);
2040        mutex_release(&memcg_oom_lock_dep_map, 1, _RET_IP_);
2041        for_each_mem_cgroup_tree(iter, memcg)
2042                iter->oom_lock = false;
2043        spin_unlock(&memcg_oom_lock);
2044}
2045
2046static void mem_cgroup_mark_under_oom(struct mem_cgroup *memcg)
2047{
2048        struct mem_cgroup *iter;
2049
2050        for_each_mem_cgroup_tree(iter, memcg)
2051                atomic_inc(&iter->under_oom);
2052}
2053
2054static void mem_cgroup_unmark_under_oom(struct mem_cgroup *memcg)
2055{
2056        struct mem_cgroup *iter;
2057
2058        /*
2059         * When a new child is created while the hierarchy is under oom,
2060         * mem_cgroup_oom_lock() may not be called. We have to use
2061         * atomic_add_unless() here.
2062         */
2063        for_each_mem_cgroup_tree(iter, memcg)
2064                atomic_add_unless(&iter->under_oom, -1, 0);
2065}
2066
2067static DECLARE_WAIT_QUEUE_HEAD(memcg_oom_waitq);
2068
2069struct oom_wait_info {
2070        struct mem_cgroup *memcg;
2071        wait_queue_t    wait;
2072};
2073
2074static int memcg_oom_wake_function(wait_queue_t *wait,
2075        unsigned mode, int sync, void *arg)
2076{
2077        struct mem_cgroup *wake_memcg = (struct mem_cgroup *)arg;
2078        struct mem_cgroup *oom_wait_memcg;
2079        struct oom_wait_info *oom_wait_info;
2080
2081        oom_wait_info = container_of(wait, struct oom_wait_info, wait);
2082        oom_wait_memcg = oom_wait_info->memcg;
2083
2084        /*
2085         * Both oom_wait_info->memcg and wake_memcg are stable under us,
2086         * so we can use css_is_ancestor without worrying about RCU.
2087         */
2088        if (!mem_cgroup_same_or_subtree(oom_wait_memcg, wake_memcg)
2089                && !mem_cgroup_same_or_subtree(wake_memcg, oom_wait_memcg))
2090                return 0;
2091        return autoremove_wake_function(wait, mode, sync, arg);
2092}
2093
2094static void memcg_wakeup_oom(struct mem_cgroup *memcg)
2095{
2096        atomic_inc(&memcg->oom_wakeups);
2097        /* for filtering, pass "memcg" as argument. */
2098        __wake_up(&memcg_oom_waitq, TASK_NORMAL, 0, memcg);
2099}
2100
2101static void memcg_oom_recover(struct mem_cgroup *memcg)
2102{
2103        if (memcg && atomic_read(&memcg->under_oom))
2104                memcg_wakeup_oom(memcg);
2105}
2106
2107static void mem_cgroup_oom(struct mem_cgroup *memcg, gfp_t mask, int order)
2108{
2109        if (!current->memcg_oom.may_oom)
2110                return;
2111        /*
2112         * We are in the middle of the charge context here, so we
2113         * don't want to block when potentially sitting on a callstack
2114         * that holds all kinds of filesystem and mm locks.
2115         *
2116         * Also, the caller may handle a failed allocation gracefully
2117         * (like optional page cache readahead) and so an OOM killer
2118         * invocation might not even be necessary.
2119         *
2120         * That's why we don't do anything here except remember the
2121         * OOM context and then deal with it at the end of the page
2122         * fault when the stack is unwound, the locks are released,
2123         * and when we know whether the fault was overall successful.
2124         */
2125        css_get(&memcg->css);
2126        current->memcg_oom.memcg = memcg;
2127        current->memcg_oom.gfp_mask = mask;
2128        current->memcg_oom.order = order;
2129}
2130
2131/**
2132 * mem_cgroup_oom_synchronize - complete memcg OOM handling
2133 * @handle: actually kill/wait or just clean up the OOM state
2134 *
2135 * This has to be called at the end of a page fault if the memcg OOM
2136 * handler was enabled.
2137 *
2138 * Memcg supports userspace OOM handling where failed allocations must
2139 * sleep on a waitqueue until the userspace task resolves the
2140 * situation.  Sleeping directly in the charge context with all kinds
2141 * of locks held is not a good idea, instead we remember an OOM state
2142 * in the task and mem_cgroup_oom_synchronize() has to be called at
2143 * the end of the page fault to complete the OOM handling.
2144 *
2145 * Returns %true if an ongoing memcg OOM situation was detected and
2146 * completed, %false otherwise.
2147 */
2148bool mem_cgroup_oom_synchronize(bool handle)
2149{
2150        struct mem_cgroup *memcg = current->memcg_oom.memcg;
2151        struct oom_wait_info owait;
2152        bool locked;
2153
2154        /* OOM is global, do not handle */
2155        if (!memcg)
2156                return false;
2157
2158        if (!handle)
2159                goto cleanup;
2160
2161        owait.memcg = memcg;
2162        owait.wait.flags = 0;
2163        owait.wait.func = memcg_oom_wake_function;
2164        owait.wait.private = current;
2165        INIT_LIST_HEAD(&owait.wait.task_list);
2166
2167        prepare_to_wait(&memcg_oom_waitq, &owait.wait, TASK_KILLABLE);
2168        mem_cgroup_mark_under_oom(memcg);
2169
2170        locked = mem_cgroup_oom_trylock(memcg);
2171
2172        if (locked)
2173                mem_cgroup_oom_notify(memcg);
2174
2175        if (locked && !memcg->oom_kill_disable) {
2176                mem_cgroup_unmark_under_oom(memcg);
2177                finish_wait(&memcg_oom_waitq, &owait.wait);
2178                mem_cgroup_out_of_memory(memcg, current->memcg_oom.gfp_mask,
2179                                         current->memcg_oom.order);
2180        } else {
2181                schedule();
2182                mem_cgroup_unmark_under_oom(memcg);
2183                finish_wait(&memcg_oom_waitq, &owait.wait);
2184        }
2185
2186        if (locked) {
2187                mem_cgroup_oom_unlock(memcg);
2188                /*
2189                 * There is no guarantee that an OOM-lock contender
2190                 * sees the wakeups triggered by the OOM kill
2191                 * uncharges.  Wake any sleepers explicitly.
2192                 */
2193                memcg_oom_recover(memcg);
2194        }
2195cleanup:
2196        current->memcg_oom.memcg = NULL;
2197        css_put(&memcg->css);
2198        return true;
2199}
2200
2201/**
2202 * mem_cgroup_begin_page_stat - begin a page state statistics transaction
2203 * @page: page that is going to change accounted state
2204 * @locked: &memcg->move_lock slowpath was taken
2205 * @flags: IRQ-state flags for &memcg->move_lock
2206 *
2207 * This function must mark the beginning of an accounted page state
2208 * change to prevent double accounting when the page is concurrently
2209 * being moved to another memcg:
2210 *
2211 *   memcg = mem_cgroup_begin_page_stat(page, &locked, &flags);
2212 *   if (TestClearPageState(page))
2213 *     mem_cgroup_update_page_stat(memcg, state, -1);
2214 *   mem_cgroup_end_page_stat(memcg, locked, flags);
2215 *
2216 * The RCU lock is held throughout the transaction.  The fast path can
2217 * get away without acquiring the memcg->move_lock (@locked is false)
2218 * because page moving starts with an RCU grace period.
2219 *
2220 * The RCU lock also protects the memcg from being freed when the page
2221 * state that is going to change is the only thing preventing the page
2222 * from being uncharged.  E.g. end-writeback clearing PageWriteback(),
2223 * which allows migration to go ahead and uncharge the page before the
2224 * account transaction might be complete.
2225 */
2226struct mem_cgroup *mem_cgroup_begin_page_stat(struct page *page,
2227                                              bool *locked,
2228                                              unsigned long *flags)
2229{
2230        struct mem_cgroup *memcg;
2231        struct page_cgroup *pc;
2232
2233        rcu_read_lock();
2234
2235        if (mem_cgroup_disabled())
2236                return NULL;
2237
2238        pc = lookup_page_cgroup(page);
2239again:
2240        memcg = pc->mem_cgroup;
2241        if (unlikely(!memcg || !PageCgroupUsed(pc)))
2242                return NULL;
2243
2244        *locked = false;
2245        if (atomic_read(&memcg->moving_account) <= 0)
2246                return memcg;
2247
2248        move_lock_mem_cgroup(memcg, flags);
2249        if (memcg != pc->mem_cgroup || !PageCgroupUsed(pc)) {
2250                move_unlock_mem_cgroup(memcg, flags);
2251                goto again;
2252        }
2253        *locked = true;
2254
2255        return memcg;
2256}
2257
2258/**
2259 * mem_cgroup_end_page_stat - finish a page state statistics transaction
2260 * @memcg: the memcg that was accounted against
2261 * @locked: value received from mem_cgroup_begin_page_stat()
2262 * @flags: value received from mem_cgroup_begin_page_stat()
2263 */
2264void mem_cgroup_end_page_stat(struct mem_cgroup *memcg, bool locked,
2265                              unsigned long flags)
2266{
2267        if (memcg && locked)
2268                move_unlock_mem_cgroup(memcg, &flags);
2269
2270        rcu_read_unlock();
2271}
2272
2273/**
2274 * mem_cgroup_update_page_stat - update page state statistics
2275 * @memcg: memcg to account against
2276 * @idx: page state item to account
2277 * @val: number of pages (positive or negative)
2278 *
2279 * See mem_cgroup_begin_page_stat() for locking requirements.
2280 */
2281void mem_cgroup_update_page_stat(struct mem_cgroup *memcg,
2282                                 enum mem_cgroup_stat_index idx, int val)
2283{
2284        VM_BUG_ON(!rcu_read_lock_held());
2285
2286        if (memcg)
2287                this_cpu_add(memcg->stat->count[idx], val);
2288}
2289
2290/*
2291 * Size of the first charge trial. "32" comes from vmscan.c's magic value.
2292 * TODO: bigger machines may need larger batches.
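     *
     * A sketch of the batching behaviour (see consume_stock() and try_charge()
     * below): a one-page charge that misses the stock charges CHARGE_BATCH (32)
     * pages to the res_counter and puts the remaining 31 pages into this cpu's
     * stock, so the next 31 one-page charges on this cpu (same memcg, no
     * intervening drain) are served from the stock without touching the
     * res_counter.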
2293 */
2294#define CHARGE_BATCH    32U
2295struct memcg_stock_pcp {
2296        struct mem_cgroup *cached; /* this is never the root cgroup */
2297        unsigned int nr_pages;
2298        struct work_struct work;
2299        unsigned long flags;
2300#define FLUSHING_CACHED_CHARGE  0
2301};
2302static DEFINE_PER_CPU(struct memcg_stock_pcp, memcg_stock);
2303static DEFINE_MUTEX(percpu_charge_mutex);
2304
2305/**
2306 * consume_stock: Try to consume stocked charge on this cpu.
2307 * @memcg: memcg to consume from.
2308 * @nr_pages: how many pages to charge.
2309 *
2310 * The charges will only happen if @memcg matches the current cpu's memcg
2311 * stock, and at least @nr_pages are available in that stock.  Failure to
2312 * service an allocation will refill the stock.
2313 *
2314 * returns true if successful, false otherwise.
2315 */
2316static bool consume_stock(struct mem_cgroup *memcg, unsigned int nr_pages)
2317{
2318        struct memcg_stock_pcp *stock;
2319        bool ret = true;
2320
2321        if (nr_pages > CHARGE_BATCH)
2322                return false;
2323
2324        stock = &get_cpu_var(memcg_stock);
2325        if (memcg == stock->cached && stock->nr_pages >= nr_pages)
2326                stock->nr_pages -= nr_pages;
2327        else /* need to call res_counter_charge */
2328                ret = false;
2329        put_cpu_var(memcg_stock);
2330        return ret;
2331}
2332
2333/*
2334 * Returns stock cached in percpu to the res_counter and resets cached information.
2335 */
2336static void drain_stock(struct memcg_stock_pcp *stock)
2337{
2338        struct mem_cgroup *old = stock->cached;
2339
2340        if (stock->nr_pages) {
2341                unsigned long bytes = stock->nr_pages * PAGE_SIZE;
2342
2343                res_counter_uncharge(&old->res, bytes);
2344                if (do_swap_account)
2345                        res_counter_uncharge(&old->memsw, bytes);
2346                stock->nr_pages = 0;
2347        }
2348        stock->cached = NULL;
2349}
2350
2351/*
2352 * This must be called with preemption disabled, or by a thread which is
2353 * pinned to the local cpu.
2354 */
2355static void drain_local_stock(struct work_struct *dummy)
2356{
2357        struct memcg_stock_pcp *stock = this_cpu_ptr(&memcg_stock);
2358        drain_stock(stock);
2359        clear_bit(FLUSHING_CACHED_CHARGE, &stock->flags);
2360}
2361
2362static void __init memcg_stock_init(void)
2363{
2364        int cpu;
2365
2366        for_each_possible_cpu(cpu) {
2367                struct memcg_stock_pcp *stock =
2368                                        &per_cpu(memcg_stock, cpu);
2369                INIT_WORK(&stock->work, drain_local_stock);
2370        }
2371}
2372
2373/*
2374 * Cache charges (nr_pages) already taken from the res_counter in the local
2375 * per-cpu area. They will be consumed by consume_stock() later.
2376 */
2377static void refill_stock(struct mem_cgroup *memcg, unsigned int nr_pages)
2378{
2379        struct memcg_stock_pcp *stock = &get_cpu_var(memcg_stock);
2380
2381        if (stock->cached != memcg) { /* reset if necessary */
2382                drain_stock(stock);
2383                stock->cached = memcg;
2384        }
2385        stock->nr_pages += nr_pages;
2386        put_cpu_var(memcg_stock);
2387}
2388
2389/*
2390 * Drains all per-CPU charge caches for the given root_memcg, i.e. the subtree
2391 * of the hierarchy under it. The sync flag says whether we should block until
2392 * the work is done.
2393 */
2394static void drain_all_stock(struct mem_cgroup *root_memcg, bool sync)
2395{
2396        int cpu, curcpu;
2397
2398        /* Notify other cpus that system-wide "drain" is running */
2399        get_online_cpus();
2400        curcpu = get_cpu();
2401        for_each_online_cpu(cpu) {
2402                struct memcg_stock_pcp *stock = &per_cpu(memcg_stock, cpu);
2403                struct mem_cgroup *memcg;
2404
2405                memcg = stock->cached;
2406                if (!memcg || !stock->nr_pages)
2407                        continue;
2408                if (!mem_cgroup_same_or_subtree(root_memcg, memcg))
2409                        continue;
2410                if (!test_and_set_bit(FLUSHING_CACHED_CHARGE, &stock->flags)) {
2411                        if (cpu == curcpu)
2412                                drain_local_stock(&stock->work);
2413                        else
2414                                schedule_work_on(cpu, &stock->work);
2415                }
2416        }
2417        put_cpu();
2418
2419        if (!sync)
2420                goto out;
2421
2422        for_each_online_cpu(cpu) {
2423                struct memcg_stock_pcp *stock = &per_cpu(memcg_stock, cpu);
2424                if (test_bit(FLUSHING_CACHED_CHARGE, &stock->flags))
2425                        flush_work(&stock->work);
2426        }
2427out:
2428        put_online_cpus();
2429}
2430
2431/*
2432 * Tries to drain stocked charges on other cpus. This function is asynchronous
2433 * and just schedules one work item per cpu to drain locally on each cpu. The
2434 * caller can expect some charges to come back to the res_counter later, but
2435 * cannot wait for that to happen.
2436 */
2437static void drain_all_stock_async(struct mem_cgroup *root_memcg)
2438{
2439        /*
2440         * If someone is already draining, avoid adding more kworker runs.
2441         */
2442        if (!mutex_trylock(&percpu_charge_mutex))
2443                return;
2444        drain_all_stock(root_memcg, false);
2445        mutex_unlock(&percpu_charge_mutex);
2446}
2447
2448/* This is a synchronous drain interface. */
2449static void drain_all_stock_sync(struct mem_cgroup *root_memcg)
2450{
2451        /* called when force_empty is called */
2452        mutex_lock(&percpu_charge_mutex);
2453        drain_all_stock(root_memcg, true);
2454        mutex_unlock(&percpu_charge_mutex);
2455}
2456
2457/*
2458 * This function drains the percpu counter values from a DEAD cpu and
2459 * moves them to the local cpu. Note that this function can be preempted.
2460 */
2461static void mem_cgroup_drain_pcp_counter(struct mem_cgroup *memcg, int cpu)
2462{
2463        int i;
2464
2465        spin_lock(&memcg->pcp_counter_lock);
2466        for (i = 0; i < MEM_CGROUP_STAT_NSTATS; i++) {
2467                long x = per_cpu(memcg->stat->count[i], cpu);
2468
2469                per_cpu(memcg->stat->count[i], cpu) = 0;
2470                memcg->nocpu_base.count[i] += x;
2471        }
2472        for (i = 0; i < MEM_CGROUP_EVENTS_NSTATS; i++) {
2473                unsigned long x = per_cpu(memcg->stat->events[i], cpu);
2474
2475                per_cpu(memcg->stat->events[i], cpu) = 0;
2476                memcg->nocpu_base.events[i] += x;
2477        }
2478        spin_unlock(&memcg->pcp_counter_lock);
2479}
2480
2481static int memcg_cpu_hotplug_callback(struct notifier_block *nb,
2482                                        unsigned long action,
2483                                        void *hcpu)
2484{
2485        int cpu = (unsigned long)hcpu;
2486        struct memcg_stock_pcp *stock;
2487        struct mem_cgroup *iter;
2488
2489        if (action == CPU_ONLINE)
2490                return NOTIFY_OK;
2491
2492        if (action != CPU_DEAD && action != CPU_DEAD_FROZEN)
2493                return NOTIFY_OK;
2494
2495        for_each_mem_cgroup(iter)
2496                mem_cgroup_drain_pcp_counter(iter, cpu);
2497
2498        stock = &per_cpu(memcg_stock, cpu);
2499        drain_stock(stock);
2500        return NOTIFY_OK;
2501}
2502
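    /*
     * A rough sketch of the charge path below: try the per-cpu stock first,
     * then the res_counter(s); on failure, fall back to a smaller batch, run
     * direct reclaim, drain other cpus' stocks and retry up to
     * MEM_CGROUP_RECLAIM_RETRIES times before setting up deferred memcg OOM
     * handling.  Returns 0 on success, -ENOMEM on failure, or -EINTR when the
     * charge is bypassed to the root cgroup (e.g. dying tasks or __GFP_NOFAIL
     * allocations).
     */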
2503static int try_charge(struct mem_cgroup *memcg, gfp_t gfp_mask,
2504                      unsigned int nr_pages)
2505{
2506        unsigned int batch = max(CHARGE_BATCH, nr_pages);
2507        int nr_retries = MEM_CGROUP_RECLAIM_RETRIES;
2508        struct mem_cgroup *mem_over_limit;
2509        struct res_counter *fail_res;
2510        unsigned long nr_reclaimed;
2511        unsigned long long size;
2512        bool may_swap = true;
2513        bool drained = false;
2514        int ret = 0;
2515
2516        if (mem_cgroup_is_root(memcg))
2517                goto done;
2518retry:
2519        if (consume_stock(memcg, nr_pages))
2520                goto done;
2521
2522        size = batch * PAGE_SIZE;
2523        if (!do_swap_account ||
2524            !res_counter_charge(&memcg->memsw, size, &fail_res)) {
2525                if (!res_counter_charge(&memcg->res, size, &fail_res))
2526                        goto done_restock;
2527                if (do_swap_account)
2528                        res_counter_uncharge(&memcg->memsw, size);
2529                mem_over_limit = mem_cgroup_from_res_counter(fail_res, res);
2530        } else {
2531                mem_over_limit = mem_cgroup_from_res_counter(fail_res, memsw);
2532                may_swap = false;
2533        }
2534
2535        if (batch > nr_pages) {
2536                batch = nr_pages;
2537                goto retry;
2538        }
2539
2540        /*
2541         * Unlike in global OOM situations, memcg is not in a physical
2542         * memory shortage.  Allow dying and OOM-killed tasks to
2543         * bypass the last charges so that they can exit quickly and
2544         * free their memory.
2545         */
2546        if (unlikely(test_thread_flag(TIF_MEMDIE) ||
2547                     fatal_signal_pending(current) ||
2548                     current->flags & PF_EXITING))
2549                goto bypass;
2550
2551        if (unlikely(task_in_memcg_oom(current)))
2552                goto nomem;
2553
2554        if (!(gfp_mask & __GFP_WAIT))
2555                goto nomem;
2556
2557        nr_reclaimed = try_to_free_mem_cgroup_pages(mem_over_limit, nr_pages,
2558                                                    gfp_mask, may_swap);
2559
2560        if (mem_cgroup_margin(mem_over_limit) >= nr_pages)
2561                goto retry;
2562
2563        if (!drained) {
2564                drain_all_stock_async(mem_over_limit);
2565                drained = true;
2566                goto retry;
2567        }
2568
2569        if (gfp_mask & __GFP_NORETRY)
2570                goto nomem;
2571        /*
2572         * Even though the limit is exceeded at this point, reclaim
2573         * may have been able to free some pages.  Retry the charge
2574         * before killing the task.
2575         *
2576         * Only for regular pages, though: huge pages are rather
2577         * unlikely to succeed so close to the limit, and we fall back
2578         * to regular pages anyway in case of failure.
2579         */
2580        if (nr_reclaimed && nr_pages <= (1 << PAGE_ALLOC_COSTLY_ORDER))
2581                goto retry;
2582        /*
2583         * At task move, charge accounts can be doubly counted. So, it's
2584         * better to wait until the end of task_move if something is going on.
2585         */
2586        if (mem_cgroup_wait_acct_move(mem_over_limit))
2587                goto retry;
2588
2589        if (nr_retries--)
2590                goto retry;
2591
2592        if (gfp_mask & __GFP_NOFAIL)
2593                goto bypass;
2594
2595        if (fatal_signal_pending(current))
2596                goto bypass;
2597
2598        mem_cgroup_oom(mem_over_limit, gfp_mask, get_order(nr_pages));
2599nomem:
2600        if (!(gfp_mask & __GFP_NOFAIL))
2601                return -ENOMEM;
2602bypass:
2603        return -EINTR;
2604
2605done_restock:
2606        if (batch > nr_pages)
2607                refill_stock(memcg, batch - nr_pages);
2608done:
2609        return ret;
2610}
2611
2612static void cancel_charge(struct mem_cgroup *memcg, unsigned int nr_pages)
2613{
2614        unsigned long bytes = nr_pages * PAGE_SIZE;
2615
2616        if (mem_cgroup_is_root(memcg))
2617                return;
2618
2619        res_counter_uncharge(&memcg->res, bytes);
2620        if (do_swap_account)
2621                res_counter_uncharge(&memcg->memsw, bytes);
2622}
2623
2624/*
2625 * Cancel charges in this cgroup; this doesn't propagate to the parent cgroup.
2626 * This is useful when moving usage to the parent cgroup.
2627 */
2628static void __mem_cgroup_cancel_local_charge(struct mem_cgroup *memcg,
2629                                        unsigned int nr_pages)
2630{
2631        unsigned long bytes = nr_pages * PAGE_SIZE;
2632
2633        if (mem_cgroup_is_root(memcg))
2634                return;
2635
2636        res_counter_uncharge_until(&memcg->res, memcg->res.parent, bytes);
2637        if (do_swap_account)
2638                res_counter_uncharge_until(&memcg->memsw,
2639                                                memcg->memsw.parent, bytes);
2640}
2641
2642/*
2643 * A helper function to get a mem_cgroup from an ID. Must be called under
2644 * rcu_read_lock().  The caller is responsible for calling
2645 * css_tryget_online() if the mem_cgroup is used for charging. (Dropping a
2646 * refcnt from swap can be called against a removed memcg.)
2647 */
2648static struct mem_cgroup *mem_cgroup_lookup(unsigned short id)
2649{
2650        /* ID 0 is unused ID */
2651        if (!id)
2652                return NULL;
2653        return mem_cgroup_from_id(id);
2654}
2655
2656/*
2657 * try_get_mem_cgroup_from_page - look up page's memcg association
2658 * @page: the page
2659 *
2660 * Look up, get a css reference, and return the memcg that owns @page.
2661 *
2662 * The page must be locked to prevent racing with swap-in and page
2663 * cache charges.  If coming from an unlocked page table, the caller
2664 * must ensure the page is on the LRU or this can race with charging.
2665 */
2666struct mem_cgroup *try_get_mem_cgroup_from_page(struct page *page)
2667{
2668        struct mem_cgroup *memcg = NULL;
2669        struct page_cgroup *pc;
2670        unsigned short id;
2671        swp_entry_t ent;
2672
2673        VM_BUG_ON_PAGE(!PageLocked(page), page);
2674
2675        pc = lookup_page_cgroup(page);
2676        if (PageCgroupUsed(pc)) {
2677                memcg = pc->mem_cgroup;
2678                if (memcg && !css_tryget_online(&memcg->css))
2679                        memcg = NULL;
2680        } else if (PageSwapCache(page)) {
2681                ent.val = page_private(page);
2682                id = lookup_swap_cgroup_id(ent);
2683                rcu_read_lock();
2684                memcg = mem_cgroup_lookup(id);
2685                if (memcg && !css_tryget_online(&memcg->css))
2686                        memcg = NULL;
2687                rcu_read_unlock();
2688        }
2689        return memcg;
2690}
2691
2692static void lock_page_lru(struct page *page, int *isolated)
2693{
2694        struct zone *zone = page_zone(page);
2695
2696        spin_lock_irq(&zone->lru_lock);
2697        if (PageLRU(page)) {
2698                struct lruvec *lruvec;
2699
2700                lruvec = mem_cgroup_page_lruvec(page, zone);
2701                ClearPageLRU(page);
2702                del_page_from_lru_list(page, lruvec, page_lru(page));
2703                *isolated = 1;
2704        } else
2705                *isolated = 0;
2706}
2707
2708static void unlock_page_lru(struct page *page, int isolated)
2709{
2710        struct zone *zone = page_zone(page);
2711
2712        if (isolated) {
2713                struct lruvec *lruvec;
2714
2715                lruvec = mem_cgroup_page_lruvec(page, zone);
2716                VM_BUG_ON_PAGE(PageLRU(page), page);
2717                SetPageLRU(page);
2718                add_page_to_lru_list(page, lruvec, page_lru(page));
2719        }
2720        spin_unlock_irq(&zone->lru_lock);
2721}
2722
2723static void commit_charge(struct page *page, struct mem_cgroup *memcg,
2724                          bool lrucare)
2725{
2726        struct page_cgroup *pc = lookup_page_cgroup(page);
2727        int isolated;
2728
2729        VM_BUG_ON_PAGE(PageCgroupUsed(pc), page);
2730        /*
2731         * we don't need page_cgroup_lock for tail pages, because they are not
2732         * accessed by any other context at this point.
2733         */
2734
2735        /*
2736         * In some cases, SwapCache and FUSE(splice_buf->radixtree), the page
2737         * may already be on some other mem_cgroup's LRU.  Take care of it.
2738         */
2739        if (lrucare)
2740                lock_page_lru(page, &isolated);
2741
2742        /*
2743         * Nobody should be changing or seriously looking at
2744         * pc->mem_cgroup and pc->flags at this point:
2745         *
2746         * - the page is uncharged
2747         *
2748         * - the page is off-LRU
2749         *
2750         * - an anonymous fault has exclusive page access, except for
2751         *   a locked page table
2752         *
2753         * - a page cache insertion, a swapin fault, or a migration
2754         *   have the page locked
2755         */
2756        pc->mem_cgroup = memcg;
2757        pc->flags = PCG_USED | PCG_MEM | (do_swap_account ? PCG_MEMSW : 0);
2758
2759        if (lrucare)
2760                unlock_page_lru(page, isolated);
2761}
2762
2763static DEFINE_MUTEX(set_limit_mutex);
2764
2765#ifdef CONFIG_MEMCG_KMEM
2766/*
2767 * The memcg_slab_mutex is held whenever a per memcg kmem cache is created or
2768 * destroyed. It protects memcg_caches arrays and memcg_slab_caches lists.
2769 */
2770static DEFINE_MUTEX(memcg_slab_mutex);
2771
2772static DEFINE_MUTEX(activate_kmem_mutex);
2773
2774/*
2775 * This is a bit cumbersome, but it is rarely used and avoids a backpointer
2776 * in the memcg_cache_params struct.
2777 */
2778static struct kmem_cache *memcg_params_to_cache(struct memcg_cache_params *p)
2779{
2780        struct kmem_cache *cachep;
2781
2782        VM_BUG_ON(p->is_root_cache);
2783        cachep = p->root_cache;
2784        return cache_from_memcg_idx(cachep, memcg_cache_id(p->memcg));
2785}
2786
2787#ifdef CONFIG_SLABINFO
2788static int mem_cgroup_slabinfo_read(struct seq_file *m, void *v)
2789{
2790        struct mem_cgroup *memcg = mem_cgroup_from_css(seq_css(m));
2791        struct memcg_cache_params *params;
2792
2793        if (!memcg_kmem_is_active(memcg))
2794                return -EIO;
2795
2796        print_slabinfo_header(m);
2797
2798        mutex_lock(&memcg_slab_mutex);
2799        list_for_each_entry(params, &memcg->memcg_slab_caches, list)
2800                cache_show(memcg_params_to_cache(params), m);
2801        mutex_unlock(&memcg_slab_mutex);
2802
2803        return 0;
2804}
2805#endif
2806
2807static int memcg_charge_kmem(struct mem_cgroup *memcg, gfp_t gfp, u64 size)
2808{
2809        struct res_counter *fail_res;
2810        int ret = 0;
2811
2812        ret = res_counter_charge(&memcg->kmem, size, &fail_res);
2813        if (ret)
2814                return ret;
2815
2816        ret = try_charge(memcg, gfp, size >> PAGE_SHIFT);
2817        if (ret == -EINTR)  {
2818                /*
2819                 * try_charge() chose to bypass to root due to OOM kill or
2820                 * fatal signal.  Since our only options are to either fail
2821                 * the allocation or charge it to this cgroup, do it as a
2822                 * temporary condition. But we can't fail. From a kmem/slab
2823                 * perspective, the cache has already been selected, by
2824                 * mem_cgroup_kmem_get_cache(), so it is too late to change
2825                 * our minds.
2826                 *
2827                 * This condition will only trigger if the task entered
2828                 * memcg_charge_kmem in a sane state, but was OOM-killed
2829                 * during try_charge() above. Tasks that were already dying
2830                 * when the allocation triggers should have been already
2831                 * directed to the root cgroup in memcontrol.h
2832                 */
2833                res_counter_charge_nofail(&memcg->res, size, &fail_res);
2834                if (do_swap_account)
2835                        res_counter_charge_nofail(&memcg->memsw, size,
2836                                                  &fail_res);
2837                ret = 0;
2838        } else if (ret)
2839                res_counter_uncharge(&memcg->kmem, size);
2840
2841        return ret;
2842}
2843
2844static void memcg_uncharge_kmem(struct mem_cgroup *memcg, u64 size)
2845{
2846        res_counter_uncharge(&memcg->res, size);
2847        if (do_swap_account)
2848                res_counter_uncharge(&memcg->memsw, size);
2849
2850        /* Not down to 0 */
2851        if (res_counter_uncharge(&memcg->kmem, size))
2852                return;
2853
2854        /*
2855         * Releases a reference taken in kmem_cgroup_css_offline in case
2856         * this last uncharge is racing with the offlining code or it is
2857         * outliving the memcg existence.
2858         *
2859         * The memory barrier imposed by test&clear is paired with the
2860         * explicit one in memcg_kmem_mark_dead().
2861         */
2862        if (memcg_kmem_test_and_clear_dead(memcg))
2863                css_put(&memcg->css);
2864}
2865
2866/*
2867 * helper for accessing a memcg's index. It will be used as an index in the
2868 * child cache array in kmem_cache, and also to derive its name. This function
2869 * will return -1 when this is not a kmem-limited memcg.
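     *
     * For example, memcg_register_cache() below stores the per-memcg clone at
     * root_cache->memcg_params->memcg_caches[id], and memcg_params_to_cache()
     * uses the same id to map a memcg_cache_params back to its kmem_cache.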
2870 */
2871int memcg_cache_id(struct mem_cgroup *memcg)
2872{
2873        return memcg ? memcg->kmemcg_id : -1;
2874}
2875
2876static int memcg_alloc_cache_id(void)
2877{
2878        int id, size;
2879        int err;
2880
2881        id = ida_simple_get(&kmem_limited_groups,
2882                            0, MEMCG_CACHES_MAX_SIZE, GFP_KERNEL);
2883        if (id < 0)
2884                return id;
2885
2886        if (id < memcg_limited_groups_array_size)
2887                return id;
2888
2889        /*
2890         * There's no space for the new id in memcg_caches arrays,
2891         * so we have to grow them.
2892         */
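            /*
             * For example (a sketch): if the arrays currently have 8 slots and
             * the new id is 8, we grow them to 2 * (8 + 1) = 18 entries,
             * clamped to the MEMCG_CACHES_MIN_SIZE..MEMCG_CACHES_MAX_SIZE range.
             */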
2893
2894        size = 2 * (id + 1);
2895        if (size < MEMCG_CACHES_MIN_SIZE)
2896                size = MEMCG_CACHES_MIN_SIZE;
2897        else if (size > MEMCG_CACHES_MAX_SIZE)
2898                size = MEMCG_CACHES_MAX_SIZE;
2899
2900        mutex_lock(&memcg_slab_mutex);
2901        err = memcg_update_all_caches(size);
2902        mutex_unlock(&memcg_slab_mutex);
2903
2904        if (err) {
2905                ida_simple_remove(&kmem_limited_groups, id);
2906                return err;
2907        }
2908        return id;
2909}
2910
2911static void memcg_free_cache_id(int id)
2912{
2913        ida_simple_remove(&kmem_limited_groups, id);
2914}
2915
2916/*
2917 * We should update the current array size iff all cache updates succeed. This
2918 * can only be done from the slab side. The slab mutex needs to be held when
2919 * calling this.
2920 */
2921void memcg_update_array_size(int num)
2922{
2923        memcg_limited_groups_array_size = num;
2924}
2925
2926static void memcg_register_cache(struct mem_cgroup *memcg,
2927                                 struct kmem_cache *root_cache)
2928{
2929        static char memcg_name_buf[NAME_MAX + 1]; /* protected by
2930                                                     memcg_slab_mutex */
2931        struct kmem_cache *cachep;
2932        int id;
2933
2934        lockdep_assert_held(&memcg_slab_mutex);
2935
2936        id = memcg_cache_id(memcg);
2937
2938        /*
2939         * Since per-memcg caches are created asynchronously on first
2940         * allocation (see memcg_kmem_get_cache()), several threads can try to
2941         * create the same cache, but only one of them may succeed.
2942         */
2943        if (cache_from_memcg_idx(root_cache, id))
2944                return;
2945
2946        cgroup_name(memcg->css.cgroup, memcg_name_buf, NAME_MAX + 1);
2947        cachep = memcg_create_kmem_cache(memcg, root_cache, memcg_name_buf);
2948        /*
2949         * If we could not create a memcg cache, do not complain, because
2950         * that's not critical at all as we can always proceed with the root
2951         * cache.
2952         */
2953        if (!cachep)
2954                return;
2955
2956        css_get(&memcg->css);
2957        list_add(&cachep->memcg_params->list, &memcg->memcg_slab_caches);
2958
2959        /*
2960         * Since readers won't lock (see cache_from_memcg_idx()), we need a
2961         * barrier here to ensure nobody will see the kmem_cache partially
2962         * initialized.
2963         */
2964        smp_wmb();
2965
2966        BUG_ON(root_cache->memcg_params->memcg_caches[id]);
2967        root_cache->memcg_params->memcg_caches[id] = cachep;
2968}
2969
2970static void memcg_unregister_cache(struct kmem_cache *cachep)
2971{
2972        struct kmem_cache *root_cache;
2973        struct mem_cgroup *memcg;
2974        int id;
2975
2976        lockdep_assert_held(&memcg_slab_mutex);
2977
2978        BUG_ON(is_root_cache(cachep));
2979
2980        root_cache = cachep->memcg_params->root_cache;
2981        memcg = cachep->memcg_params->memcg;
2982        id = memcg_cache_id(memcg);
2983
2984        BUG_ON(root_cache->memcg_params->memcg_caches[id] != cachep);
2985        root_cache->memcg_params->memcg_caches[id] = NULL;
2986
2987        list_del(&cachep->memcg_params->list);
2988
2989        kmem_cache_destroy(cachep);
2990
2991        /* drop the reference taken in memcg_register_cache */
2992        css_put(&memcg->css);
2993}
2994
2995/*
2996 * During the creation of a new cache, we need to disable our accounting
2997 * mechanism altogether. This is true even if we are not creating, but rather
2998 * just enqueuing new caches to be created.
2999 *
3000 * This is because that process will trigger allocations; some visible, like
3001 * explicit kmallocs to auxiliary data structures, name strings and internal
3002 * cache structures; some well concealed, like INIT_WORK() that can allocate
3003 * objects during debug.
3004 *
3005 * If any allocation happens during memcg_kmem_get_cache, we will recurse back
3006 * to it. This may not be a bounded recursion: since the first cache creation
3007 * failed to complete (waiting on the allocation), we'll just try to create the
3008 * cache again, failing at the same point.
3009 *
3010 * memcg_kmem_get_cache is prepared to abort after seeing a positive count of
3011 * memcg_kmem_skip_account. So we enclose anything that might allocate memory
3012 * inside the following two functions.
3013 */
3014static inline void memcg_stop_kmem_account(void)
3015{
3016        VM_BUG_ON(!current->mm);
3017        current->memcg_kmem_skip_account++;
3018}
3019
3020static inline void memcg_resume_kmem_account(void)
3021{
3022        VM_BUG_ON(!current->mm);
3023        current->memcg_kmem_skip_account--;
3024}
3025
3026int __memcg_cleanup_cache_params(struct kmem_cache *s)
3027{
3028        struct kmem_cache *c;
3029        int i, failed = 0;
3030
3031        mutex_lock(&memcg_slab_mutex);
3032        for_each_memcg_cache_index(i) {
3033                c = cache_from_memcg_idx(s, i);
3034                if (!c)
3035                        continue;
3036
3037                memcg_unregister_cache(c);
3038
3039                if (cache_from_memcg_idx(s, i))
3040                        failed++;
3041        }
3042        mutex_unlock(&memcg_slab_mutex);
3043        return failed;
3044}
3045
3046static void memcg_unregister_all_caches(struct mem_cgroup *memcg)
3047{
3048        struct kmem_cache *cachep;
3049        struct memcg_cache_params *params, *tmp;
3050
3051        if (!memcg_kmem_is_active(memcg))
3052                return;
3053
3054        mutex_lock(&memcg_slab_mutex);
3055        list_for_each_entry_safe(params, tmp, &memcg->memcg_slab_caches, list) {
3056                cachep = memcg_params_to_cache(params);
3057                kmem_cache_shrink(cachep);
3058                if (atomic_read(&cachep->memcg_params->nr_pages) == 0)
3059                        memcg_unregister_cache(cachep);
3060        }
3061        mutex_unlock(&memcg_slab_mutex);
3062}
3063
3064struct memcg_register_cache_work {
3065        struct mem_cgroup *memcg;
3066        struct kmem_cache *cachep;
3067        struct work_struct work;
3068};
3069
3070static void memcg_register_cache_func(struct work_struct *w)
3071{
3072        struct memcg_register_cache_work *cw =
3073                container_of(w, struct memcg_register_cache_work, work);
3074        struct mem_cgroup *memcg = cw->memcg;
3075        struct kmem_cache *cachep = cw->cachep;
3076
3077        mutex_lock(&memcg_slab_mutex);
3078        memcg_register_cache(memcg, cachep);
3079        mutex_unlock(&memcg_slab_mutex);
3080
3081        css_put(&memcg->css);
3082        kfree(cw);
3083}
3084
3085/*
3086 * Enqueue the creation of a per-memcg kmem_cache.
3087 */
3088static void __memcg_schedule_register_cache(struct mem_cgroup *memcg,
3089                                            struct kmem_cache *cachep)
3090{
3091        struct memcg_register_cache_work *cw;
3092
3093        cw = kmalloc(sizeof(*cw), GFP_NOWAIT);
3094        if (cw == NULL) {
3095                css_put(&memcg->css);
3096                return;
3097        }
3098
3099        cw->memcg = memcg;
3100        cw->cachep = cachep;
3101
3102        INIT_WORK(&cw->work, memcg_register_cache_func);
3103        schedule_work(&cw->work);
3104}
3105
3106static void memcg_schedule_register_cache(struct mem_cgroup *memcg,
3107                                          struct kmem_cache *cachep)
3108{
3109        /*
3110         * We need to stop accounting when we kmalloc, because if the
3111         * corresponding kmalloc cache is not yet created, the first allocation
3112         * in __memcg_schedule_register_cache will recurse.
3113         *
3114         * However, it is better to enclose the whole function. Depending on
3115         * the debugging options enabled, INIT_WORK(), for instance, can
3116         * trigger an allocation. This, too, will make us recurse. Because at
3117         * this point we can't allow ourselves back into memcg_kmem_get_cache,
3118         * the safest choice is to do it like this, wrapping the whole function.
3119         */
3120        memcg_stop_kmem_account();
3121        __memcg_schedule_register_cache(memcg, cachep);
3122        memcg_resume_kmem_account();
3123}
3124
3125int __memcg_charge_slab(struct kmem_cache *cachep, gfp_t gfp, int order)
3126{
3127        int res;
3128
3129        res = memcg_charge_kmem(cachep->memcg_params->memcg, gfp,
3130                                PAGE_SIZE << order);
3131        if (!res)
3132                atomic_add(1 << order, &cachep->memcg_params->nr_pages);
3133        return res;
3134}
3135
3136void __memcg_uncharge_slab(struct kmem_cache *cachep, int order)
3137{
3138        memcg_uncharge_kmem(cachep->memcg_params->memcg, PAGE_SIZE << order);
3139        atomic_sub(1 << order, &cachep->memcg_params->nr_pages);
3140}
3141
3142/*
3143 * Return the kmem_cache we're supposed to use for a slab allocation.
3144 * We try to use the current memcg's version of the cache.
3145 *
3146 * If the cache does not exist yet, i.e. if we are the first user of it,
3147 * we either create it immediately, if possible, or create it asynchronously
3148 * in a workqueue.
3149 * In the latter case, we will let the current allocation go through with
3150 * the original cache.
3151 *
3152 * Can't be called in interrupt context or from kernel threads.
3153 * This function needs to be called with rcu_read_lock() held.
3154 */
3155struct kmem_cache *__memcg_kmem_get_cache(struct kmem_cache *cachep,
3156                                          gfp_t gfp)
3157{
3158        struct mem_cgroup *memcg;
3159        struct kmem_cache *memcg_cachep;
3160
3161        VM_BUG_ON(!cachep->memcg_params);
3162        VM_BUG_ON(!cachep->memcg_params->is_root_cache);
3163
3164        if (!current->mm || current->memcg_kmem_skip_account)
3165                return cachep;
3166
3167        rcu_read_lock();
3168        memcg = mem_cgroup_from_task(rcu_dereference(current->mm->owner));
3169
3170        if (!memcg_kmem_is_active(memcg))
3171                goto out;
3172
3173        memcg_cachep = cache_from_memcg_idx(cachep, memcg_cache_id(memcg));
3174        if (likely(memcg_cachep)) {
3175                cachep = memcg_cachep;
3176                goto out;
3177        }
3178
3179        /* The corresponding put will be done in the workqueue. */
3180        if (!css_tryget_online(&memcg->css))
3181                goto out;
3182        rcu_read_unlock();
3183
3184        /*
3185         * If we are in a safe context (can wait, and not in interrupt
3186         * context), we could be predictable and return right away.
3187         * This would guarantee that the allocation being performed
3188         * already belongs in the new cache.
3189         *
3190         * However, there are some clashes that can arise from locking.
3191         * For instance, because we acquire the slab_mutex while doing
3192         * memcg_create_kmem_cache, this means no further allocation
3193         * could happen with the slab_mutex held. So it's better to
3194         * defer everything.
3195         */
3196        memcg_schedule_register_cache(memcg, cachep);
3197        return cachep;
3198out:
3199        rcu_read_unlock();
3200        return cachep;
3201}
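/*
 * Illustrative only: a simplified sketch of how an allocator hot path might
 * consult the helper above to pick the per-memcg cache.  The function name
 * below is hypothetical; the real callers go through the
 * memcg_kmem_get_cache() wrapper:
 *
 *	static inline struct kmem_cache *
 *	pick_cache_sketch(struct kmem_cache *s, gfp_t gfp)
 *	{
 *		if (!memcg_kmem_enabled())
 *			return s;
 *		return memcg_kmem_get_cache(s, gfp);
 *	}
 */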
3202
3203/*
3204 * We need to verify if the allocation against current->mm->owner's memcg is
3205 * possible for the given order. But the page is not allocated yet, so we'll
3206 * need a further commit step to do the final arrangements.
3207 *
3208 * It is possible for the task to switch cgroups in the meantime, so at
3209 * commit time, we can't rely on task conversion any longer.  We'll then use
3210 * the handle argument to return to the caller which cgroup we should commit
3211 * against. We could also return the memcg directly and avoid the pointer
3212 * passing, but a boolean return value gives better semantics considering
3213 * the compiled-out case as well.
3214 *
3215 * Returning true means the allocation is possible.
3216 */
3217bool
3218__memcg_kmem_newpage_charge(gfp_t gfp, struct mem_cgroup **_memcg, int order)
3219{
3220        struct mem_cgroup *memcg;
3221        int ret;
3222
3223        *_memcg = NULL;
3224
3225        /*
3226         * Disabling accounting is only relevant for some specific memcg
3227         * internal allocations. Therefore we would initially not have such a
3228         * check here, since direct calls to the page allocator that are
3229         * accounted to kmemcg (alloc_kmem_pages and friends) only happen
3230         * outside memcg core. We are mostly concerned with cache allocations,
3231         * and by having this test at memcg_kmem_get_cache, we are already able
3232         * to relay the allocation to the root cache and bypass the memcg cache
3233         * altogether.
3234         *
3235         * There is one exception, though: the SLUB allocator does not create
3236         * large order caches, but rather services large kmallocs directly from
3237         * the page allocator. Therefore, the following sequence when backed by
3238         * the SLUB allocator:
3239         *
3240         *      memcg_stop_kmem_account();
3241         *      kmalloc(<large_number>)
3242         *      memcg_resume_kmem_account();
3243         *
3244         * would effectively ignore the fact that we should skip accounting,
3245         * since it will drive us directly to this function without passing
3246         * through the cache selector memcg_kmem_get_cache. Such large
3247         * allocations are extremely rare but can happen, for instance, for the
3248         * cache arrays. That is why we also do this check here.
3249         */
3250        if (!current->mm || current->memcg_kmem_skip_account)
3251                return true;
3252
3253        memcg = get_mem_cgroup_from_mm(current->mm);
3254
3255        if (!memcg_kmem_is_active(memcg)) {
3256                css_put(&memcg->css);
3257                return true;
3258        }
3259
3260        ret = memcg_charge_kmem(memcg, gfp, PAGE_SIZE << order);
3261        if (!ret)
3262                *_memcg = memcg;
3263
3264        css_put(&memcg->css);
3265        return (ret == 0);
3266}
3267
3268void __memcg_kmem_commit_charge(struct page *page, struct mem_cgroup *memcg,
3269                              int order)
3270{
3271        struct page_cgroup *pc;
3272
3273        VM_BUG_ON(mem_cgroup_is_root(memcg));
3274
3275        /* The page allocation failed. Revert */
3276        if (!page) {
3277                memcg_uncharge_kmem(memcg, PAGE_SIZE << order);
3278                return;
3279        }
3280        /*
3281         * The page is freshly allocated and not visible to any
3282         * outside callers yet.  Set up pc non-atomically.
3283         */
3284        pc = lookup_page_cgroup(page);
3285        pc->mem_cgroup = memcg;
3286        pc->flags = PCG_USED;
3287}
3288
3289void __memcg_kmem_uncharge_pages(struct page *page, int order)
3290{
3291        struct mem_cgroup *memcg = NULL;
3292        struct page_cgroup *pc;
3293
3295        pc = lookup_page_cgroup(page);
3296        if (!PageCgroupUsed(pc))
3297                return;
3298
3299        memcg = pc->mem_cgroup;
3300        pc->flags = 0;
3301
3302        /*
3303         * We trust that only pages with an associated memcg are valid
3304         * (charged) allocations; otherwise there is nothing to uncharge.
3305         */
3306        if (!memcg)
3307                return;
3308
3309        VM_BUG_ON_PAGE(mem_cgroup_is_root(memcg), page);
3310        memcg_uncharge_kmem(memcg, PAGE_SIZE << order);
3311}
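/*
 * Illustrative only: the expected calling sequence for the three helpers
 * above when charging a kmem page allocation.  alloc_sketch() is a
 * hypothetical caller, not the real page-allocator entry point:
 *
 *	static struct page *alloc_sketch(gfp_t gfp, int order)
 *	{
 *		struct mem_cgroup *memcg;
 *		struct page *page;
 *
 *		if (!__memcg_kmem_newpage_charge(gfp, &memcg, order))
 *			return NULL;
 *		page = alloc_pages(gfp, order);
 *		if (memcg)
 *			__memcg_kmem_commit_charge(page, memcg, order);
 *		return page;
 *	}
 *
 * On the free side, __memcg_kmem_uncharge_pages(page, order) undoes the
 * charge recorded in the page_cgroup.
 */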
3312#else
3313static inline void memcg_unregister_all_caches(struct mem_cgroup *memcg)
3314{
3315}
3316#endif /* CONFIG_MEMCG_KMEM */
3317
3318#ifdef CONFIG_TRANSPARENT_HUGEPAGE
3319
3320/*
3321 * Because tail pages are not marked as "used", set them. We're under
3322 * zone->lru_lock, 'splitting on pmd' and compound_lock.
3323 * charge/uncharge will never happen and move_account() is done under
3324 * compound_lock(), so we don't have to take care of races.
3325 */
3326void mem_cgroup_split_huge_fixup(struct page *head)
3327{
3328        struct page_cgroup *head_pc = lookup_page_cgroup(head);
3329        struct page_cgroup *pc;
3330        struct mem_cgroup *memcg;
3331        int i;
3332
3333        if (mem_cgroup_disabled())
3334                return;
3335
3336        memcg = head_pc->mem_cgroup;
3337        for (i = 1; i < HPAGE_PMD_NR; i++) {
3338                pc = head_pc + i;
3339                pc->mem_cgroup = memcg;
3340                pc->flags = head_pc->flags;
3341        }
3342        __this_cpu_sub(memcg->stat->count[MEM_CGROUP_STAT_RSS_HUGE],
3343                       HPAGE_PMD_NR);
3344}
3345#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
3346
3347/**
3348 * mem_cgroup_move_account - move account of the page
3349 * @page: the page
3350 * @nr_pages: number of regular pages (>1 for huge pages)
3351 * @pc: page_cgroup of the page.
3352 * @from: mem_cgroup which the page is moved from.
3353 * @to: mem_cgroup which the page is moved to. @from != @to.
3354 *
3355 * The caller must confirm the following.
3356 * - page is not on LRU (isolate_page() is useful.)
3357 * - compound_lock is held when nr_pages > 1
3358 *
3359 * This function doesn't do "charge" to new cgroup and doesn't do "uncharge"
3360 * from old cgroup.
3361 */
3362static int mem_cgroup_move_account(struct page *page,
3363                                   unsigned int nr_pages,
3364                                   struct page_cgroup *pc,
3365                                   struct mem_cgroup *from,
3366                                   struct mem_cgroup *to)
3367{
3368        unsigned long flags;
3369        int ret;
3370
3371        VM_BUG_ON(from == to);
3372        VM_BUG_ON_PAGE(PageLRU(page), page);
3373        /*
3374         * The page is isolated from the LRU, so the collapse function
3375         * will not handle this page. But page splitting can happen.
3376         * Do this check under compound_page_lock(). The caller should
3377         * hold it.
3378         */
3379        ret = -EBUSY;
3380        if (nr_pages > 1 && !PageTransHuge(page))
3381                goto out;
3382
3383        /*
3384         * Prevent mem_cgroup_migrate() from looking at pc->mem_cgroup
3385         * of its source page while we change it: page migration takes
3386         * both pages off the LRU, but page cache replacement doesn't.
3387         */
3388        if (!trylock_page(page))
3389                goto out;
3390
3391        ret = -EINVAL;
3392        if (!PageCgroupUsed(pc) || pc->mem_cgroup != from)
3393                goto out_unlock;
3394
3395        move_lock_mem_cgroup(from, &flags);
3396
3397        if (!PageAnon(page) && page_mapped(page)) {
3398                __this_cpu_sub(from->stat->count[MEM_CGROUP_STAT_FILE_MAPPED],
3399                               nr_pages);
3400                __this_cpu_add(to->stat->count[MEM_CGROUP_STAT_FILE_MAPPED],
3401                               nr_pages);
3402        }
3403
3404        if (PageWriteback(page)) {
3405                __this_cpu_sub(from->stat->count[MEM_CGROUP_STAT_WRITEBACK],
3406                               nr_pages);
3407                __this_cpu_add(to->stat->count[MEM_CGROUP_STAT_WRITEBACK],
3408                               nr_pages);
3409        }
3410
3411        /*
3412         * It is safe to change pc->mem_cgroup here because the page
3413         * is referenced, charged, and isolated - we can't race with
3414         * uncharging, charging, migration, or LRU putback.
3415         */
3416
3417        /* caller should have done css_get */
3418        pc->mem_cgroup = to;
3419        move_unlock_mem_cgroup(from, &flags);
3420        ret = 0;
3421
3422        local_irq_disable();
3423        mem_cgroup_charge_statistics(to, page, nr_pages);
3424        memcg_check_events(to, page);
3425        mem_cgroup_charge_statistics(from, page, -nr_pages);
3426        memcg_check_events(from, page);
3427        local_irq_enable();
3428out_unlock:
3429        unlock_page(page);
3430out:
3431        return ret;
3432}
3433
3434/**
3435 * mem_cgroup_move_parent - moves page to the parent group
3436 * @page: the page to move
3437 * @pc: page_cgroup of the page
3438 * @child: page's cgroup
3439 *
3440 * move charges to its parent or the root cgroup if the group has no
3441 * parent (aka use_hierarchy==0).
3442 * Although this might fail (get_page_unless_zero, isolate_lru_page or
3443 * mem_cgroup_move_account fails) the failure is always temporary and
3444 * it signals a race with a page removal/uncharge or migration. In the
3445 * first case the page is on the way out and it will vanish from the LRU
3446 * on the next attempt and the call should be retried later.
3447 * Isolation from the LRU fails only if the page has been isolated from
3448 * the LRU since we looked at it and that usually means either global
3449 * reclaim or migration going on. The page will either get back to the
3450 * LRU or vanish.
3451 * Finally, mem_cgroup_move_account fails only if the page got uncharged
3452 * (!PageCgroupUsed) or moved to a different group. The page will
3453 * disappear in the next attempt.
3454 */
3455static int mem_cgroup_move_parent(struct page *page,
3456                                  struct page_cgroup *pc,
3457                                  struct mem_cgroup *child)
3458{
3459        struct mem_cgroup *parent;
3460        unsigned int nr_pages;
3461        unsigned long uninitialized_var(flags);
3462        int ret;
3463
3464        VM_BUG_ON(mem_cgroup_is_root(child));
3465
3466        ret = -EBUSY;
3467        if (!get_page_unless_zero(page))
3468                goto out;
3469        if (isolate_lru_page(page))
3470                goto put;
3471
3472        nr_pages = hpage_nr_pages(page);
3473
3474        parent = parent_mem_cgroup(child);
3475        /*
3476         * If no parent, move charges to root cgroup.
3477         */
3478        if (!parent)
3479                parent = root_mem_cgroup;
3480
3481        if (nr_pages > 1) {
3482                VM_BUG_ON_PAGE(!PageTransHuge(page), page);
3483                flags = compound_lock_irqsave(page);
3484        }
3485
3486        ret = mem_cgroup_move_account(page, nr_pages,
3487                                pc, child, parent);
3488        if (!ret)
3489                __mem_cgroup_cancel_local_charge(child, nr_pages);
3490
3491        if (nr_pages > 1)
3492                compound_unlock_irqrestore(page, flags);
3493        putback_lru_page(page);
3494put:
3495        put_page(page);
3496out:
3497        return ret;
3498}
3499
3500#ifdef CONFIG_MEMCG_SWAP
3501static void mem_cgroup_swap_statistics(struct mem_cgroup *memcg,
3502                                         bool charge)
3503{
3504        int val = (charge) ? 1 : -1;
3505        this_cpu_add(memcg->stat->count[MEM_CGROUP_STAT_SWAP], val);
3506}
3507
3508/**
3509 * mem_cgroup_move_swap_account - move swap charge and swap_cgroup's record.
3510 * @entry: swap entry to be moved
3511 * @from:  mem_cgroup which the entry is moved from
3512 * @to:  mem_cgroup which the entry is moved to
3513 *
3514 * It succeeds only when the swap_cgroup's record for this entry is the same
3515 * as the mem_cgroup's id of @from.
3516 *
3517 * Returns 0 on success, -EINVAL on failure.
3518 *
3519 * The caller must have charged to @to, IOW, called res_counter_charge() on
3520 * both res and memsw, and called css_get().
3521 */
3522static int mem_cgroup_move_swap_account(swp_entry_t entry,
3523                                struct mem_cgroup *from, struct mem_cgroup *to)
3524{
3525        unsigned short old_id, new_id;
3526
3527        old_id = mem_cgroup_id(from);
3528        new_id = mem_cgroup_id(to);
3529
3530        if (swap_cgroup_cmpxchg(entry, old_id, new_id) == old_id) {
3531                mem_cgroup_swap_statistics(from, false);
3532                mem_cgroup_swap_statistics(to, true);
3533                /*
3534                 * This function is only called from task migration context now.
3535                 * It postpones res_counter and refcount handling till the end
3536                 * of task migration (mem_cgroup_clear_mc()) as a performance
3537                 * improvement. But we cannot postpone css_get(to) because if
3538                 * the process that has been moved to @to does swap-in, the
3539                 * refcount of @to might be decreased to 0.
3540                 *
3541                 * We are in attach() phase, so the cgroup is guaranteed to be
3542                 * alive, so we can just call css_get().
3543                 */
3544                css_get(&to->css);
3545                return 0;
3546        }
3547        return -EINVAL;
3548}
3549#else
3550static inline int mem_cgroup_move_swap_account(swp_entry_t entry,
3551                                struct mem_cgroup *from, struct mem_cgroup *to)
3552{
3553        return -EINVAL;
3554}
3555#endif
3556
3557#ifdef CONFIG_DEBUG_VM
3558static struct page_cgroup *lookup_page_cgroup_used(struct page *page)
3559{
3560        struct page_cgroup *pc;
3561
3562        pc = lookup_page_cgroup(page);
3563        /*
3564         * Can be NULL while feeding pages into the page allocator for
3565         * the first time, i.e. during boot or memory hotplug;
3566         * or when mem_cgroup_disabled().
3567         */
3568        if (likely(pc) && PageCgroupUsed(pc))
3569                return pc;
3570        return NULL;
3571}
3572
3573bool mem_cgroup_bad_page_check(struct page *page)
3574{
3575        if (mem_cgroup_disabled())
3576                return false;
3577
3578        return lookup_page_cgroup_used(page) != NULL;
3579}
3580
3581void mem_cgroup_print_bad_page(struct page *page)
3582{
3583        struct page_cgroup *pc;
3584
3585        pc = lookup_page_cgroup_used(page);
3586        if (pc) {
3587                pr_alert("pc:%p pc->flags:%lx pc->mem_cgroup:%p\n",
3588                         pc, pc->flags, pc->mem_cgroup);
3589        }
3590}
3591#endif
3592
3593static int mem_cgroup_resize_limit(struct mem_cgroup *memcg,
3594                                unsigned long long val)
3595{
3596        int retry_count;
3597        int ret = 0;
3598        int children = mem_cgroup_count_children(memcg);
3599        u64 curusage, oldusage;
3600        int enlarge;
3601
3602        /*
3603         * To keep hierarchical_reclaim simple, how long we should retry
3604         * depends on the caller. We set our retry count to be a function
3605         * of the number of children we should visit in this loop.
3606         */
3607        retry_count = MEM_CGROUP_RECLAIM_RETRIES * children;
3608
3609        oldusage = res_counter_read_u64(&memcg->res, RES_USAGE);
3610
3611        enlarge = 0;
3612        while (retry_count) {
3613                if (signal_pending(current)) {
3614                        ret = -EINTR;
3615                        break;
3616                }
3617                /*
3618                 * Rather than hiding all this in some function, do it in an
3619                 * open-coded manner so you can see what it really does:
3620                 * we have to guarantee memcg->res.limit <= memcg->memsw.limit.
3621                 */
3622                mutex_lock(&set_limit_mutex);
3623                if (res_counter_read_u64(&memcg->memsw, RES_LIMIT) < val) {
3624                        ret = -EINVAL;
3625                        mutex_unlock(&set_limit_mutex);
3626                        break;
3627                }
3628
3629                if (res_counter_read_u64(&memcg->res, RES_LIMIT) < val)
3630                        enlarge = 1;
3631
3632                ret = res_counter_set_limit(&memcg->res, val);
3633                mutex_unlock(&set_limit_mutex);
3634
3635                if (!ret)
3636                        break;
3637
3638                try_to_free_mem_cgroup_pages(memcg, 1, GFP_KERNEL, true);
3639
3640                curusage = res_counter_read_u64(&memcg->res, RES_USAGE);
3641                /* Was the usage reduced? */
3642                if (curusage >= oldusage)
3643                        retry_count--;
3644                else
3645                        oldusage = curusage;
3646        }
3647        if (!ret && enlarge)
3648                memcg_oom_recover(memcg);
3649
3650        return ret;
3651}
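/*
 * A worked example of the ordering enforced above (illustrative only): with
 * memory.memsw.limit_in_bytes at 2G, an attempt to raise
 * memory.limit_in_bytes to 3G fails with -EINVAL until the memsw limit has
 * been raised to at least 3G first.  Conversely, mem_cgroup_resize_memsw_limit()
 * below refuses to lower the memsw limit beneath the current memory limit.
 */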
3652
3653static int mem_cgroup_resize_memsw_limit(struct mem_cgroup *memcg,
3654                                        unsigned long long val)
3655{
3656        int retry_count;
3657        u64 oldusage, curusage;
3658        int children = mem_cgroup_count_children(memcg);
3659        int ret = -EBUSY;
3660        int enlarge = 0;
3661
3662        /* see mem_cgroup_resize_limit */
3663        retry_count = children * MEM_CGROUP_RECLAIM_RETRIES;
3664        oldusage = res_counter_read_u64(&memcg->memsw, RES_USAGE);
3665        while (retry_count) {
3666                if (signal_pending(current)) {
3667                        ret = -EINTR;
3668                        break;
3669                }
3670                /*
3671                 * Rather than hiding all this in some function, do it in an
3672                 * open-coded manner so you can see what it really does:
3673                 * we have to guarantee memcg->res.limit <= memcg->memsw.limit.
3674                 */
3675                mutex_lock(&set_limit_mutex);
3676                if (res_counter_read_u64(&memcg->res, RES_LIMIT) > val) {
3677                        ret = -EINVAL;
3678                        mutex_unlock(&set_limit_mutex);
3679                        break;
3680                }
3681                if (res_counter_read_u64(&memcg->memsw, RES_LIMIT) < val)
3682                        enlarge = 1;
3683                ret = res_counter_set_limit(&memcg->memsw, val);
3684                mutex_unlock(&set_limit_mutex);
3685
3686                if (!ret)
3687                        break;
3688
3689                try_to_free_mem_cgroup_pages(memcg, 1, GFP_KERNEL, false);
3690
3691                curusage = res_counter_read_u64(&memcg->memsw, RES_USAGE);
3692                /* Was the usage reduced? */
3693                if (curusage >= oldusage)
3694                        retry_count--;
3695                else
3696                        oldusage = curusage;
3697        }
3698        if (!ret && enlarge)
3699                memcg_oom_recover(memcg);
3700        return ret;
3701}
3702
3703unsigned long mem_cgroup_soft_limit_reclaim(struct zone *zone, int order,
3704                                            gfp_t gfp_mask,
3705                                            unsigned long *total_scanned)
3706{
3707        unsigned long nr_reclaimed = 0;
3708        struct mem_cgroup_per_zone *mz, *next_mz = NULL;
3709        unsigned long reclaimed;
3710        int loop = 0;
3711        struct mem_cgroup_tree_per_zone *mctz;
3712        unsigned long long excess;
3713        unsigned long nr_scanned;
3714
3715        if (order > 0)
3716                return 0;
3717
3718        mctz = soft_limit_tree_node_zone(zone_to_nid(zone), zone_idx(zone));
3719        /*
3720         * This loop can run a while, especially if mem_cgroups continuously
3721         * keep exceeding their soft limit and putting the system under
3722         * pressure.
3723         */
3724        do {
3725                if (next_mz)
3726                        mz = next_mz;
3727                else
3728                        mz = mem_cgroup_largest_soft_limit_node(mctz);
3729                if (!mz)
3730                        break;
3731
3732                nr_scanned = 0;
3733                reclaimed = mem_cgroup_soft_reclaim(mz->memcg, zone,
3734                                                    gfp_mask, &nr_scanned);
3735                nr_reclaimed += reclaimed;
3736                *total_scanned += nr_scanned;
3737                spin_lock_irq(&mctz->lock);
3738
3739                /*
3740                 * If we failed to reclaim anything from this memory cgroup,
3741                 * it is time to move on to the next cgroup.
3742                 */
3743                next_mz = NULL;
3744                if (!reclaimed) {
3745                        do {
3746                                /*
3747                                 * Loop until we find yet another one.
3748                                 *
3749                                 * By the time we get the soft_limit lock
3750                                 * again, someone might have added the
3751                                 * group back on the RB tree. Iterate to
3752                                 * make sure we get a different memcg.
3753                                 * mem_cgroup_largest_soft_limit_node returns
3754                                 * NULL if no other cgroup is present on
3755                                 * the tree
3756                                 */
3757                                next_mz =
3758                                __mem_cgroup_largest_soft_limit_node(mctz);
3759                                if (next_mz == mz)
3760                                        css_put(&next_mz->memcg->css);
3761                                else /* next_mz == NULL or other memcg */
3762                                        break;
3763                        } while (1);
3764                }
3765                __mem_cgroup_remove_exceeded(mz, mctz);
3766                excess = res_counter_soft_limit_excess(&mz->memcg->res);
3767                /*
3768                 * One school of thought says that we should not add
3769                 * back the node to the tree if reclaim returns 0.
3770                 * But our reclaim could return 0 simply because, due
3771                 * to priority, we are exposing a smaller subset of
3772                 * memory to reclaim from. Consider this as a longer
3773                 * term TODO.
3774                 */
3775                /* If excess == 0, no tree ops */
3776                __mem_cgroup_insert_exceeded(mz, mctz, excess);
3777                spin_unlock_irq(&mctz->lock);
3778                css_put(&mz->memcg->css);
3779                loop++;
3780                /*
3781                 * Could not reclaim anything and there are no more
3782                 * mem cgroups to try or we seem to be looping without
3783                 * reclaiming anything.
3784                 */
3785                if (!nr_reclaimed &&
3786                        (next_mz == NULL ||
3787                        loop > MEM_CGROUP_MAX_SOFT_LIMIT_RECLAIM_LOOPS))
3788                        break;
3789        } while (!nr_reclaimed);
3790        if (next_mz)
3791                css_put(&next_mz->memcg->css);
3792        return nr_reclaimed;
3793}
3794
3795/**
3796 * mem_cgroup_force_empty_list - clears LRU of a group
3797 * @memcg: group to clear
3798 * @node: NUMA node
3799 * @zid: zone id
3800 * @lru: lru to clear
3801 *
3802 * Traverse a specified page_cgroup list and try to drop them all.  This doesn't
3803 * reclaim the pages themselves - pages are moved to the parent (or root)
3804 * group.
3805 */
3806static void mem_cgroup_force_empty_list(struct mem_cgroup *memcg,
3807                                int node, int zid, enum lru_list lru)
3808{
3809        struct lruvec *lruvec;
3810        unsigned long flags;
3811        struct list_head *list;
3812        struct page *busy;
3813        struct zone *zone;
3814
3815        zone = &NODE_DATA(node)->node_zones[zid];
3816        lruvec = mem_cgroup_zone_lruvec(zone, memcg);
3817        list = &lruvec->lists[lru];
3818
3819        busy = NULL;
3820        do {
3821                struct page_cgroup *pc;
3822                struct page *page;
3823
3824                spin_lock_irqsave(&zone->lru_lock, flags);
3825                if (list_empty(list)) {
3826                        spin_unlock_irqrestore(&zone->lru_lock, flags);
3827                        break;
3828                }
3829                page = list_entry(list->prev, struct page, lru);
3830                if (busy == page) {
3831                        list_move(&page->lru, list);
3832                        busy = NULL;
3833                        spin_unlock_irqrestore(&zone->lru_lock, flags);
3834                        continue;
3835                }
3836                spin_unlock_irqrestore(&zone->lru_lock, flags);
3837
3838                pc = lookup_page_cgroup(page);
3839
3840                if (mem_cgroup_move_parent(page, pc, memcg)) {
3841                        /* found lock contention or "pc" is obsolete. */
3842                        busy = page;
3843                } else
3844                        busy = NULL;
3845                cond_resched();
3846        } while (!list_empty(list));
3847}
3848
3849/*
3850 * Make the mem_cgroup's charge 0 if there is no task, by moving
3851 * all the charges and pages to the parent.
3852 * This enables deleting this mem_cgroup.
3853 *
3854 * Caller is responsible for holding css reference on the memcg.
3855 */
3856static void mem_cgroup_reparent_charges(struct mem_cgroup *memcg)
3857{
3858        int node, zid;
3859        u64 usage;
3860
3861        do {
3862                /* This is for making all *used* pages be on the LRU. */
3863                lru_add_drain_all();
3864                drain_all_stock_sync(memcg);
3865                mem_cgroup_start_move(memcg);
3866                for_each_node_state(node, N_MEMORY) {
3867                        for (zid = 0; zid < MAX_NR_ZONES; zid++) {
3868                                enum lru_list lru;
3869                                for_each_lru(lru) {
3870                                        mem_cgroup_force_empty_list(memcg,
3871                                                        node, zid, lru);
3872                                }
3873                        }
3874                }
3875                mem_cgroup_end_move(memcg);
3876                memcg_oom_recover(memcg);
3877                cond_resched();
3878
3879                /*
3880                 * Kernel memory may not necessarily be trackable to a specific
3881                 * process, so such pages are not migrated, and therefore we
3882                 * can't expect the usage to drop to 0 here.
3883                 * Having res filled up with kmem only is enough.
3884                 *
3885                 * This is a safety check because mem_cgroup_force_empty_list
3886                 * could have raced with mem_cgroup_replace_page_cache callers
3887                 * so the lru seemed empty but the page could have been added
3888                 * right after the check. RES_USAGE should be safe as we always
3889                 * charge before adding to the LRU.
3890                 */
3891                usage = res_counter_read_u64(&memcg->res, RES_USAGE) -
3892                        res_counter_read_u64(&memcg->kmem, RES_USAGE);
3893        } while (usage > 0);
3894}
3895
3896/*
3897 * Test whether @memcg has children, dead or alive.  Note that this
3898 * function doesn't care whether @memcg has use_hierarchy enabled and
3899 * returns %true if there are child csses according to the cgroup
3900 * hierarchy.  Testing use_hierarchy is the caller's responsibility.
3901 */
3902static inline bool memcg_has_children(struct mem_cgroup *memcg)
3903{
3904        bool ret;
3905
3906        /*
3907         * The lock does not prevent addition or deletion of children, but
3908         * it prevents a new child from being initialized based on this
3909         * parent in css_online(), so it's enough to decide whether
3910         * hierarchically inherited attributes can still be changed or not.
3911         */
3912        lockdep_assert_held(&memcg_create_mutex);
3913
3914        rcu_read_lock();
3915        ret = css_next_child(NULL, &memcg->css);
3916        rcu_read_unlock();
3917        return ret;
3918}
3919
3920/*
3921 * Reclaims as many pages from the given memcg as possible and moves
3922 * the rest to the parent.
3923 *
3924 * Caller is responsible for holding css reference for memcg.
3925 */
3926static int mem_cgroup_force_empty(struct mem_cgroup *memcg)
3927{
3928        int nr_retries = MEM_CGROUP_RECLAIM_RETRIES;
3929
3930        /* we call try-to-free pages to make this cgroup empty */
3931        lru_add_drain_all();
3932        /* try to free all pages in this cgroup */
3933        while (nr_retries && res_counter_read_u64(&memcg->res, RES_USAGE) > 0) {
3934                int progress;
3935
3936                if (signal_pending(current))
3937                        return -EINTR;
3938
3939                progress = try_to_free_mem_cgroup_pages(memcg, 1,
3940                                                        GFP_KERNEL, true);
3941                if (!progress) {
3942                        nr_retries--;
3943                        /* maybe some writeback is necessary */
3944                        congestion_wait(BLK_RW_ASYNC, HZ/10);
3945                }
3946
3947        }
3948
3949        return 0;
3950}
3951
3952static ssize_t mem_cgroup_force_empty_write(struct kernfs_open_file *of,
3953                                            char *buf, size_t nbytes,
3954                                            loff_t off)
3955{
3956        struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of));
3957
3958        if (mem_cgroup_is_root(memcg))
3959                return -EINVAL;
3960        return mem_cgroup_force_empty(memcg) ?: nbytes;
3961}
3962
3963static u64 mem_cgroup_hierarchy_read(struct cgroup_subsys_state *css,
3964                                     struct cftype *cft)
3965{
3966        return mem_cgroup_from_css(css)->use_hierarchy;
3967}
3968
3969static int mem_cgroup_hierarchy_write(struct cgroup_subsys_state *css,
3970                                      struct cftype *cft, u64 val)
3971{
3972        int retval = 0;
3973        struct mem_cgroup *memcg = mem_cgroup_from_css(css);
3974        struct mem_cgroup *parent_memcg = mem_cgroup_from_css(memcg->css.parent);
3975
3976        mutex_lock(&memcg_create_mutex);
3977
3978        if (memcg->use_hierarchy == val)
3979                goto out;
3980
3981        /*
3982         * If parent's use_hierarchy is set, we can't make any modifications
3983         * in the child subtrees. If it is unset, then the change can
3984         * occur, provided the current cgroup has no children.
3985         *
3986         * For the root cgroup, parent_memcg is NULL; we allow the value to
3987         * be set if there are no children.
3988         */
3989        if ((!parent_memcg || !parent_memcg->use_hierarchy) &&
3990                                (val == 1 || val == 0)) {
3991                if (!memcg_has_children(memcg))
3992                        memcg->use_hierarchy = val;
3993                else
3994                        retval = -EBUSY;
3995        } else
3996                retval = -EINVAL;
3997
3998out:
3999        mutex_unlock(&memcg_create_mutex);
4000
4001        return retval;
4002}
4003
4004static unsigned long mem_cgroup_recursive_stat(struct mem_cgroup *memcg,
4005                                               enum mem_cgroup_stat_index idx)
4006{
4007        struct mem_cgroup *iter;
4008        long val = 0;
4009
4010        /* Per-cpu values can be negative, use a signed accumulator */
4011        for_each_mem_cgroup_tree(iter, memcg)
4012                val += mem_cgroup_read_stat(iter, idx);
4013
4014        if (val < 0) /* race ? */
4015                val = 0;
4016        return val;
4017}
4018
4019static inline u64 mem_cgroup_usage(struct mem_cgroup *memcg, bool swap)
4020{
4021        u64 val;
4022
4023        if (!mem_cgroup_is_root(memcg)) {
4024                if (!swap)
4025                        return res_counter_read_u64(&memcg->res, RES_USAGE);
4026                else
4027                        return res_counter_read_u64(&memcg->memsw, RES_USAGE);
4028        }
4029
4030        /*
4031         * Transparent hugepages are still accounted for in MEM_CGROUP_STAT_RSS
4032         * as well as in MEM_CGROUP_STAT_RSS_HUGE.
4033         */
4034        val = mem_cgroup_recursive_stat(memcg, MEM_CGROUP_STAT_CACHE);
4035        val += mem_cgroup_recursive_stat(memcg, MEM_CGROUP_STAT_RSS);
4036
4037        if (swap)
4038                val += mem_cgroup_recursive_stat(memcg, MEM_CGROUP_STAT_SWAP);
4039
4040        return val << PAGE_SHIFT;
4041}
4042
4043
4044static u64 mem_cgroup_read_u64(struct cgroup_subsys_state *css,
4045                               struct cftype *cft)
4046{
4047        struct mem_cgroup *memcg = mem_cgroup_from_css(css);
4048        enum res_type type = MEMFILE_TYPE(cft->private);
4049        int name = MEMFILE_ATTR(cft->private);
4050
4051        switch (type) {
4052        case _MEM:
4053                if (name == RES_USAGE)
4054                        return mem_cgroup_usage(memcg, false);
4055                return res_counter_read_u64(&memcg->res, name);
4056        case _MEMSWAP:
4057                if (name == RES_USAGE)
4058                        return mem_cgroup_usage(memcg, true);
4059                return res_counter_read_u64(&memcg->memsw, name);
4060        case _KMEM:
4061                return res_counter_read_u64(&memcg->kmem, name);
4063        default:
4064                BUG();
4065        }
4066}
4067
4068#ifdef CONFIG_MEMCG_KMEM
4069/* should be called with activate_kmem_mutex held */
4070static int __memcg_activate_kmem(struct mem_cgroup *memcg,
4071                                 unsigned long long limit)
4072{
4073        int err = 0;
4074        int memcg_id;
4075
4076        if (memcg_kmem_is_active(memcg))
4077                return 0;
4078
4079        /*
4080         * We are going to allocate memory for data shared by all memory
4081         * cgroups so let's stop accounting here.
4082         */
4083        memcg_stop_kmem_account();
4084
4085        /*
4086         * For simplicity, we won't allow this to be disabled.  It also can't
4087         * be changed if the cgroup has children already, or if tasks had
4088         * already joined.
4089         *
4090         * If tasks join before we set the limit, a person looking at
4091         * kmem.usage_in_bytes will have no way to determine when it took
4092         * place, which makes the value quite meaningless.
4093         *
4094         * After it first became limited, changes in the value of the limit are
4095         * of course permitted.
4096         */
4097        mutex_lock(&memcg_create_mutex);
4098        if (cgroup_has_tasks(memcg->css.cgroup) ||
4099            (memcg->use_hierarchy && memcg_has_children(memcg)))
4100                err = -EBUSY;
4101        mutex_unlock(&memcg_create_mutex);
4102        if (err)
4103                goto out;
4104
4105        memcg_id = memcg_alloc_cache_id();
4106        if (memcg_id < 0) {
4107                err = memcg_id;
4108                goto out;
4109        }
4110
4111        memcg->kmemcg_id = memcg_id;
4112        INIT_LIST_HEAD(&memcg->memcg_slab_caches);
4113
4114        /*
4115         * We couldn't have accounted to this cgroup, because it hasn't got the
4116         * active bit set yet, so this should succeed.
4117         */
4118        err = res_counter_set_limit(&memcg->kmem, limit);
4119        VM_BUG_ON(err);
4120
4121        static_key_slow_inc(&memcg_kmem_enabled_key);
4122        /*
4123         * Setting the active bit after enabling static branching will
4124         * guarantee no one starts accounting before all call sites are
4125         * patched.
4126         */
4127        memcg_kmem_set_active(memcg);
4128out:
4129        memcg_resume_kmem_account();
4130        return err;
4131}
4132
4133static int memcg_activate_kmem(struct mem_cgroup *memcg,
4134                               unsigned long long limit)
4135{
4136        int ret;
4137
4138        mutex_lock(&activate_kmem_mutex);
4139        ret = __memcg_activate_kmem(memcg, limit);
4140        mutex_unlock(&activate_kmem_mutex);
4141        return ret;
4142}
4143
4144static int memcg_update_kmem_limit(struct mem_cgroup *memcg,
4145                                   unsigned long long val)
4146{
4147        int ret;
4148
4149        if (!memcg_kmem_is_active(memcg))
4150                ret = memcg_activate_kmem(memcg, val);
4151        else
4152                ret = res_counter_set_limit(&memcg->kmem, val);
4153        return ret;
4154}
4155
4156static int memcg_propagate_kmem(struct mem_cgroup *memcg)
4157{
4158        int ret = 0;
4159        struct mem_cgroup *parent = parent_mem_cgroup(memcg);
4160
4161        if (!parent)
4162                return 0;
4163
4164        mutex_lock(&activate_kmem_mutex);
4165        /*
4166         * If the parent cgroup is not kmem-active now, it cannot be activated
4167         * after this point, because it has at least one child already.
4168         */
4169        if (memcg_kmem_is_active(parent))
4170                ret = __memcg_activate_kmem(memcg, RES_COUNTER_MAX);
4171        mutex_unlock(&activate_kmem_mutex);
4172        return ret;
4173}
4174#else
4175static int memcg_update_kmem_limit(struct mem_cgroup *memcg,
4176                                   unsigned long long val)
4177{
4178        return -EINVAL;
4179}
4180#endif /* CONFIG_MEMCG_KMEM */
4181
4182/*
4183 * The user of this function is...
4184 * RES_LIMIT.
4185 */
4186static ssize_t mem_cgroup_write(struct kernfs_open_file *of,
4187                                char *buf, size_t nbytes, loff_t off)
4188{
4189        struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of));
4190        enum res_type type;
4191        int name;
4192        unsigned long long val;
4193        int ret;
4194
4195        buf = strstrip(buf);
4196        type = MEMFILE_TYPE(of_cft(of)->private);
4197        name = MEMFILE_ATTR(of_cft(of)->private);
4198
4199        switch (name) {
4200        case RES_LIMIT:
4201                if (mem_cgroup_is_root(memcg)) { /* Can't set limit on root */
4202                        ret = -EINVAL;
4203                        break;
4204                }
4205                /* This function does all the necessary parsing... reuse it */
4206                ret = res_counter_memparse_write_strategy(buf, &val);
4207                if (ret)
4208                        break;
4209                if (type == _MEM)
4210                        ret = mem_cgroup_resize_limit(memcg, val);
4211                else if (type == _MEMSWAP)
4212                        ret = mem_cgroup_resize_memsw_limit(memcg, val);
4213                else if (type == _KMEM)
4214                        ret = memcg_update_kmem_limit(memcg, val);
4215                else
4216                        return -EINVAL;
4217                break;
4218        case RES_SOFT_LIMIT:
4219                ret = res_counter_memparse_write_strategy(buf, &val);
4220                if (ret)
4221                        break;
4222                /*
4223                 * For memsw, soft limits are hard to implement in terms
4224                 * of semantics; for now, we support soft limits only for
4225                 * memory control without swap.
4226                 */
4227                if (type == _MEM)
4228                        ret = res_counter_set_soft_limit(&memcg->res, val);
4229                else
4230                        ret = -EINVAL;
4231                break;
4232        default:
4233                ret = -EINVAL; /* should be BUG() ? */
4234                break;
4235        }
4236        return ret ?: nbytes;
4237}
4238
4239static void memcg_get_hierarchical_limit(struct mem_cgroup *memcg,
4240                unsigned long long *mem_limit, unsigned long long *memsw_limit)
4241{
4242        unsigned long long min_limit, min_memsw_limit, tmp;
4243
4244        min_limit = res_counter_read_u64(&memcg->res, RES_LIMIT);
4245        min_memsw_limit = res_counter_read_u64(&memcg->memsw, RES_LIMIT);
4246        if (!memcg->use_hierarchy)
4247                goto out;
4248
4249        while (memcg->css.parent) {
4250                memcg = mem_cgroup_from_css(memcg->css.parent);
4251                if (!memcg->use_hierarchy)
4252                        break;
4253                tmp = res_counter_read_u64(&memcg->res, RES_LIMIT);
4254                min_limit = min(min_limit, tmp);
4255                tmp = res_counter_read_u64(&memcg->memsw, RES_LIMIT);
4256                min_memsw_limit = min(min_memsw_limit, tmp);
4257        }
4258out:
4259        *mem_limit = min_limit;
4260        *memsw_limit = min_memsw_limit;
4261}
4262
4263static ssize_t mem_cgroup_reset(struct kernfs_open_file *of, char *buf,
4264                                size_t nbytes, loff_t off)
4265{
4266        struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of));
4267        int name;
4268        enum res_type type;
4269
4270        type = MEMFILE_TYPE(of_cft(of)->private);
4271        name = MEMFILE_ATTR(of_cft(of)->private);
4272
4273        switch (name) {
4274        case RES_MAX_USAGE:
4275                if (type == _MEM)
4276                        res_counter_reset_max(&memcg->res);
4277                else if (type == _MEMSWAP)
4278                        res_counter_reset_max(&memcg->memsw);
4279                else if (type == _KMEM)
4280                        res_counter_reset_max(&memcg->kmem);
4281                else
4282                        return -EINVAL;
4283                break;
4284        case RES_FAILCNT:
4285                if (type == _MEM)
4286                        res_counter_reset_failcnt(&memcg->res);
4287                else if (type == _MEMSWAP)
4288                        res_counter_reset_failcnt(&memcg->memsw);
4289                else if (type == _KMEM)
4290                        res_counter_reset_failcnt(&memcg->kmem);
4291                else
4292                        return -EINVAL;
4293                break;
4294        }
4295
4296        return nbytes;
4297}
4298
4299static u64 mem_cgroup_move_charge_read(struct cgroup_subsys_state *css,
4300                                        struct cftype *cft)
4301{
4302        return mem_cgroup_from_css(css)->move_charge_at_immigrate;
4303}
4304
4305#ifdef CONFIG_MMU
4306static int mem_cgroup_move_charge_write(struct cgroup_subsys_state *css,
4307                                        struct cftype *cft, u64 val)
4308{
4309        struct mem_cgroup *memcg = mem_cgroup_from_css(css);
4310
4311        if (val >= (1 << NR_MOVE_TYPE))
4312                return -EINVAL;
4313
4314        /*
4315         * No kind of locking is needed here, because ->can_attach() will
4316         * check this value once at the beginning of the process, and then carry
4317         * on with stale data. This means that changes to this value will only
4318         * affect task migrations starting after the change.
4319         */
4320        memcg->move_charge_at_immigrate = val;
4321        return 0;
4322}
4323#else
4324static int mem_cgroup_move_charge_write(struct cgroup_subsys_state *css,
4325                                        struct cftype *cft, u64 val)
4326{
4327        return -ENOSYS;
4328}
4329#endif
4330
4331#ifdef CONFIG_NUMA
4332static int memcg_numa_stat_show(struct seq_file *m, void *v)
4333{
4334        struct numa_stat {
4335                const char *name;
4336                unsigned int lru_mask;
4337        };
4338
4339        static const struct numa_stat stats[] = {
4340                { "total", LRU_ALL },
4341                { "file", LRU_ALL_FILE },
4342                { "anon", LRU_ALL_ANON },
4343                { "unevictable", BIT(LRU_UNEVICTABLE) },
4344        };
4345        const struct numa_stat *stat;
4346        int nid;
4347        unsigned long nr;
4348        struct mem_cgroup *memcg = mem_cgroup_from_css(seq_css(m));
4349
4350        for (stat = stats; stat < stats + ARRAY_SIZE(stats); stat++) {
4351                nr = mem_cgroup_nr_lru_pages(memcg, stat->lru_mask);
4352                seq_printf(m, "%s=%lu", stat->name, nr);
4353                for_each_node_state(nid, N_MEMORY) {
4354                        nr = mem_cgroup_node_nr_lru_pages(memcg, nid,
4355                                                          stat->lru_mask);
4356                        seq_printf(m, " N%d=%lu", nid, nr);
4357                }
4358                seq_putc(m, '\n');
4359        }
4360
4361        for (stat = stats; stat < stats + ARRAY_SIZE(stats); stat++) {
4362                struct mem_cgroup *iter;
4363
4364                nr = 0;
4365                for_each_mem_cgroup_tree(iter, memcg)
4366                        nr += mem_cgroup_nr_lru_pages(iter, stat->lru_mask);
4367                seq_printf(m, "hierarchical_%s=%lu", stat->name, nr);
4368                for_each_node_state(nid, N_MEMORY) {
4369                        nr = 0;
4370                        for_each_mem_cgroup_tree(iter, memcg)
4371                                nr += mem_cgroup_node_nr_lru_pages(
4372                                        iter, nid, stat->lru_mask);
4373                        seq_printf(m, " N%d=%lu", nid, nr);
4374                }
4375                seq_putc(m, '\n');
4376        }
4377
4378        return 0;
4379}
4380#endif /* CONFIG_NUMA */
4381
4382static inline void mem_cgroup_lru_names_not_uptodate(void)
4383{
4384        BUILD_BUG_ON(ARRAY_SIZE(mem_cgroup_lru_names) != NR_LRU_LISTS);
4385}
4386
4387static int memcg_stat_show(struct seq_file *m, void *v)
4388{
4389        struct mem_cgroup *memcg = mem_cgroup_from_css(seq_css(m));
4390        struct mem_cgroup *mi;
4391        unsigned int i;
4392
4393        for (i = 0; i < MEM_CGROUP_STAT_NSTATS; i++) {
4394                if (i == MEM_CGROUP_STAT_SWAP && !do_swap_account)
4395                        continue;
4396                seq_printf(m, "%s %ld\n", mem_cgroup_stat_names[i],
4397                           mem_cgroup_read_stat(memcg, i) * PAGE_SIZE);
4398        }
4399
4400        for (i = 0; i < MEM_CGROUP_EVENTS_NSTATS; i++)
4401                seq_printf(m, "%s %lu\n", mem_cgroup_events_names[i],
4402                           mem_cgroup_read_events(memcg, i));
4403
4404        for (i = 0; i < NR_LRU_LISTS; i++)
4405                seq_printf(m, "%s %lu\n", mem_cgroup_lru_names[i],
4406                           mem_cgroup_nr_lru_pages(memcg, BIT(i)) * PAGE_SIZE);
4407
4408        /* Hierarchical information */
4409        {
4410                unsigned long long limit, memsw_limit;
4411                memcg_get_hierarchical_limit(memcg, &limit, &memsw_limit);
4412                seq_printf(m, "hierarchical_memory_limit %llu\n", limit);
4413                if (do_swap_account)
4414                        seq_printf(m, "hierarchical_memsw_limit %llu\n",
4415                                   memsw_limit);
4416        }
4417
4418        for (i = 0; i < MEM_CGROUP_STAT_NSTATS; i++) {
4419                long long val = 0;
4420
4421                if (i == MEM_CGROUP_STAT_SWAP && !do_swap_account)
4422                        continue;
4423                for_each_mem_cgroup_tree(mi, memcg)
4424                        val += mem_cgroup_read_stat(mi, i) * PAGE_SIZE;
4425                seq_printf(m, "total_%s %lld\n", mem_cgroup_stat_names[i], val);
4426        }
4427
4428        for (i = 0; i < MEM_CGROUP_EVENTS_NSTATS; i++) {
4429                unsigned long long val = 0;
4430
4431                for_each_mem_cgroup_tree(mi, memcg)
4432                        val += mem_cgroup_read_events(mi, i);
4433                seq_printf(m, "total_%s %llu\n",
4434                           mem_cgroup_events_names[i], val);
4435        }
4436
4437        for (i = 0; i < NR_LRU_LISTS; i++) {
4438                unsigned long long val = 0;
4439
4440                for_each_mem_cgroup_tree(mi, memcg)
4441                        val += mem_cgroup_nr_lru_pages(mi, BIT(i)) * PAGE_SIZE;
4442                seq_printf(m, "total_%s %llu\n", mem_cgroup_lru_names[i], val);
4443        }
4444
4445#ifdef CONFIG_DEBUG_VM
4446        {
4447                int nid, zid;
4448                struct mem_cgroup_per_zone *mz;
4449                struct zone_reclaim_stat *rstat;
4450                unsigned long recent_rotated[2] = {0, 0};
4451                unsigned long recent_scanned[2] = {0, 0};
4452
4453                for_each_online_node(nid)
4454                        for (zid = 0; zid < MAX_NR_ZONES; zid++) {
4455                                mz = &memcg->nodeinfo[nid]->zoneinfo[zid];
4456                                rstat = &mz->lruvec.reclaim_stat;
4457
4458                                recent_rotated[0] += rstat->recent_rotated[0];
4459                                recent_rotated[1] += rstat->recent_rotated[1];
4460                                recent_scanned[0] += rstat->recent_scanned[0];
4461                                recent_scanned[1] += rstat->recent_scanned[1];
4462                        }
4463                seq_printf(m, "recent_rotated_anon %lu\n", recent_rotated[0]);
4464                seq_printf(m, "recent_rotated_file %lu\n", recent_rotated[1]);
4465                seq_printf(m, "recent_scanned_anon %lu\n", recent_scanned[0]);
4466                seq_printf(m, "recent_scanned_file %lu\n", recent_scanned[1]);
4467        }
4468#endif
4469
4470        return 0;
4471}
4472
4473static u64 mem_cgroup_swappiness_read(struct cgroup_subsys_state *css,
4474                                      struct cftype *cft)
4475{
4476        struct mem_cgroup *memcg = mem_cgroup_from_css(css);
4477
4478        return mem_cgroup_swappiness(memcg);
4479}
4480
4481static int mem_cgroup_swappiness_write(struct cgroup_subsys_state *css,
4482                                       struct cftype *cft, u64 val)
4483{
4484        struct mem_cgroup *memcg = mem_cgroup_from_css(css);
4485
4486        if (val > 100)
4487                return -EINVAL;
4488
4489        if (css->parent)
4490                memcg->swappiness = val;
4491        else
4492                vm_swappiness = val;
4493
4494        return 0;
4495}
4496
4497static void __mem_cgroup_threshold(struct mem_cgroup *memcg, bool swap)
4498{
4499        struct mem_cgroup_threshold_ary *t;
4500        u64 usage;
4501        int i;
4502
4503        rcu_read_lock();
4504        if (!swap)
4505                t = rcu_dereference(memcg->thresholds.primary);
4506        else
4507                t = rcu_dereference(memcg->memsw_thresholds.primary);
4508
4509        if (!t)
4510                goto unlock;
4511
4512        usage = mem_cgroup_usage(memcg, swap);
4513
4514        /*
4515         * current_threshold points to the threshold just below or equal to
4516         * usage. If that no longer holds, a threshold was crossed after the
4517         * last call of __mem_cgroup_threshold().
4518         */
4519        i = t->current_threshold;
4520
4521        /*
4522         * Iterate backward over array of thresholds starting from
4523         * current_threshold and check if a threshold is crossed.
4524         * If none of the thresholds below usage is crossed, we read
4525         * only one element of the array here.
4526         */
4527        for (; i >= 0 && unlikely(t->entries[i].threshold > usage); i--)
4528                eventfd_signal(t->entries[i].eventfd, 1);
4529
4530        /* i is now one past the last threshold known to be at or below usage */
4531        i++;
4532
4533        /*
4534         * Iterate forward over array of thresholds starting from
4535         * current_threshold+1 and check if a threshold is crossed.
4536         * If none of the thresholds above usage is crossed, we read
4537         * only one element of the array here.
4538         */
4539        for (; i < t->size && unlikely(t->entries[i].threshold <= usage); i++)
4540                eventfd_signal(t->entries[i].eventfd, 1);
4541
4542        /* Update current_threshold */
4543        t->current_threshold = i - 1;
4544unlock:
4545        rcu_read_unlock();
4546}
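/*
 * Worked example (illustrative values): with thresholds registered at
 * 10M, 20M and 30M and usage at 25M, current_threshold indexes the 20M
 * entry.  If usage grows to 35M, the forward scan above signals the 30M
 * eventfd and current_threshold moves to it; if usage later drops to
 * 15M, the backward scan signals both the 30M and 20M eventfds and
 * current_threshold moves back to the 10M entry.  Thresholds are thus
 * notified on crossings in either direction.
 */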
4547
4548static void mem_cgroup_threshold(struct mem_cgroup *memcg)
4549{
4550        while (memcg) {
4551                __mem_cgroup_threshold(memcg, false);
4552                if (do_swap_account)
4553                        __mem_cgroup_threshold(memcg, true);
4554
4555                memcg = parent_mem_cgroup(memcg);
4556        }
4557}
4558
4559static int compare_thresholds(const void *a, const void *b)
4560{
4561        const struct mem_cgroup_threshold *_a = a;
4562        const struct mem_cgroup_threshold *_b = b;
4563
4564        if (_a->threshold > _b->threshold)
4565                return 1;
4566
4567        if (_a->threshold < _b->threshold)
4568                return -1;
4569
4570        return 0;
4571}
4572
4573static int mem_cgroup_oom_notify_cb(struct mem_cgroup *memcg)
4574{
4575        struct mem_cgroup_eventfd_list *ev;
4576
4577        spin_lock(&memcg_oom_lock);
4578
4579        list_for_each_entry(ev, &memcg->oom_notify, list)
4580                eventfd_signal(ev->eventfd, 1);
4581
4582        spin_unlock(&memcg_oom_lock);
4583        return 0;
4584}
4585
4586static void mem_cgroup_oom_notify(struct mem_cgroup *memcg)
4587{
4588        struct mem_cgroup *iter;
4589
4590        for_each_mem_cgroup_tree(iter, memcg)
4591                mem_cgroup_oom_notify_cb(iter);
4592}
4593
4594static int __mem_cgroup_usage_register_event(struct mem_cgroup *memcg,
4595        struct eventfd_ctx *eventfd, const char *args, enum res_type type)
4596{
4597        struct mem_cgroup_thresholds *thresholds;
4598        struct mem_cgroup_threshold_ary *new;
4599        u64 threshold, usage;
4600        int i, size, ret;
4601
4602        ret = res_counter_memparse_write_strategy(args, &threshold);
4603        if (ret)
4604                return ret;
4605
4606        mutex_lock(&memcg->thresholds_lock);
4607
4608        if (type == _MEM) {
4609                thresholds = &memcg->thresholds;
4610                usage = mem_cgroup_usage(memcg, false);
4611        } else if (type == _MEMSWAP) {
4612                thresholds = &memcg->memsw_thresholds;
4613                usage = mem_cgroup_usage(memcg, true);
4614        } else
4615                BUG();
4616
4617        /* Check if a threshold was crossed before adding a new one */
4618        if (thresholds->primary)
4619                __mem_cgroup_threshold(memcg, type == _MEMSWAP);
4620
4621        size = thresholds->primary ? thresholds->primary->size + 1 : 1;
4622
4623        /* Allocate memory for new array of thresholds */
4624        new = kmalloc(sizeof(*new) + size * sizeof(struct mem_cgroup_threshold),
4625                        GFP_KERNEL);
4626        if (!new) {
4627                ret = -ENOMEM;
4628                goto unlock;
4629        }
4630        new->size = size;
4631
4632        /* Copy thresholds (if any) to new array */
4633        if (thresholds->primary) {
4634                memcpy(new->entries, thresholds->primary->entries, (size - 1) *
4635                                sizeof(struct mem_cgroup_threshold));
4636        }
4637
4638        /* Add new threshold */
4639        new->entries[size - 1].eventfd = eventfd;
4640        new->entries[size - 1].threshold = threshold;
4641
4642        /* Sort thresholds. Registering a new threshold isn't time-critical */
4643        sort(new->entries, size, sizeof(struct mem_cgroup_threshold),
4644                        compare_thresholds, NULL);
4645
4646        /* Find current threshold */
4647        new->current_threshold = -1;
4648        for (i = 0; i < size; i++) {
4649                if (new->entries[i].threshold <= usage) {
4650                        /*
4651                         * new->current_threshold will not be used until
4652                         * rcu_assign_pointer(), so it's safe to increment
4653                         * it here.
4654                         */
4655                        ++new->current_threshold;
4656                } else
4657                        break;
4658        }
4659
4660        /* Free old spare buffer and save old primary buffer as spare */
4661        kfree(thresholds->spare);
4662        thresholds->spare = thresholds->primary;
4663
4664        rcu_assign_pointer(thresholds->primary, new);
4665
4666        /* To be sure that nobody uses thresholds */
4667        synchronize_rcu();
4668
4669unlock:
4670        mutex_unlock(&memcg->thresholds_lock);
4671
4672        return ret;
4673}
4674
4675static int mem_cgroup_usage_register_event(struct mem_cgroup *memcg,
4676        struct eventfd_ctx *eventfd, const char *args)
4677{
4678        return __mem_cgroup_usage_register_event(memcg, eventfd, args, _MEM);
4679}
4680
4681static int memsw_cgroup_usage_register_event(struct mem_cgroup *memcg,
4682        struct eventfd_ctx *eventfd, const char *args)
4683{
4684        return __mem_cgroup_usage_register_event(memcg, eventfd, args, _MEMSWAP);
4685}
4686
4687static void __mem_cgroup_usage_unregister_event(struct mem_cgroup *memcg,
4688        struct eventfd_ctx *eventfd, enum res_type type)
4689{
4690        struct mem_cgroup_thresholds *thresholds;
4691        struct mem_cgroup_threshold_ary *new;
4692        u64 usage;
4693        int i, j, size;
4694
4695        mutex_lock(&memcg->thresholds_lock);
4696
4697        if (type == _MEM) {
4698                thresholds = &memcg->thresholds;
4699                usage = mem_cgroup_usage(memcg, false);
4700        } else if (type == _MEMSWAP) {
4701                thresholds = &memcg->memsw_thresholds;
4702                usage = mem_cgroup_usage(memcg, true);
4703        } else
4704                BUG();
4705
4706        if (!thresholds->primary)
4707                goto unlock;
4708
4709        /* Check if a threshold was crossed before removing */
4710        __mem_cgroup_threshold(memcg, type == _MEMSWAP);
4711
4712        /* Calculate the new number of thresholds */
4713        size = 0;
4714        for (i = 0; i < thresholds->primary->size; i++) {
4715                if (thresholds->primary->entries[i].eventfd != eventfd)
4716                        size++;
4717        }
4718
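        /*
         * Reuse the array saved as ->spare by the previous update rather
         * than allocating: unregistration has no way to report failure,
         * so it should not depend on kmalloc() succeeding here (assumed
         * rationale).
         */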
4719        new = thresholds->spare;
4720
4721        /* Set thresholds array to NULL if we don't have thresholds */
4722        if (!size) {
4723                kfree(new);
4724                new = NULL;
4725                goto swap_buffers;
4726        }
4727
4728        new->size = size;
4729
4730        /* Copy thresholds and find current threshold */
4731        new->current_threshold = -1;
4732        for (i = 0, j = 0; i < thresholds->primary->size; i++) {
4733                if (thresholds->primary->entries[i].eventfd == eventfd)
4734                        continue;
4735
4736                new->entries[j] = thresholds->primary->entries[i];
4737                if (new->entries[j].threshold <= usage) {
4738                        /*
4739                         * new->current_threshold will not be used
4740                         * until rcu_assign_pointer(), so it's safe to increment
4741                         * it here.
4742                         */
4743                        ++new->current_threshold;
4744                }
4745                j++;
4746        }
4747
4748swap_buffers:
4749        /* Swap primary and spare array */
4750        thresholds->spare = thresholds->primary;
4751        /* If all events are unregistered, free the spare array */
4752        if (!new) {
4753                kfree(thresholds->spare);
4754                thresholds->spare = NULL;
4755        }
4756
4757        rcu_assign_pointer(thresholds->primary, new);
4758
4759        /* To be sure that nobody uses thresholds */
4760        synchronize_rcu();
4761unlock:
4762        mutex_unlock(&memcg->thresholds_lock);
4763}
4764
4765static void mem_cgroup_usage_unregister_event(struct mem_cgroup *memcg,
4766        struct eventfd_ctx *eventfd)
4767{
4768        return __mem_cgroup_usage_unregister_event(memcg, eventfd, _MEM);
4769}
4770
4771static void memsw_cgroup_usage_unregister_event(struct mem_cgroup *memcg,
4772        struct eventfd_ctx *eventfd)
4773{
4774        return __mem_cgroup_usage_unregister_event(memcg, eventfd, _MEMSWAP);
4775}
4776
4777static int mem_cgroup_oom_register_event(struct mem_cgroup *memcg,
4778        struct eventfd_ctx *eventfd, const char *args)
4779{
4780        struct mem_cgroup_eventfd_list *event;
4781
4782        event = kmalloc(sizeof(*event), GFP_KERNEL);
4783        if (!event)
4784                return -ENOMEM;
4785
4786        spin_lock(&memcg_oom_lock);
4787
4788        event->eventfd = eventfd;
4789        list_add(&event->list, &memcg->oom_notify);
4790
4791        /* already in OOM ? */
4792        if (atomic_read(&memcg->under_oom))
4793                eventfd_signal(eventfd, 1);
4794        spin_unlock(&memcg_oom_lock);
4795
4796        return 0;
4797}
4798
4799static void mem_cgroup_oom_unregister_event(struct mem_cgroup *memcg,
4800        struct eventfd_ctx *eventfd)
4801{
4802        struct mem_cgroup_eventfd_list *ev, *tmp;
4803
4804        spin_lock(&memcg_oom_lock);
4805
4806        list_for_each_entry_safe(ev, tmp, &memcg->oom_notify, list) {
4807                if (ev->eventfd == eventfd) {
4808                        list_del(&ev->list);
4809                        kfree(ev);
4810                }
4811        }
4812
4813        spin_unlock(&memcg_oom_lock);
4814}
4815
4816static int mem_cgroup_oom_control_read(struct seq_file *sf, void *v)
4817{
4818        struct mem_cgroup *memcg = mem_cgroup_from_css(seq_css(sf));
4819
4820        seq_printf(sf, "oom_kill_disable %d\n", memcg->oom_kill_disable);
4821        seq_printf(sf, "under_oom %d\n", (bool)atomic_read(&memcg->under_oom));
4822        return 0;
4823}
4824
4825static int mem_cgroup_oom_control_write(struct cgroup_subsys_state *css,
4826        struct cftype *cft, u64 val)
4827{
4828        struct mem_cgroup *memcg = mem_cgroup_from_css(css);
4829
4830        /* cannot be set on the root cgroup, and only 0 and 1 are allowed */
4831        if (!css->parent || !((val == 0) || (val == 1)))
4832                return -EINVAL;
4833
4834        memcg->oom_kill_disable = val;
4835        if (!val)
4836                memcg_oom_recover(memcg);
4837
4838        return 0;
4839}
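/*
 * A minimal userspace sketch (not kernel code; error handling omitted)
 * of the two handlers above: writing "1" to memory.oom_control disables
 * the OOM killer for a (non-root) group, and reading it back reports
 * oom_kill_disable and under_oom.  The cgroup path is an assumption for
 * illustration only.
 *
 *      #include <fcntl.h>
 *      #include <stdio.h>
 *      #include <unistd.h>
 *
 *      int main(void)
 *      {
 *              const char *f = "/sys/fs/cgroup/memory/demo/memory.oom_control";
 *              char buf[128];
 *              ssize_t n;
 *              int fd;
 *
 *              fd = open(f, O_WRONLY);
 *              write(fd, "1", 1);              // memcg->oom_kill_disable = 1
 *              close(fd);
 *
 *              fd = open(f, O_RDONLY);
 *              n = read(fd, buf, sizeof(buf) - 1);
 *              if (n > 0) {
 *                      buf[n] = '\0';
 *                      fputs(buf, stdout);     // "oom_kill_disable 1\nunder_oom 0\n"
 *              }
 *              close(fd);
 *              return 0;
 *      }
 */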
4840
4841#ifdef CONFIG_MEMCG_KMEM
4842static int memcg_init_kmem(struct mem_cgroup *memcg, struct cgroup_subsys *ss)
4843{
4844        int ret;
4845
4846        memcg->kmemcg_id = -1;
4847        ret = memcg_propagate_kmem(memcg);
4848        if (ret)
4849                return ret;
4850
4851        return mem_cgroup_sockets_init(memcg, ss);
4852}
4853
4854static void memcg_destroy_kmem(struct mem_cgroup *memcg)
4855{
4856        mem_cgroup_sockets_destroy(memcg);
4857}
4858
4859static void kmem_cgroup_css_offline(struct mem_cgroup *memcg)
4860{
4861        if (!memcg_kmem_is_active(memcg))
4862                return;
4863
4864        /*
4865         * kmem charges can outlive the cgroup. In the case of slab
4866         * pages, for instance, a page may contain objects from various
4867         * processes. As we do not take a reference for every such
4868         * allocation, we have to be careful when doing uncharge
4869         * (see memcg_uncharge_kmem) and here during offlining.
4870         *
4871         * The idea is that only the _last_ uncharge which sees
4872         * the dead memcg will drop the last reference. An additional
4873         * reference is taken here before the group is marked dead,
4874         * and it is then paired with the css_put in uncharge or here.
4875         *
4876         * Although this might sound strange, as this path is called from
4877         * css_offline() when the reference might have dropped down to 0 and
4878         * shouldn't be incremented anymore (css_tryget_online() would
4879         * fail), we have no other option because of the lifetime of the
4880         * kmem allocations.
4881         */
4882        css_get(&memcg->css);
4883
4884        memcg_kmem_mark_dead(memcg);
4885
4886        if (res_counter_read_u64(&memcg->kmem, RES_USAGE) != 0)
4887                return;
4888
4889        if (memcg_kmem_test_and_clear_dead(memcg))
4890                css_put(&memcg->css);
4891}
4892#else
4893static int memcg_init_kmem(struct mem_cgroup *memcg, struct cgroup_subsys *ss)
4894{
4895        return 0;
4896}
4897
4898static void memcg_destroy_kmem(struct mem_cgroup *memcg)
4899{
4900}
4901
4902static void kmem_cgroup_css_offline(struct mem_cgroup *memcg)
4903{
4904}
4905#endif
4906
4907/*
4908 * DO NOT USE IN NEW FILES.
4909 *
4910 * "cgroup.event_control" implementation.
4911 *
4912 * This is way over-engineered.  It tries to support fully configurable
4913 * events for each user.  Such a level of flexibility is completely
4914 * unnecessary, especially in light of the planned unified hierarchy.
4915 *
4916 * Please deprecate this and replace with something simpler if at all
4917 * possible.
4918 */
4919
4920/*
4921 * Unregister event and free resources.
4922 *
4923 * Gets called from workqueue.
4924 */
4925static void memcg_event_remove(struct work_struct *work)
4926{
4927        struct mem_cgroup_event *event =
4928                container_of(work, struct mem_cgroup_event, remove);
4929        struct mem_cgroup *memcg = event->memcg;
4930
4931        remove_wait_queue(event->wqh, &event->wait);
4932
4933        event->unregister_event(memcg, event->eventfd);
4934
4935        /* Notify userspace the event is going away. */
4936        eventfd_signal(event->eventfd, 1);
4937
4938        eventfd_ctx_put(event->eventfd);
4939        kfree(event);
4940        css_put(&memcg->css);
4941}
4942
4943/*
4944 * Gets called on POLLHUP on eventfd when user closes it.
4945 *
4946 * Called with wqh->lock held and interrupts disabled.
4947 */
4948static int memcg_event_wake(wait_queue_t *wait, unsigned mode,
4949                            int sync, void *key)
4950{
4951        struct mem_cgroup_event *event =
4952                container_of(wait, struct mem_cgroup_event, wait);
4953        struct mem_cgroup *memcg = event->memcg;
4954        unsigned long flags = (unsigned long)key;
4955
4956        if (flags & POLLHUP) {
4957                /*
4958                 * If the event has been detached at cgroup removal, we
4959                 * can simply return knowing the other side will clean up
4960                 * for us.
4961                 *
4962                 * We can't race against event freeing since the other
4963                 * side will require wqh->lock via remove_wait_queue(),
4964                 * which we hold.
4965                 */
4966                spin_lock(&memcg->event_list_lock);
4967                if (!list_empty(&event->list)) {
4968                        list_del_init(&event->list);
4969                        /*
4970                         * We are in atomic context, but memcg_event_remove()
4971                         * may sleep, so we have to call it in a workqueue.
4972                         */
4973                        schedule_work(&event->remove);
4974                }
4975                spin_unlock(&memcg->event_list_lock);
4976        }
4977
4978        return 0;
4979}
4980
4981static void memcg_event_ptable_queue_proc(struct file *file,
4982                wait_queue_head_t *wqh, poll_table *pt)
4983{
4984        struct mem_cgroup_event *event =
4985                container_of(pt, struct mem_cgroup_event, pt);
4986
4987        event->wqh = wqh;
4988        add_wait_queue(wqh, &event->wait);
4989}
4990
4991/*
4992 * DO NOT USE IN NEW FILES.
4993 *
4994 * Parse input and register new cgroup event handler.
4995 * Parse the input and register a new cgroup event handler.
4996 *
4997 * The input must be in the format '<event_fd> <control_fd> <args>'.
4998 * Interpretation of args is defined by the control file implementation.
4999static ssize_t memcg_write_event_control(struct kernfs_open_file *of,
5000                                         char *buf, size_t nbytes, loff_t off)
5001{
5002        struct cgroup_subsys_state *css = of_css(of);
5003        struct mem_cgroup *memcg = mem_cgroup_from_css(css);
5004        struct mem_cgroup_event *event;
5005        struct cgroup_subsys_state *cfile_css;
5006        unsigned int efd, cfd;
5007        struct fd efile;
5008        struct fd cfile;
5009        const char *name;
5010        char *endp;
5011        int ret;
5012
5013        buf = strstrip(buf);
5014
5015        efd = simple_strtoul(buf, &endp, 10);
5016        if (*endp != ' ')
5017                return -EINVAL;
5018        buf = endp + 1;
5019
5020        cfd = simple_strtoul(buf, &endp, 10);
5021        if ((*endp != ' ') && (*endp != '\0'))
5022                return -EINVAL;
5023        buf = endp + 1;
5024
5025        event = kzalloc(sizeof(*event), GFP_KERNEL);
5026        if (!event)
5027                return -ENOMEM;
5028
5029        event->memcg = memcg;
5030        INIT_LIST_HEAD(&event->list);
5031        init_poll_funcptr(&event->pt, memcg_event_ptable_queue_proc);
5032        init_waitqueue_func_entry(&event->wait, memcg_event_wake);
5033        INIT_WORK(&event->remove, memcg_event_remove);
5034
5035        efile = fdget(efd);
5036        if (!efile.file) {
5037                ret = -EBADF;
5038                goto out_kfree;
5039        }
5040
5041        event->eventfd = eventfd_ctx_fileget(efile.file);
5042        if (IS_ERR(event->eventfd)) {
5043                ret = PTR_ERR(event->eventfd);
5044                goto out_put_efile;
5045        }
5046
5047        cfile = fdget(cfd);
5048        if (!cfile.file) {
5049                ret = -EBADF;
5050                goto out_put_eventfd;
5051        }
5052
5053        /* the process needs read permission on the control file */
5054        /* AV: shouldn't we check that it's been opened for read instead? */
5055        ret = inode_permission(file_inode(cfile.file), MAY_READ);
5056        if (ret < 0)
5057                goto out_put_cfile;
5058
5059        /*
5060         * Determine the event callbacks and set them in @event.  This used
5061         * to be done via struct cftype but cgroup core no longer knows
5062         * about these events.  The following is crude but the whole thing
5063         * is for compatibility anyway.
5064         *
5065         * DO NOT ADD NEW FILES.
5066         */
5067        name = cfile.file->f_dentry->d_name.name;
5068
5069        if (!strcmp(name, "memory.usage_in_bytes")) {
5070                event->register_event = mem_cgroup_usage_register_event;
5071                event->unregister_event = mem_cgroup_usage_unregister_event;
5072        } else if (!strcmp(name, "memory.oom_control")) {
5073                event->register_event = mem_cgroup_oom_register_event;
5074                event->unregister_event = mem_cgroup_oom_unregister_event;
5075        } else if (!strcmp(name, "memory.pressure_level")) {
5076                event->register_event = vmpressure_register_event;
5077                event->unregister_event = vmpressure_unregister_event;
5078        } else if (!strcmp(name, "memory.memsw.usage_in_bytes")) {
5079                event->register_event = memsw_cgroup_usage_register_event;
5080                event->unregister_event = memsw_cgroup_usage_unregister_event;
5081        } else {
5082                ret = -EINVAL;
5083                goto out_put_cfile;
5084        }
5085
5086        /*
5087         * Verify that @cfile belongs to @css.  Also, remaining events are
5088         * automatically removed on cgroup destruction but the removal is
5089         * asynchronous, so take an extra ref on @css.
5090         */
5091        cfile_css = css_tryget_online_from_dir(cfile.file->f_dentry->d_parent,
5092                                               &memory_cgrp_subsys);
5093        ret = -EINVAL;
5094        if (IS_ERR(cfile_css))
5095                goto out_put_cfile;
5096        if (cfile_css != css) {
5097                css_put(cfile_css);
5098                goto out_put_cfile;
5099        }
5100
5101        ret = event->register_event(memcg, event->eventfd, buf);
5102        if (ret)
5103                goto out_put_css;
5104
5105        efile.file->f_op->poll(efile.file, &event->pt);
5106
5107        spin_lock(&memcg->event_list_lock);
5108        list_add(&event->list, &memcg->event_list);
5109        spin_unlock(&memcg->event_list_lock);
5110
5111        fdput(cfile);
5112        fdput(efile);
5113
5114        return nbytes;
5115
5116out_put_css:
5117        css_put(css);
5118out_put_cfile:
5119        fdput(cfile);
5120out_put_eventfd:
5121        eventfd_ctx_put(event->eventfd);
5122out_put_efile:
5123        fdput(efile);
5124out_kfree:
5125        kfree(event);
5126
5127        return ret;
5128}
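/*
 * A minimal userspace sketch (not kernel code; error handling omitted)
 * of how the '<event_fd> <control_fd> <args>' line parsed above is used
 * to arm a usage threshold.  The cgroup path and the 64M threshold are
 * assumptions for illustration only.
 *
 *      #include <fcntl.h>
 *      #include <stdint.h>
 *      #include <stdio.h>
 *      #include <string.h>
 *      #include <unistd.h>
 *      #include <sys/eventfd.h>
 *
 *      int main(void)
 *      {
 *              const char *grp = "/sys/fs/cgroup/memory/demo";
 *              char path[256], line[64];
 *              int efd, cfd, ecfd;
 *              uint64_t count;
 *
 *              efd = eventfd(0, 0);                    // <event_fd>
 *
 *              snprintf(path, sizeof(path), "%s/memory.usage_in_bytes", grp);
 *              cfd = open(path, O_RDONLY);             // <control_fd>
 *
 *              snprintf(path, sizeof(path), "%s/cgroup.event_control", grp);
 *              ecfd = open(path, O_WRONLY);
 *
 *              // <args> for usage_in_bytes is the threshold in bytes
 *              snprintf(line, sizeof(line), "%d %d %llu", efd, cfd, 64ULL << 20);
 *              write(ecfd, line, strlen(line));
 *
 *              read(efd, &count, sizeof(count));       // blocks until crossed
 *              printf("threshold crossed %llu time(s)\n",
 *                     (unsigned long long)count);
 *              return 0;
 *      }
 */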
5129
5130static struct cftype mem_cgroup_files[] = {
5131        {
5132                .name = "usage_in_bytes",
5133                .private = MEMFILE_PRIVATE(_MEM, RES_USAGE),
5134                .read_u64 = mem_cgroup_read_u64,
5135        },
5136        {
5137                .name = "max_usage_in_bytes",
5138                .private = MEMFILE_PRIVATE(_MEM, RES_MAX_USAGE),
5139                .write = mem_cgroup_reset,
5140                .read_u64 = mem_cgroup_read_u64,
5141        },
5142        {
5143                .name = "limit_in_bytes",
5144                .private = MEMFILE_PRIVATE(_MEM, RES_LIMIT),
5145                .write = mem_cgroup_write,
5146                .read_u64 = mem_cgroup_read_u64,
5147        },
5148        {
5149                .name = "soft_limit_in_bytes",
5150                .private = MEMFILE_PRIVATE(_MEM, RES_SOFT_LIMIT),
5151                .write = mem_cgroup_write,
5152                .read_u64 = mem_cgroup_read_u64,
5153        },
5154        {
5155                .name = "failcnt",
5156                .private = MEMFILE_PRIVATE(_MEM, RES_FAILCNT),
5157                .write = mem_cgroup_reset,
5158                .read_u64 = mem_cgroup_read_u64,
5159        },
5160        {
5161                .name = "stat",
5162                .seq_show = memcg_stat_show,
5163        },
5164        {
5165                .name = "force_empty",
5166                .write = mem_cgroup_force_empty_write,
5167        },
5168        {
5169                .name = "use_hierarchy",
5170                .write_u64 = mem_cgroup_hierarchy_write,
5171                .read_u64 = mem_cgroup_hierarchy_read,
5172        },
5173        {
5174                .name = "cgroup.event_control",         /* XXX: for compat */
5175                .write = memcg_write_event_control,
5176                .flags = CFTYPE_NO_PREFIX,
5177                .mode = S_IWUGO,
5178        },
5179        {
5180                .name = "swappiness",
5181                .read_u64 = mem_cgroup_swappiness_read,
5182                .write_u64 = mem_cgroup_swappiness_write,
5183        },
5184        {
5185                .name = "move_charge_at_immigrate",
5186                .read_u64 = mem_cgroup_move_charge_read,
5187                .write_u64 = mem_cgroup_move_charge_write,
5188        },
5189        {
5190                .name = "oom_control",
5191                .seq_show = mem_cgroup_oom_control_read,
5192                .write_u64 = mem_cgroup_oom_control_write,
5193                .private = MEMFILE_PRIVATE(_OOM_TYPE, OOM_CONTROL),
5194        },
5195        {
5196                .name = "pressure_level",
5197        },
5198#ifdef CONFIG_NUMA
5199        {
5200                .name = "numa_stat",
5201                .seq_show = memcg_numa_stat_show,
5202        },
5203#endif
5204#ifdef CONFIG_MEMCG_KMEM
5205        {
5206                .name = "kmem.limit_in_bytes",
5207                .private = MEMFILE_PRIVATE(_KMEM, RES_LIMIT),
5208                .write = mem_cgroup_write,
5209                .read_u64 = mem_cgroup_read_u64,
5210        },
5211        {
5212                .name = "kmem.usage_in_bytes",
5213                .private = MEMFILE_PRIVATE(_KMEM, RES_USAGE),
5214                .read_u64 = mem_cgroup_read_u64,
5215        },
5216        {
5217                .name = "kmem.failcnt",
5218                .private = MEMFILE_PRIVATE(_KMEM, RES_FAILCNT),
5219                .write = mem_cgroup_reset,
5220                .read_u64 = mem_cgroup_read_u64,
5221        },
5222        {
5223                .name = "kmem.max_usage_in_bytes",
5224                .private = MEMFILE_PRIVATE(_KMEM, RES_MAX_USAGE),
5225                .write = mem_cgroup_reset,
5226                .read_u64 = mem_cgroup_read_u64,
5227        },
5228#ifdef CONFIG_SLABINFO
5229        {
5230                .name = "kmem.slabinfo",
5231                .seq_show = mem_cgroup_slabinfo_read,
5232        },
5233#endif
5234#endif
5235        { },    /* terminate */
5236};
5237
5238#ifdef CONFIG_MEMCG_SWAP
5239static struct cftype memsw_cgroup_files[] = {
5240        {
5241                .name = "memsw.usage_in_bytes",
5242                .private = MEMFILE_PRIVATE(_MEMSWAP, RES_USAGE),
5243                .read_u64 = mem_cgroup_read_u64,
5244        },
5245        {
5246                .name = "memsw.max_usage_in_bytes",
5247                .private = MEMFILE_PRIVATE(_MEMSWAP, RES_MAX_USAGE),
5248                .write = mem_cgroup_reset,
5249                .read_u64 = mem_cgroup_read_u64,
5250        },
5251        {
5252                .name = "memsw.limit_in_bytes",
5253                .private = MEMFILE_PRIVATE(_MEMSWAP, RES_LIMIT),
5254                .write = mem_cgroup_write,
5255                .read_u64 = mem_cgroup_read_u64,
5256        },
5257        {
5258                .name = "memsw.failcnt",
5259                .private = MEMFILE_PRIVATE(_MEMSWAP, RES_FAILCNT),
5260                .write = mem_cgroup_reset,
5261                .read_u64 = mem_cgroup_read_u64,
5262        },
5263        { },    /* terminate */
5264};
5265#endif
5266static int alloc_mem_cgroup_per_zone_info(struct mem_cgroup *memcg, int node)
5267{
5268        struct mem_cgroup_per_node *pn;
5269        struct mem_cgroup_per_zone *mz;
5270        int zone, tmp = node;
5271        /*
5272         * This routine is called against possible nodes, but it is a BUG
5273         * to call kmalloc() against an offline node.
5274         *
5275         * TODO: this routine can waste a lot of memory for nodes which will
5276         *       never be onlined. It would be better to use a memory hotplug
5277         *       callback function.
5278         */
5279        if (!node_state(node, N_NORMAL_MEMORY))
5280                tmp = -1;
5281        pn = kzalloc_node(sizeof(*pn), GFP_KERNEL, tmp);
5282        if (!pn)
5283                return 1;
5284
5285        for (zone = 0; zone < MAX_NR_ZONES; zone++) {
5286                mz = &pn->zoneinfo[zone];
5287                lruvec_init(&mz->lruvec);
5288                mz->usage_in_excess = 0;
5289                mz->on_tree = false;
5290                mz->memcg = memcg;
5291        }
5292        memcg->nodeinfo[node] = pn;
5293        return 0;
5294}
5295
5296static void free_mem_cgroup_per_zone_info(struct mem_cgroup *memcg, int node)
5297{
5298        kfree(memcg->nodeinfo[node]);
5299}
5300
5301static struct mem_cgroup *mem_cgroup_alloc(void)
5302{
5303        struct mem_cgroup *memcg;
5304        size_t size;
5305
5306        size = sizeof(struct mem_cgroup);
5307        size += nr_node_ids * sizeof(struct mem_cgroup_per_node *);
5308
5309        memcg = kzalloc(size, GFP_KERNEL);
5310        if (!memcg)
5311                return NULL;
5312
5313        memcg->stat = alloc_percpu(struct mem_cgroup_stat_cpu);
5314        if (!memcg->stat)
5315                goto out_free;
5316        spin_lock_init(&memcg->pcp_counter_lock);
5317        return memcg;
5318
5319out_free:
5320        kfree(memcg);
5321        return NULL;
5322}
5323
5324/*
5325 * When a mem_cgroup is destroyed, references from swap_cgroup can remain
5326 * (scanning them all at force_empty is too costly...).
5327 *
5328 * Instead of clearing all references at force_empty, we remember the
5329 * number of references from swap_cgroup and free the mem_cgroup when
5330 * it drops to 0.
5331 *
5332 * Removal of the cgroup itself succeeds regardless of refs from swap.
5333 */
5334
5335static void __mem_cgroup_free(struct mem_cgroup *memcg)
5336{
5337        int node;
5338
5339        mem_cgroup_remove_from_trees(memcg);
5340
5341        for_each_node(node)
5342                free_mem_cgroup_per_zone_info(memcg, node);
5343
5344        free_percpu(memcg->stat);
5345
5346        /*
5347         * We need to make sure that (at least for now), the jump label
5348         * destruction code runs outside of the cgroup lock. This is because
5349         * get_online_cpus(), which is called from the static_branch update,
5350         * can't be called inside the cgroup_lock. cpusets are the ones
5351         * enforcing this dependency, so if they ever change, this might too.
5352         *
5353         * schedule_work() will guarantee this happens. Be careful if you need
5354         * to move this code around, and make sure it is outside
5355         * the cgroup_lock.
5356         */
5357        disarm_static_keys(memcg);
5358        kfree(memcg);
5359}
5360
5361/*
5362 * Returns the parent mem_cgroup in the memcg hierarchy, with use_hierarchy enabled.
5363 */
5364struct mem_cgroup *parent_mem_cgroup(struct mem_cgroup *memcg)
5365{
5366        if (!memcg->res.parent)
5367                return NULL;
5368        return mem_cgroup_from_res_counter(memcg->res.parent, res);
5369}
5370EXPORT_SYMBOL(parent_mem_cgroup);
5371
5372static void __init mem_cgroup_soft_limit_tree_init(void)
5373{
5374        struct mem_cgroup_tree_per_node *rtpn;
5375        struct mem_cgroup_tree_per_zone *rtpz;
5376        int tmp, node, zone;
5377
5378        for_each_node(node) {
5379                tmp = node;
5380                if (!node_state(node, N_NORMAL_MEMORY))
5381                        tmp = -1;
5382                rtpn = kzalloc_node(sizeof(*rtpn), GFP_KERNEL, tmp);
5383                BUG_ON(!rtpn);
5384
5385                soft_limit_tree.rb_tree_per_node[node] = rtpn;
5386
5387                for (zone = 0; zone < MAX_NR_ZONES; zone++) {
5388                        rtpz = &rtpn->rb_tree_per_zone[zone];
5389                        rtpz->rb_root = RB_ROOT;
5390                        spin_lock_init(&rtpz->lock);
5391                }
5392        }
5393}
5394
5395static struct cgroup_subsys_state * __ref
5396mem_cgroup_css_alloc(struct cgroup_subsys_state *parent_css)
5397{
5398        struct mem_cgroup *memcg;
5399        long error = -ENOMEM;
5400        int node;
5401
5402        memcg = mem_cgroup_alloc();
5403        if (!memcg)
5404                return ERR_PTR(error);
5405
5406        for_each_node(node)
5407                if (alloc_mem_cgroup_per_zone_info(memcg, node))
5408                        goto free_out;
5409
5410        /* root ? */
5411        if (parent_css == NULL) {
5412                root_mem_cgroup = memcg;
5413                res_counter_init(&memcg->res, NULL);
5414                res_counter_init(&memcg->memsw, NULL);
5415                res_counter_init(&memcg->kmem, NULL);
5416        }
5417
5418        memcg->last_scanned_node = MAX_NUMNODES;
5419        INIT_LIST_HEAD(&memcg->oom_notify);
5420        memcg->move_charge_at_immigrate = 0;
5421        mutex_init(&memcg->thresholds_lock);
5422        spin_lock_init(&memcg->move_lock);
5423        vmpressure_init(&memcg->vmpressure);
5424        INIT_LIST_HEAD(&memcg->event_list);
5425        spin_lock_init(&memcg->event_list_lock);
5426
5427        return &memcg->css;
5428
5429free_out:
5430        __mem_cgroup_free(memcg);
5431        return ERR_PTR(error);
5432}
5433
5434static int
5435mem_cgroup_css_online(struct cgroup_subsys_state *css)
5436{
5437        struct mem_cgroup *memcg = mem_cgroup_from_css(css);
5438        struct mem_cgroup *parent = mem_cgroup_from_css(css->parent);
5439        int ret;
5440
5441        if (css->id > MEM_CGROUP_ID_MAX)
5442                return -ENOSPC;
5443
5444        if (!parent)
5445                return 0;
5446
5447        mutex_lock(&memcg_create_mutex);
5448
5449        memcg->use_hierarchy = parent->use_hierarchy;
5450        memcg->oom_kill_disable = parent->oom_kill_disable;
5451        memcg->swappiness = mem_cgroup_swappiness(parent);
5452
5453        if (parent->use_hierarchy) {
5454                res_counter_init(&memcg->res, &parent->res);
5455                res_counter_init(&memcg->memsw, &parent->memsw);
5456                res_counter_init(&memcg->kmem, &parent->kmem);
5457
5458                /*
5459                 * No need to take a reference to the parent because cgroup
5460                 * core guarantees its existence.
5461                 */
5462        } else {
5463                res_counter_init(&memcg->res, NULL);
5464                res_counter_init(&memcg->memsw, NULL);
5465                res_counter_init(&memcg->kmem, NULL);
5466                /*
5467                 * A deeper hierarchy with use_hierarchy == false doesn't make
5468                 * much sense, so let the cgroup subsystem know about this
5469                 * unfortunate state in our controller.
5470                 */
5471                if (parent != root_mem_cgroup)
5472                        memory_cgrp_subsys.broken_hierarchy = true;
5473        }
5474        mutex_unlock(&memcg_create_mutex);
5475
5476        ret = memcg_init_kmem(memcg, &memory_cgrp_subsys);
5477        if (ret)
5478                return ret;
5479
5480        /*
5481         * Make sure the memcg is initialized: mem_cgroup_iter()
5482         * orders reading memcg->initialized against its callers
5483         * reading the memcg members.
5484         */
5485        smp_store_release(&memcg->initialized, 1);
5486
5487        return 0;
5488}
5489
5490/*
5491 * Notify all parents that a group from their hierarchy is gone.
5492 */
5493static void mem_cgroup_invalidate_reclaim_iterators(struct mem_cgroup *memcg)
5494{
5495        struct mem_cgroup *parent = memcg;
5496
5497        while ((parent = parent_mem_cgroup(parent)))
5498                mem_cgroup_iter_invalidate(parent);
5499
5500        /*
5501         * If the root memcg is not hierarchical we have to check it
5502         * explicitly.
5503         */
5504        if (!root_mem_cgroup->use_hierarchy)
5505                mem_cgroup_iter_invalidate(root_mem_cgroup);
5506}
5507
5508static void mem_cgroup_css_offline(struct cgroup_subsys_state *css)
5509{
5510        struct mem_cgroup *memcg = mem_cgroup_from_css(css);
5511        struct mem_cgroup_event *event, *tmp;
5512        struct cgroup_subsys_state *iter;
5513
5514        /*
5515         * Unregister events and notify userspace.
5516         * Notify userspace about cgroup removal only after rmdir of the cgroup
5517         * directory, to avoid a race between userspace and kernelspace.
5518         */
5519        spin_lock(&memcg->event_list_lock);
5520        list_for_each_entry_safe(event, tmp, &memcg->event_list, list) {
5521                list_del_init(&event->list);
5522                schedule_work(&event->remove);
5523        }
5524        spin_unlock(&memcg->event_list_lock);
5525
5526        kmem_cgroup_css_offline(memcg);
5527
5528        mem_cgroup_invalidate_reclaim_iterators(memcg);
5529
5530        /*
5531         * This requires that offlining is serialized.  Right now that is
5532         * guaranteed because css_killed_work_fn() holds the cgroup_mutex.
5533         */
5534        css_for_each_descendant_post(iter, css)
5535                mem_cgroup_reparent_charges(mem_cgroup_from_css(iter));
5536
5537        memcg_unregister_all_caches(memcg);
5538        vmpressure_cleanup(&memcg->vmpressure);
5539}
5540
5541static void mem_cgroup_css_free(struct cgroup_subsys_state *css)
5542{
5543        struct mem_cgroup *memcg = mem_cgroup_from_css(css);
5544        /*
5545         * XXX: css_offline() would be where we should reparent all
5546         * memory to prepare the cgroup for destruction.  However,
5547         * memcg does not do css_tryget_online() and res_counter charging
5548         * under the same RCU lock region, which means that charging
5549         * could race with offlining.  Offlining only happens to
5550         * cgroups with no tasks in them but charges can show up
5551         * without any tasks from the swapin path when the target
5552         * memcg is looked up from the swapout record and not from the
5553         * current task as it usually is.  A race like this can leak
5554         * charges and put pages with stale cgroup pointers into
5555         * circulation:
5556         *
5557         * #0                        #1
5558         *                           lookup_swap_cgroup_id()
5559         *                           rcu_read_lock()
5560         *                           mem_cgroup_lookup()
5561         *                           css_tryget_online()
5562         *                           rcu_read_unlock()
5563         * disable css_tryget_online()
5564         * call_rcu()
5565         *   offline_css()
5566         *     reparent_charges()
5567         *                           res_counter_charge()
5568         *                           css_put()
5569         *                             css_free()
5570         *                           pc->mem_cgroup = dead memcg
5571         *                           add page to lru
5572         *
5573         * The bulk of the charges are still moved in offline_css() to
5574         * avoid pinning a lot of pages in case a long-term reference
5575         * like a swapout record is deferring the css_free() to long
5576         * after offlining.  But this makes sure we catch any charges
5577         * made after offlining:
5578         */
5579        mem_cgroup_reparent_charges(memcg);
5580
5581        memcg_destroy_kmem(memcg);
5582        __mem_cgroup_free(memcg);
5583}
5584
5585/**
5586 * mem_cgroup_css_reset - reset the states of a mem_cgroup
5587 * @css: the target css
5588 *
5589 * Reset the states of the mem_cgroup associated with @css.  This is
5590 * invoked when the userland requests disabling on the default hierarchy
5591 * but the memcg is pinned through dependency.  The memcg should stop
5592 * applying policies and should revert to the vanilla state as it may be
5593 * made visible again.
5594 *
5595 * The current implementation only resets the essential configurations.
5596 * This needs to be expanded to cover all the visible parts.
5597 */
5598static void mem_cgroup_css_reset(struct cgroup_subsys_state *css)
5599{
5600        struct mem_cgroup *memcg = mem_cgroup_from_css(css);
5601
5602        mem_cgroup_resize_limit(memcg, ULLONG_MAX);
5603        mem_cgroup_resize_memsw_limit(memcg, ULLONG_MAX);
5604        memcg_update_kmem_limit(memcg, ULLONG_MAX);
5605        res_counter_set_soft_limit(&memcg->res, ULLONG_MAX);
5606}
5607
5608#ifdef CONFIG_MMU
5609/* Handlers for move charge at task migration. */
5610static int mem_cgroup_do_precharge(unsigned long count)
5611{
5612        int ret;
5613
5614        /* Try a single bulk charge without reclaim first */
5615        ret = try_charge(mc.to, GFP_KERNEL & ~__GFP_WAIT, count);
5616        if (!ret) {
5617                mc.precharge += count;
5618                return ret;
5619        }
5620        if (ret == -EINTR) {
5621                cancel_charge(root_mem_cgroup, count);
5622                return ret;
5623        }
5624
5625        /* Try charges one by one with reclaim */
5626        while (count--) {
5627                ret = try_charge(mc.to, GFP_KERNEL & ~__GFP_NORETRY, 1);
5628                /*
5629                 * In case of failure, any residual charges against
5630                 * mc.to will be dropped by mem_cgroup_clear_mc()
5631                 * later on.  However, cancel any charges that are
5632                 * bypassed to root right away or they'll be lost.
5633                 */
5634                if (ret == -EINTR)
5635                        cancel_charge(root_mem_cgroup, 1);
5636                if (ret)
5637                        return ret;
5638                mc.precharge++;
5639                cond_resched();
5640        }
5641        return 0;
5642}
5643
5644/**
5645 * get_mctgt_type - get target type of moving charge
5646 * @vma: the vma to which the pte to be checked belongs
5647 * @addr: the address corresponding to the pte to be checked
5648 * @ptent: the pte to be checked
5649 * @target: where the target page or swap entry will be stored (can be NULL)
5650 *
5651 * Returns
5652 *   0 (MC_TARGET_NONE): if the pte is not a target for move charge.
5653 *   1 (MC_TARGET_PAGE): if the page corresponding to this pte is a target for
5654 *     move charge. If @target is not NULL, the page is stored in target->page
5655 *     with an extra refcount taken (callers should handle it).
5656 *   2 (MC_TARGET_SWAP): if the swap entry corresponding to this pte is a
5657 *     target for charge migration. If @target is not NULL, the entry is
5658 *     stored in target->ent.
5659 *
5660 * Called with pte lock held.
5661 */
5662union mc_target {
5663        struct page     *page;
5664        swp_entry_t     ent;
5665};
5666
5667enum mc_target_type {
5668        MC_TARGET_NONE = 0,
5669        MC_TARGET_PAGE,
5670        MC_TARGET_SWAP,
5671};
5672
5673static struct page *mc_handle_present_pte(struct vm_area_struct *vma,
5674                                                unsigned long addr, pte_t ptent)
5675{
5676        struct page *page = vm_normal_page(vma, addr, ptent);
5677
5678        if (!page || !page_mapped(page))
5679                return NULL;
5680        if (PageAnon(page)) {
5681                /* we don't move shared anon */
5682                if (!move_anon())
5683                        return NULL;
5684        } else if (!move_file())
5685                /* we ignore mapcount for file pages */
5686                return NULL;
5687        if (!get_page_unless_zero(page))
5688                return NULL;
5689
5690        return page;
5691}
5692
5693#ifdef CONFIG_SWAP
5694static struct page *mc_handle_swap_pte(struct vm_area_struct *vma,
5695                        unsigned long addr, pte_t ptent, swp_entry_t *entry)
5696{
5697        struct page *page = NULL;
5698        swp_entry_t ent = pte_to_swp_entry(ptent);
5699
5700        if (!move_anon() || non_swap_entry(ent))
5701                return NULL;
5702        /*
5703         * Because lookup_swap_cache() updates some statistics counter,
5704         * we call find_get_page() with swapper_space directly.
5705         */
5706        page = find_get_page(swap_address_space(ent), ent.val);
5707        if (do_swap_account)
5708                entry->val = ent.val;
5709
5710        return page;
5711}
5712#else
5713static struct page *mc_handle_swap_pte(struct vm_area_struct *vma,
5714                        unsigned long addr, pte_t ptent, swp_entry_t *entry)
5715{
5716        return NULL;
5717}
5718#endif
5719
5720static struct page *mc_handle_file_pte(struct vm_area_struct *vma,
5721                        unsigned long addr, pte_t ptent, swp_entry_t *entry)
5722{
5723        struct page *page = NULL;
5724        struct address_space *mapping;
5725        pgoff_t pgoff;
5726
5727        if (!vma->vm_file) /* anonymous vma */
5728                return NULL;
5729        if (!move_file())
5730                return NULL;
5731
5732        mapping = vma->vm_file->f_mapping;
5733        if (pte_none(ptent))
5734                pgoff = linear_page_index(vma, addr);
5735        else /* pte_file(ptent) is true */
5736                pgoff = pte_to_pgoff(ptent);
5737
5738        /* page is moved even if it's not RSS of this task(page-faulted). */
5739#ifdef CONFIG_SWAP
5740        /* shmem/tmpfs may report page out on swap: account for that too. */
5741        if (shmem_mapping(mapping)) {
5742                page = find_get_entry(mapping, pgoff);
5743                if (radix_tree_exceptional_entry(page)) {
5744                        swp_entry_t swp = radix_to_swp_entry(page);
5745                        if (do_swap_account)
5746                                *entry = swp;
5747                        page = find_get_page(swap_address_space(swp), swp.val);
5748                }
5749        } else
5750                page = find_get_page(mapping, pgoff);
5751#else
5752        page = find_get_page(mapping, pgoff);
5753#endif
5754        return page;
5755}
5756
5757static enum mc_target_type get_mctgt_type(struct vm_area_struct *vma,
5758                unsigned long addr, pte_t ptent, union mc_target *target)
5759{
5760        struct page *page = NULL;
5761        struct page_cgroup *pc;
5762        enum mc_target_type ret = MC_TARGET_NONE;
5763        swp_entry_t ent = { .val = 0 };
5764
5765        if (pte_present(ptent))
5766                page = mc_handle_present_pte(vma, addr, ptent);
5767        else if (is_swap_pte(ptent))
5768                page = mc_handle_swap_pte(vma, addr, ptent, &ent);
5769        else if (pte_none(ptent) || pte_file(ptent))
5770                page = mc_handle_file_pte(vma, addr, ptent, &ent);
5771
5772        if (!page && !ent.val)
5773                return ret;
5774        if (page) {
5775                pc = lookup_page_cgroup(page);
5776                /*
5777                 * Do only a loose check, without serialization.
5778                 * mem_cgroup_move_account() checks whether the pc is valid
5779                 * under LRU exclusion.
5780                 */
5781                if (PageCgroupUsed(pc) && pc->mem_cgroup == mc.from) {
5782                        ret = MC_TARGET_PAGE;
5783                        if (target)
5784                                target->page = page;
5785                }
5786                if (!ret || !target)
5787                        put_page(page);
5788        }
5789        /* There is a swap entry and a page doesn't exist or isn't charged */
5790        if (ent.val && !ret &&
5791            mem_cgroup_id(mc.from) == lookup_swap_cgroup_id(ent)) {
5792                ret = MC_TARGET_SWAP;
5793                if (target)
5794                        target->ent = ent;
5795        }
5796        return ret;
5797}
5798
5799#ifdef CONFIG_TRANSPARENT_HUGEPAGE
5800/*
5801 * We don't consider swapping or file-mapped pages because THP does not
5802 * support them for now.
5803 * The caller should make sure that pmd_trans_huge(pmd) is true.
5804 */
5805static enum mc_target_type get_mctgt_type_thp(struct vm_area_struct *vma,
5806                unsigned long addr, pmd_t pmd, union mc_target *target)
5807{
5808        struct page *page = NULL;
5809        struct page_cgroup *pc;
5810        enum mc_target_type ret = MC_TARGET_NONE;
5811
5812        page = pmd_page(pmd);
5813        VM_BUG_ON_PAGE(!page || !PageHead(page), page);
5814        if (!move_anon())
5815                return ret;
5816        pc = lookup_page_cgroup(page);
5817        if (PageCgroupUsed(pc) && pc->mem_cgroup == mc.from) {
5818                ret = MC_TARGET_PAGE;
5819                if (target) {
5820                        get_page(page);
5821                        target->page = page;
5822                }
5823        }
5824        return ret;
5825}
5826#else
5827static inline enum mc_target_type get_mctgt_type_thp(struct vm_area_struct *vma,
5828                unsigned long addr, pmd_t pmd, union mc_target *target)
5829{
5830        return MC_TARGET_NONE;
5831}
5832#endif
5833
5834static int mem_cgroup_count_precharge_pte_range(pmd_t *pmd,
5835                                        unsigned long addr, unsigned long end,
5836                                        struct mm_walk *walk)
5837{
5838        struct vm_area_struct *vma = walk->private;
5839        pte_t *pte;
5840        spinlock_t *ptl;
5841
5842        if (pmd_trans_huge_lock(pmd, vma, &ptl) == 1) {
5843                if (get_mctgt_type_thp(vma, addr, *pmd, NULL) == MC_TARGET_PAGE)
5844                        mc.precharge += HPAGE_PMD_NR;
5845                spin_unlock(ptl);
5846                return 0;
5847        }
5848
5849        if (pmd_trans_unstable(pmd))
5850                return 0;
5851        pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl);
5852        for (; addr != end; pte++, addr += PAGE_SIZE)
5853                if (get_mctgt_type(vma, addr, *pte, NULL))
5854                        mc.precharge++; /* increment precharge temporarily */
5855        pte_unmap_unlock(pte - 1, ptl);
5856        cond_resched();
5857
5858        return 0;
5859}
5860
5861static unsigned long mem_cgroup_count_precharge(struct mm_struct *mm)
5862{
5863        unsigned long precharge;
5864        struct vm_area_struct *vma;
5865
5866        down_read(&mm->mmap_sem);
5867        for (vma = mm->mmap; vma; vma = vma->vm_next) {
5868                struct mm_walk mem_cgroup_count_precharge_walk = {
5869                        .pmd_entry = mem_cgroup_count_precharge_pte_range,
5870                        .mm = mm,
5871                        .private = vma,
5872                };
5873                if (is_vm_hugetlb_page(vma))
5874                        continue;
5875                walk_page_range(vma->vm_start, vma->vm_end,
5876                                        &mem_cgroup_count_precharge_walk);
5877        }
5878        up_read(&mm->mmap_sem);
5879
5880        precharge = mc.precharge;
5881        mc.precharge = 0;
5882
5883        return precharge;
5884}
5885
5886static int mem_cgroup_precharge_mc(struct mm_struct *mm)
5887{
5888        unsigned long precharge = mem_cgroup_count_precharge(mm);
5889
5890        VM_BUG_ON(mc.moving_task);
5891        mc.moving_task = current;
5892        return mem_cgroup_do_precharge(precharge);
5893}
5894
5895/* cancels all extra charges on mc.from and mc.to, and wakes up all waiters. */
5896static void __mem_cgroup_clear_mc(void)
5897{
5898        struct mem_cgroup *from = mc.from;
5899        struct mem_cgroup *to = mc.to;
5900        int i;
5901
5902        /* we must uncharge all the leftover precharges from mc.to */
5903        if (mc.precharge) {
5904                cancel_charge(mc.to, mc.precharge);
5905                mc.precharge = 0;
5906        }
5907        /*
5908         * we didn't uncharge from mc.from at mem_cgroup_move_account(), so
5909         * we must uncharge here.
5910         */
5911        if (mc.moved_charge) {
5912                cancel_charge(mc.from, mc.moved_charge);
5913                mc.moved_charge = 0;
5914        }
5915        /* we must fixup refcnts and charges */
5916        if (mc.moved_swap) {
5917                /* uncharge swap account from the old cgroup */
5918                if (!mem_cgroup_is_root(mc.from))
5919                        res_counter_uncharge(&mc.from->memsw,
5920                                             PAGE_SIZE * mc.moved_swap);
5921
5922                for (i = 0; i < mc.moved_swap; i++)
5923                        css_put(&mc.from->css);
5924
5925                /*
5926                 * we charged both to->res and to->memsw, so we should
5927                 * uncharge to->res.
5928                 */
5929                if (!mem_cgroup_is_root(mc.to))
5930                        res_counter_uncharge(&mc.to->res,
5931                                             PAGE_SIZE * mc.moved_swap);
5932                /* we've already done css_get(mc.to) */
5933                mc.moved_swap = 0;
5934        }
5935        memcg_oom_recover(from);
5936        memcg_oom_recover(to);
5937        wake_up_all(&mc.waitq);
5938}
5939
5940static void mem_cgroup_clear_mc(void)
5941{
5942        struct mem_cgroup *from = mc.from;
5943
5944        /*
5945         * we must clear moving_task before waking up waiters at the end of
5946         * task migration.
5947         */
5948        mc.moving_task = NULL;
5949        __mem_cgroup_clear_mc();
5950        spin_lock(&mc.lock);
5951        mc.from = NULL;
5952        mc.to = NULL;
5953        spin_unlock(&mc.lock);
5954        mem_cgroup_end_move(from);
5955}
5956
5957static int mem_cgroup_can_attach(struct cgroup_subsys_state *css,
5958                                 struct cgroup_taskset *tset)
5959{
5960        struct task_struct *p = cgroup_taskset_first(tset);
5961        int ret = 0;
5962        struct mem_cgroup *memcg = mem_cgroup_from_css(css);
5963        unsigned long move_charge_at_immigrate;
5964
5965        /*
5966         * We are now committed to this value, whatever it is. Changes in this
5967         * tunable will only affect upcoming migrations, not the current one.
5968         * So we need to save it and stick with it for the whole migration.
5969         */
5970        move_charge_at_immigrate  = memcg->move_charge_at_immigrate;
5971        if (move_charge_at_immigrate) {
5972                struct mm_struct *mm;
5973                struct mem_cgroup *from = mem_cgroup_from_task(p);
5974
5975                VM_BUG_ON(from == memcg);
5976
5977                mm = get_task_mm(p);
5978                if (!mm)
5979                        return 0;
5980                /* We move charges only when we move the owner of the mm */
5981                if (mm->owner == p) {
5982                        VM_BUG_ON(mc.from);
5983                        VM_BUG_ON(mc.to);
5984                        VM_BUG_ON(mc.precharge);
5985                        VM_BUG_ON(mc.moved_charge);
5986                        VM_BUG_ON(mc.moved_swap);
5987                        mem_cgroup_start_move(from);
5988                        spin_lock(&mc.lock);
5989                        mc.from = from;
5990                        mc.to = memcg;
5991                        mc.immigrate_flags = move_charge_at_immigrate;
5992                        spin_unlock(&mc.lock);
5993                        /* We set mc.moving_task later */
5994
5995                        ret = mem_cgroup_precharge_mc(mm);
5996                        if (ret)
5997                                mem_cgroup_clear_mc();
5998                }
5999                mmput(mm);
6000        }
6001        return ret;
6002}
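/*
 * Example (illustrative): writing 3 to memory.move_charge_at_immigrate
 * on the destination group sets bit 0 (anonymous pages and their swap)
 * and bit 1 (file pages), so the corresponding charges follow the mm
 * owner when it is moved into that group; the default of 0 disables
 * charge moving entirely.
 */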
6003
6004static void mem_cgroup_cancel_attach(struct cgroup_subsys_state *css,
6005                                     struct cgroup_taskset *tset)
6006{
6007        mem_cgroup_clear_mc();
6008}
6009
6010static int mem_cgroup_move_charge_pte_range(pmd_t *pmd,
6011                                unsigned long addr, unsigned long end,
6012                                struct mm_walk *walk)
6013{
6014        int ret = 0;
6015        struct vm_area_struct *vma = walk->private;
6016        pte_t *pte;
6017        spinlock_t *ptl;
6018        enum mc_target_type target_type;
6019        union mc_target target;
6020        struct page *page;
6021        struct page_cgroup *pc;
6022
6023        /*
6024         * We don't take compound_lock() here but no race with splitting thp
6025         * happens because:
6026         *  - if pmd_trans_huge_lock() returns 1, the relevant thp is not
6027         *    under splitting, which means there's no concurrent thp split,
6028         *  - if another thread runs into split_huge_page() just after we
6029         *    entered this if-block, that thread must wait for the page table
6030         *    lock to be released in __split_huge_page_splitting(), where the
6031         *    main part of the thp split has not been executed yet.
6032         */
6033        if (pmd_trans_huge_lock(pmd, vma, &ptl) == 1) {
6034                if (mc.precharge < HPAGE_PMD_NR) {
6035                        spin_unlock(ptl);
6036                        return 0;
6037                }
6038                target_type = get_mctgt_type_thp(vma, addr, *pmd, &target);
6039                if (target_type == MC_TARGET_PAGE) {
6040                        page = target.page;
6041                        if (!isolate_lru_page(page)) {
6042                                pc = lookup_page_cgroup(page);
6043                                if (!mem_cgroup_move_account(page, HPAGE_PMD_NR,
6044                                                        pc, mc.from, mc.to)) {
6045                                        mc.precharge -= HPAGE_PMD_NR;
6046                                        mc.moved_charge += HPAGE_PMD_NR;
6047                                }
6048                                putback_lru_page(page);
6049                        }
6050                        put_page(page);
6051                }
6052                spin_unlock(ptl);
6053                return 0;
6054        }
6055
6056        if (pmd_trans_unstable(pmd))
6057                return 0;
6058retry:
6059        pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl);
6060        for (; addr != end; addr += PAGE_SIZE) {
6061                pte_t ptent = *(pte++);
6062                swp_entry_t ent;
6063
6064                if (!mc.precharge)
6065                        break;
6066
6067                switch (get_mctgt_type(vma, addr, ptent, &target)) {
6068                case MC_TARGET_PAGE:
6069                        page = target.page;
6070                        if (isolate_lru_page(page))
6071                                goto put;
6072                        pc = lookup_page_cgroup(page);
6073                        if (!mem_cgroup_move_account(page, 1, pc,
6074                                                     mc.from, mc.to)) {
6075                                mc.precharge--;
6076                                /* we uncharge from mc.from later. */
6077                                mc.moved_charge++;
6078                        }
6079                        putback_lru_page(page);
6080put:                    /* get_mctgt_type() gets the page */
6081                        put_page(page);
6082                        break;
6083                case MC_TARGET_SWAP:
6084                        ent = target.ent;
6085                        if (!mem_cgroup_move_swap_account(ent, mc.from, mc.to)) {
6086                                mc.precharge--;
6087                                /* we fixup refcnts and charges later. */
6088                                mc.moved_swap++;
6089                        }
6090                        break;
6091                default:
6092                        break;
6093                }
6094        }
6095        pte_unmap_unlock(pte - 1, ptl);
6096        cond_resched();
6097
6098        if (addr != end) {
6099                /*
6100                 * We have consumed all the precharges we got in can_attach().
6101                 * Try to charge one more page at a time, but don't make any
6102                 * further charges to mc.to once a single charge has already
6103                 * failed during the attach() phase.
6104                 */
6105                ret = mem_cgroup_do_precharge(1);
6106                if (!ret)
6107                        goto retry;
6108        }
6109
6110        return ret;
6111}
6112
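    /*
     * Walk all non-hugetlb VMAs of the old owner's mm and move charges with
     * mem_cgroup_move_charge_pte_range().  If mmap_sem cannot be taken
     * immediately, pending precharges are dropped first so that anyone
     * blocked on the charge-move waitq can make progress before we retry.
     */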
6113static void mem_cgroup_move_charge(struct mm_struct *mm)
6114{
6115        struct vm_area_struct *vma;
6116
6117        lru_add_drain_all();
6118retry:
6119        if (unlikely(!down_read_trylock(&mm->mmap_sem))) {
6120                /*
6121                 * Someone holding the mmap_sem might be waiting on the waitq.
6122                 * Cancel all extra charges, wake up all waiters, and retry.
6123                 * Because we cancel the precharges, we might not be able to
6124                 * move all of the charges, but charge moving is a best-effort
6125                 * feature anyway, so this is not a big problem.
6126                 */
6127                __mem_cgroup_clear_mc();
6128                cond_resched();
6129                goto retry;
6130        }
6131        for (vma = mm->mmap; vma; vma = vma->vm_next) {
6132                int ret;
6133                struct mm_walk mem_cgroup_move_charge_walk = {
6134                        .pmd_entry = mem_cgroup_move_charge_pte_range,
6135                        .mm = mm,
6136                        .private = vma,
6137                };
6138                if (is_vm_hugetlb_page(vma))
6139                        continue;
6140                ret = walk_page_range(vma->vm_start, vma->vm_end,
6141                                                &mem_cgroup_move_charge_walk);
6142                if (ret)
6143                        /*
6144                         * A nonzero return means we consumed all precharges and
6145                         * failed to make an additional charge. Just give up here.
6146                         */
6147                        break;
6148        }
6149        up_read(&mm->mmap_sem);
6150}
6151
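    /*
     * cgroup ->attach() callback: now that the task has actually moved,
     * transfer the charges recorded by can_attach() and clear the global
     * "mc" state.
     */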
6152static void mem_cgroup_move_task(struct cgroup_subsys_state *css,
6153                                 struct cgroup_taskset *tset)
6154{
6155        struct task_struct *p = cgroup_taskset_first(tset);
6156        struct mm_struct *mm = get_task_mm(p);
6157
6158        if (mm) {
6159                if (mc.to)
6160                        mem_cgroup_move_charge(mm);
6161                mmput(mm);
6162        }
6163        if (mc.to)
6164                mem_cgroup_clear_mc();
6165}
6166#else   /* !CONFIG_MMU */
6167static int mem_cgroup_can_attach(struct cgroup_subsys_state *css,
6168                                 struct cgroup_taskset *tset)
6169{
6170        return 0;
6171}
6172static void mem_cgroup_cancel_attach(struct cgroup_subsys_state *css,
6173                                     struct cgroup_taskset *tset)
6174{
6175}
6176static void mem_cgroup_move_task(struct cgroup_subsys_state *css,
6177                                 struct cgroup_taskset *tset)
6178{
6179}
6180#endif
6181
6182/*
6183 * The cgroup core retains root cgroups across [un]mount cycles, making it
6184 * necessary to verify whether we're attached to the default hierarchy on
6185 * each mount attempt.
6186 */
6187static void mem_cgroup_bind(struct cgroup_subsys_state *root_css)
6188{
6189        /*
6190         * use_hierarchy is forced on the default hierarchy.  cgroup core
6191         * guarantees that @root doesn't have any children, so turning it
6192         * on for the root memcg is enough.
6193         */
6194        if (cgroup_on_dfl(root_css->cgroup))
6195                mem_cgroup_from_css(root_css)->use_hierarchy = true;
6196}
6197
6198struct cgroup_subsys memory_cgrp_subsys = {
6199        .css_alloc = mem_cgroup_css_alloc,
6200        .css_online = mem_cgroup_css_online,
6201        .css_offline = mem_cgroup_css_offline,
6202        .css_free = mem_cgroup_css_free,
6203        .css_reset = mem_cgroup_css_reset,
6204        .can_attach = mem_cgroup_can_attach,
6205        .cancel_attach = mem_cgroup_cancel_attach,
6206        .attach = mem_cgroup_move_task,
6207        .bind = mem_cgroup_bind,
6208        .legacy_cftypes = mem_cgroup_files,
6209        .early_init = 0,
6210};
6211
6212#ifdef CONFIG_MEMCG_SWAP
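    /*
     * Parse the "swapaccount=" kernel command line parameter: booting with
     * swapaccount=0 disables memory+swap accounting even when
     * CONFIG_MEMCG_SWAP_ENABLED is set, and swapaccount=1 force-enables it.
     */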
6213static int __init enable_swap_account(char *s)
6214{
6215        if (!strcmp(s, "1"))
6216                really_do_swap_account = 1;
6217        else if (!strcmp(s, "0"))
6218                really_do_swap_account = 0;
6219        return 1;
6220}
6221__setup("swapaccount=", enable_swap_account);
6222
6223static void __init memsw_file_init(void)
6224{
6225        WARN_ON(cgroup_add_legacy_cftypes(&memory_cgrp_subsys,
6226                                          memsw_cgroup_files));
6227}
6228
6229static void __init enable_swap_cgroup(void)
6230{
6231        if (!mem_cgroup_disabled() && really_do_swap_account) {
6232                do_swap_account = 1;
6233                memsw_file_init();
6234        }
6235}
6236
6237#else
6238static void __init enable_swap_cgroup(void)
6239{
6240}
6241#endif
6242
6243#ifdef CONFIG_MEMCG_SWAP
6244/**
6245 * mem_cgroup_swapout - transfer a memsw charge to swap
6246 * @page: page whose memsw charge to transfer
6247 * @entry: swap entry to move the charge to
6248 *
6249 * Transfer the memsw charge of @page to @entry.
6250 */
6251void mem_cgroup_swapout(struct page *page, swp_entry_t entry)
6252{
6253        struct page_cgroup *pc;
6254        unsigned short oldid;
6255
6256        VM_BUG_ON_PAGE(PageLRU(page), page);
6257        VM_BUG_ON_PAGE(page_count(page), page);
6258
6259        if (!do_swap_account)
6260                return;
6261
6262        pc = lookup_page_cgroup(page);
6263
6264        /* Readahead page, never charged */
6265        if (!PageCgroupUsed(pc))
6266                return;
6267
6268        VM_BUG_ON_PAGE(!(pc->flags & PCG_MEMSW), page);
6269
6270        oldid = swap_cgroup_record(entry, mem_cgroup_id(pc->mem_cgroup));
6271        VM_BUG_ON_PAGE(oldid, page);
6272
6273        pc->flags &= ~PCG_MEMSW;
6274        css_get(&pc->mem_cgroup->css);
6275        mem_cgroup_swap_statistics(pc->mem_cgroup, true);
6276}
6277
6278/**
6279 * mem_cgroup_uncharge_swap - uncharge a swap entry
6280 * @entry: swap entry to uncharge
6281 *
6282 * Drop the memsw charge associated with @entry.
6283 */
6284void mem_cgroup_uncharge_swap(swp_entry_t entry)
6285{
6286        struct mem_cgroup *memcg;
6287        unsigned short id;
6288
6289        if (!do_swap_account)
6290                return;
6291
6292        id = swap_cgroup_record(entry, 0);
6293        rcu_read_lock();
6294        memcg = mem_cgroup_lookup(id);
6295        if (memcg) {
6296                if (!mem_cgroup_is_root(memcg))
6297                        res_counter_uncharge(&memcg->memsw, PAGE_SIZE);
6298                mem_cgroup_swap_statistics(memcg, false);
6299                css_put(&memcg->css);
6300        }
6301        rcu_read_unlock();
6302}
6303#endif
6304
6305/**
6306 * mem_cgroup_try_charge - try charging a page
6307 * @page: page to charge
6308 * @mm: mm context of the victim
6309 * @gfp_mask: reclaim mode
6310 * @memcgp: charged memcg return
6311 *
6312 * Try to charge @page to the memcg that @mm belongs to, reclaiming
6313 * pages according to @gfp_mask if necessary.
6314 *
6315 * Returns 0 on success, with *@memcgp pointing to the charged memcg.
6316 * Otherwise, an error code is returned.
6317 *
6318 * After page->mapping has been set up, the caller must finalize the
6319 * charge with mem_cgroup_commit_charge().  Or abort the transaction
6320 * with mem_cgroup_cancel_charge() in case page instantiation fails.
6321 */
6322int mem_cgroup_try_charge(struct page *page, struct mm_struct *mm,
6323                          gfp_t gfp_mask, struct mem_cgroup **memcgp)
6324{
6325        struct mem_cgroup *memcg = NULL;
6326        unsigned int nr_pages = 1;
6327        int ret = 0;
6328
6329        if (mem_cgroup_disabled())
6330                goto out;
6331
6332        if (PageSwapCache(page)) {
6333                struct page_cgroup *pc = lookup_page_cgroup(page);
6334                /*
6335                 * Every swap fault against a single page tries to charge the
6336                 * page, so bail out as early as possible.  shmem_unuse() encounters
6337                 * already charged pages, too.  The USED bit is protected by
6338                 * the page lock, which serializes swap cache removal, which
6339                 * in turn serializes uncharging.
6340                 */
6341                if (PageCgroupUsed(pc))
6342                        goto out;
6343        }
6344
6345        if (PageTransHuge(page)) {
6346                nr_pages <<= compound_order(page);
6347                VM_BUG_ON_PAGE(!PageTransHuge(page), page);
6348        }
6349
6350        if (do_swap_account && PageSwapCache(page))
6351                memcg = try_get_mem_cgroup_from_page(page);
6352        if (!memcg)
6353                memcg = get_mem_cgroup_from_mm(mm);
6354
6355        ret = try_charge(memcg, gfp_mask, nr_pages);
6356
6357        css_put(&memcg->css);
6358
6359        if (ret == -EINTR) {
6360                memcg = root_mem_cgroup;
6361                ret = 0;
6362        }
6363out:
6364        *memcgp = memcg;
6365        return ret;
6366}
6367
6368/**
6369 * mem_cgroup_commit_charge - commit a page charge
6370 * @page: page to charge
6371 * @memcg: memcg to charge the page to
6372 * @lrucare: page might be on LRU already
6373 *
6374 * Finalize a charge transaction started by mem_cgroup_try_charge(),
6375 * after page->mapping has been set up.  This must happen atomically
6376 * as part of the page instantiation, i.e. under the page table lock
6377 * for anonymous pages, under the page lock for page and swap cache.
6378 *
6379 * In addition, the page must not be on the LRU during the commit, to
6380 * prevent racing with task migration.  If it might be, use @lrucare.
6381 *
6382 * Use mem_cgroup_cancel_charge() to cancel the transaction instead.
6383 */
6384void mem_cgroup_commit_charge(struct page *page, struct mem_cgroup *memcg,
6385                              bool lrucare)
6386{
6387        unsigned int nr_pages = 1;
6388
6389        VM_BUG_ON_PAGE(!page->mapping, page);
6390        VM_BUG_ON_PAGE(PageLRU(page) && !lrucare, page);
6391
6392        if (mem_cgroup_disabled())
6393                return;
6394        /*
6395         * Swap faults will attempt to charge the same page multiple
6396         * times.  But reuse_swap_page() might have removed the page
6397         * from swapcache already, so we can't check PageSwapCache().
6398         */
6399        if (!memcg)
6400                return;
6401
6402        commit_charge(page, memcg, lrucare);
6403
6404        if (PageTransHuge(page)) {
6405                nr_pages <<= compound_order(page);
6406                VM_BUG_ON_PAGE(!PageTransHuge(page), page);
6407        }
6408
6409        local_irq_disable();
6410        mem_cgroup_charge_statistics(memcg, page, nr_pages);
6411        memcg_check_events(memcg, page);
6412        local_irq_enable();
6413
6414        if (do_swap_account && PageSwapCache(page)) {
6415                swp_entry_t entry = { .val = page_private(page) };
6416                /*
6417                 * The swap entry might not get freed for a long time, so
6418                 * let's not wait for it.  The page already received a
6419                 * memory+swap charge, drop the swap entry duplicate.
6420                 */
6421                mem_cgroup_uncharge_swap(entry);
6422        }
6423}
6424
6425/**
6426 * mem_cgroup_cancel_charge - cancel a page charge
6427 * @page: page to charge
6428 * @memcg: memcg to charge the page to
6429 *
6430 * Cancel a charge transaction started by mem_cgroup_try_charge().
6431 */
6432void mem_cgroup_cancel_charge(struct page *page, struct mem_cgroup *memcg)
6433{
6434        unsigned int nr_pages = 1;
6435
6436        if (mem_cgroup_disabled())
6437                return;
6438        /*
6439         * Swap faults will attempt to charge the same page multiple
6440         * times.  But reuse_swap_page() might have removed the page
6441         * from swapcache already, so we can't check PageSwapCache().
6442         */
6443        if (!memcg)
6444                return;
6445
6446        if (PageTransHuge(page)) {
6447                nr_pages <<= compound_order(page);
6448                VM_BUG_ON_PAGE(!PageTransHuge(page), page);
6449        }
6450
6451        cancel_charge(memcg, nr_pages);
6452}
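    /*
     * The three functions above form the charge transaction API:
     * mem_cgroup_try_charge() reserves the charge, mem_cgroup_commit_charge()
     * finalizes it once page->mapping has been set up, and
     * mem_cgroup_cancel_charge() backs it out when instantiation fails.  A
     * minimal sketch of the calling pattern (the error label and the
     * install_failed condition are illustrative only, not a real caller):
     *
     *        struct mem_cgroup *memcg;
     *
     *        if (mem_cgroup_try_charge(page, mm, GFP_KERNEL, &memcg))
     *                goto out;                       // could not charge
     *        // ... set page->mapping, install the pte / cache entry ...
     *        if (install_failed) {                   // hypothetical
     *                mem_cgroup_cancel_charge(page, memcg);
     *                goto out;
     *        }
     *        mem_cgroup_commit_charge(page, memcg, false);  // page not on LRU
     */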
6453
6454static void uncharge_batch(struct mem_cgroup *memcg, unsigned long pgpgout,
6455                           unsigned long nr_mem, unsigned long nr_memsw,
6456                           unsigned long nr_anon, unsigned long nr_file,
6457                           unsigned long nr_huge, struct page *dummy_page)
6458{
6459        unsigned long flags;
6460
6461        if (!mem_cgroup_is_root(memcg)) {
6462                if (nr_mem)
6463                        res_counter_uncharge(&memcg->res,
6464                                             nr_mem * PAGE_SIZE);
6465                if (nr_memsw)
6466                        res_counter_uncharge(&memcg->memsw,
6467                                             nr_memsw * PAGE_SIZE);
6468                memcg_oom_recover(memcg);
6469        }
6470
6471        local_irq_save(flags);
6472        __this_cpu_sub(memcg->stat->count[MEM_CGROUP_STAT_RSS], nr_anon);
6473        __this_cpu_sub(memcg->stat->count[MEM_CGROUP_STAT_CACHE], nr_file);
6474        __this_cpu_sub(memcg->stat->count[MEM_CGROUP_STAT_RSS_HUGE], nr_huge);
6475        __this_cpu_add(memcg->stat->events[MEM_CGROUP_EVENTS_PGPGOUT], pgpgout);
6476        __this_cpu_add(memcg->stat->nr_page_events, nr_anon + nr_file);
6477        memcg_check_events(memcg, dummy_page);
6478        local_irq_restore(flags);
6479}
6480
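    /*
     * Uncharge every used page on @page_list.  Consecutive pages belonging
     * to the same memcg are accumulated and flushed to uncharge_batch() in
     * one go, so the res_counters and per-cpu statistics are only updated
     * once per run of same-memcg pages.
     */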
6481static void uncharge_list(struct list_head *page_list)
6482{
6483        struct mem_cgroup *memcg = NULL;
6484        unsigned long nr_memsw = 0;
6485        unsigned long nr_anon = 0;
6486        unsigned long nr_file = 0;
6487        unsigned long nr_huge = 0;
6488        unsigned long pgpgout = 0;
6489        unsigned long nr_mem = 0;
6490        struct list_head *next;
6491        struct page *page;
6492
6493        next = page_list->next;
6494        do {
6495                unsigned int nr_pages = 1;
6496                struct page_cgroup *pc;
6497
6498                page = list_entry(next, struct page, lru);
6499                next = page->lru.next;
6500
6501                VM_BUG_ON_PAGE(PageLRU(page), page);
6502                VM_BUG_ON_PAGE(page_count(page), page);
6503
6504                pc = lookup_page_cgroup(page);
6505                if (!PageCgroupUsed(pc))
6506                        continue;
6507
6508                /*
6509                 * Nobody should be changing or seriously looking at
6510                 * pc->mem_cgroup and pc->flags at this point; we have
6511                 * fully exclusive access to the page.
6512                 */
6513
6514                if (memcg != pc->mem_cgroup) {
6515                        if (memcg) {
6516                                uncharge_batch(memcg, pgpgout, nr_mem, nr_memsw,
6517                                               nr_anon, nr_file, nr_huge, page);
6518                                pgpgout = nr_mem = nr_memsw = 0;
6519                                nr_anon = nr_file = nr_huge = 0;
6520                        }
6521                        memcg = pc->mem_cgroup;
6522                }
6523
6524                if (PageTransHuge(page)) {
6525                        nr_pages <<= compound_order(page);
6526                        VM_BUG_ON_PAGE(!PageTransHuge(page), page);
6527                        nr_huge += nr_pages;
6528                }
6529
6530                if (PageAnon(page))
6531                        nr_anon += nr_pages;
6532                else
6533                        nr_file += nr_pages;
6534
6535                if (pc->flags & PCG_MEM)
6536                        nr_mem += nr_pages;
6537                if (pc->flags & PCG_MEMSW)
6538                        nr_memsw += nr_pages;
6539                pc->flags = 0;
6540
6541                pgpgout++;
6542        } while (next != page_list);
6543
6544        if (memcg)
6545                uncharge_batch(memcg, pgpgout, nr_mem, nr_memsw,
6546                               nr_anon, nr_file, nr_huge, page);
6547}
6548
6549/**
6550 * mem_cgroup_uncharge - uncharge a page
6551 * @page: page to uncharge
6552 *
6553 * Uncharge a page previously charged with mem_cgroup_try_charge() and
6554 * mem_cgroup_commit_charge().
6555 */
6556void mem_cgroup_uncharge(struct page *page)
6557{
6558        struct page_cgroup *pc;
6559
6560        if (mem_cgroup_disabled())
6561                return;
6562
6563        /* Don't touch page->lru of any random page, pre-check: */
6564        pc = lookup_page_cgroup(page);
6565        if (!PageCgroupUsed(pc))
6566                return;
6567
6568        INIT_LIST_HEAD(&page->lru);
6569        uncharge_list(&page->lru);
6570}
6571
6572/**
6573 * mem_cgroup_uncharge_list - uncharge a list of pages
6574 * @page_list: list of pages to uncharge
6575 *
6576 * Uncharge a list of pages previously charged with
6577 * mem_cgroup_try_charge() and mem_cgroup_commit_charge().
6578 */
6579void mem_cgroup_uncharge_list(struct list_head *page_list)
6580{
6581        if (mem_cgroup_disabled())
6582                return;
6583
6584        if (!list_empty(page_list))
6585                uncharge_list(page_list);
6586}
6587
6588/**
6589 * mem_cgroup_migrate - migrate a charge to another page
6590 * @oldpage: currently charged page
6591 * @newpage: page to transfer the charge to
6592 * @lrucare: both pages might be on the LRU already
6593 *
6594 * Migrate the charge from @oldpage to @newpage.
6595 *
6596 * Both pages must be locked, @newpage->mapping must be set up.
6597 */
6598void mem_cgroup_migrate(struct page *oldpage, struct page *newpage,
6599                        bool lrucare)
6600{
6601        struct page_cgroup *pc;
6602        int isolated;
6603
6604        VM_BUG_ON_PAGE(!PageLocked(oldpage), oldpage);
6605        VM_BUG_ON_PAGE(!PageLocked(newpage), newpage);
6606        VM_BUG_ON_PAGE(!lrucare && PageLRU(oldpage), oldpage);
6607        VM_BUG_ON_PAGE(!lrucare && PageLRU(newpage), newpage);
6608        VM_BUG_ON_PAGE(PageAnon(oldpage) != PageAnon(newpage), newpage);
6609        VM_BUG_ON_PAGE(PageTransHuge(oldpage) != PageTransHuge(newpage),
6610                       newpage);
6611
6612        if (mem_cgroup_disabled())
6613                return;
6614
6615        /* Page cache replacement: new page already charged? */
6616        pc = lookup_page_cgroup(newpage);
6617        if (PageCgroupUsed(pc))
6618                return;
6619
6620        /* Re-entrant migration: old page already uncharged? */
6621        pc = lookup_page_cgroup(oldpage);
6622        if (!PageCgroupUsed(pc))
6623                return;
6624
6625        VM_BUG_ON_PAGE(!(pc->flags & PCG_MEM), oldpage);
6626        VM_BUG_ON_PAGE(do_swap_account && !(pc->flags & PCG_MEMSW), oldpage);
6627
6628        if (lrucare)
6629                lock_page_lru(oldpage, &isolated);
6630
6631        pc->flags = 0;
6632
6633        if (lrucare)
6634                unlock_page_lru(oldpage, isolated);
6635
6636        commit_charge(newpage, pc->mem_cgroup, lrucare);
6637}
6638
6639/*
6640 * subsys_initcall() for memory controller.
6641 *
6642 * Some parts like hotcpu_notifier() have to be initialized from this context
6643 * because of lock dependencies (cgroup_lock -> cpu hotplug) but basically
6644 * everything that doesn't depend on a specific mem_cgroup structure should
6645 * be initialized from here.
6646 */
6647static int __init mem_cgroup_init(void)
6648{
6649        hotcpu_notifier(memcg_cpu_hotplug_callback, 0);
6650        enable_swap_cgroup();
6651        mem_cgroup_soft_limit_tree_init();
6652        memcg_stock_init();
6653        return 0;
6654}
6655subsys_initcall(mem_cgroup_init);
6656