// SPDX-License-Identifier: GPL-2.0-or-later
/* memcontrol.c - Memory Controller
 *
 * Copyright IBM Corporation, 2007
 * Author Balbir Singh <balbir@linux.vnet.ibm.com>
 *
 * Copyright 2007 OpenVZ SWsoft Inc
 * Author: Pavel Emelianov <xemul@openvz.org>
 *
 * Memory thresholds
 * Copyright (C) 2009 Nokia Corporation
 * Author: Kirill A. Shutemov
 *
 * Kernel Memory Controller
 * Copyright (C) 2012 Parallels Inc. and Google Inc.
 * Authors: Glauber Costa and Suleiman Souhlal
 *
 * Native page reclaim
 * Charge lifetime sanitation
 * Lockless page tracking & accounting
 * Unified hierarchy configuration model
 * Copyright (C) 2015 Red Hat, Inc., Johannes Weiner
 *
 * Per memcg lru locking
 * Copyright (C) 2020 Alibaba, Inc, Alex Shi
 */

#include <linux/page_counter.h>
#include <linux/memcontrol.h>
#include <linux/cgroup.h>
#include <linux/pagewalk.h>
#include <linux/sched/mm.h>
#include <linux/shmem_fs.h>
#include <linux/hugetlb.h>
#include <linux/pagemap.h>
#include <linux/vm_event_item.h>
#include <linux/smp.h>
#include <linux/page-flags.h>
#include <linux/backing-dev.h>
#include <linux/bit_spinlock.h>
#include <linux/rcupdate.h>
#include <linux/limits.h>
#include <linux/export.h>
#include <linux/mutex.h>
#include <linux/rbtree.h>
#include <linux/slab.h>
#include <linux/swap.h>
#include <linux/swapops.h>
#include <linux/spinlock.h>
#include <linux/eventfd.h>
#include <linux/poll.h>
#include <linux/sort.h>
#include <linux/fs.h>
#include <linux/seq_file.h>
#include <linux/vmpressure.h>
#include <linux/mm_inline.h>
#include <linux/swap_cgroup.h>
#include <linux/cpu.h>
#include <linux/oom.h>
#include <linux/lockdep.h>
#include <linux/file.h>
#include <linux/tracehook.h>
#include <linux/psi.h>
#include <linux/seq_buf.h>
#include "internal.h"
#include <net/sock.h>
#include <net/ip.h>
#include "slab.h"

#include <linux/uaccess.h>

#include <trace/events/vmscan.h>

struct cgroup_subsys memory_cgrp_subsys __read_mostly;
EXPORT_SYMBOL(memory_cgrp_subsys);

struct mem_cgroup *root_mem_cgroup __read_mostly;

/* Active memory cgroup to use from an interrupt context */
DEFINE_PER_CPU(struct mem_cgroup *, int_active_memcg);

/* Socket memory accounting disabled? */
static bool cgroup_memory_nosocket;

/* Kernel memory accounting disabled? */
static bool cgroup_memory_nokmem;

/* Whether the swap controller is active */
#ifdef CONFIG_MEMCG_SWAP
bool cgroup_memory_noswap __read_mostly;
#else
#define cgroup_memory_noswap            1
#endif

#ifdef CONFIG_CGROUP_WRITEBACK
static DECLARE_WAIT_QUEUE_HEAD(memcg_cgwb_frn_waitq);
#endif

/* Whether legacy memory+swap accounting is active */
static bool do_memsw_account(void)
{
        return !cgroup_subsys_on_dfl(memory_cgrp_subsys) && !cgroup_memory_noswap;
}

#define THRESHOLDS_EVENTS_TARGET 128
#define SOFTLIMIT_EVENTS_TARGET 1024

/*
 * Cgroups above their limits are maintained in a RB-Tree, independent of
 * their hierarchy representation
 */

struct mem_cgroup_tree_per_node {
        struct rb_root rb_root;
        struct rb_node *rb_rightmost;
        spinlock_t lock;
};

struct mem_cgroup_tree {
        struct mem_cgroup_tree_per_node *rb_tree_per_node[MAX_NUMNODES];
};

static struct mem_cgroup_tree soft_limit_tree __read_mostly;

/* for OOM */
struct mem_cgroup_eventfd_list {
        struct list_head list;
        struct eventfd_ctx *eventfd;
};

/*
 * cgroup_event represents events which userspace wants to receive.
 */
struct mem_cgroup_event {
        /*
         * memcg which the event belongs to.
         */
        struct mem_cgroup *memcg;
        /*
         * eventfd to signal userspace about the event.
         */
        struct eventfd_ctx *eventfd;
        /*
         * Each of these is stored in a list by the cgroup.
         */
        struct list_head list;
        /*
         * register_event() callback will be used to add a new userspace
         * waiter for changes related to this event.  Use eventfd_signal()
         * on eventfd to send notification to userspace.
         */
        int (*register_event)(struct mem_cgroup *memcg,
                              struct eventfd_ctx *eventfd, const char *args);
        /*
         * unregister_event() callback will be called when userspace closes
         * the eventfd or on cgroup removal.  This callback must be set
         * if you want to provide notification functionality.
         */
        void (*unregister_event)(struct mem_cgroup *memcg,
                                 struct eventfd_ctx *eventfd);
        /*
         * All fields below are needed to unregister the event when
         * userspace closes the eventfd.
         */
        poll_table pt;
        wait_queue_head_t *wqh;
        wait_queue_entry_t wait;
        struct work_struct remove;
};

static void mem_cgroup_threshold(struct mem_cgroup *memcg);
static void mem_cgroup_oom_notify(struct mem_cgroup *memcg);

/* Stuff for moving charges at task migration. */
/*
 * Types of charges to be moved.
 */
#define MOVE_ANON       0x1U
#define MOVE_FILE       0x2U
#define MOVE_MASK       (MOVE_ANON | MOVE_FILE)

/* "mc" and its members are protected by cgroup_mutex */
static struct move_charge_struct {
        spinlock_t        lock; /* for from, to */
        struct mm_struct  *mm;
        struct mem_cgroup *from;
        struct mem_cgroup *to;
        unsigned long flags;
        unsigned long precharge;
        unsigned long moved_charge;
        unsigned long moved_swap;
        struct task_struct *moving_task;        /* a task moving charges */
        wait_queue_head_t waitq;                /* a waitq for other context */
} mc = {
        .lock = __SPIN_LOCK_UNLOCKED(mc.lock),
        .waitq = __WAIT_QUEUE_HEAD_INITIALIZER(mc.waitq),
};

/*
 * Maximum loops in mem_cgroup_hierarchical_reclaim(), used for soft
 * limit reclaim to prevent infinite loops, if they ever occur.
 */
#define MEM_CGROUP_MAX_RECLAIM_LOOPS            100
#define MEM_CGROUP_MAX_SOFT_LIMIT_RECLAIM_LOOPS 2

/* for encoding cft->private value on file */
enum res_type {
        _MEM,
        _MEMSWAP,
        _OOM_TYPE,
        _KMEM,
        _TCP,
};

#define MEMFILE_PRIVATE(x, val) ((x) << 16 | (val))
#define MEMFILE_TYPE(val)       ((val) >> 16 & 0xffff)
#define MEMFILE_ATTR(val)       ((val) & 0xffff)
/* Used for the OOM notifier */
#define OOM_CONTROL             (0)
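
/*
 * Illustrative sketch (not something used verbatim in this file): how a
 * cft->private value round-trips through the helpers above.  RES_LIMIT
 * stands in for one of the attribute constants used elsewhere in this
 * file:
 *
 *      unsigned long priv = MEMFILE_PRIVATE(_MEMSWAP, RES_LIMIT);
 *
 *      MEMFILE_TYPE(priv);     yields _MEMSWAP (the high 16 bits)
 *      MEMFILE_ATTR(priv);     yields RES_LIMIT (the low 16 bits)
 */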
/*
 * Iteration constructs for visiting all cgroups (under a tree).  If
 * loops are exited prematurely (break), mem_cgroup_iter_break() must
 * be used for reference counting.
 */
#define for_each_mem_cgroup_tree(iter, root)            \
        for (iter = mem_cgroup_iter(root, NULL, NULL);  \
             iter != NULL;                              \
             iter = mem_cgroup_iter(root, iter, NULL))

#define for_each_mem_cgroup(iter)                       \
        for (iter = mem_cgroup_iter(NULL, NULL, NULL);  \
             iter != NULL;                              \
             iter = mem_cgroup_iter(NULL, iter, NULL))
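
/*
 * Example (sketch): a walk that stops early must drop the reference
 * held on the current position via mem_cgroup_iter_break().  "wanted"
 * is a placeholder predicate, not something defined in this file:
 *
 *      struct mem_cgroup *iter;
 *
 *      for_each_mem_cgroup_tree(iter, root) {
 *              if (wanted(iter)) {
 *                      mem_cgroup_iter_break(root, iter);
 *                      break;
 *              }
 *      }
 *
 * mem_cgroup_scan_tasks() below uses exactly this pattern.
 */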
static inline bool should_force_charge(void)
{
        return tsk_is_oom_victim(current) || fatal_signal_pending(current) ||
                (current->flags & PF_EXITING);
}

/* Some nice accessors for the vmpressure. */
struct vmpressure *memcg_to_vmpressure(struct mem_cgroup *memcg)
{
        if (!memcg)
                memcg = root_mem_cgroup;
        return &memcg->vmpressure;
}

struct cgroup_subsys_state *vmpressure_to_css(struct vmpressure *vmpr)
{
        return &container_of(vmpr, struct mem_cgroup, vmpressure)->css;
}

#ifdef CONFIG_MEMCG_KMEM
extern spinlock_t css_set_lock;

static int __memcg_kmem_charge(struct mem_cgroup *memcg, gfp_t gfp,
                               unsigned int nr_pages);
static void __memcg_kmem_uncharge(struct mem_cgroup *memcg,
                                  unsigned int nr_pages);

static void obj_cgroup_release(struct percpu_ref *ref)
{
        struct obj_cgroup *objcg = container_of(ref, struct obj_cgroup, refcnt);
        struct mem_cgroup *memcg;
        unsigned int nr_bytes;
        unsigned int nr_pages;
        unsigned long flags;

        /*
         * At this point all allocated objects are freed, and
         * objcg->nr_charged_bytes can't have an arbitrary byte value.
         * However, it can be PAGE_SIZE or (x * PAGE_SIZE).
         *
         * The following sequence can lead to it:
         * 1) CPU0: objcg == stock->cached_objcg
         * 2) CPU1: we do a small allocation (e.g. 92 bytes),
         *          PAGE_SIZE bytes are charged
         * 3) CPU1: a process from another memcg is allocating something,
         *          the stock is flushed,
         *          objcg->nr_charged_bytes = PAGE_SIZE - 92
         * 4) CPU0: we release this object,
         *          92 bytes are added to stock->nr_bytes
         * 5) CPU0: stock is flushed,
         *          92 bytes are added to objcg->nr_charged_bytes
         *
         * As a result, nr_charged_bytes == PAGE_SIZE, and that page is
         * uncharged below.
         */
        nr_bytes = atomic_read(&objcg->nr_charged_bytes);
        WARN_ON_ONCE(nr_bytes & (PAGE_SIZE - 1));
        nr_pages = nr_bytes >> PAGE_SHIFT;

        spin_lock_irqsave(&css_set_lock, flags);
        memcg = obj_cgroup_memcg(objcg);
        if (nr_pages)
                __memcg_kmem_uncharge(memcg, nr_pages);
        list_del(&objcg->list);
        mem_cgroup_put(memcg);
        spin_unlock_irqrestore(&css_set_lock, flags);

        percpu_ref_exit(ref);
        kfree_rcu(objcg, rcu);
}

static struct obj_cgroup *obj_cgroup_alloc(void)
{
        struct obj_cgroup *objcg;
        int ret;

        objcg = kzalloc(sizeof(struct obj_cgroup), GFP_KERNEL);
        if (!objcg)
                return NULL;

        ret = percpu_ref_init(&objcg->refcnt, obj_cgroup_release, 0,
                              GFP_KERNEL);
        if (ret) {
                kfree(objcg);
                return NULL;
        }
        INIT_LIST_HEAD(&objcg->list);
        return objcg;
}

static void memcg_reparent_objcgs(struct mem_cgroup *memcg,
                                  struct mem_cgroup *parent)
{
        struct obj_cgroup *objcg, *iter;

        objcg = rcu_replace_pointer(memcg->objcg, NULL, true);

        spin_lock_irq(&css_set_lock);

        /* Move the active objcg to the parent's list */
        xchg(&objcg->memcg, parent);
        css_get(&parent->css);
        list_add(&objcg->list, &parent->objcg_list);

        /* Move already reparented objcgs to the parent's list */
        list_for_each_entry(iter, &memcg->objcg_list, list) {
                css_get(&parent->css);
                xchg(&iter->memcg, parent);
                css_put(&memcg->css);
        }
        list_splice(&memcg->objcg_list, &parent->objcg_list);

        spin_unlock_irq(&css_set_lock);

        percpu_ref_kill(&objcg->refcnt);
}

/*
 * This will be used as a shrinker list's index.
 * The main reason for not using the cgroup id for this:
 *  this works better in sparse environments, where we have a lot of memcgs,
 *  but only a few are kmem-limited.  Also, if we had, for instance, 200
 *  memcgs and none but the 200th were kmem-limited, we'd have to keep a
 *  200-entry array for that.
 *
 * The current size of the caches array is stored in memcg_nr_cache_ids. It
 * will double each time we have to increase it.
 */
static DEFINE_IDA(memcg_cache_ida);
int memcg_nr_cache_ids;

/* Protects memcg_nr_cache_ids */
static DECLARE_RWSEM(memcg_cache_ids_sem);

void memcg_get_cache_ids(void)
{
        down_read(&memcg_cache_ids_sem);
}

void memcg_put_cache_ids(void)
{
        up_read(&memcg_cache_ids_sem);
}

/*
 * MIN_SIZE is different from 1, because we would like to avoid going through
 * the alloc/free process all the time. In a small machine, 4 kmem-limited
 * cgroups is a reasonable guess. In the future, it could be a parameter or
 * tunable, but that is strictly not necessary.
 *
 * MAX_SIZE should be as large as the number of cgrp_ids. Ideally, we could get
 * this constant directly from cgroup, but it is understandable that this is
 * better kept as an internal representation in cgroup.c. In any case, the
 * cgrp_id space is not getting any smaller, and we don't have to necessarily
 * increase ours as well if it increases.
 */
#define MEMCG_CACHES_MIN_SIZE 4
#define MEMCG_CACHES_MAX_SIZE MEM_CGROUP_ID_MAX

/*
 * A lot of the calls to the cache allocation functions are expected to be
 * inlined by the compiler. Since the calls to memcg_slab_pre_alloc_hook() are
 * conditional on this static branch, we have to allow modules that do
 * kmem_cache_alloc and the like to see this symbol as well.
 */
DEFINE_STATIC_KEY_FALSE(memcg_kmem_enabled_key);
EXPORT_SYMBOL(memcg_kmem_enabled_key);
#endif

static int memcg_shrinker_map_size;
static DEFINE_MUTEX(memcg_shrinker_map_mutex);

static void memcg_free_shrinker_map_rcu(struct rcu_head *head)
{
        kvfree(container_of(head, struct memcg_shrinker_map, rcu));
}

static int memcg_expand_one_shrinker_map(struct mem_cgroup *memcg,
                                         int size, int old_size)
{
        struct memcg_shrinker_map *new, *old;
        int nid;

        lockdep_assert_held(&memcg_shrinker_map_mutex);

        for_each_node(nid) {
                old = rcu_dereference_protected(
                        mem_cgroup_nodeinfo(memcg, nid)->shrinker_map, true);
                /* Not yet online memcg */
                if (!old)
                        return 0;

                new = kvmalloc_node(sizeof(*new) + size, GFP_KERNEL, nid);
                if (!new)
                        return -ENOMEM;

                /* Set all old bits, clear all new bits */
                memset(new->map, (int)0xff, old_size);
                memset((void *)new->map + old_size, 0, size - old_size);

                rcu_assign_pointer(memcg->nodeinfo[nid]->shrinker_map, new);
                call_rcu(&old->rcu, memcg_free_shrinker_map_rcu);
        }

        return 0;
}

static void memcg_free_shrinker_maps(struct mem_cgroup *memcg)
{
        struct mem_cgroup_per_node *pn;
        struct memcg_shrinker_map *map;
        int nid;

        if (mem_cgroup_is_root(memcg))
                return;

        for_each_node(nid) {
                pn = mem_cgroup_nodeinfo(memcg, nid);
                map = rcu_dereference_protected(pn->shrinker_map, true);
                kvfree(map);
                rcu_assign_pointer(pn->shrinker_map, NULL);
        }
}

static int memcg_alloc_shrinker_maps(struct mem_cgroup *memcg)
{
        struct memcg_shrinker_map *map;
        int nid, size, ret = 0;

        if (mem_cgroup_is_root(memcg))
                return 0;

        mutex_lock(&memcg_shrinker_map_mutex);
        size = memcg_shrinker_map_size;
        for_each_node(nid) {
                map = kvzalloc_node(sizeof(*map) + size, GFP_KERNEL, nid);
                if (!map) {
                        memcg_free_shrinker_maps(memcg);
                        ret = -ENOMEM;
                        break;
                }
                rcu_assign_pointer(memcg->nodeinfo[nid]->shrinker_map, map);
        }
        mutex_unlock(&memcg_shrinker_map_mutex);

        return ret;
}

int memcg_expand_shrinker_maps(int new_id)
{
        int size, old_size, ret = 0;
        struct mem_cgroup *memcg;

        size = DIV_ROUND_UP(new_id + 1, BITS_PER_LONG) * sizeof(unsigned long);
        old_size = memcg_shrinker_map_size;
        if (size <= old_size)
                return 0;

        mutex_lock(&memcg_shrinker_map_mutex);
        if (!root_mem_cgroup)
                goto unlock;

        for_each_mem_cgroup(memcg) {
                if (mem_cgroup_is_root(memcg))
                        continue;
                ret = memcg_expand_one_shrinker_map(memcg, size, old_size);
                if (ret) {
                        mem_cgroup_iter_break(NULL, memcg);
                        goto unlock;
                }
        }
unlock:
        if (!ret)
                memcg_shrinker_map_size = size;
        mutex_unlock(&memcg_shrinker_map_mutex);
        return ret;
}

void memcg_set_shrinker_bit(struct mem_cgroup *memcg, int nid, int shrinker_id)
{
        if (shrinker_id >= 0 && memcg && !mem_cgroup_is_root(memcg)) {
                struct memcg_shrinker_map *map;

                rcu_read_lock();
                map = rcu_dereference(memcg->nodeinfo[nid]->shrinker_map);
                /* Pairs with the smp_mb() in shrink_slab() */
                smp_mb__before_atomic();
                set_bit(shrinker_id, map->map);
                rcu_read_unlock();
        }
}

/**
 * mem_cgroup_css_from_page - css of the memcg associated with a page
 * @page: page of interest
 *
 * If memcg is bound to the default hierarchy, the css of the memcg associated
 * with @page is returned.  The returned css remains associated with @page
 * until it is released.
 *
 * If memcg is bound to a traditional hierarchy, the css of root_mem_cgroup
 * is returned.
 */
struct cgroup_subsys_state *mem_cgroup_css_from_page(struct page *page)
{
        struct mem_cgroup *memcg;

        memcg = page_memcg(page);

        if (!memcg || !cgroup_subsys_on_dfl(memory_cgrp_subsys))
                memcg = root_mem_cgroup;

        return &memcg->css;
}

/**
 * page_cgroup_ino - return inode number of the memcg a page is charged to
 * @page: the page
 *
 * Look up the closest online ancestor of the memory cgroup @page is charged to
 * and return its inode number or 0 if @page is not charged to any cgroup. It
 * is safe to call this function without holding a reference to @page.
 *
 * Note, this function is inherently racy, because there is nothing to prevent
 * the cgroup inode from getting torn down and potentially reallocated a moment
 * after page_cgroup_ino() returns, so it should only be used by callers that
 * do not care (such as procfs interfaces).
 */
ino_t page_cgroup_ino(struct page *page)
{
        struct mem_cgroup *memcg;
        unsigned long ino = 0;

        rcu_read_lock();
        memcg = page_memcg_check(page);

        while (memcg && !(memcg->css.flags & CSS_ONLINE))
                memcg = parent_mem_cgroup(memcg);
        if (memcg)
                ino = cgroup_ino(memcg->css.cgroup);
        rcu_read_unlock();
        return ino;
}

static struct mem_cgroup_per_node *
mem_cgroup_page_nodeinfo(struct mem_cgroup *memcg, struct page *page)
{
        int nid = page_to_nid(page);

        return memcg->nodeinfo[nid];
}

static struct mem_cgroup_tree_per_node *
soft_limit_tree_node(int nid)
{
        return soft_limit_tree.rb_tree_per_node[nid];
}

static struct mem_cgroup_tree_per_node *
soft_limit_tree_from_page(struct page *page)
{
        int nid = page_to_nid(page);

        return soft_limit_tree.rb_tree_per_node[nid];
}

static void __mem_cgroup_insert_exceeded(struct mem_cgroup_per_node *mz,
                                         struct mem_cgroup_tree_per_node *mctz,
                                         unsigned long new_usage_in_excess)
{
        struct rb_node **p = &mctz->rb_root.rb_node;
        struct rb_node *parent = NULL;
        struct mem_cgroup_per_node *mz_node;
        bool rightmost = true;

        if (mz->on_tree)
                return;

        mz->usage_in_excess = new_usage_in_excess;
        if (!mz->usage_in_excess)
                return;
        while (*p) {
                parent = *p;
                mz_node = rb_entry(parent, struct mem_cgroup_per_node,
                                        tree_node);
                if (mz->usage_in_excess < mz_node->usage_in_excess) {
                        p = &(*p)->rb_left;
                        rightmost = false;
                } else {
                        p = &(*p)->rb_right;
                }
        }

        if (rightmost)
                mctz->rb_rightmost = &mz->tree_node;

        rb_link_node(&mz->tree_node, parent, p);
        rb_insert_color(&mz->tree_node, &mctz->rb_root);
        mz->on_tree = true;
}

static void __mem_cgroup_remove_exceeded(struct mem_cgroup_per_node *mz,
                                         struct mem_cgroup_tree_per_node *mctz)
{
        if (!mz->on_tree)
                return;

        if (&mz->tree_node == mctz->rb_rightmost)
                mctz->rb_rightmost = rb_prev(&mz->tree_node);

        rb_erase(&mz->tree_node, &mctz->rb_root);
        mz->on_tree = false;
}

static void mem_cgroup_remove_exceeded(struct mem_cgroup_per_node *mz,
                                       struct mem_cgroup_tree_per_node *mctz)
{
        unsigned long flags;

        spin_lock_irqsave(&mctz->lock, flags);
        __mem_cgroup_remove_exceeded(mz, mctz);
        spin_unlock_irqrestore(&mctz->lock, flags);
}

static unsigned long soft_limit_excess(struct mem_cgroup *memcg)
{
        unsigned long nr_pages = page_counter_read(&memcg->memory);
        unsigned long soft_limit = READ_ONCE(memcg->soft_limit);
        unsigned long excess = 0;

        if (nr_pages > soft_limit)
                excess = nr_pages - soft_limit;

        return excess;
}

static void mem_cgroup_update_tree(struct mem_cgroup *memcg, struct page *page)
{
        unsigned long excess;
        struct mem_cgroup_per_node *mz;
        struct mem_cgroup_tree_per_node *mctz;

        mctz = soft_limit_tree_from_page(page);
        if (!mctz)
                return;
        /*
         * Necessary to update all ancestors when hierarchy is used,
         * because their event counters are not touched.
         */
        for (; memcg; memcg = parent_mem_cgroup(memcg)) {
                mz = mem_cgroup_page_nodeinfo(memcg, page);
                excess = soft_limit_excess(memcg);
                /*
                 * We have to update the tree if mz is on the RB-tree or
                 * the memcg is over its soft limit.
                 */
                if (excess || mz->on_tree) {
                        unsigned long flags;

                        spin_lock_irqsave(&mctz->lock, flags);
                        /* if on-tree, remove it */
                        if (mz->on_tree)
                                __mem_cgroup_remove_exceeded(mz, mctz);
                        /*
                         * Insert again. mz->usage_in_excess will be updated.
                         * If excess is 0, no tree ops.
                         */
                        __mem_cgroup_insert_exceeded(mz, mctz, excess);
                        spin_unlock_irqrestore(&mctz->lock, flags);
                }
        }
}

static void mem_cgroup_remove_from_trees(struct mem_cgroup *memcg)
{
        struct mem_cgroup_tree_per_node *mctz;
        struct mem_cgroup_per_node *mz;
        int nid;

        for_each_node(nid) {
                mz = mem_cgroup_nodeinfo(memcg, nid);
                mctz = soft_limit_tree_node(nid);
                if (mctz)
                        mem_cgroup_remove_exceeded(mz, mctz);
        }
}

static struct mem_cgroup_per_node *
__mem_cgroup_largest_soft_limit_node(struct mem_cgroup_tree_per_node *mctz)
{
        struct mem_cgroup_per_node *mz;

retry:
        mz = NULL;
        if (!mctz->rb_rightmost)
                goto done;              /* Nothing to reclaim from */

        mz = rb_entry(mctz->rb_rightmost,
                      struct mem_cgroup_per_node, tree_node);
        /*
         * Remove the node now, but someone else can add it back;
         * we will add it back at the end of reclaim to its correct
         * position in the tree.
         */
        __mem_cgroup_remove_exceeded(mz, mctz);
        if (!soft_limit_excess(mz->memcg) ||
            !css_tryget(&mz->memcg->css))
                goto retry;
done:
        return mz;
}

static struct mem_cgroup_per_node *
mem_cgroup_largest_soft_limit_node(struct mem_cgroup_tree_per_node *mctz)
{
        struct mem_cgroup_per_node *mz;

        spin_lock_irq(&mctz->lock);
        mz = __mem_cgroup_largest_soft_limit_node(mctz);
        spin_unlock_irq(&mctz->lock);
        return mz;
}

/**
 * __mod_memcg_state - update cgroup memory statistics
 * @memcg: the memory cgroup
 * @idx: the stat item - can be enum memcg_stat_item or enum node_stat_item
 * @val: delta to add to the counter, can be negative
 */
void __mod_memcg_state(struct mem_cgroup *memcg, int idx, int val)
{
        long x, threshold = MEMCG_CHARGE_BATCH;

        if (mem_cgroup_disabled())
                return;

        if (memcg_stat_item_in_bytes(idx))
                threshold <<= PAGE_SHIFT;

        x = val + __this_cpu_read(memcg->vmstats_percpu->stat[idx]);
        if (unlikely(abs(x) > threshold)) {
                struct mem_cgroup *mi;

                /*
                 * Batch local counters to keep them in sync with
                 * the hierarchical ones.
                 */
                __this_cpu_add(memcg->vmstats_local->stat[idx], x);
                for (mi = memcg; mi; mi = parent_mem_cgroup(mi))
                        atomic_long_add(x, &mi->vmstats[idx]);
                x = 0;
        }
        __this_cpu_write(memcg->vmstats_percpu->stat[idx], x);
}
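
/*
 * Worked example of the batching above, assuming MEMCG_CHARGE_BATCH is
 * 32: per-cpu deltas of +10 and then +30 leave x == 40 > 32, so the
 * whole 40 is flushed into the atomic hierarchical counters and the
 * per-cpu counter is reset to 0.  Readers of the atomic counters can
 * therefore lag the true value by up to the batch size per CPU.
 */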
static struct mem_cgroup_per_node *
parent_nodeinfo(struct mem_cgroup_per_node *pn, int nid)
{
        struct mem_cgroup *parent;

        parent = parent_mem_cgroup(pn->memcg);
        if (!parent)
                return NULL;
        return mem_cgroup_nodeinfo(parent, nid);
}

void __mod_memcg_lruvec_state(struct lruvec *lruvec, enum node_stat_item idx,
                              int val)
{
        struct mem_cgroup_per_node *pn;
        struct mem_cgroup *memcg;
        long x, threshold = MEMCG_CHARGE_BATCH;

        pn = container_of(lruvec, struct mem_cgroup_per_node, lruvec);
        memcg = pn->memcg;

        /* Update memcg */
        __mod_memcg_state(memcg, idx, val);

        /* Update lruvec */
        __this_cpu_add(pn->lruvec_stat_local->count[idx], val);

        if (vmstat_item_in_bytes(idx))
                threshold <<= PAGE_SHIFT;

        x = val + __this_cpu_read(pn->lruvec_stat_cpu->count[idx]);
        if (unlikely(abs(x) > threshold)) {
                pg_data_t *pgdat = lruvec_pgdat(lruvec);
                struct mem_cgroup_per_node *pi;

                for (pi = pn; pi; pi = parent_nodeinfo(pi, pgdat->node_id))
                        atomic_long_add(x, &pi->lruvec_stat[idx]);
                x = 0;
        }
        __this_cpu_write(pn->lruvec_stat_cpu->count[idx], x);
}

/**
 * __mod_lruvec_state - update lruvec memory statistics
 * @lruvec: the lruvec
 * @idx: the stat item
 * @val: delta to add to the counter, can be negative
 *
 * The lruvec is the intersection of the NUMA node and a cgroup. This
 * function updates all three counters that are affected by a
 * change of state at this level: per-node, per-cgroup, per-lruvec.
 */
void __mod_lruvec_state(struct lruvec *lruvec, enum node_stat_item idx,
                        int val)
{
        /* Update node */
        __mod_node_page_state(lruvec_pgdat(lruvec), idx, val);

        /* Update memcg and lruvec */
        if (!mem_cgroup_disabled())
                __mod_memcg_lruvec_state(lruvec, idx, val);
}
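
/*
 * Example (sketch): accounting one newly mapped file page through this
 * helper bumps the node's, the memcg's and the lruvec's NR_FILE_MAPPED
 * counters in a single call:
 *
 *      __mod_lruvec_state(lruvec, NR_FILE_MAPPED, 1);
 */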
void __mod_lruvec_page_state(struct page *page, enum node_stat_item idx,
                             int val)
{
        struct page *head = compound_head(page); /* rmap on tail pages */
        struct mem_cgroup *memcg = page_memcg(head);
        pg_data_t *pgdat = page_pgdat(page);
        struct lruvec *lruvec;

        /* Untracked pages have no memcg, no lruvec. Update only the node */
        if (!memcg) {
                __mod_node_page_state(pgdat, idx, val);
                return;
        }

        lruvec = mem_cgroup_lruvec(memcg, pgdat);
        __mod_lruvec_state(lruvec, idx, val);
}
EXPORT_SYMBOL(__mod_lruvec_page_state);

void __mod_lruvec_kmem_state(void *p, enum node_stat_item idx, int val)
{
        pg_data_t *pgdat = page_pgdat(virt_to_page(p));
        struct mem_cgroup *memcg;
        struct lruvec *lruvec;

        rcu_read_lock();
        memcg = mem_cgroup_from_obj(p);

        /*
         * Untracked pages have no memcg, no lruvec. Update only the
         * node. If we reparent the slab objects to the root memcg,
         * when we free the slab object, we need to update the per-memcg
         * vmstats to keep it correct for the root memcg.
         */
        if (!memcg) {
                __mod_node_page_state(pgdat, idx, val);
        } else {
                lruvec = mem_cgroup_lruvec(memcg, pgdat);
                __mod_lruvec_state(lruvec, idx, val);
        }
        rcu_read_unlock();
}

/**
 * __count_memcg_events - account VM events in a cgroup
 * @memcg: the memory cgroup
 * @idx: the event item
 * @count: the number of events that occurred
 */
void __count_memcg_events(struct mem_cgroup *memcg, enum vm_event_item idx,
                          unsigned long count)
{
        unsigned long x;

        if (mem_cgroup_disabled())
                return;

        x = count + __this_cpu_read(memcg->vmstats_percpu->events[idx]);
        if (unlikely(x > MEMCG_CHARGE_BATCH)) {
                struct mem_cgroup *mi;

                /*
                 * Batch local counters to keep them in sync with
                 * the hierarchical ones.
                 */
                __this_cpu_add(memcg->vmstats_local->events[idx], x);
                for (mi = memcg; mi; mi = parent_mem_cgroup(mi))
                        atomic_long_add(x, &mi->vmevents[idx]);
                x = 0;
        }
        __this_cpu_write(memcg->vmstats_percpu->events[idx], x);
}

static unsigned long memcg_events(struct mem_cgroup *memcg, int event)
{
        return atomic_long_read(&memcg->vmevents[event]);
}

static unsigned long memcg_events_local(struct mem_cgroup *memcg, int event)
{
        long x = 0;
        int cpu;

        for_each_possible_cpu(cpu)
                x += per_cpu(memcg->vmstats_local->events[event], cpu);
        return x;
}

static void mem_cgroup_charge_statistics(struct mem_cgroup *memcg,
                                         struct page *page,
                                         int nr_pages)
{
        /* A pagein of a big page is one event, so ignore the page size */
        if (nr_pages > 0)
                __count_memcg_events(memcg, PGPGIN, 1);
        else {
                __count_memcg_events(memcg, PGPGOUT, 1);
                nr_pages = -nr_pages; /* for event */
        }

        __this_cpu_add(memcg->vmstats_percpu->nr_page_events, nr_pages);
}

static bool mem_cgroup_event_ratelimit(struct mem_cgroup *memcg,
                                       enum mem_cgroup_events_target target)
{
        unsigned long val, next;

        val = __this_cpu_read(memcg->vmstats_percpu->nr_page_events);
        next = __this_cpu_read(memcg->vmstats_percpu->targets[target]);
        /* from time_after() in jiffies.h */
        if ((long)(next - val) < 0) {
                switch (target) {
                case MEM_CGROUP_TARGET_THRESH:
                        next = val + THRESHOLDS_EVENTS_TARGET;
                        break;
                case MEM_CGROUP_TARGET_SOFTLIMIT:
                        next = val + SOFTLIMIT_EVENTS_TARGET;
                        break;
                default:
                        break;
                }
                __this_cpu_write(memcg->vmstats_percpu->targets[target], next);
                return true;
        }
        return false;
}
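
/*
 * Worked example of the wraparound-safe comparison above (the same
 * trick as time_after()): with next == 100 and val == 105, next - val
 * is (unsigned long)-5, which is negative when cast to long, so the
 * target has been passed and a new one is set.  With val close to
 * ULONG_MAX (say ULONG_MAX - 2) and a wrapped-around target of
 * next == 5, next - val == 8, which is positive as a long, so the
 * target is correctly treated as not yet reached.
 */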
/*
 * Check events in order.
 */
static void memcg_check_events(struct mem_cgroup *memcg, struct page *page)
{
        /* threshold event is triggered in finer grain than soft limit */
        if (unlikely(mem_cgroup_event_ratelimit(memcg,
                                                MEM_CGROUP_TARGET_THRESH))) {
                bool do_softlimit;

                do_softlimit = mem_cgroup_event_ratelimit(memcg,
                                                MEM_CGROUP_TARGET_SOFTLIMIT);
                mem_cgroup_threshold(memcg);
                if (unlikely(do_softlimit))
                        mem_cgroup_update_tree(memcg, page);
        }
}

struct mem_cgroup *mem_cgroup_from_task(struct task_struct *p)
{
        /*
         * mm_update_next_owner() may clear mm->owner to NULL
         * if it races with swapoff, page migration, etc.
         * So this can be called with p == NULL.
         */
        if (unlikely(!p))
                return NULL;

        return mem_cgroup_from_css(task_css(p, memory_cgrp_id));
}
EXPORT_SYMBOL(mem_cgroup_from_task);

/**
 * get_mem_cgroup_from_mm: Obtain a reference on a given mm_struct's memcg.
 * @mm: mm from which memcg should be extracted. It can be NULL.
 *
 * Obtain a reference on mm->memcg and return it if successful. Otherwise
 * root_mem_cgroup is returned. However, if the memory controller is
 * disabled, NULL is returned.
 */
struct mem_cgroup *get_mem_cgroup_from_mm(struct mm_struct *mm)
{
        struct mem_cgroup *memcg;

        if (mem_cgroup_disabled())
                return NULL;

        rcu_read_lock();
        do {
                /*
                 * Page cache insertions can happen without an
                 * actual mm context, e.g. during disk probing
                 * on boot, loopback IO, acct() writes etc.
                 */
                if (unlikely(!mm))
                        memcg = root_mem_cgroup;
                else {
                        memcg = mem_cgroup_from_task(rcu_dereference(mm->owner));
                        if (unlikely(!memcg))
                                memcg = root_mem_cgroup;
                }
        } while (!css_tryget(&memcg->css));
        rcu_read_unlock();
        return memcg;
}
EXPORT_SYMBOL(get_mem_cgroup_from_mm);
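
/*
 * Example (sketch): a successful lookup pins the memcg's css, so the
 * caller must pair it with css_put() when done:
 *
 *      struct mem_cgroup *memcg = get_mem_cgroup_from_mm(current->mm);
 *
 *      if (memcg) {
 *              ... use memcg ...
 *              css_put(&memcg->css);
 *      }
 */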
static __always_inline struct mem_cgroup *active_memcg(void)
{
        if (in_interrupt())
                return this_cpu_read(int_active_memcg);
        else
                return current->active_memcg;
}

static __always_inline struct mem_cgroup *get_active_memcg(void)
{
        struct mem_cgroup *memcg;

        rcu_read_lock();
        memcg = active_memcg();
        /* remote memcg must hold a ref. */
        if (memcg && WARN_ON_ONCE(!css_tryget(&memcg->css)))
                memcg = root_mem_cgroup;
        rcu_read_unlock();

        return memcg;
}

static __always_inline bool memcg_kmem_bypass(void)
{
        /* Allow remote memcg charging from any context. */
        if (unlikely(active_memcg()))
                return false;

        /* Memcg to charge can't be determined. */
        if (in_interrupt() || !current->mm || (current->flags & PF_KTHREAD))
                return true;

        return false;
}

/*
 * If the active memcg is set, do not fall back to current->mm->memcg.
 */
static __always_inline struct mem_cgroup *get_mem_cgroup_from_current(void)
{
        if (memcg_kmem_bypass())
                return NULL;

        if (unlikely(active_memcg()))
                return get_active_memcg();

        return get_mem_cgroup_from_mm(current->mm);
}

/**
 * mem_cgroup_iter - iterate over memory cgroup hierarchy
 * @root: hierarchy root
 * @prev: previously returned memcg, NULL on first invocation
 * @reclaim: cookie for shared reclaim walks, NULL for full walks
 *
 * Returns references to children of the hierarchy below @root, or
 * @root itself, or %NULL after a full round-trip.
 *
 * Caller must pass the return value in @prev on subsequent
 * invocations for reference counting, or use mem_cgroup_iter_break()
 * to cancel a hierarchy walk before the round-trip is complete.
 *
 * Reclaimers can specify a node in @reclaim to divide up the memcgs
 * in the hierarchy among all concurrent reclaimers operating on the
 * same node.
 */
struct mem_cgroup *mem_cgroup_iter(struct mem_cgroup *root,
                                   struct mem_cgroup *prev,
                                   struct mem_cgroup_reclaim_cookie *reclaim)
{
        struct mem_cgroup_reclaim_iter *iter;
        struct cgroup_subsys_state *css = NULL;
        struct mem_cgroup *memcg = NULL;
        struct mem_cgroup *pos = NULL;

        if (mem_cgroup_disabled())
                return NULL;

        if (!root)
                root = root_mem_cgroup;

        if (prev && !reclaim)
                pos = prev;

        rcu_read_lock();

        if (reclaim) {
                struct mem_cgroup_per_node *mz;

                mz = mem_cgroup_nodeinfo(root, reclaim->pgdat->node_id);
                iter = &mz->iter;

                if (prev && reclaim->generation != iter->generation)
                        goto out_unlock;

                while (1) {
                        pos = READ_ONCE(iter->position);
                        if (!pos || css_tryget(&pos->css))
                                break;
                        /*
                         * css reference reached zero, so iter->position will
                         * be cleared by ->css_released. However, we should not
                         * rely on this happening soon, because ->css_released
                         * is called from a work queue, and by busy-waiting we
                         * might block it. So we clear iter->position right
                         * away.
                         */
                        (void)cmpxchg(&iter->position, pos, NULL);
                }
        }

        if (pos)
                css = &pos->css;

        for (;;) {
                css = css_next_descendant_pre(css, &root->css);
                if (!css) {
                        /*
                         * Reclaimers share the hierarchy walk, and a
                         * new one might jump in right at the end of
                         * the hierarchy - make sure they see at least
                         * one group and restart from the beginning.
                         */
                        if (!prev)
                                continue;
                        break;
                }

                /*
                 * Verify the css and acquire a reference.  The root
                 * is provided by the caller, so we know it's alive
                 * and kicking, and don't take an extra reference.
                 */
                memcg = mem_cgroup_from_css(css);

                if (css == &root->css)
                        break;

                if (css_tryget(css))
                        break;

                memcg = NULL;
        }

        if (reclaim) {
                /*
                 * The position could have already been updated by a competing
                 * thread, so check that the value hasn't changed since we read
                 * it to avoid reclaiming from the same cgroup twice.
                 */
                (void)cmpxchg(&iter->position, pos, memcg);

                if (pos)
                        css_put(&pos->css);

                if (!memcg)
                        iter->generation++;
                else if (!prev)
                        reclaim->generation = iter->generation;
        }

out_unlock:
        rcu_read_unlock();
        if (prev && prev != root)
                css_put(&prev->css);

        return memcg;
}

/**
 * mem_cgroup_iter_break - abort a hierarchy walk prematurely
 * @root: hierarchy root
 * @prev: last visited hierarchy member as returned by mem_cgroup_iter()
 */
void mem_cgroup_iter_break(struct mem_cgroup *root,
                           struct mem_cgroup *prev)
{
        if (!root)
                root = root_mem_cgroup;
        if (prev && prev != root)
                css_put(&prev->css);
}

static void __invalidate_reclaim_iterators(struct mem_cgroup *from,
                                        struct mem_cgroup *dead_memcg)
{
        struct mem_cgroup_reclaim_iter *iter;
        struct mem_cgroup_per_node *mz;
        int nid;

        for_each_node(nid) {
                mz = mem_cgroup_nodeinfo(from, nid);
                iter = &mz->iter;
                cmpxchg(&iter->position, dead_memcg, NULL);
        }
}

static void invalidate_reclaim_iterators(struct mem_cgroup *dead_memcg)
{
        struct mem_cgroup *memcg = dead_memcg;
        struct mem_cgroup *last;

        do {
                __invalidate_reclaim_iterators(memcg, dead_memcg);
                last = memcg;
        } while ((memcg = parent_mem_cgroup(memcg)));

        /*
         * When cgroup1 non-hierarchy mode is used,
         * parent_mem_cgroup() does not walk all the way up to the
         * cgroup root (root_mem_cgroup). So we have to handle
         * dead_memcg from the cgroup root separately.
         */
        if (last != root_mem_cgroup)
                __invalidate_reclaim_iterators(root_mem_cgroup,
                                                dead_memcg);
}

/**
 * mem_cgroup_scan_tasks - iterate over tasks of a memory cgroup hierarchy
 * @memcg: hierarchy root
 * @fn: function to call for each task
 * @arg: argument passed to @fn
 *
 * This function iterates over tasks attached to @memcg or to any of its
 * descendants and calls @fn for each task. If @fn returns a non-zero
 * value, the function breaks the iteration loop and returns the value.
 * Otherwise, it will iterate over all tasks and return 0.
 *
 * This function must not be called for the root memory cgroup.
 */
int mem_cgroup_scan_tasks(struct mem_cgroup *memcg,
                          int (*fn)(struct task_struct *, void *), void *arg)
{
        struct mem_cgroup *iter;
        int ret = 0;

        BUG_ON(memcg == root_mem_cgroup);

        for_each_mem_cgroup_tree(iter, memcg) {
                struct css_task_iter it;
                struct task_struct *task;

                css_task_iter_start(&iter->css, CSS_TASK_ITER_PROCS, &it);
                while (!ret && (task = css_task_iter_next(&it)))
                        ret = fn(task, arg);
                css_task_iter_end(&it);
                if (ret) {
                        mem_cgroup_iter_break(memcg, iter);
                        break;
                }
        }
        return ret;
}
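
/*
 * Example (sketch): a callback that counts the tasks in a hierarchy.
 * "count_task" is illustrative only; returning non-zero from the
 * callback would stop the walk early and propagate that value:
 *
 *      static int count_task(struct task_struct *task, void *arg)
 *      {
 *              (*(unsigned long *)arg)++;
 *              return 0;
 *      }
 *
 *      unsigned long nr = 0;
 *
 *      mem_cgroup_scan_tasks(memcg, count_task, &nr);
 */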
#ifdef CONFIG_DEBUG_VM
void lruvec_memcg_debug(struct lruvec *lruvec, struct page *page)
{
        struct mem_cgroup *memcg;

        if (mem_cgroup_disabled())
                return;

        memcg = page_memcg(page);

        if (!memcg)
                VM_BUG_ON_PAGE(lruvec_memcg(lruvec) != root_mem_cgroup, page);
        else
                VM_BUG_ON_PAGE(lruvec_memcg(lruvec) != memcg, page);
}
#endif

/**
 * lock_page_lruvec - lock and return lruvec for a given page.
 * @page: the page
 *
 * These functions are safe to use under any of the following conditions:
 * - page locked
 * - PageLRU cleared
 * - lock_page_memcg()
 * - page->_refcount is zero
 */
struct lruvec *lock_page_lruvec(struct page *page)
{
        struct lruvec *lruvec;
        struct pglist_data *pgdat = page_pgdat(page);

        lruvec = mem_cgroup_page_lruvec(page, pgdat);
        spin_lock(&lruvec->lru_lock);

        lruvec_memcg_debug(lruvec, page);

        return lruvec;
}

struct lruvec *lock_page_lruvec_irq(struct page *page)
{
        struct lruvec *lruvec;
        struct pglist_data *pgdat = page_pgdat(page);

        lruvec = mem_cgroup_page_lruvec(page, pgdat);
        spin_lock_irq(&lruvec->lru_lock);

        lruvec_memcg_debug(lruvec, page);

        return lruvec;
}

struct lruvec *lock_page_lruvec_irqsave(struct page *page, unsigned long *flags)
{
        struct lruvec *lruvec;
        struct pglist_data *pgdat = page_pgdat(page);

        lruvec = mem_cgroup_page_lruvec(page, pgdat);
        spin_lock_irqsave(&lruvec->lru_lock, *flags);

        lruvec_memcg_debug(lruvec, page);

        return lruvec;
}
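
/*
 * Example (sketch): each lock function pairs with the corresponding
 * unlock helper (the unlock_page_lruvec*() inlines in memcontrol.h);
 * for the irqsave variant, flags must live across the critical section:
 *
 *      unsigned long flags;
 *      struct lruvec *lruvec = lock_page_lruvec_irqsave(page, &flags);
 *
 *      ... manipulate the page's LRU state ...
 *      unlock_page_lruvec_irqrestore(lruvec, flags);
 */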
/**
 * mem_cgroup_update_lru_size - account for adding or removing an lru page
 * @lruvec: mem_cgroup per zone lru vector
 * @lru: index of lru list the page is sitting on
 * @zid: zone id of the accounted pages
 * @nr_pages: positive when adding or negative when removing
 *
 * This function must be called under lru_lock, just before a page is added
 * to or just after a page is removed from an lru list (that ordering being
 * so as to allow it to check that lru_size 0 is consistent with list_empty).
 */
void mem_cgroup_update_lru_size(struct lruvec *lruvec, enum lru_list lru,
                                int zid, int nr_pages)
{
        struct mem_cgroup_per_node *mz;
        unsigned long *lru_size;
        long size;

        if (mem_cgroup_disabled())
                return;

        mz = container_of(lruvec, struct mem_cgroup_per_node, lruvec);
        lru_size = &mz->lru_zone_size[zid][lru];

        if (nr_pages < 0)
                *lru_size += nr_pages;

        size = *lru_size;
        if (WARN_ONCE(size < 0,
                "%s(%p, %d, %d): lru_size %ld\n",
                __func__, lruvec, lru, nr_pages, size)) {
                VM_BUG_ON(1);
                *lru_size = 0;
        }

        if (nr_pages > 0)
                *lru_size += nr_pages;
}

/**
 * mem_cgroup_margin - calculate chargeable space of a memory cgroup
 * @memcg: the memory cgroup
 *
 * Returns the maximum amount of memory @memcg can be charged with, in
 * pages.
 */
static unsigned long mem_cgroup_margin(struct mem_cgroup *memcg)
{
        unsigned long margin = 0;
        unsigned long count;
        unsigned long limit;

        count = page_counter_read(&memcg->memory);
        limit = READ_ONCE(memcg->memory.max);
        if (count < limit)
                margin = limit - count;

        if (do_memsw_account()) {
                count = page_counter_read(&memcg->memsw);
                limit = READ_ONCE(memcg->memsw.max);
                if (count < limit)
                        margin = min(margin, limit - count);
                else
                        margin = 0;
        }

        return margin;
}
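
/*
 * Worked example: with memory.max == 1000 pages and usage == 700, the
 * memory margin is 300 pages; if memsw accounting is active with
 * memsw.max == 1100 pages and memsw usage == 1050, the memsw margin is
 * only 50, so mem_cgroup_margin() returns the smaller value, 50.
 */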
/*
 * A routine for checking whether "memcg" is under move_account() or not.
 *
 * Checks whether a cgroup is mc.from, mc.to, or under the hierarchy of
 * the moving cgroups. This is for waiting at high memory pressure
 * caused by "move".
 */
static bool mem_cgroup_under_move(struct mem_cgroup *memcg)
{
        struct mem_cgroup *from;
        struct mem_cgroup *to;
        bool ret = false;
        /*
         * Unlike the task-move routines, we access mc.to and mc.from
         * without mutual exclusion by cgroup_mutex. Here, we take the
         * spinlock instead.
         */
        spin_lock(&mc.lock);
        from = mc.from;
        to = mc.to;
        if (!from)
                goto unlock;

        ret = mem_cgroup_is_descendant(from, memcg) ||
                mem_cgroup_is_descendant(to, memcg);
unlock:
        spin_unlock(&mc.lock);
        return ret;
}

static bool mem_cgroup_wait_acct_move(struct mem_cgroup *memcg)
{
        if (mc.moving_task && current != mc.moving_task) {
                if (mem_cgroup_under_move(memcg)) {
                        DEFINE_WAIT(wait);
                        prepare_to_wait(&mc.waitq, &wait, TASK_INTERRUPTIBLE);
                        /* moving charge context might have finished. */
                        if (mc.moving_task)
                                schedule();
                        finish_wait(&mc.waitq, &wait);
                        return true;
                }
        }
        return false;
}

struct memory_stat {
        const char *name;
        unsigned int idx;
};

static const struct memory_stat memory_stats[] = {
        { "anon",                       NR_ANON_MAPPED                  },
        { "file",                       NR_FILE_PAGES                   },
        { "kernel_stack",               NR_KERNEL_STACK_KB              },
        { "pagetables",                 NR_PAGETABLE                    },
        { "percpu",                     MEMCG_PERCPU_B                  },
        { "sock",                       MEMCG_SOCK                      },
        { "shmem",                      NR_SHMEM                        },
        { "file_mapped",                NR_FILE_MAPPED                  },
        { "file_dirty",                 NR_FILE_DIRTY                   },
        { "file_writeback",             NR_WRITEBACK                    },
#ifdef CONFIG_SWAP
        { "swapcached",                 NR_SWAPCACHE                    },
#endif
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
        { "anon_thp",                   NR_ANON_THPS                    },
        { "file_thp",                   NR_FILE_THPS                    },
        { "shmem_thp",                  NR_SHMEM_THPS                   },
#endif
        { "inactive_anon",              NR_INACTIVE_ANON                },
        { "active_anon",                NR_ACTIVE_ANON                  },
        { "inactive_file",              NR_INACTIVE_FILE                },
        { "active_file",                NR_ACTIVE_FILE                  },
        { "unevictable",                NR_UNEVICTABLE                  },
        { "slab_reclaimable",           NR_SLAB_RECLAIMABLE_B           },
        { "slab_unreclaimable",         NR_SLAB_UNRECLAIMABLE_B         },

        /* The memory events */
        { "workingset_refault_anon",    WORKINGSET_REFAULT_ANON         },
        { "workingset_refault_file",    WORKINGSET_REFAULT_FILE         },
        { "workingset_activate_anon",   WORKINGSET_ACTIVATE_ANON        },
        { "workingset_activate_file",   WORKINGSET_ACTIVATE_FILE        },
        { "workingset_restore_anon",    WORKINGSET_RESTORE_ANON         },
        { "workingset_restore_file",    WORKINGSET_RESTORE_FILE         },
        { "workingset_nodereclaim",     WORKINGSET_NODERECLAIM          },
};
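
/*
 * Each entry above becomes one "<name> <value>" line in the memory.stat
 * output built by memory_stat_format() below, after unit conversion by
 * memcg_page_state_unit().  For example (illustrative values only):
 *
 *      anon 8192
 *      file 4096
 *      kernel_stack 16384
 */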
/* Translate stat items to the correct unit for memory.stat output */
static int memcg_page_state_unit(int item)
{
        switch (item) {
        case MEMCG_PERCPU_B:
        case NR_SLAB_RECLAIMABLE_B:
        case NR_SLAB_UNRECLAIMABLE_B:
        case WORKINGSET_REFAULT_ANON:
        case WORKINGSET_REFAULT_FILE:
        case WORKINGSET_ACTIVATE_ANON:
        case WORKINGSET_ACTIVATE_FILE:
        case WORKINGSET_RESTORE_ANON:
        case WORKINGSET_RESTORE_FILE:
        case WORKINGSET_NODERECLAIM:
                return 1;
        case NR_KERNEL_STACK_KB:
                return SZ_1K;
        default:
                return PAGE_SIZE;
        }
}
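
/*
 * Example: NR_KERNEL_STACK_KB is tracked in kilobytes, so a raw count
 * of 16 is reported as 16 * SZ_1K == 16384 in memory.stat, while a
 * page-based item such as NR_SHMEM with a count of 3 is reported as
 * 3 * PAGE_SIZE.  Items already tracked in bytes and plain event
 * counts use a unit of 1.
 */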
1548
1549static inline unsigned long memcg_page_state_output(struct mem_cgroup *memcg,
1550                                                    int item)
1551{
1552        return memcg_page_state(memcg, item) * memcg_page_state_unit(item);
1553}
1554
1555static char *memory_stat_format(struct mem_cgroup *memcg)
1556{
1557        struct seq_buf s;
1558        int i;
1559
1560        seq_buf_init(&s, kmalloc(PAGE_SIZE, GFP_KERNEL), PAGE_SIZE);
1561        if (!s.buffer)
1562                return NULL;
1563
1564        /*
1565         * Provide statistics on the state of the memory subsystem as
1566         * well as cumulative event counters that show past behavior.
1567         *
1568         * This list is ordered following a combination of these gradients:
1569         * 1) generic big picture -> specifics and details
1570         * 2) reflecting userspace activity -> reflecting kernel heuristics
1571         *
1572         * Current memory state:
1573         */
1574
1575        for (i = 0; i < ARRAY_SIZE(memory_stats); i++) {
1576                u64 size;
1577
1578                size = memcg_page_state_output(memcg, memory_stats[i].idx);
1579                seq_buf_printf(&s, "%s %llu\n", memory_stats[i].name, size);
1580
1581                if (unlikely(memory_stats[i].idx == NR_SLAB_UNRECLAIMABLE_B)) {
1582                        size += memcg_page_state_output(memcg,
1583                                                        NR_SLAB_RECLAIMABLE_B);
1584                        seq_buf_printf(&s, "slab %llu\n", size);
1585                }
1586        }
1587
1588        /* Accumulated memory events */
1589
1590        seq_buf_printf(&s, "%s %lu\n", vm_event_name(PGFAULT),
1591                       memcg_events(memcg, PGFAULT));
1592        seq_buf_printf(&s, "%s %lu\n", vm_event_name(PGMAJFAULT),
1593                       memcg_events(memcg, PGMAJFAULT));
1594        seq_buf_printf(&s, "%s %lu\n", vm_event_name(PGREFILL),
1595                       memcg_events(memcg, PGREFILL));
1596        seq_buf_printf(&s, "pgscan %lu\n",
1597                       memcg_events(memcg, PGSCAN_KSWAPD) +
1598                       memcg_events(memcg, PGSCAN_DIRECT));
1599        seq_buf_printf(&s, "pgsteal %lu\n",
1600                       memcg_events(memcg, PGSTEAL_KSWAPD) +
1601                       memcg_events(memcg, PGSTEAL_DIRECT));
1602        seq_buf_printf(&s, "%s %lu\n", vm_event_name(PGACTIVATE),
1603                       memcg_events(memcg, PGACTIVATE));
1604        seq_buf_printf(&s, "%s %lu\n", vm_event_name(PGDEACTIVATE),
1605                       memcg_events(memcg, PGDEACTIVATE));
1606        seq_buf_printf(&s, "%s %lu\n", vm_event_name(PGLAZYFREE),
1607                       memcg_events(memcg, PGLAZYFREE));
1608        seq_buf_printf(&s, "%s %lu\n", vm_event_name(PGLAZYFREED),
1609                       memcg_events(memcg, PGLAZYFREED));
1610
1611#ifdef CONFIG_TRANSPARENT_HUGEPAGE
1612        seq_buf_printf(&s, "%s %lu\n", vm_event_name(THP_FAULT_ALLOC),
1613                       memcg_events(memcg, THP_FAULT_ALLOC));
1614        seq_buf_printf(&s, "%s %lu\n", vm_event_name(THP_COLLAPSE_ALLOC),
1615                       memcg_events(memcg, THP_COLLAPSE_ALLOC));
1616#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
1617
1618        /* The above should easily fit into one page */
1619        WARN_ON_ONCE(seq_buf_has_overflowed(&s));
1620
1621        return s.buffer;
1622}
1623
1624#define K(x) ((x) << (PAGE_SHIFT-10))
1625/**
1626 * mem_cgroup_print_oom_context: Print OOM information relevant to
1627 * the memory controller.
1628 * @memcg: The memory cgroup that went over limit
1629 * @p: Task that is going to be killed
1630 *
1631 * NOTE: @memcg and @p's mem_cgroup can be different when hierarchy is
1632 * enabled
1633 */
1634void mem_cgroup_print_oom_context(struct mem_cgroup *memcg, struct task_struct *p)
1635{
1636        rcu_read_lock();
1637
1638        if (memcg) {
1639                pr_cont(",oom_memcg=");
1640                pr_cont_cgroup_path(memcg->css.cgroup);
1641        } else
1642                pr_cont(",global_oom");
1643        if (p) {
1644                pr_cont(",task_memcg=");
1645                pr_cont_cgroup_path(task_cgroup(p, memory_cgrp_id));
1646        }
1647        rcu_read_unlock();
1648}
1649
1650/**
1651 * mem_cgroup_print_oom_meminfo: Print OOM memory information relevant to
1652 * the memory controller.
1653 * @memcg: The memory cgroup that went over limit
1654 */
1655void mem_cgroup_print_oom_meminfo(struct mem_cgroup *memcg)
1656{
1657        char *buf;
1658
1659        pr_info("memory: usage %llukB, limit %llukB, failcnt %lu\n",
1660                K((u64)page_counter_read(&memcg->memory)),
1661                K((u64)READ_ONCE(memcg->memory.max)), memcg->memory.failcnt);
1662        if (cgroup_subsys_on_dfl(memory_cgrp_subsys))
1663                pr_info("swap: usage %llukB, limit %llukB, failcnt %lu\n",
1664                        K((u64)page_counter_read(&memcg->swap)),
1665                        K((u64)READ_ONCE(memcg->swap.max)), memcg->swap.failcnt);
1666        else {
1667                pr_info("memory+swap: usage %llukB, limit %llukB, failcnt %lu\n",
1668                        K((u64)page_counter_read(&memcg->memsw)),
1669                        K((u64)memcg->memsw.max), memcg->memsw.failcnt);
1670                pr_info("kmem: usage %llukB, limit %llukB, failcnt %lu\n",
1671                        K((u64)page_counter_read(&memcg->kmem)),
1672                        K((u64)memcg->kmem.max), memcg->kmem.failcnt);
1673        }
1674
1675        pr_info("Memory cgroup stats for ");
1676        pr_cont_cgroup_path(memcg->css.cgroup);
1677        pr_cont(":");
1678        buf = memory_stat_format(memcg);
1679        if (!buf)
1680                return;
1681        pr_info("%s", buf);
1682        kfree(buf);
1683}
1684
1685/*
1686 * Return the memory (and swap, if configured) limit for a memcg.
1687 */
1688unsigned long mem_cgroup_get_max(struct mem_cgroup *memcg)
1689{
1690        unsigned long max = READ_ONCE(memcg->memory.max);
1691
1692        if (cgroup_subsys_on_dfl(memory_cgrp_subsys)) {
1693                if (mem_cgroup_swappiness(memcg))
1694                        max += min(READ_ONCE(memcg->swap.max),
1695                                   (unsigned long)total_swap_pages);
1696        } else { /* v1 */
1697                if (mem_cgroup_swappiness(memcg)) {
1698                        /* Calculate swap excess capacity from memsw limit */
1699                        unsigned long swap = READ_ONCE(memcg->memsw.max) - max;
1700
1701                        max += min(swap, (unsigned long)total_swap_pages);
1702                }
1703        }
1704        return max;
1705}
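
/*
 * Worked example for the v1 branch above (illustrative numbers): with
 * memory.max = 100 pages, memsw.max = 150 pages and ample physical swap,
 * the swap excess is 150 - 100 = 50 pages, so the function returns
 * 100 + min(50, total_swap_pages) = 150 pages. With swappiness == 0, no
 * swap capacity is added and the result is just 100.
 */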
1706
1707unsigned long mem_cgroup_size(struct mem_cgroup *memcg)
1708{
1709        return page_counter_read(&memcg->memory);
1710}
1711
1712static bool mem_cgroup_out_of_memory(struct mem_cgroup *memcg, gfp_t gfp_mask,
1713                                     int order)
1714{
1715        struct oom_control oc = {
1716                .zonelist = NULL,
1717                .nodemask = NULL,
1718                .memcg = memcg,
1719                .gfp_mask = gfp_mask,
1720                .order = order,
1721        };
1722        bool ret = true;
1723
1724        if (mutex_lock_killable(&oom_lock))
1725                return true;
1726
1727        if (mem_cgroup_margin(memcg) >= (1 << order))
1728                goto unlock;
1729
1730        /*
1731         * Threads that were not sleeping in mutex_lock_killable() may have
1732         * missed their chance to bail out. Therefore, check again under oom_lock.
1733         */
1734        ret = should_force_charge() || out_of_memory(&oc);
1735
1736unlock:
1737        mutex_unlock(&oom_lock);
1738        return ret;
1739}
1740
1741static int mem_cgroup_soft_reclaim(struct mem_cgroup *root_memcg,
1742                                   pg_data_t *pgdat,
1743                                   gfp_t gfp_mask,
1744                                   unsigned long *total_scanned)
1745{
1746        struct mem_cgroup *victim = NULL;
1747        int total = 0;
1748        int loop = 0;
1749        unsigned long excess;
1750        unsigned long nr_scanned;
1751        struct mem_cgroup_reclaim_cookie reclaim = {
1752                .pgdat = pgdat,
1753        };
1754
1755        excess = soft_limit_excess(root_memcg);
1756
1757        while (1) {
1758                victim = mem_cgroup_iter(root_memcg, victim, &reclaim);
1759                if (!victim) {
1760                        loop++;
1761                        if (loop >= 2) {
1762                                /*
1763                                 * If we have not been able to reclaim
1764                                 * anything, it might because there are
1765                                 * anything, it might be because there are
1766                                 * no reclaimable pages under this hierarchy.
1767                                if (!total)
1768                                        break;
1769                                /*
1770                                 * We want to do more targeted reclaim.
1771                                 * excess >> 2 is neither so large that we
1772                                 * reclaim too much, nor so small that we keep
1773                                 * coming back to reclaim from this cgroup
1774                                 */
1775                                if (total >= (excess >> 2) ||
1776                                        (loop > MEM_CGROUP_MAX_RECLAIM_LOOPS))
1777                                        break;
1778                        }
1779                        continue;
1780                }
1781                total += mem_cgroup_shrink_node(victim, gfp_mask, false,
1782                                        pgdat, &nr_scanned);
1783                *total_scanned += nr_scanned;
1784                if (!soft_limit_excess(root_memcg))
1785                        break;
1786        }
1787        mem_cgroup_iter_break(root_memcg, victim);
1788        return total;
1789}
1790
1791#ifdef CONFIG_LOCKDEP
1792static struct lockdep_map memcg_oom_lock_dep_map = {
1793        .name = "memcg_oom_lock",
1794};
1795#endif
1796
1797static DEFINE_SPINLOCK(memcg_oom_lock);
1798
1799/*
1800 * Check whether the OOM killer is already running under our hierarchy.
1801 * If somebody else is running it, return false.
1802 */
1803static bool mem_cgroup_oom_trylock(struct mem_cgroup *memcg)
1804{
1805        struct mem_cgroup *iter, *failed = NULL;
1806
1807        spin_lock(&memcg_oom_lock);
1808
1809        for_each_mem_cgroup_tree(iter, memcg) {
1810                if (iter->oom_lock) {
1811                        /*
1812                         * This subtree of our hierarchy is already locked,
1813                         * so we cannot grant the lock.
1814                         */
1815                        failed = iter;
1816                        mem_cgroup_iter_break(memcg, iter);
1817                        break;
1818                } else
1819                        iter->oom_lock = true;
1820        }
1821
1822        if (failed) {
1823                /*
1824                 * OK, we failed to lock the whole subtree, so we have to
1825                 * undo what we set up before reaching the failing subtree.
1826                 */
1827                for_each_mem_cgroup_tree(iter, memcg) {
1828                        if (iter == failed) {
1829                                mem_cgroup_iter_break(memcg, iter);
1830                                break;
1831                        }
1832                        iter->oom_lock = false;
1833                }
1834        } else
1835                mutex_acquire(&memcg_oom_lock_dep_map, 0, 1, _RET_IP_);
1836
1837        spin_unlock(&memcg_oom_lock);
1838
1839        return !failed;
1840}
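
/*
 * The trylock above is an all-or-nothing lock over a subtree: mark every
 * node locked until hitting one that is already locked; then, still under
 * memcg_oom_lock, walk the subtree again in the same iteration order and
 * clear the flag on every node visited before the one that failed. Nodes
 * after the failing one were never marked, so stopping the unwind there is
 * sufficient.
 */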
1841
1842static void mem_cgroup_oom_unlock(struct mem_cgroup *memcg)
1843{
1844        struct mem_cgroup *iter;
1845
1846        spin_lock(&memcg_oom_lock);
1847        mutex_release(&memcg_oom_lock_dep_map, _RET_IP_);
1848        for_each_mem_cgroup_tree(iter, memcg)
1849                iter->oom_lock = false;
1850        spin_unlock(&memcg_oom_lock);
1851}
1852
1853static void mem_cgroup_mark_under_oom(struct mem_cgroup *memcg)
1854{
1855        struct mem_cgroup *iter;
1856
1857        spin_lock(&memcg_oom_lock);
1858        for_each_mem_cgroup_tree(iter, memcg)
1859                iter->under_oom++;
1860        spin_unlock(&memcg_oom_lock);
1861}
1862
1863static void mem_cgroup_unmark_under_oom(struct mem_cgroup *memcg)
1864{
1865        struct mem_cgroup *iter;
1866
1867        /*
1868         * Be careful about under_oom underflows because a child memcg
1869         * could have been added after mem_cgroup_mark_under_oom.
1870         */
1871        spin_lock(&memcg_oom_lock);
1872        for_each_mem_cgroup_tree(iter, memcg)
1873                if (iter->under_oom > 0)
1874                        iter->under_oom--;
1875        spin_unlock(&memcg_oom_lock);
1876}
1877
1878static DECLARE_WAIT_QUEUE_HEAD(memcg_oom_waitq);
1879
1880struct oom_wait_info {
1881        struct mem_cgroup *memcg;
1882        wait_queue_entry_t      wait;
1883};
1884
1885static int memcg_oom_wake_function(wait_queue_entry_t *wait,
1886        unsigned mode, int sync, void *arg)
1887{
1888        struct mem_cgroup *wake_memcg = (struct mem_cgroup *)arg;
1889        struct mem_cgroup *oom_wait_memcg;
1890        struct oom_wait_info *oom_wait_info;
1891
1892        oom_wait_info = container_of(wait, struct oom_wait_info, wait);
1893        oom_wait_memcg = oom_wait_info->memcg;
1894
1895        if (!mem_cgroup_is_descendant(wake_memcg, oom_wait_memcg) &&
1896            !mem_cgroup_is_descendant(oom_wait_memcg, wake_memcg))
1897                return 0;
1898        return autoremove_wake_function(wait, mode, sync, arg);
1899}
1900
1901static void memcg_oom_recover(struct mem_cgroup *memcg)
1902{
1903        /*
1904         * For the following lockless ->under_oom test, the only required
1905         * guarantee is that it must see the state asserted by an OOM when
1906         * this function is called as a result of userland actions
1907         * triggered by the notification of the OOM.  This is trivially
1908         * achieved by invoking mem_cgroup_mark_under_oom() before
1909         * triggering notification.
1910         */
1911        if (memcg && memcg->under_oom)
1912                __wake_up(&memcg_oom_waitq, TASK_NORMAL, 0, memcg);
1913}
1914
1915enum oom_status {
1916        OOM_SUCCESS,
1917        OOM_FAILED,
1918        OOM_ASYNC,
1919        OOM_SKIPPED
1920};
1921
1922static enum oom_status mem_cgroup_oom(struct mem_cgroup *memcg, gfp_t mask, int order)
1923{
1924        enum oom_status ret;
1925        bool locked;
1926
1927        if (order > PAGE_ALLOC_COSTLY_ORDER)
1928                return OOM_SKIPPED;
1929
1930        memcg_memory_event(memcg, MEMCG_OOM);
1931
1932        /*
1933         * We are in the middle of the charge context here, so we
1934         * don't want to block when potentially sitting on a callstack
1935         * that holds all kinds of filesystem and mm locks.
1936         *
1937         * cgroup1 allows disabling the OOM killer and waiting for outside
1938         * handling until the charge can succeed; remember the context and put
1939         * the task to sleep at the end of the page fault when all locks are
1940         * released.
1941         *
1942         * On the other hand, in-kernel OOM killer allows for an async victim
1943         * memory reclaim (oom_reaper) and that means that we are not solely
1944         * relying on the oom victim to make forward progress and we can
1945         * invoke the oom killer here.
1946         *
1947         * Please note that mem_cgroup_out_of_memory might fail to find a
1948         * victim and then we have to bail out from the charge path.
1949         */
1950        if (memcg->oom_kill_disable) {
1951                if (!current->in_user_fault)
1952                        return OOM_SKIPPED;
1953                css_get(&memcg->css);
1954                current->memcg_in_oom = memcg;
1955                current->memcg_oom_gfp_mask = mask;
1956                current->memcg_oom_order = order;
1957
1958                return OOM_ASYNC;
1959        }
1960
1961        mem_cgroup_mark_under_oom(memcg);
1962
1963        locked = mem_cgroup_oom_trylock(memcg);
1964
1965        if (locked)
1966                mem_cgroup_oom_notify(memcg);
1967
1968        mem_cgroup_unmark_under_oom(memcg);
1969        if (mem_cgroup_out_of_memory(memcg, mask, order))
1970                ret = OOM_SUCCESS;
1971        else
1972                ret = OOM_FAILED;
1973
1974        if (locked)
1975                mem_cgroup_oom_unlock(memcg);
1976
1977        return ret;
1978}
1979
1980/**
1981 * mem_cgroup_oom_synchronize - complete memcg OOM handling
1982 * @handle: actually kill/wait or just clean up the OOM state
1983 *
1984 * This has to be called at the end of a page fault if the memcg OOM
1985 * handler was enabled.
1986 *
1987 * Memcg supports userspace OOM handling where failed allocations must
1988 * sleep on a waitqueue until the userspace task resolves the
1989 * situation.  Sleeping directly in the charge context with all kinds
1990 * of locks held is not a good idea, instead we remember an OOM state
1991 * in the task and mem_cgroup_oom_synchronize() has to be called at
1992 * the end of the page fault to complete the OOM handling.
1993 *
1994 * Returns %true if an ongoing memcg OOM situation was detected and
1995 * completed, %false otherwise.
1996 */
1997bool mem_cgroup_oom_synchronize(bool handle)
1998{
1999        struct mem_cgroup *memcg = current->memcg_in_oom;
2000        struct oom_wait_info owait;
2001        bool locked;
2002
2003        /* OOM is global, do not handle */
2004        if (!memcg)
2005                return false;
2006
2007        if (!handle)
2008                goto cleanup;
2009
2010        owait.memcg = memcg;
2011        owait.wait.flags = 0;
2012        owait.wait.func = memcg_oom_wake_function;
2013        owait.wait.private = current;
2014        INIT_LIST_HEAD(&owait.wait.entry);
2015
2016        prepare_to_wait(&memcg_oom_waitq, &owait.wait, TASK_KILLABLE);
2017        mem_cgroup_mark_under_oom(memcg);
2018
2019        locked = mem_cgroup_oom_trylock(memcg);
2020
2021        if (locked)
2022                mem_cgroup_oom_notify(memcg);
2023
2024        if (locked && !memcg->oom_kill_disable) {
2025                mem_cgroup_unmark_under_oom(memcg);
2026                finish_wait(&memcg_oom_waitq, &owait.wait);
2027                mem_cgroup_out_of_memory(memcg, current->memcg_oom_gfp_mask,
2028                                         current->memcg_oom_order);
2029        } else {
2030                schedule();
2031                mem_cgroup_unmark_under_oom(memcg);
2032                finish_wait(&memcg_oom_waitq, &owait.wait);
2033        }
2034
2035        if (locked) {
2036                mem_cgroup_oom_unlock(memcg);
2037                /*
2038                 * There is no guarantee that an OOM-lock contender
2039                 * sees the wakeups triggered by the OOM kill
2040                 * uncharges.  Wake any sleepers explicitly.
2041                 */
2042                memcg_oom_recover(memcg);
2043        }
2044cleanup:
2045        current->memcg_in_oom = NULL;
2046        css_put(&memcg->css);
2047        return true;
2048}
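
/*
 * How this is reached (sketch, simplified): when a charge fails with the
 * OOM killer disabled, mem_cgroup_oom() above stashes the OOM state in
 * current and returns OOM_ASYNC; the fault path then ends up in
 * pagefault_out_of_memory(), which first lets memcg complete its OOM
 * handling:
 *
 *	if (mem_cgroup_oom_synchronize(true))
 *		return;
 *
 * so the fault is simply retried once userspace resolves the situation.
 */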
2049
2050/**
2051 * mem_cgroup_get_oom_group - get a memory cgroup to clean up after OOM
2052 * @victim: task to be killed by the OOM killer
2053 * @oom_domain: memcg in case of memcg OOM, NULL in case of system-wide OOM
2054 *
2055 * Returns a pointer to a memory cgroup, which has to be cleaned up
2056 * by killing all of its OOM-killable tasks.
2057 *
2058 * Caller has to call mem_cgroup_put() on the returned non-NULL memcg.
2059 */
2060struct mem_cgroup *mem_cgroup_get_oom_group(struct task_struct *victim,
2061                                            struct mem_cgroup *oom_domain)
2062{
2063        struct mem_cgroup *oom_group = NULL;
2064        struct mem_cgroup *memcg;
2065
2066        if (!cgroup_subsys_on_dfl(memory_cgrp_subsys))
2067                return NULL;
2068
2069        if (!oom_domain)
2070                oom_domain = root_mem_cgroup;
2071
2072        rcu_read_lock();
2073
2074        memcg = mem_cgroup_from_task(victim);
2075        if (memcg == root_mem_cgroup)
2076                goto out;
2077
2078        /*
2079         * If the victim task has been asynchronously moved to a different
2080         * memory cgroup, we might end up killing tasks outside oom_domain.
2081         * In this case it's better to ignore memory.group.oom.
2082         */
2083        if (unlikely(!mem_cgroup_is_descendant(memcg, oom_domain)))
2084                goto out;
2085
2086        /*
2087         * Traverse the memory cgroup hierarchy from the victim task's
2088         * cgroup up to the OOMing cgroup (or root) to find the
2089         * highest-level memory cgroup with oom.group set.
2090         */
2091        for (; memcg; memcg = parent_mem_cgroup(memcg)) {
2092                if (memcg->oom_group)
2093                        oom_group = memcg;
2094
2095                if (memcg == oom_domain)
2096                        break;
2097        }
2098
2099        if (oom_group)
2100                css_get(&oom_group->css);
2101out:
2102        rcu_read_unlock();
2103
2104        return oom_group;
2105}
2106
2107void mem_cgroup_print_oom_group(struct mem_cgroup *memcg)
2108{
2109        pr_info("Tasks in ");
2110        pr_cont_cgroup_path(memcg->css.cgroup);
2111        pr_cont(" are going to be killed due to memory.oom.group set\n");
2112}
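
/*
 * Caller-side sketch (simplified from the OOM killer): after picking a
 * victim, the kill is expanded to the whole group when memory.oom.group
 * is set:
 *
 *	oom_group = mem_cgroup_get_oom_group(victim, oc->memcg);
 *	if (oom_group) {
 *		mem_cgroup_print_oom_group(oom_group);
 *		mem_cgroup_scan_tasks(oom_group, kill_fn, NULL);
 *		mem_cgroup_put(oom_group);
 *	}
 *
 * where kill_fn stands in for the OOM killer's per-task kill callback.
 */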
2113
2114/**
2115 * lock_page_memcg - lock a page and memcg binding
2116 * @page: the page
2117 *
2118 * This function protects unlocked LRU pages from being moved to
2119 * another cgroup.
2120 *
2121 * It ensures the lifetime of the returned memcg. The caller is responsible
2122 * for the lifetime of the page; __unlock_page_memcg() is available
2123 * when @page might get freed inside the locked section.
2124 */
2125struct mem_cgroup *lock_page_memcg(struct page *page)
2126{
2127        struct page *head = compound_head(page); /* rmap on tail pages */
2128        struct mem_cgroup *memcg;
2129        unsigned long flags;
2130
2131        /*
2132         * The RCU lock is held throughout the transaction.  The fast
2133         * path can get away without acquiring the memcg->move_lock
2134         * because page moving starts with an RCU grace period.
2135         *
2136         * The RCU lock also protects the memcg from being freed when
2137         * the page state that is going to change is the only thing
2138         * preventing the page itself from being freed. E.g. writeback
2139         * doesn't hold a page reference and relies on PG_writeback to
2140         * keep off truncation, migration and so forth.
2141         */
2142        rcu_read_lock();
2143
2144        if (mem_cgroup_disabled())
2145                return NULL;
2146again:
2147        memcg = page_memcg(head);
2148        if (unlikely(!memcg))
2149                return NULL;
2150
2151#ifdef CONFIG_PROVE_LOCKING
2152        local_irq_save(flags);
2153        might_lock(&memcg->move_lock);
2154        local_irq_restore(flags);
2155#endif
2156
2157        if (atomic_read(&memcg->moving_account) <= 0)
2158                return memcg;
2159
2160        spin_lock_irqsave(&memcg->move_lock, flags);
2161        if (memcg != page_memcg(head)) {
2162                spin_unlock_irqrestore(&memcg->move_lock, flags);
2163                goto again;
2164        }
2165
2166        /*
2167         * When charge migration first begins, we can have locked and
2168         * unlocked page stat updates happening concurrently.  Track
2169         * the task that holds the lock for unlock_page_memcg().
2170         */
2171        memcg->move_lock_task = current;
2172        memcg->move_lock_flags = flags;
2173
2174        return memcg;
2175}
2176EXPORT_SYMBOL(lock_page_memcg);
2177
2178/**
2179 * __unlock_page_memcg - unlock and unpin a memcg
2180 * @memcg: the memcg
2181 *
2182 * Unlock and unpin a memcg returned by lock_page_memcg().
2183 */
2184void __unlock_page_memcg(struct mem_cgroup *memcg)
2185{
2186        if (memcg && memcg->move_lock_task == current) {
2187                unsigned long flags = memcg->move_lock_flags;
2188
2189                memcg->move_lock_task = NULL;
2190                memcg->move_lock_flags = 0;
2191
2192                spin_unlock_irqrestore(&memcg->move_lock, flags);
2193        }
2194
2195        rcu_read_unlock();
2196}
2197
2198/**
2199 * unlock_page_memcg - unlock a page and memcg binding
2200 * @page: the page
2201 */
2202void unlock_page_memcg(struct page *page)
2203{
2204        struct page *head = compound_head(page);
2205
2206        __unlock_page_memcg(page_memcg(head));
2207}
2208EXPORT_SYMBOL(unlock_page_memcg);
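
/*
 * Illustrative sketch (not part of this file): a typical caller pins the
 * page's memcg binding around a paired page/memcg state update so the page
 * cannot be moved to another cgroup in between. The stat item here is
 * arbitrary.
 */
static inline void example_account_dirtied(struct page *page)
{
	struct mem_cgroup *memcg;

	memcg = lock_page_memcg(page);
	if (memcg)
		mod_memcg_state(memcg, NR_FILE_DIRTY, 1);
	unlock_page_memcg(page);
}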
2209
2210struct memcg_stock_pcp {
2211        struct mem_cgroup *cached; /* this must never be the root cgroup */
2212        unsigned int nr_pages;
2213
2214#ifdef CONFIG_MEMCG_KMEM
2215        struct obj_cgroup *cached_objcg;
2216        unsigned int nr_bytes;
2217#endif
2218
2219        struct work_struct work;
2220        unsigned long flags;
2221#define FLUSHING_CACHED_CHARGE  0
2222};
2223static DEFINE_PER_CPU(struct memcg_stock_pcp, memcg_stock);
2224static DEFINE_MUTEX(percpu_charge_mutex);
2225
2226#ifdef CONFIG_MEMCG_KMEM
2227static void drain_obj_stock(struct memcg_stock_pcp *stock);
2228static bool obj_stock_flush_required(struct memcg_stock_pcp *stock,
2229                                     struct mem_cgroup *root_memcg);
2230
2231#else
2232static inline void drain_obj_stock(struct memcg_stock_pcp *stock)
2233{
2234}
2235static bool obj_stock_flush_required(struct memcg_stock_pcp *stock,
2236                                     struct mem_cgroup *root_memcg)
2237{
2238        return false;
2239}
2240#endif
2241
2242/**
2243 * consume_stock: Try to consume stocked charge on this cpu.
2244 * @memcg: memcg to consume from.
2245 * @nr_pages: how many pages to charge.
2246 *
2247 * The charges will only happen if @memcg matches the current cpu's memcg
2248 * stock, and at least @nr_pages are available in that stock.  If the
2249 * fast path fails, the slow path in try_charge() refills the stock.
2250 *
2251 * returns true if successful, false otherwise.
2252 */
2253static bool consume_stock(struct mem_cgroup *memcg, unsigned int nr_pages)
2254{
2255        struct memcg_stock_pcp *stock;
2256        unsigned long flags;
2257        bool ret = false;
2258
2259        if (nr_pages > MEMCG_CHARGE_BATCH)
2260                return ret;
2261
2262        local_irq_save(flags);
2263
2264        stock = this_cpu_ptr(&memcg_stock);
2265        if (memcg == stock->cached && stock->nr_pages >= nr_pages) {
2266                stock->nr_pages -= nr_pages;
2267                ret = true;
2268        }
2269
2270        local_irq_restore(flags);
2271
2272        return ret;
2273}
2274
2275/*
2276 * Return cached charges to the page counters and reset the cached information.
2277 */
2278static void drain_stock(struct memcg_stock_pcp *stock)
2279{
2280        struct mem_cgroup *old = stock->cached;
2281
2282        if (!old)
2283                return;
2284
2285        if (stock->nr_pages) {
2286                page_counter_uncharge(&old->memory, stock->nr_pages);
2287                if (do_memsw_account())
2288                        page_counter_uncharge(&old->memsw, stock->nr_pages);
2289                stock->nr_pages = 0;
2290        }
2291
2292        css_put(&old->css);
2293        stock->cached = NULL;
2294}
2295
2296static void drain_local_stock(struct work_struct *dummy)
2297{
2298        struct memcg_stock_pcp *stock;
2299        unsigned long flags;
2300
2301        /*
2302         * The only protection from memory hotplug vs. drain_stock races is
2303         * that we always operate on the local CPU stock here with IRQs disabled.
2304         */
2305        local_irq_save(flags);
2306
2307        stock = this_cpu_ptr(&memcg_stock);
2308        drain_obj_stock(stock);
2309        drain_stock(stock);
2310        clear_bit(FLUSHING_CACHED_CHARGE, &stock->flags);
2311
2312        local_irq_restore(flags);
2313}
2314
2315/*
2316 * Cache charges (nr_pages) in the local per-cpu area.
2317 * They will be consumed by consume_stock() later.
2318 */
2319static void refill_stock(struct mem_cgroup *memcg, unsigned int nr_pages)
2320{
2321        struct memcg_stock_pcp *stock;
2322        unsigned long flags;
2323
2324        local_irq_save(flags);
2325
2326        stock = this_cpu_ptr(&memcg_stock);
2327        if (stock->cached != memcg) { /* reset if necessary */
2328                drain_stock(stock);
2329                css_get(&memcg->css);
2330                stock->cached = memcg;
2331        }
2332        stock->nr_pages += nr_pages;
2333
2334        if (stock->nr_pages > MEMCG_CHARGE_BATCH)
2335                drain_stock(stock);
2336
2337        local_irq_restore(flags);
2338}
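
/*
 * Worked example of the stock in action (assuming the default
 * MEMCG_CHARGE_BATCH of 32 pages): the first 1-page charge misses the
 * stock, so try_charge() charges a full batch of 32 pages to the page
 * counters and refills the remaining 31 pages into the local stock. The
 * next 31 single-page charges on this CPU are then served by
 * consume_stock() without touching any shared counters.
 */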
2339
2340/*
2341 * Drain all per-CPU charge caches for the given root_memcg and the
2342 * subtree of the hierarchy under it.
2343 */
2344static void drain_all_stock(struct mem_cgroup *root_memcg)
2345{
2346        int cpu, curcpu;
2347
2348        /* If someone's already draining, avoid scheduling more workers. */
2349        if (!mutex_trylock(&percpu_charge_mutex))
2350                return;
2351        /*
2352         * Notify other cpus that a system-wide "drain" is running.
2353         * We do not care about races with the cpu hotplug because cpu down
2354         * as well as workers from this path always operate on the local
2355         * per-cpu data. CPU up doesn't touch memcg_stock at all.
2356         */
2357        curcpu = get_cpu();
2358        for_each_online_cpu(cpu) {
2359                struct memcg_stock_pcp *stock = &per_cpu(memcg_stock, cpu);
2360                struct mem_cgroup *memcg;
2361                bool flush = false;
2362
2363                rcu_read_lock();
2364                memcg = stock->cached;
2365                if (memcg && stock->nr_pages &&
2366                    mem_cgroup_is_descendant(memcg, root_memcg))
2367                        flush = true;
2368                if (obj_stock_flush_required(stock, root_memcg))
2369                        flush = true;
2370                rcu_read_unlock();
2371
2372                if (flush &&
2373                    !test_and_set_bit(FLUSHING_CACHED_CHARGE, &stock->flags)) {
2374                        if (cpu == curcpu)
2375                                drain_local_stock(&stock->work);
2376                        else
2377                                schedule_work_on(cpu, &stock->work);
2378                }
2379        }
2380        put_cpu();
2381        mutex_unlock(&percpu_charge_mutex);
2382}
2383
2384static int memcg_hotplug_cpu_dead(unsigned int cpu)
2385{
2386        struct memcg_stock_pcp *stock;
2387        struct mem_cgroup *memcg, *mi;
2388
2389        stock = &per_cpu(memcg_stock, cpu);
2390        drain_stock(stock);
2391
2392        for_each_mem_cgroup(memcg) {
2393                int i;
2394
2395                for (i = 0; i < MEMCG_NR_STAT; i++) {
2396                        int nid;
2397                        long x;
2398
2399                        x = this_cpu_xchg(memcg->vmstats_percpu->stat[i], 0);
2400                        if (x)
2401                                for (mi = memcg; mi; mi = parent_mem_cgroup(mi))
2402                                        atomic_long_add(x, &mi->vmstats[i]);
2403
2404                        if (i >= NR_VM_NODE_STAT_ITEMS)
2405                                continue;
2406
2407                        for_each_node(nid) {
2408                                struct mem_cgroup_per_node *pn;
2409
2410                                pn = mem_cgroup_nodeinfo(memcg, nid);
2411                                x = this_cpu_xchg(pn->lruvec_stat_cpu->count[i], 0);
2412                                if (x)
2413                                        do {
2414                                                atomic_long_add(x, &pn->lruvec_stat[i]);
2415                                        } while ((pn = parent_nodeinfo(pn, nid)));
2416                        }
2417                }
2418
2419                for (i = 0; i < NR_VM_EVENT_ITEMS; i++) {
2420                        long x;
2421
2422                        x = this_cpu_xchg(memcg->vmstats_percpu->events[i], 0);
2423                        if (x)
2424                                for (mi = memcg; mi; mi = parent_mem_cgroup(mi))
2425                                        atomic_long_add(x, &mi->vmevents[i]);
2426                }
2427        }
2428
2429        return 0;
2430}
2431
2432static unsigned long reclaim_high(struct mem_cgroup *memcg,
2433                                  unsigned int nr_pages,
2434                                  gfp_t gfp_mask)
2435{
2436        unsigned long nr_reclaimed = 0;
2437
2438        do {
2439                unsigned long pflags;
2440
2441                if (page_counter_read(&memcg->memory) <=
2442                    READ_ONCE(memcg->memory.high))
2443                        continue;
2444
2445                memcg_memory_event(memcg, MEMCG_HIGH);
2446
2447                psi_memstall_enter(&pflags);
2448                nr_reclaimed += try_to_free_mem_cgroup_pages(memcg, nr_pages,
2449                                                             gfp_mask, true);
2450                psi_memstall_leave(&pflags);
2451        } while ((memcg = parent_mem_cgroup(memcg)) &&
2452                 !mem_cgroup_is_root(memcg));
2453
2454        return nr_reclaimed;
2455}
2456
2457static void high_work_func(struct work_struct *work)
2458{
2459        struct mem_cgroup *memcg;
2460
2461        memcg = container_of(work, struct mem_cgroup, high_work);
2462        reclaim_high(memcg, MEMCG_CHARGE_BATCH, GFP_KERNEL);
2463}
2464
2465/*
2466 * Clamp the maximum sleep time per allocation batch to 2 seconds. This is
2467 * enough to cause a significant slowdown in most cases, while still
2468 * allowing diagnostics and tracing to proceed without becoming stuck.
2469 */
2470#define MEMCG_MAX_HIGH_DELAY_JIFFIES (2UL*HZ)
2471
2472/*
2473 * When calculating the delay, we use these on either side of the exponentiation
2474 * to maintain precision and scale to a reasonable number of jiffies (see the
2475 * table below).
2476 *
2477 * - MEMCG_DELAY_PRECISION_SHIFT: Extra precision bits while translating the
2478 *   overage ratio to a delay.
2479 * - MEMCG_DELAY_SCALING_SHIFT: The number of bits to scale down the
2480 *   proposed penalty in order to reduce to a reasonable number of jiffies, and
2481 *   to produce a reasonable delay curve.
2482 *
2483 * MEMCG_DELAY_SCALING_SHIFT just happens to be a number that produces a
2484 * reasonable delay curve compared to precision-adjusted overage, not
2485 * penalising heavily at first, but still making sure that growth beyond the
2486 * limit penalises misbehaving cgroups by slowing them down exponentially. For
2487 * example, with a high of 100 megabytes:
2488 *
2489 *  +-------+------------------------+
2490 *  | usage | time to allocate in ms |
2491 *  +-------+------------------------+
2492 *  | 100M  |                      0 |
2493 *  | 101M  |                      6 |
2494 *  | 102M  |                     25 |
2495 *  | 103M  |                     57 |
2496 *  | 104M  |                    102 |
2497 *  | 105M  |                    159 |
2498 *  | 106M  |                    230 |
2499 *  | 107M  |                    313 |
2500 *  | 108M  |                    409 |
2501 *  | 109M  |                    518 |
2502 *  | 110M  |                    639 |
2503 *  | 111M  |                    774 |
2504 *  | 112M  |                    921 |
2505 *  | 113M  |                   1081 |
2506 *  | 114M  |                   1254 |
2507 *  | 115M  |                   1439 |
2508 *  | 116M  |                   1638 |
2509 *  | 117M  |                   1849 |
2510 *  | 118M  |                   2000 |
2511 *  | 119M  |                   2000 |
2512 *  | 120M  |                   2000 |
2513 *  +-------+------------------------+
2514 */
2515#define MEMCG_DELAY_PRECISION_SHIFT 20
2516#define MEMCG_DELAY_SCALING_SHIFT 14
2517
2518static u64 calculate_overage(unsigned long usage, unsigned long high)
2519{
2520        u64 overage;
2521
2522        if (usage <= high)
2523                return 0;
2524
2525        /*
2526         * Prevent division by 0 in overage calculation by acting as if
2527         * it was a threshold of 1 page
2528         */
2529        high = max(high, 1UL);
2530
2531        overage = usage - high;
2532        overage <<= MEMCG_DELAY_PRECISION_SHIFT;
2533        return div64_u64(overage, high);
2534}
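
/*
 * Worked example: usage = 110 pages against high = 100 pages gives
 * overage = ((110 - 100) << 20) / 100 = 104857, i.e. roughly 0.1 in
 * 20-bit fixed point.
 */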
2535
2536static u64 mem_find_max_overage(struct mem_cgroup *memcg)
2537{
2538        u64 overage, max_overage = 0;
2539
2540        do {
2541                overage = calculate_overage(page_counter_read(&memcg->memory),
2542                                            READ_ONCE(memcg->memory.high));
2543                max_overage = max(overage, max_overage);
2544        } while ((memcg = parent_mem_cgroup(memcg)) &&
2545                 !mem_cgroup_is_root(memcg));
2546
2547        return max_overage;
2548}
2549
2550static u64 swap_find_max_overage(struct mem_cgroup *memcg)
2551{
2552        u64 overage, max_overage = 0;
2553
2554        do {
2555                overage = calculate_overage(page_counter_read(&memcg->swap),
2556                                            READ_ONCE(memcg->swap.high));
2557                if (overage)
2558                        memcg_memory_event(memcg, MEMCG_SWAP_HIGH);
2559                max_overage = max(overage, max_overage);
2560        } while ((memcg = parent_mem_cgroup(memcg)) &&
2561                 !mem_cgroup_is_root(memcg));
2562
2563        return max_overage;
2564}
2565
2566/*
2567 * Get the number of jiffies that we should penalise a mischievous cgroup which
2568 * is exceeding its memory.high by checking both it and its ancestors.
2569 */
2570static unsigned long calculate_high_delay(struct mem_cgroup *memcg,
2571                                          unsigned int nr_pages,
2572                                          u64 max_overage)
2573{
2574        unsigned long penalty_jiffies;
2575
2576        if (!max_overage)
2577                return 0;
2578
2579        /*
2580         * We use overage compared to memory.high to calculate the number of
2581         * jiffies to sleep (penalty_jiffies). Ideally this value should be
2582         * fairly lenient on small overages, and increasingly harsh when the
2583         * memcg in question makes it clear that it has no intention of stopping
2584         * its crazy behaviour, so we exponentially increase the delay based on
2585         * overage amount.
2586         */
2587        penalty_jiffies = max_overage * max_overage * HZ;
2588        penalty_jiffies >>= MEMCG_DELAY_PRECISION_SHIFT;
2589        penalty_jiffies >>= MEMCG_DELAY_SCALING_SHIFT;
2590
2591        /*
2592         * Factor in the task's own contribution to the overage, such that four
2593         * N-sized allocations are throttled approximately the same as one
2594         * 4N-sized allocation.
2595         *
2596         * MEMCG_CHARGE_BATCH pages is nominal, so work out how much smaller or
2597         * larger the current charge batch is than that.
2598         */
2599        return penalty_jiffies * nr_pages / MEMCG_CHARGE_BATCH;
2600}
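
/*
 * Worked example (assuming HZ == 1000 and a full MEMCG_CHARGE_BATCH): for
 * the 110M row of the table above, max_overage is about 104857 (~0.1 in
 * 20-bit fixed point), so
 *
 *	penalty_jiffies = 104857 * 104857 * 1000 >> 20 >> 14 = 639
 *
 * i.e. ~639ms, matching the table entry.
 */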
2601
2602/*
2603 * Scheduled by try_charge() to be executed from the userland return path
2604 * and reclaims memory over the high limit.
2605 */
2606void mem_cgroup_handle_over_high(void)
2607{
2608        unsigned long penalty_jiffies;
2609        unsigned long pflags;
2610        unsigned long nr_reclaimed;
2611        unsigned int nr_pages = current->memcg_nr_pages_over_high;
2612        int nr_retries = MAX_RECLAIM_RETRIES;
2613        struct mem_cgroup *memcg;
2614        bool in_retry = false;
2615
2616        if (likely(!nr_pages))
2617                return;
2618
2619        memcg = get_mem_cgroup_from_mm(current->mm);
2620        current->memcg_nr_pages_over_high = 0;
2621
2622retry_reclaim:
2623        /*
2624         * The allocating task should reclaim at least the batch size, but for
2625         * subsequent retries we only want to do what's necessary to prevent oom
2626         * or breaching resource isolation.
2627         *
2628         * This is distinct from memory.max or page allocator behaviour because
2629         * memory.high is currently batched, whereas memory.max and the page
2630         * allocator run every time an allocation is made.
2631         */
2632        nr_reclaimed = reclaim_high(memcg,
2633                                    in_retry ? SWAP_CLUSTER_MAX : nr_pages,
2634                                    GFP_KERNEL);
2635
2636        /*
2637         * memory.high is breached and reclaim is unable to keep up. Throttle
2638         * allocators proactively to slow down excessive growth.
2639         */
2640        penalty_jiffies = calculate_high_delay(memcg, nr_pages,
2641                                               mem_find_max_overage(memcg));
2642
2643        penalty_jiffies += calculate_high_delay(memcg, nr_pages,
2644                                                swap_find_max_overage(memcg));
2645
2646        /*
2647         * Clamp the max delay per usermode return so as to still keep the
2648         * application moving forwards and also permit diagnostics, albeit
2649         * extremely slowly.
2650         */
2651        penalty_jiffies = min(penalty_jiffies, MEMCG_MAX_HIGH_DELAY_JIFFIES);
2652
2653        /*
2654         * Don't sleep if the amount of jiffies this memcg owes us is so low
2655         * that it's not even worth doing, in an attempt to be nice to those who
2656         * go only a small amount over their memory.high value and maybe haven't
2657         * been aggressively reclaimed enough yet.
2658         */
2659        if (penalty_jiffies <= HZ / 100)
2660                goto out;
2661
2662        /*
2663         * If reclaim is making forward progress but we're still over
2664         * memory.high, we want to encourage that rather than doing allocator
2665         * throttling.
2666         */
2667        if (nr_reclaimed || nr_retries--) {
2668                in_retry = true;
2669                goto retry_reclaim;
2670        }
2671
2672        /*
2673         * If we exit early, we're guaranteed to die (since
2674         * schedule_timeout_killable sets TASK_KILLABLE). This means we don't
2675         * need to account for any ill-begotten jiffies to pay them off later.
2676         */
2677        psi_memstall_enter(&pflags);
2678        schedule_timeout_killable(penalty_jiffies);
2679        psi_memstall_leave(&pflags);
2680
2681out:
2682        css_put(&memcg->css);
2683}
2684
2685static int try_charge(struct mem_cgroup *memcg, gfp_t gfp_mask,
2686                      unsigned int nr_pages)
2687{
2688        unsigned int batch = max(MEMCG_CHARGE_BATCH, nr_pages);
2689        int nr_retries = MAX_RECLAIM_RETRIES;
2690        struct mem_cgroup *mem_over_limit;
2691        struct page_counter *counter;
2692        enum oom_status oom_status;
2693        unsigned long nr_reclaimed;
2694        bool may_swap = true;
2695        bool drained = false;
2696        unsigned long pflags;
2697
2698        if (mem_cgroup_is_root(memcg))
2699                return 0;
2700retry:
2701        if (consume_stock(memcg, nr_pages))
2702                return 0;
2703
2704        if (!do_memsw_account() ||
2705            page_counter_try_charge(&memcg->memsw, batch, &counter)) {
2706                if (page_counter_try_charge(&memcg->memory, batch, &counter))
2707                        goto done_restock;
2708                if (do_memsw_account())
2709                        page_counter_uncharge(&memcg->memsw, batch);
2710                mem_over_limit = mem_cgroup_from_counter(counter, memory);
2711        } else {
2712                mem_over_limit = mem_cgroup_from_counter(counter, memsw);
2713                may_swap = false;
2714        }
2715
2716        if (batch > nr_pages) {
2717                batch = nr_pages;
2718                goto retry;
2719        }
2720
2721        /*
2722         * Memcg doesn't have a dedicated reserve for atomic
2723         * allocations. But like the global atomic pool, we need to
2724         * put the burden of reclaim on regular allocation requests
2725         * and let these go through as privileged allocations.
2726         */
2727        if (gfp_mask & __GFP_ATOMIC)
2728                goto force;
2729
2730        /*
2731         * Unlike in global OOM situations, memcg is not in a physical
2732         * memory shortage.  Allow dying and OOM-killed tasks to
2733         * bypass the last charges so that they can exit quickly and
2734         * free their memory.
2735         */
2736        if (unlikely(should_force_charge()))
2737                goto force;
2738
2739        /*
2740         * Prevent unbounded recursion when reclaim operations need to
2741         * allocate memory. This might exceed the limits temporarily,
2742         * but we prefer facilitating memory reclaim and getting back
2743         * under the limit over triggering OOM kills in these cases.
2744         */
2745        if (unlikely(current->flags & PF_MEMALLOC))
2746                goto force;
2747
2748        if (unlikely(task_in_memcg_oom(current)))
2749                goto nomem;
2750
2751        if (!gfpflags_allow_blocking(gfp_mask))
2752                goto nomem;
2753
2754        memcg_memory_event(mem_over_limit, MEMCG_MAX);
2755
2756        psi_memstall_enter(&pflags);
2757        nr_reclaimed = try_to_free_mem_cgroup_pages(mem_over_limit, nr_pages,
2758                                                    gfp_mask, may_swap);
2759        psi_memstall_leave(&pflags);
2760
2761        if (mem_cgroup_margin(mem_over_limit) >= nr_pages)
2762                goto retry;
2763
2764        if (!drained) {
2765                drain_all_stock(mem_over_limit);
2766                drained = true;
2767                goto retry;
2768        }
2769
2770        if (gfp_mask & __GFP_NORETRY)
2771                goto nomem;
2772        /*
2773         * Even though the limit is exceeded at this point, reclaim
2774         * may have been able to free some pages.  Retry the charge
2775         * before killing the task.
2776         *
2777         * Only for regular pages, though: huge pages are rather
2778         * unlikely to succeed so close to the limit, and we fall back
2779         * to regular pages anyway in case of failure.
2780         */
2781        if (nr_reclaimed && nr_pages <= (1 << PAGE_ALLOC_COSTLY_ORDER))
2782                goto retry;
2783        /*
2784         * During a task move, charges can be counted twice. So it's
2785         * better to wait until the task move completes if one is in progress.
2786         */
2787        if (mem_cgroup_wait_acct_move(mem_over_limit))
2788                goto retry;
2789
2790        if (nr_retries--)
2791                goto retry;
2792
2793        if (gfp_mask & __GFP_RETRY_MAYFAIL)
2794                goto nomem;
2795
2796        if (gfp_mask & __GFP_NOFAIL)
2797                goto force;
2798
2799        if (fatal_signal_pending(current))
2800                goto force;
2801
2802        /*
2803         * Keep retrying as long as the memcg oom killer is able to make
2804         * forward progress, or bypass the charge if the oom killer
2805         * couldn't make any progress.
2806         */
2807        oom_status = mem_cgroup_oom(mem_over_limit, gfp_mask,
2808                       get_order(nr_pages * PAGE_SIZE));
2809        switch (oom_status) {
2810        case OOM_SUCCESS:
2811                nr_retries = MAX_RECLAIM_RETRIES;
2812                goto retry;
2813        case OOM_FAILED:
2814                goto force;
2815        default:
2816                goto nomem;
2817        }
2818nomem:
2819        if (!(gfp_mask & __GFP_NOFAIL))
2820                return -ENOMEM;
2821force:
2822        /*
2823         * The allocation either can't fail or will lead to more memory
2824         * being freed very soon.  Allow memory usage to go over the limit
2825         * temporarily by force charging it.
2826         */
2827        page_counter_charge(&memcg->memory, nr_pages);
2828        if (do_memsw_account())
2829                page_counter_charge(&memcg->memsw, nr_pages);
2830
2831        return 0;
2832
2833done_restock:
2834        if (batch > nr_pages)
2835                refill_stock(memcg, batch - nr_pages);
2836
2837        /*
2838         * If the hierarchy is above the normal consumption range, schedule
2839         * reclaim on returning to userland.  We can perform reclaim here
2840         * if __GFP_RECLAIM but let's always punt for simplicity and so that
2841         * GFP_KERNEL can consistently be used during reclaim.  @memcg is
2842         * not recorded as it most likely matches current's and won't
2843         * change in the meantime.  As high limit is checked again before
2844         * reclaim, the cost of mismatch is negligible.
2845         */
2846        do {
2847                bool mem_high, swap_high;
2848
2849                mem_high = page_counter_read(&memcg->memory) >
2850                        READ_ONCE(memcg->memory.high);
2851                swap_high = page_counter_read(&memcg->swap) >
2852                        READ_ONCE(memcg->swap.high);
2853
2854                /* Don't bother a random interrupted task */
2855                if (in_interrupt()) {
2856                        if (mem_high) {
2857                                schedule_work(&memcg->high_work);
2858                                break;
2859                        }
2860                        continue;
2861                }
2862
2863                if (mem_high || swap_high) {
2864                        /*
2865                         * The allocating tasks in this cgroup will need to do
2866                         * reclaim or be throttled to prevent further growth
2867                         * of the memory or swap footprints.
2868                         *
2869                         * Target some best-effort fairness between the tasks,
2870                         * and distribute reclaim work and delay penalties
2871                         * based on how much each task is actually allocating.
2872                         */
2873                        current->memcg_nr_pages_over_high += batch;
2874                        set_notify_resume(current);
2875                        break;
2876                }
2877        } while ((memcg = parent_mem_cgroup(memcg)));
2878
2879        return 0;
2880}
2881
2882#if defined(CONFIG_MEMCG_KMEM) || defined(CONFIG_MMU)
2883static void cancel_charge(struct mem_cgroup *memcg, unsigned int nr_pages)
2884{
2885        if (mem_cgroup_is_root(memcg))
2886                return;
2887
2888        page_counter_uncharge(&memcg->memory, nr_pages);
2889        if (do_memsw_account())
2890                page_counter_uncharge(&memcg->memsw, nr_pages);
2891}
2892#endif
2893
2894static void commit_charge(struct page *page, struct mem_cgroup *memcg)
2895{
2896        VM_BUG_ON_PAGE(page_memcg(page), page);
2897        /*
2898         * Any of the following ensures page's memcg stability:
2899         *
2900         * - the page lock
2901         * - LRU isolation
2902         * - lock_page_memcg()
2903         * - exclusive reference
2904         */
2905        page->memcg_data = (unsigned long)memcg;
2906}
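
/*
 * Together, try_charge(), commit_charge() and cancel_charge() form a
 * two-phase charge protocol. Caller-side sketch (simplified):
 *
 *	if (try_charge(memcg, gfp, nr_pages))
 *		return -ENOMEM;
 *	commit_charge(page, memcg);
 *
 * try_charge() reserves pages on the counters; commit_charge() binds the
 * page to the memcg. If the binding step can no longer happen, the
 * reservation is rolled back with cancel_charge(memcg, nr_pages).
 */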
2907
2908#ifdef CONFIG_MEMCG_KMEM
2909int memcg_alloc_page_obj_cgroups(struct page *page, struct kmem_cache *s,
2910                                 gfp_t gfp, bool new_page)
2911{
2912        unsigned int objects = objs_per_slab_page(s, page);
2913        unsigned long memcg_data;
2914        void *vec;
2915
2916        vec = kcalloc_node(objects, sizeof(struct obj_cgroup *), gfp,
2917                           page_to_nid(page));
2918        if (!vec)
2919                return -ENOMEM;
2920
2921        memcg_data = (unsigned long) vec | MEMCG_DATA_OBJCGS;
2922        if (new_page) {
2923                /*
2924                 * If the slab page is brand new and nobody can yet access
2925                 * its memcg_data, no synchronization is required and
2926                 * memcg_data can be simply assigned.
2927                 */
2928                page->memcg_data = memcg_data;
2929        } else if (cmpxchg(&page->memcg_data, 0, memcg_data)) {
2930                /*
2931                 * If the slab page is already in use, somebody can allocate
2932                 * and assign obj_cgroups in parallel. In this case the existing
2933                 * objcg vector should be reused.
2934                 */
2935                kfree(vec);
2936                return 0;
2937        }
2938
2939        kmemleak_not_leak(vec);
2940        return 0;
2941}
2942
2943/*
2944 * Returns a pointer to the memory cgroup to which the kernel object is charged.
2945 *
2946 * A passed kernel object can be a slab object or a generic kernel page, so
2947 * different mechanisms for getting the memory cgroup pointer should be used.
2948 * In certain cases (e.g. kernel stacks or large kmallocs with SLUB) the caller
2949 * cannot know for sure how the kernel object is implemented.
2950 * mem_cgroup_from_obj() can be safely used in such cases.
2951 *
2952 * The caller must ensure the memcg lifetime, e.g. by taking rcu_read_lock(),
2953 * cgroup_mutex, etc.
2954 */
2955struct mem_cgroup *mem_cgroup_from_obj(void *p)
2956{
2957        struct page *page;
2958
2959        if (mem_cgroup_disabled())
2960                return NULL;
2961
2962        page = virt_to_head_page(p);
2963
2964        /*
2965         * Slab objects are accounted individually, not per-page.
2966         * Memcg membership data for each individual object is saved in
2967         * the page->obj_cgroups.
2968         */
2969        if (page_objcgs_check(page)) {
2970                struct obj_cgroup *objcg;
2971                unsigned int off;
2972
2973                off = obj_to_index(page->slab_cache, page, p);
2974                objcg = page_objcgs(page)[off];
2975                if (objcg)
2976                        return obj_cgroup_memcg(objcg);
2977
2978                return NULL;
2979        }
2980
2981        /*
2982         * page_memcg_check() is used here, because the page_objcgs_check()
2983         * check above could fail because the object cgroups vector wasn't set
2984         * at that moment, but it can be set concurrently.
2985         * page_memcg_check(page) will guarantee that a proper memory
2986         * cgroup pointer or NULL will be returned.
2987         */
2988        return page_memcg_check(page);
2989}
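
/*
 * Usage sketch: the caller must keep the returned memcg alive, e.g. with
 * RCU:
 *
 *	rcu_read_lock();
 *	memcg = mem_cgroup_from_obj(p);
 *	if (memcg) {
 *		... read-only use of memcg ...
 *	}
 *	rcu_read_unlock();
 */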
2990
2991__always_inline struct obj_cgroup *get_obj_cgroup_from_current(void)
2992{
2993        struct obj_cgroup *objcg = NULL;
2994        struct mem_cgroup *memcg;
2995
2996        if (memcg_kmem_bypass())
2997                return NULL;
2998
2999        rcu_read_lock();
3000        if (unlikely(active_memcg()))
3001                memcg = active_memcg();
3002        else
3003                memcg = mem_cgroup_from_task(current);
3004
3005        for (; memcg != root_mem_cgroup; memcg = parent_mem_cgroup(memcg)) {
3006                objcg = rcu_dereference(memcg->objcg);
3007                if (objcg && obj_cgroup_tryget(objcg))
3008                        break;
3009                objcg = NULL;
3010        }
3011        rcu_read_unlock();
3012
3013        return objcg;
3014}
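
/*
 * Usage sketch (simplified from the slab accounting path): take a
 * reference on the current objcg, charge bytes against it with the
 * byte-granular helper, and drop the reference when done:
 *
 *	objcg = get_obj_cgroup_from_current();
 *	if (objcg) {
 *		if (!obj_cgroup_charge(objcg, gfp, size))
 *			... account the object to objcg ...
 *		obj_cgroup_put(objcg);
 *	}
 */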
3015
3016static int memcg_alloc_cache_id(void)
3017{
3018        int id, size;
3019        int err;
3020
3021        id = ida_simple_get(&memcg_cache_ida,
3022                            0, MEMCG_CACHES_MAX_SIZE, GFP_KERNEL);
3023        if (id < 0)
3024                return id;
3025
3026        if (id < memcg_nr_cache_ids)
3027                return id;
3028
3029        /*
3030         * There's no space for the new id in memcg_caches arrays,
3031         * so we have to grow them.
3032         */
3033        down_write(&memcg_cache_ids_sem);
3034
3035        size = 2 * (id + 1);
3036        if (size < MEMCG_CACHES_MIN_SIZE)
3037                size = MEMCG_CACHES_MIN_SIZE;
3038        else if (size > MEMCG_CACHES_MAX_SIZE)
3039                size = MEMCG_CACHES_MAX_SIZE;
3040
3041        err = memcg_update_all_list_lrus(size);
3042        if (!err)
3043                memcg_nr_cache_ids = size;
3044
3045        up_write(&memcg_cache_ids_sem);
3046
3047        if (err) {
3048                ida_simple_remove(&memcg_cache_ida, id);
3049                return err;
3050        }
3051        return id;
3052}
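
/*
 * Worked example of the doubling growth above: if the IDA hands out id 8
 * while memcg_nr_cache_ids is 8, the per-memcg arrays are grown to
 * size = 2 * (8 + 1) = 18 (subject to the MIN/MAX clamps), so the next
 * several id allocations can return without resizing.
 */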
3053
3054static void memcg_free_cache_id(int id)
3055{
3056        ida_simple_remove(&memcg_cache_ida, id);
3057}
3058
3059/**
3060 * __memcg_kmem_charge: charge a number of kernel pages to a memcg
3061 * @memcg: memory cgroup to charge
3062 * @gfp: reclaim mode
3063 * @nr_pages: number of pages to charge
3064 *
3065 * Returns 0 on success, an error code on failure.
3066 */
3067static int __memcg_kmem_charge(struct mem_cgroup *memcg, gfp_t gfp,
3068                               unsigned int nr_pages)
3069{
3070        struct page_counter *counter;
3071        int ret;
3072
3073        ret = try_charge(memcg, gfp, nr_pages);
3074        if (ret)
3075                return ret;
3076
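            /*
             * The dedicated kmem counter is a legacy (cgroup v1) construct;
             * on the default hierarchy the try_charge() above is the only
             * accounting needed.
             */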
3077        if (!cgroup_subsys_on_dfl(memory_cgrp_subsys) &&
3078            !page_counter_try_charge(&memcg->kmem, nr_pages, &counter)) {
3079
3080                /*
3081                 * Enforce __GFP_NOFAIL allocation because callers are not
3082                 * prepared to see failures and likely do not have any failure
3083                 * handling code.
3084                 */
3085                if (gfp & __GFP_NOFAIL) {
3086                        page_counter_charge(&memcg->kmem, nr_pages);
3087                        return 0;
3088                }
3089                cancel_charge(memcg, nr_pages);
3090                return -ENOMEM;
3091        }
3092        return 0;
3093}
3094
3095/**
3096 * __memcg_kmem_uncharge: uncharge a number of kernel pages from a memcg
3097 * @memcg: memcg to uncharge
3098 * @nr_pages: number of pages to uncharge
3099 */
3100static void __memcg_kmem_uncharge(struct mem_cgroup *memcg, unsigned int nr_pages)
3101{
3102        if (!cgroup_subsys_on_dfl(memory_cgrp_subsys))
3103                page_counter_uncharge(&memcg->kmem, nr_pages);
3104
3105        refill_stock(memcg, nr_pages);
3106}
3107
3108/**
3109 * __memcg_kmem_charge_page: charge a kmem page to the current memory cgroup
3110 * @page: page to charge
3111 * @gfp: reclaim mode
3112 * @order: allocation order
3113 *
3114 * Returns 0 on success, an error code on failure.
3115 */
3116int __memcg_kmem_charge_page(struct page *page, gfp_t gfp, int order)
3117{
3118        struct mem_cgroup *memcg;
3119        int ret = 0;
3120
3121        memcg = get_mem_cgroup_from_current();
3122        if (memcg && !mem_cgroup_is_root(memcg)) {
3123                ret = __memcg_kmem_charge(memcg, gfp, 1 << order);
3124                if (!ret) {
3125                        page->memcg_data = (unsigned long)memcg |
3126                                MEMCG_DATA_KMEM;
3127                        return 0;
3128                }
3129                css_put(&memcg->css);
3130        }
3131        return ret;
3132}
3133
3134/**
3135 * __memcg_kmem_uncharge_page: uncharge a kmem page
3136 * @page: page to uncharge
3137 * @order: allocation order
3138 */
3139void __memcg_kmem_uncharge_page(struct page *page, int order)
3140{
3141        struct mem_cgroup *memcg = page_memcg(page);
3142        unsigned int nr_pages = 1 << order;
3143
3144        if (!memcg)
3145                return;
3146
3147        VM_BUG_ON_PAGE(mem_cgroup_is_root(memcg), page);
3148        __memcg_kmem_uncharge(memcg, nr_pages);
3149        page->memcg_data = 0;
3150        css_put(&memcg->css);
3151}
3152
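    /*
     * Try to satisfy an object charge from the current CPU's cached byte
     * stock, avoiding page_counter updates in the fast path.
     */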
3153static bool consume_obj_stock(struct obj_cgroup *objcg, unsigned int nr_bytes)
3154{
3155        struct memcg_stock_pcp *stock;
3156        unsigned long flags;
3157        bool ret = false;
3158
3159        local_irq_save(flags);
3160
3161        stock = this_cpu_ptr(&memcg_stock);
3162        if (objcg == stock->cached_objcg && stock->nr_bytes >= nr_bytes) {
3163                stock->nr_bytes -= nr_bytes;
3164                ret = true;
3165        }
3166
3167        local_irq_restore(flags);
3168
3169        return ret;
3170}
3171
3172static void drain_obj_stock(struct memcg_stock_pcp *stock)
3173{
3174        struct obj_cgroup *old = stock->cached_objcg;
3175
3176        if (!old)
3177                return;
3178
3179        if (stock->nr_bytes) {
3180                unsigned int nr_pages = stock->nr_bytes >> PAGE_SHIFT;
3181                unsigned int nr_bytes = stock->nr_bytes & (PAGE_SIZE - 1);
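                    /*
                     * e.g. with 4K pages, 5000 cached bytes split into one
                     * full page (uncharged below) and a 904-byte remainder
                     * (flushed to the objcg).
                     */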
3182
3183                if (nr_pages) {
3184                        rcu_read_lock();
3185                        __memcg_kmem_uncharge(obj_cgroup_memcg(old), nr_pages);
3186                        rcu_read_unlock();
3187                }
3188
3189                /*
3190                 * The leftover is flushed to the centralized per-memcg value.
3191                 * On the next attempt to refill obj stock it will be moved
3192                 * to a per-cpu stock (probably, on another CPU), see
3193                 * refill_obj_stock().
3194                 *
3195                 * How often it's flushed is a trade-off between the memory
3196                 * limit enforcement accuracy and potential CPU contention,
3197                 * so it might be changed in the future.
3198                 */
3199                atomic_add(nr_bytes, &old->nr_charged_bytes);
3200                stock->nr_bytes = 0;
3201        }
3202
3203        obj_cgroup_put(old);
3204        stock->cached_objcg = NULL;
3205}
3206
3207static bool obj_stock_flush_required(struct memcg_stock_pcp *stock,
3208                                     struct mem_cgroup *root_memcg)
3209{
3210        struct mem_cgroup *memcg;
3211
3212        if (stock->cached_objcg) {
3213                memcg = obj_cgroup_memcg(stock->cached_objcg);
3214                if (memcg && mem_cgroup_is_descendant(memcg, root_memcg))
3215                        return true;
3216        }
3217
3218        return false;
3219}
3220
3221static void refill_obj_stock(struct obj_cgroup *objcg, unsigned int nr_bytes)
3222{
3223        struct memcg_stock_pcp *stock;
3224        unsigned long flags;
3225
3226        local_irq_save(flags);
3227
3228        stock = this_cpu_ptr(&memcg_stock);
3229        if (stock->cached_objcg != objcg) { /* reset if necessary */
3230                drain_obj_stock(stock);
3231                obj_cgroup_get(objcg);
3232                stock->cached_objcg = objcg;
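                    /*
                     * Pull any bytes previously flushed to
                     * objcg->nr_charged_bytes back into this CPU's stock.
                     */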
3233                stock->nr_bytes = atomic_xchg(&objcg->nr_charged_bytes, 0);
3234        }
3235        stock->nr_bytes += nr_bytes;
3236
3237        if (stock->nr_bytes > PAGE_SIZE)
3238                drain_obj_stock(stock);
3239
3240        local_irq_restore(flags);
3241}
3242
3243int obj_cgroup_charge(struct obj_cgroup *objcg, gfp_t gfp, size_t size)
3244{
3245        struct mem_cgroup *memcg;
3246        unsigned int nr_pages, nr_bytes;
3247        int ret;
3248
3249        if (consume_obj_stock(objcg, size))
3250                return 0;
3251
3252        /*
3253         * In theory, memcg->nr_charged_bytes can have enough
3254         * pre-charged bytes to satisfy the allocation. However,
3255         * flushing memcg->nr_charged_bytes requires two atomic
3256         * operations, and memcg->nr_charged_bytes can't be big,
3257         * so it's better to ignore it and try grab some new pages.
3258         * memcg->nr_charged_bytes will be flushed in
3259         * refill_obj_stock(), called from this function or
3260         * independently later.
3261         */
3262        rcu_read_lock();
3263retry:
3264        memcg = obj_cgroup_memcg(objcg);
3265        if (unlikely(!css_tryget(&memcg->css)))
3266                goto retry;
3267        rcu_read_unlock();
3268
3269        nr_pages = size >> PAGE_SHIFT;
3270        nr_bytes = size & (PAGE_SIZE - 1);
3271
3272        if (nr_bytes)
3273                nr_pages += 1;
3274
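            /*
             * e.g. size == 5000 with 4K pages charges two full pages (8192
             * bytes); on success the unused PAGE_SIZE - nr_bytes == 3192
             * bytes are handed back to the per-cpu stock below.
             */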
3275        ret = __memcg_kmem_charge(memcg, gfp, nr_pages);
3276        if (!ret && nr_bytes)
3277                refill_obj_stock(objcg, PAGE_SIZE - nr_bytes);
3278
3279        css_put(&memcg->css);
3280        return ret;
3281}
3282
3283void obj_cgroup_uncharge(struct obj_cgroup *objcg, size_t size)
3284{
3285        refill_obj_stock(objcg, size);
3286}
3287
3288#endif /* CONFIG_MEMCG_KMEM */
3289
3290/*
3291 * Because page_memcg(head) is not set on tails, set it now.
3292 */
3293void split_page_memcg(struct page *head, unsigned int nr)
3294{
3295        struct mem_cgroup *memcg = page_memcg(head);
3296        int i;
3297
3298        if (mem_cgroup_disabled() || !memcg)
3299                return;
3300
3301        for (i = 1; i < nr; i++)
3302                head[i].memcg_data = head->memcg_data;
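            /* The head already holds one css reference; take one more for
             * each of the nr - 1 tail pages. */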
3303        css_get_many(&memcg->css, nr - 1);
3304}
3305
3306#ifdef CONFIG_MEMCG_SWAP
3307/**
3308 * mem_cgroup_move_swap_account - move swap charge and swap_cgroup's record.
3309 * @entry: swap entry to be moved
3310 * @from:  mem_cgroup which the entry is moved from
3311 * @to:  mem_cgroup which the entry is moved to
3312 *
3313 * It succeeds only when the swap_cgroup's record for this entry is the same
3314 * as the mem_cgroup's id of @from.
3315 *
3316 * Returns 0 on success, -EINVAL on failure.
3317 *
3318 * The caller must have charged to @to, IOW, called page_counter_charge() for
3319 * both res and memsw, and called css_get().
3320 */
3321static int mem_cgroup_move_swap_account(swp_entry_t entry,
3322                                struct mem_cgroup *from, struct mem_cgroup *to)
3323{
3324        unsigned short old_id, new_id;
3325
3326        old_id = mem_cgroup_id(from);
3327        new_id = mem_cgroup_id(to);
3328
3329        if (swap_cgroup_cmpxchg(entry, old_id, new_id) == old_id) {
3330                mod_memcg_state(from, MEMCG_SWAP, -1);
3331                mod_memcg_state(to, MEMCG_SWAP, 1);
3332                return 0;
3333        }
3334        return -EINVAL;
3335}
3336#else
3337static inline int mem_cgroup_move_swap_account(swp_entry_t entry,
3338                                struct mem_cgroup *from, struct mem_cgroup *to)
3339{
3340        return -EINVAL;
3341}
3342#endif
3343
3344static DEFINE_MUTEX(memcg_max_mutex);
3345
3346static int mem_cgroup_resize_max(struct mem_cgroup *memcg,
3347                                 unsigned long max, bool memsw)
3348{
3349        bool enlarge = false;
3350        bool drained = false;
3351        int ret;
3352        bool limits_invariant;
3353        struct page_counter *counter = memsw ? &memcg->memsw : &memcg->memory;
3354
3355        do {
3356                if (signal_pending(current)) {
3357                        ret = -EINTR;
3358                        break;
3359                }
3360
3361                mutex_lock(&memcg_max_mutex);
3362                /*
3363                 * Make sure that the new limit (memsw or memory limit) doesn't
3364                 * break our basic invariant memory.max <= memsw.max.
3365                 */
3366                limits_invariant = memsw ? max >= READ_ONCE(memcg->memory.max) :
3367                                           max <= memcg->memsw.max;
3368                if (!limits_invariant) {
3369                        mutex_unlock(&memcg_max_mutex);
3370                        ret = -EINVAL;
3371                        break;
3372                }
3373                if (max > counter->max)
3374                        enlarge = true;
3375                ret = page_counter_set_max(counter, max);
3376                mutex_unlock(&memcg_max_mutex);
3377
3378                if (!ret)
3379                        break;
3380
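            /*
             * Shrinking failed because usage is still above the new limit:
             * drain the per-cpu stocks once, then fall back to reclaim.
             */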
3381                if (!drained) {
3382                        drain_all_stock(memcg);
3383                        drained = true;
3384                        continue;
3385                }
3386
3387                if (!try_to_free_mem_cgroup_pages(memcg, 1,
3388                                        GFP_KERNEL, !memsw)) {
3389                        ret = -EBUSY;
3390                        break;
3391                }
3392        } while (true);
3393
3394        if (!ret && enlarge)
3395                memcg_oom_recover(memcg);
3396
3397        return ret;
3398}
3399
3400unsigned long mem_cgroup_soft_limit_reclaim(pg_data_t *pgdat, int order,
3401                                            gfp_t gfp_mask,
3402                                            unsigned long *total_scanned)
3403{
3404        unsigned long nr_reclaimed = 0;
3405        struct mem_cgroup_per_node *mz, *next_mz = NULL;
3406        unsigned long reclaimed;
3407        int loop = 0;
3408        struct mem_cgroup_tree_per_node *mctz;
3409        unsigned long excess;
3410        unsigned long nr_scanned;
3411
3412        if (order > 0)
3413                return 0;
3414
3415        mctz = soft_limit_tree_node(pgdat->node_id);
3416
3417        /*
3418         * Do not even bother to check the largest node if the root
3419         * is empty. Do it lockless to prevent lock bouncing. Races
3420         * are acceptable as soft limit is best effort anyway.
3421         */
3422        if (!mctz || RB_EMPTY_ROOT(&mctz->rb_root))
3423                return 0;
3424
3425        /*
3426         * This loop can run for a while, especially if mem_cgroups
3427         * continuously exceed their soft limit and keep the system
3428         * under pressure.
3429         */
3430        do {
3431                if (next_mz)
3432                        mz = next_mz;
3433                else
3434                        mz = mem_cgroup_largest_soft_limit_node(mctz);
3435                if (!mz)
3436                        break;
3437
3438                nr_scanned = 0;
3439                reclaimed = mem_cgroup_soft_reclaim(mz->memcg, pgdat,
3440                                                    gfp_mask, &nr_scanned);
3441                nr_reclaimed += reclaimed;
3442                *total_scanned += nr_scanned;
3443                spin_lock_irq(&mctz->lock);
3444                __mem_cgroup_remove_exceeded(mz, mctz);
3445
3446                /*
3447                 * If we failed to reclaim anything from this memory cgroup
3448                 * it is time to move on to the next cgroup
3449                 */
3450                next_mz = NULL;
3451                if (!reclaimed)
3452                        next_mz = __mem_cgroup_largest_soft_limit_node(mctz);
3453
3454                excess = soft_limit_excess(mz->memcg);
3455                /*
3456                 * One school of thought says that we should not add
3457                 * back the node to the tree if reclaim returns 0.
3458         * But our reclaim could return 0 simply because, due
3459         * to priority, we are exposing a smaller subset of
3460         * memory to reclaim from. Consider this as a longer
3461                 * term TODO.
3462                 */
3463                /* If excess == 0, no tree ops */
3464                __mem_cgroup_insert_exceeded(mz, mctz, excess);
3465                spin_unlock_irq(&mctz->lock);
3466                css_put(&mz->memcg->css);
3467                loop++;
3468                /*
3469                 * Could not reclaim anything and there are no more
3470                 * mem cgroups to try or we seem to be looping without
3471                 * reclaiming anything.
3472                 */
3473                if (!nr_reclaimed &&
3474                        (next_mz == NULL ||
3475                        loop > MEM_CGROUP_MAX_SOFT_LIMIT_RECLAIM_LOOPS))
3476                        break;
3477        } while (!nr_reclaimed);
3478        if (next_mz)
3479                css_put(&next_mz->memcg->css);
3480        return nr_reclaimed;
3481}
3482
3483/*
3484 * Reclaims as many pages from the given memcg as possible.
3485 *
3486 * Caller is responsible for holding css reference for memcg.
3487 */
3488static int mem_cgroup_force_empty(struct mem_cgroup *memcg)
3489{
3490        int nr_retries = MAX_RECLAIM_RETRIES;
3491
3492        /* we call try-to-free pages to make this cgroup empty */
3493        lru_add_drain_all();
3494
3495        drain_all_stock(memcg);
3496
3497        /* try to free all pages in this cgroup */
3498        while (nr_retries && page_counter_read(&memcg->memory)) {
3499                int progress;
3500
3501                if (signal_pending(current))
3502                        return -EINTR;
3503
3504                progress = try_to_free_mem_cgroup_pages(memcg, 1,
3505                                                        GFP_KERNEL, true);
3506                if (!progress) {
3507                        nr_retries--;
3508                        /* maybe some writeback is necessary */
3509                        congestion_wait(BLK_RW_ASYNC, HZ/10);
3510                }
3511
3512        }
3513
3514        return 0;
3515}
3516
3517static ssize_t mem_cgroup_force_empty_write(struct kernfs_open_file *of,
3518                                            char *buf, size_t nbytes,
3519                                            loff_t off)
3520{
3521        struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of));
3522
3523        if (mem_cgroup_is_root(memcg))
3524                return -EINVAL;
3525        return mem_cgroup_force_empty(memcg) ?: nbytes;
3526}
3527
3528static u64 mem_cgroup_hierarchy_read(struct cgroup_subsys_state *css,
3529                                     struct cftype *cft)
3530{
3531        return 1;
3532}
3533
3534static int mem_cgroup_hierarchy_write(struct cgroup_subsys_state *css,
3535                                      struct cftype *cft, u64 val)
3536{
3537        if (val == 1)
3538                return 0;
3539
3540        pr_warn_once("Non-hierarchical mode is deprecated. "
3541                     "Please report your usecase to linux-mm@kvack.org if you "
3542                     "depend on this functionality.\n");
3543
3544        return -EINVAL;
3545}
3546
3547static unsigned long mem_cgroup_usage(struct mem_cgroup *memcg, bool swap)
3548{
3549        unsigned long val;
3550
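            /*
             * The root memcg is not charged through its page_counter, so
             * approximate its usage from the hierarchical stat counters.
             */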
3551        if (mem_cgroup_is_root(memcg)) {
3552                val = memcg_page_state(memcg, NR_FILE_PAGES) +
3553                        memcg_page_state(memcg, NR_ANON_MAPPED);
3554                if (swap)
3555                        val += memcg_page_state(memcg, MEMCG_SWAP);
3556        } else {
3557                if (!swap)
3558                        val = page_counter_read(&memcg->memory);
3559                else
3560                        val = page_counter_read(&memcg->memsw);
3561        }
3562        return val;
3563}
3564
3565enum {
3566        RES_USAGE,
3567        RES_LIMIT,
3568        RES_MAX_USAGE,
3569        RES_FAILCNT,
3570        RES_SOFT_LIMIT,
3571};
3572
3573static u64 mem_cgroup_read_u64(struct cgroup_subsys_state *css,
3574                               struct cftype *cft)
3575{
3576        struct mem_cgroup *memcg = mem_cgroup_from_css(css);
3577        struct page_counter *counter;
3578
3579        switch (MEMFILE_TYPE(cft->private)) {
3580        case _MEM:
3581                counter = &memcg->memory;
3582                break;
3583        case _MEMSWAP:
3584                counter = &memcg->memsw;
3585                break;
3586        case _KMEM:
3587                counter = &memcg->kmem;
3588                break;
3589        case _TCP:
3590                counter = &memcg->tcpmem;
3591                break;
3592        default:
3593                BUG();
3594        }
3595
3596        switch (MEMFILE_ATTR(cft->private)) {
3597        case RES_USAGE:
3598                if (counter == &memcg->memory)
3599                        return (u64)mem_cgroup_usage(memcg, false) * PAGE_SIZE;
3600                if (counter == &memcg->memsw)
3601                        return (u64)mem_cgroup_usage(memcg, true) * PAGE_SIZE;
3602                return (u64)page_counter_read(counter) * PAGE_SIZE;
3603        case RES_LIMIT:
3604                return (u64)counter->max * PAGE_SIZE;
3605        case RES_MAX_USAGE:
3606                return (u64)counter->watermark * PAGE_SIZE;
3607        case RES_FAILCNT:
3608                return counter->failcnt;
3609        case RES_SOFT_LIMIT:
3610                return (u64)memcg->soft_limit * PAGE_SIZE;
3611        default:
3612                BUG();
3613        }
3614}
3615
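    /*
     * Fold each CPU's pending deltas into the atomic counters of this memcg
     * and all of its ancestors, including the per-node lruvec counters.
     */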
3616static void memcg_flush_percpu_vmstats(struct mem_cgroup *memcg)
3617{
3618        unsigned long stat[MEMCG_NR_STAT] = {0};
3619        struct mem_cgroup *mi;
3620        int node, cpu, i;
3621
3622        for_each_online_cpu(cpu)
3623                for (i = 0; i < MEMCG_NR_STAT; i++)
3624                        stat[i] += per_cpu(memcg->vmstats_percpu->stat[i], cpu);
3625
3626        for (mi = memcg; mi; mi = parent_mem_cgroup(mi))
3627                for (i = 0; i < MEMCG_NR_STAT; i++)
3628                        atomic_long_add(stat[i], &mi->vmstats[i]);
3629
3630        for_each_node(node) {
3631                struct mem_cgroup_per_node *pn = memcg->nodeinfo[node];
3632                struct mem_cgroup_per_node *pi;
3633
3634                for (i = 0; i < NR_VM_NODE_STAT_ITEMS; i++)
3635                        stat[i] = 0;
3636
3637                for_each_online_cpu(cpu)
3638                        for (i = 0; i < NR_VM_NODE_STAT_ITEMS; i++)
3639                                stat[i] += per_cpu(
3640                                        pn->lruvec_stat_cpu->count[i], cpu);
3641
3642                for (pi = pn; pi; pi = parent_nodeinfo(pi, node))
3643                        for (i = 0; i < NR_VM_NODE_STAT_ITEMS; i++)
3644                                atomic_long_add(stat[i], &pi->lruvec_stat[i]);
3645        }
3646}
3647
3648static void memcg_flush_percpu_vmevents(struct mem_cgroup *memcg)
3649{
3650        unsigned long events[NR_VM_EVENT_ITEMS];
3651        struct mem_cgroup *mi;
3652        int cpu, i;
3653
3654        for (i = 0; i < NR_VM_EVENT_ITEMS; i++)
3655                events[i] = 0;
3656
3657        for_each_online_cpu(cpu)
3658                for (i = 0; i < NR_VM_EVENT_ITEMS; i++)
3659                        events[i] += per_cpu(memcg->vmstats_percpu->events[i],
3660                                             cpu);
3661
3662        for (mi = memcg; mi; mi = parent_mem_cgroup(mi))
3663                for (i = 0; i < NR_VM_EVENT_ITEMS; i++)
3664                        atomic_long_add(events[i], &mi->vmevents[i]);
3665}
3666
3667#ifdef CONFIG_MEMCG_KMEM
3668static int memcg_online_kmem(struct mem_cgroup *memcg)
3669{
3670        struct obj_cgroup *objcg;
3671        int memcg_id;
3672
3673        if (cgroup_memory_nokmem)
3674                return 0;
3675
3676        BUG_ON(memcg->kmemcg_id >= 0);
3677        BUG_ON(memcg->kmem_state);
3678
3679        memcg_id = memcg_alloc_cache_id();
3680        if (memcg_id < 0)
3681                return memcg_id;
3682
3683        objcg = obj_cgroup_alloc();
3684        if (!objcg) {
3685                memcg_free_cache_id(memcg_id);
3686                return -ENOMEM;
3687        }
3688        objcg->memcg = memcg;
3689        rcu_assign_pointer(memcg->objcg, objcg);
3690
3691        static_branch_enable(&memcg_kmem_enabled_key);
3692
3693        memcg->kmemcg_id = memcg_id;
3694        memcg->kmem_state = KMEM_ONLINE;
3695
3696        return 0;
3697}
3698
3699static void memcg_offline_kmem(struct mem_cgroup *memcg)
3700{
3701        struct cgroup_subsys_state *css;
3702        struct mem_cgroup *parent, *child;
3703        int kmemcg_id;
3704
3705        if (memcg->kmem_state != KMEM_ONLINE)
3706                return;
3707
3708        memcg->kmem_state = KMEM_ALLOCATED;
3709
3710        parent = parent_mem_cgroup(memcg);
3711        if (!parent)
3712                parent = root_mem_cgroup;
3713
3714        memcg_reparent_objcgs(memcg, parent);
3715
3716        kmemcg_id = memcg->kmemcg_id;
3717        BUG_ON(kmemcg_id < 0);
3718
3719        /*
3720         * Change kmemcg_id of this cgroup and all its descendants to the
3721         * parent's id, and then move all entries from this cgroup's list_lrus
3722         * to ones of the parent. After we have finished, all list_lrus
3723         * corresponding to this cgroup are guaranteed to remain empty. The
3724         * ordering is imposed by list_lru_node->lock taken by
3725         * memcg_drain_all_list_lrus().
3726         */
3727        rcu_read_lock(); /* can be called from css_free w/o cgroup_mutex */
3728        css_for_each_descendant_pre(css, &memcg->css) {
3729                child = mem_cgroup_from_css(css);
3730                BUG_ON(child->kmemcg_id != kmemcg_id);
3731                child->kmemcg_id = parent->kmemcg_id;
3732        }
3733        rcu_read_unlock();
3734
3735        memcg_drain_all_list_lrus(kmemcg_id, parent);
3736
3737        memcg_free_cache_id(kmemcg_id);
3738}
3739
3740static void memcg_free_kmem(struct mem_cgroup *memcg)
3741{
3742        /* css_alloc() failed, offlining didn't happen */
3743        if (unlikely(memcg->kmem_state == KMEM_ONLINE))
3744                memcg_offline_kmem(memcg);
3745}
3746#else
3747static int memcg_online_kmem(struct mem_cgroup *memcg)
3748{
3749        return 0;
3750}
3751static void memcg_offline_kmem(struct mem_cgroup *memcg)
3752{
3753}
3754static void memcg_free_kmem(struct mem_cgroup *memcg)
3755{
3756}
3757#endif /* CONFIG_MEMCG_KMEM */
3758
3759static int memcg_update_kmem_max(struct mem_cgroup *memcg,
3760                                 unsigned long max)
3761{
3762        int ret;
3763
3764        mutex_lock(&memcg_max_mutex);
3765        ret = page_counter_set_max(&memcg->kmem, max);
3766        mutex_unlock(&memcg_max_mutex);
3767        return ret;
3768}
3769
3770static int memcg_update_tcp_max(struct mem_cgroup *memcg, unsigned long max)
3771{
3772        int ret;
3773
3774        mutex_lock(&memcg_max_mutex);
3775
3776        ret = page_counter_set_max(&memcg->tcpmem, max);
3777        if (ret)
3778                goto out;
3779
3780        if (!memcg->tcpmem_active) {
3781                /*
3782                 * The active flag needs to be written after the static_key
3783                 * update. This is what guarantees that the socket activation
3784                 * function is the last one to run. See mem_cgroup_sk_alloc()
3785                 * for details, and note that we don't mark any socket as
3786                 * belonging to this memcg until that flag is up.
3787                 *
3788                 * We need to do this, because static_keys will span multiple
3789                 * sites, but we can't control their order. If we mark a socket
3790                 * as accounted, but the accounting functions are not patched in
3791                 * yet, we'll lose accounting.
3792                 *
3793                 * We never race with the readers in mem_cgroup_sk_alloc(),
3794                 * because when this value changes, the code to process it is not
3795                 * patched in yet.
3796                 */
3797                static_branch_inc(&memcg_sockets_enabled_key);
3798                memcg->tcpmem_active = true;
3799        }
3800out:
3801        mutex_unlock(&memcg_max_mutex);
3802        return ret;
3803}
3804
3805/*
3806 * The user of this function is...
3807 * RES_LIMIT.
3808 */
3809static ssize_t mem_cgroup_write(struct kernfs_open_file *of,
3810                                char *buf, size_t nbytes, loff_t off)
3811{
3812        struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of));
3813        unsigned long nr_pages;
3814        int ret;
3815
3816        buf = strstrip(buf);
3817        ret = page_counter_memparse(buf, "-1", &nr_pages);
3818        if (ret)
3819                return ret;
3820
3821        switch (MEMFILE_ATTR(of_cft(of)->private)) {
3822        case RES_LIMIT:
3823                if (mem_cgroup_is_root(memcg)) { /* Can't set limit on root */
3824                        ret = -EINVAL;
3825                        break;
3826                }
3827                switch (MEMFILE_TYPE(of_cft(of)->private)) {
3828                case _MEM:
3829                        ret = mem_cgroup_resize_max(memcg, nr_pages, false);
3830                        break;
3831                case _MEMSWAP:
3832                        ret = mem_cgroup_resize_max(memcg, nr_pages, true);
3833                        break;
3834                case _KMEM:
3835                        pr_warn_once("kmem.limit_in_bytes is deprecated and will be removed. "
3836                                     "Please report your usecase to linux-mm@kvack.org if you "
3837                                     "depend on this functionality.\n");
3838                        ret = memcg_update_kmem_max(memcg, nr_pages);
3839                        break;
3840                case _TCP:
3841                        ret = memcg_update_tcp_max(memcg, nr_pages);
3842                        break;
3843                }
3844                break;
3845        case RES_SOFT_LIMIT:
3846                memcg->soft_limit = nr_pages;
3847                ret = 0;
3848                break;
3849        }
3850        return ret ?: nbytes;
3851}
3852
3853static ssize_t mem_cgroup_reset(struct kernfs_open_file *of, char *buf,
3854                                size_t nbytes, loff_t off)
3855{
3856        struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of));
3857        struct page_counter *counter;
3858
3859        switch (MEMFILE_TYPE(of_cft(of)->private)) {
3860        case _MEM:
3861                counter = &memcg->memory;
3862                break;
3863        case _MEMSWAP:
3864                counter = &memcg->memsw;
3865                break;
3866        case _KMEM:
3867                counter = &memcg->kmem;
3868                break;
3869        case _TCP:
3870                counter = &memcg->tcpmem;
3871                break;
3872        default:
3873                BUG();
3874        }
3875
3876        switch (MEMFILE_ATTR(of_cft(of)->private)) {
3877        case RES_MAX_USAGE:
3878                page_counter_reset_watermark(counter);
3879                break;
3880        case RES_FAILCNT:
3881                counter->failcnt = 0;
3882                break;
3883        default:
3884                BUG();
3885        }
3886
3887        return nbytes;
3888}
3889
3890static u64 mem_cgroup_move_charge_read(struct cgroup_subsys_state *css,
3891                                        struct cftype *cft)
3892{
3893        return mem_cgroup_from_css(css)->move_charge_at_immigrate;
3894}
3895
3896#ifdef CONFIG_MMU
3897static int mem_cgroup_move_charge_write(struct cgroup_subsys_state *css,
3898                                        struct cftype *cft, u64 val)
3899{
3900        struct mem_cgroup *memcg = mem_cgroup_from_css(css);
3901
3902        if (val & ~MOVE_MASK)
3903                return -EINVAL;
3904
3905        /*
3906         * No kind of locking is needed in here, because ->can_attach() will
3907         * check this value once in the beginning of the process, and then carry
3908         * on with stale data. This means that changes to this value will only
3909         * affect task migrations starting after the change.
3910         */
3911        memcg->move_charge_at_immigrate = val;
3912        return 0;
3913}
3914#else
3915static int mem_cgroup_move_charge_write(struct cgroup_subsys_state *css,
3916                                        struct cftype *cft, u64 val)
3917{
3918        return -ENOSYS;
3919}
3920#endif
3921
3922#ifdef CONFIG_NUMA
3923
3924#define LRU_ALL_FILE (BIT(LRU_INACTIVE_FILE) | BIT(LRU_ACTIVE_FILE))
3925#define LRU_ALL_ANON (BIT(LRU_INACTIVE_ANON) | BIT(LRU_ACTIVE_ANON))
3926#define LRU_ALL      ((1 << NR_LRU_LISTS) - 1)
3927
3928static unsigned long mem_cgroup_node_nr_lru_pages(struct mem_cgroup *memcg,
3929                                int nid, unsigned int lru_mask, bool tree)
3930{
3931        struct lruvec *lruvec = mem_cgroup_lruvec(memcg, NODE_DATA(nid));
3932        unsigned long nr = 0;
3933        enum lru_list lru;
3934
3935        VM_BUG_ON((unsigned)nid >= nr_node_ids);
3936
3937        for_each_lru(lru) {
3938                if (!(BIT(lru) & lru_mask))
3939                        continue;
3940                if (tree)
3941                        nr += lruvec_page_state(lruvec, NR_LRU_BASE + lru);
3942                else
3943                        nr += lruvec_page_state_local(lruvec, NR_LRU_BASE + lru);
3944        }
3945        return nr;
3946}
3947
3948static unsigned long mem_cgroup_nr_lru_pages(struct mem_cgroup *memcg,
3949                                             unsigned int lru_mask,
3950                                             bool tree)
3951{
3952        unsigned long nr = 0;
3953        enum lru_list lru;
3954
3955        for_each_lru(lru) {
3956                if (!(BIT(lru) & lru_mask))
3957                        continue;
3958                if (tree)
3959                        nr += memcg_page_state(memcg, NR_LRU_BASE + lru);
3960                else
3961                        nr += memcg_page_state_local(memcg, NR_LRU_BASE + lru);
3962        }
3963        return nr;
3964}
3965
3966static int memcg_numa_stat_show(struct seq_file *m, void *v)
3967{
3968        struct numa_stat {
3969                const char *name;
3970                unsigned int lru_mask;
3971        };
3972
3973        static const struct numa_stat stats[] = {
3974                { "total", LRU_ALL },
3975                { "file", LRU_ALL_FILE },
3976                { "anon", LRU_ALL_ANON },
3977                { "unevictable", BIT(LRU_UNEVICTABLE) },
3978        };
3979        const struct numa_stat *stat;
3980        int nid;
3981        struct mem_cgroup *memcg = mem_cgroup_from_seq(m);
3982
3983        for (stat = stats; stat < stats + ARRAY_SIZE(stats); stat++) {
3984                seq_printf(m, "%s=%lu", stat->name,
3985                           mem_cgroup_nr_lru_pages(memcg, stat->lru_mask,
3986                                                   false));
3987                for_each_node_state(nid, N_MEMORY)
3988                        seq_printf(m, " N%d=%lu", nid,
3989                                   mem_cgroup_node_nr_lru_pages(memcg, nid,
3990                                                        stat->lru_mask, false));
3991                seq_putc(m, '\n');
3992        }
3993
3994        for (stat = stats; stat < stats + ARRAY_SIZE(stats); stat++) {
3995
3996                seq_printf(m, "hierarchical_%s=%lu", stat->name,
3997                           mem_cgroup_nr_lru_pages(memcg, stat->lru_mask,
3998                                                   true));
3999                for_each_node_state(nid, N_MEMORY)
4000                        seq_printf(m, " N%d=%lu", nid,
4001                                   mem_cgroup_node_nr_lru_pages(memcg, nid,
4002                                                        stat->lru_mask, true));
4003                seq_putc(m, '\n');
4004        }
4005
4006        return 0;
4007}
4008#endif /* CONFIG_NUMA */
4009
4010static const unsigned int memcg1_stats[] = {
4011        NR_FILE_PAGES,
4012        NR_ANON_MAPPED,
4013#ifdef CONFIG_TRANSPARENT_HUGEPAGE
4014        NR_ANON_THPS,
4015#endif
4016        NR_SHMEM,
4017        NR_FILE_MAPPED,
4018        NR_FILE_DIRTY,
4019        NR_WRITEBACK,
4020        MEMCG_SWAP,
4021};
4022
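    /*
     * Keep in the same order as memcg1_stats above; the pairing is checked
     * by the BUILD_BUG_ON() in memcg_stat_show().
     */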
4023static const char *const memcg1_stat_names[] = {
4024        "cache",
4025        "rss",
4026#ifdef CONFIG_TRANSPARENT_HUGEPAGE
4027        "rss_huge",
4028#endif
4029        "shmem",
4030        "mapped_file",
4031        "dirty",
4032        "writeback",
4033        "swap",
4034};
4035
4036/* Universal VM events cgroup1 shows, original sort order */
4037static const unsigned int memcg1_events[] = {
4038        PGPGIN,
4039        PGPGOUT,
4040        PGFAULT,
4041        PGMAJFAULT,
4042};
4043
4044static int memcg_stat_show(struct seq_file *m, void *v)
4045{
4046        struct mem_cgroup *memcg = mem_cgroup_from_seq(m);
4047        unsigned long memory, memsw;
4048        struct mem_cgroup *mi;
4049        unsigned int i;
4050
4051        BUILD_BUG_ON(ARRAY_SIZE(memcg1_stat_names) != ARRAY_SIZE(memcg1_stats));
4052
4053        for (i = 0; i < ARRAY_SIZE(memcg1_stats); i++) {
4054                unsigned long nr;
4055
4056                if (memcg1_stats[i] == MEMCG_SWAP && !do_memsw_account())
4057                        continue;
4058                nr = memcg_page_state_local(memcg, memcg1_stats[i]);
4059                seq_printf(m, "%s %lu\n", memcg1_stat_names[i], nr * PAGE_SIZE);
4060        }
4061
4062        for (i = 0; i < ARRAY_SIZE(memcg1_events); i++)
4063                seq_printf(m, "%s %lu\n", vm_event_name(memcg1_events[i]),
4064                           memcg_events_local(memcg, memcg1_events[i]));
4065
4066        for (i = 0; i < NR_LRU_LISTS; i++)
4067                seq_printf(m, "%s %lu\n", lru_list_name(i),
4068                           memcg_page_state_local(memcg, NR_LRU_BASE + i) *
4069                           PAGE_SIZE);
4070
4071        /* Hierarchical information */
4072        memory = memsw = PAGE_COUNTER_MAX;
4073        for (mi = memcg; mi; mi = parent_mem_cgroup(mi)) {
4074                memory = min(memory, READ_ONCE(mi->memory.max));
4075                memsw = min(memsw, READ_ONCE(mi->memsw.max));
4076        }
4077        seq_printf(m, "hierarchical_memory_limit %llu\n",
4078                   (u64)memory * PAGE_SIZE);
4079        if (do_memsw_account())
4080                seq_printf(m, "hierarchical_memsw_limit %llu\n",
4081                           (u64)memsw * PAGE_SIZE);
4082
4083        for (i = 0; i < ARRAY_SIZE(memcg1_stats); i++) {
4084                unsigned long nr;
4085
4086                if (memcg1_stats[i] == MEMCG_SWAP && !do_memsw_account())
4087                        continue;
4088                nr = memcg_page_state(memcg, memcg1_stats[i]);
4089                seq_printf(m, "total_%s %llu\n", memcg1_stat_names[i],
4090                                                (u64)nr * PAGE_SIZE);
4091        }
4092
4093        for (i = 0; i < ARRAY_SIZE(memcg1_events); i++)
4094                seq_printf(m, "total_%s %llu\n",
4095                           vm_event_name(memcg1_events[i]),
4096                           (u64)memcg_events(memcg, memcg1_events[i]));
4097
4098        for (i = 0; i < NR_LRU_LISTS; i++)
4099                seq_printf(m, "total_%s %llu\n", lru_list_name(i),
4100                           (u64)memcg_page_state(memcg, NR_LRU_BASE + i) *
4101                           PAGE_SIZE);
4102
4103#ifdef CONFIG_DEBUG_VM
4104        {
4105                pg_data_t *pgdat;
4106                struct mem_cgroup_per_node *mz;
4107                unsigned long anon_cost = 0;
4108                unsigned long file_cost = 0;
4109
4110                for_each_online_pgdat(pgdat) {
4111                        mz = mem_cgroup_nodeinfo(memcg, pgdat->node_id);
4112
4113                        anon_cost += mz->lruvec.anon_cost;
4114                        file_cost += mz->lruvec.file_cost;
4115                }
4116                seq_printf(m, "anon_cost %lu\n", anon_cost);
4117                seq_printf(m, "file_cost %lu\n", file_cost);
4118        }
4119#endif
4120
4121        return 0;
4122}
4123
4124static u64 mem_cgroup_swappiness_read(struct cgroup_subsys_state *css,
4125                                      struct cftype *cft)
4126{
4127        struct mem_cgroup *memcg = mem_cgroup_from_css(css);
4128
4129        return mem_cgroup_swappiness(memcg);
4130}
4131
4132static int mem_cgroup_swappiness_write(struct cgroup_subsys_state *css,
4133                                       struct cftype *cft, u64 val)
4134{
4135        struct mem_cgroup *memcg = mem_cgroup_from_css(css);
4136
4137        if (val > 100)
4138                return -EINVAL;
4139
4140        if (css->parent)
4141                memcg->swappiness = val;
4142        else
4143                vm_swappiness = val;
4144
4145        return 0;
4146}
4147
4148static void __mem_cgroup_threshold(struct mem_cgroup *memcg, bool swap)
4149{
4150        struct mem_cgroup_threshold_ary *t;
4151        unsigned long usage;
4152        int i;
4153
4154        rcu_read_lock();
4155        if (!swap)
4156                t = rcu_dereference(memcg->thresholds.primary);
4157        else
4158                t = rcu_dereference(memcg->memsw_thresholds.primary);
4159
4160        if (!t)
4161                goto unlock;
4162
4163        usage = mem_cgroup_usage(memcg, swap);
4164
4165        /*
4166         * current_threshold points to the threshold just below or equal
4167         * to usage. If that no longer holds, a threshold was crossed after
4168         * the last call of __mem_cgroup_threshold().
4169         */
4170        i = t->current_threshold;
4171
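            /*
             * Example: with thresholds {4M, 8M, 16M} and usage last seen at
             * 10M, current_threshold sits at the 8M entry. If usage has
             * since dropped to 3M, the backward walk below signals the 8M
             * and 4M eventfds and current_threshold ends up at -1.
             */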
4172        /*
4173         * Iterate backward over array of thresholds starting from
4174         * current_threshold and check if a threshold is crossed.
4175         * If none of the thresholds below usage is crossed, we read
4176         * only one element of the array here.
4177         */
4178        for (; i >= 0 && unlikely(t->entries[i].threshold > usage); i--)
4179                eventfd_signal(t->entries[i].eventfd, 1);
4180
4181        /* i = current_threshold + 1 */
4182        i++;
4183
4184        /*
4185         * Iterate forward over array of thresholds starting from
4186         * current_threshold+1 and check if a threshold is crossed.
4187         * If none of the thresholds above usage is crossed, we read
4188         * only one element of the array here.
4189         */
4190        for (; i < t->size && unlikely(t->entries[i].threshold <= usage); i++)
4191                eventfd_signal(t->entries[i].eventfd, 1);
4192
4193        /* Update current_threshold */
4194        t->current_threshold = i - 1;
4195unlock:
4196        rcu_read_unlock();
4197}
4198
4199static void mem_cgroup_threshold(struct mem_cgroup *memcg)
4200{
4201        while (memcg) {
4202                __mem_cgroup_threshold(memcg, false);
4203                if (do_memsw_account())
4204                        __mem_cgroup_threshold(memcg, true);
4205
4206                memcg = parent_mem_cgroup(memcg);
4207        }
4208}
4209
4210static int compare_thresholds(const void *a, const void *b)
4211{
4212        const struct mem_cgroup_threshold *_a = a;
4213        const struct mem_cgroup_threshold *_b = b;
4214
4215        if (_a->threshold > _b->threshold)
4216                return 1;
4217
4218        if (_a->threshold < _b->threshold)
4219                return -1;
4220
4221        return 0;
4222}
4223
4224static int mem_cgroup_oom_notify_cb(struct mem_cgroup *memcg)
4225{
4226        struct mem_cgroup_eventfd_list *ev;
4227
4228        spin_lock(&memcg_oom_lock);
4229
4230        list_for_each_entry(ev, &memcg->oom_notify, list)
4231                eventfd_signal(ev->eventfd, 1);
4232
4233        spin_unlock(&memcg_oom_lock);
4234        return 0;
4235}
4236
4237static void mem_cgroup_oom_notify(struct mem_cgroup *memcg)
4238{
4239        struct mem_cgroup *iter;
4240
4241        for_each_mem_cgroup_tree(iter, memcg)
4242                mem_cgroup_oom_notify_cb(iter);
4243}
4244
4245static int __mem_cgroup_usage_register_event(struct mem_cgroup *memcg,
4246        struct eventfd_ctx *eventfd, const char *args, enum res_type type)
4247{
4248        struct mem_cgroup_thresholds *thresholds;
4249        struct mem_cgroup_threshold_ary *new;
4250        unsigned long threshold;
4251        unsigned long usage;
4252        int i, size, ret;
4253
4254        ret = page_counter_memparse(args, "-1", &threshold);
4255        if (ret)
4256                return ret;
4257
4258        mutex_lock(&memcg->thresholds_lock);
4259
4260        if (type == _MEM) {
4261                thresholds = &memcg->thresholds;
4262                usage = mem_cgroup_usage(memcg, false);
4263        } else if (type == _MEMSWAP) {
4264                thresholds = &memcg->memsw_thresholds;
4265                usage = mem_cgroup_usage(memcg, true);
4266        } else
4267                BUG();
4268
4269        /* Check if a threshold crossed before adding a new one */
4270        if (thresholds->primary)
4271                __mem_cgroup_threshold(memcg, type == _MEMSWAP);
4272
4273        size = thresholds->primary ? thresholds->primary->size + 1 : 1;
4274
4275        /* Allocate memory for new array of thresholds */
4276        new = kmalloc(struct_size(new, entries, size), GFP_KERNEL);
4277        if (!new) {
4278                ret = -ENOMEM;
4279                goto unlock;
4280        }
4281        new->size = size;
4282
4283        /* Copy thresholds (if any) to new array */
4284        if (thresholds->primary)
4285                memcpy(new->entries, thresholds->primary->entries,
4286                       flex_array_size(new, entries, size - 1));
4287
4288        /* Add new threshold */
4289        new->entries[size - 1].eventfd = eventfd;
4290        new->entries[size - 1].threshold = threshold;
4291
4292        /* Sort thresholds. Registering of new threshold isn't time-critical */
4293        sort(new->entries, size, sizeof(*new->entries),
4294                        compare_thresholds, NULL);
4295
4296        /* Find current threshold */
4297        new->current_threshold = -1;
4298        for (i = 0; i < size; i++) {
4299                if (new->entries[i].threshold <= usage) {
4300                        /*
4301                         * new->current_threshold will not be used until
4302                         * rcu_assign_pointer(), so it's safe to increment
4303                         * it here.
4304                         */
4305                        ++new->current_threshold;
4306                } else
4307                        break;
4308        }
4309
4310        /* Free old spare buffer and save old primary buffer as spare */
4311        kfree(thresholds->spare);
4312        thresholds->spare = thresholds->primary;
4313
4314        rcu_assign_pointer(thresholds->primary, new);
4315
4316        /* To be sure that nobody uses thresholds */
4317        synchronize_rcu();
4318
4319unlock:
4320        mutex_unlock(&memcg->thresholds_lock);
4321
4322        return ret;
4323}
4324
4325static int mem_cgroup_usage_register_event(struct mem_cgroup *memcg,
4326        struct eventfd_ctx *eventfd, const char *args)
4327{
4328        return __mem_cgroup_usage_register_event(memcg, eventfd, args, _MEM);
4329}
4330
4331static int memsw_cgroup_usage_register_event(struct mem_cgroup *memcg,
4332        struct eventfd_ctx *eventfd, const char *args)
4333{
4334        return __mem_cgroup_usage_register_event(memcg, eventfd, args, _MEMSWAP);
4335}
4336
4337static void __mem_cgroup_usage_unregister_event(struct mem_cgroup *memcg,
4338        struct eventfd_ctx *eventfd, enum res_type type)
4339{
4340        struct mem_cgroup_thresholds *thresholds;
4341        struct mem_cgroup_threshold_ary *new;
4342        unsigned long usage;
4343        int i, j, size, entries;
4344
4345        mutex_lock(&memcg->thresholds_lock);
4346
4347        if (type == _MEM) {
4348                thresholds = &memcg->thresholds;
4349                usage = mem_cgroup_usage(memcg, false);
4350        } else if (type == _MEMSWAP) {
4351                thresholds = &memcg->memsw_thresholds;
4352                usage = mem_cgroup_usage(memcg, true);
4353        } else
4354                BUG();
4355
4356        if (!thresholds->primary)
4357                goto unlock;
4358
4359        /* Check if a threshold crossed before removing */
4360        __mem_cgroup_threshold(memcg, type == _MEMSWAP);
4361
4362        /* Calculate the new number of thresholds */
4363        size = entries = 0;
4364        for (i = 0; i < thresholds->primary->size; i++) {
4365                if (thresholds->primary->entries[i].eventfd != eventfd)
4366                        size++;
4367                else
4368                        entries++;
4369        }
4370
4371        new = thresholds->spare;
4372
4373        /* If no items related to eventfd have been cleared, nothing to do */
4374        if (!entries)
4375                goto unlock;
4376
4377        /* Set thresholds array to NULL if we don't have thresholds */
4378        if (!size) {
4379                kfree(new);
4380                new = NULL;
4381                goto swap_buffers;
4382        }
4383
4384        new->size = size;
4385
4386        /* Copy thresholds and find current threshold */
4387        new->current_threshold = -1;
4388        for (i = 0, j = 0; i < thresholds->primary->size; i++) {
4389                if (thresholds->primary->entries[i].eventfd == eventfd)
4390                        continue;
4391
4392                new->entries[j] = thresholds->primary->entries[i];
4393                if (new->entries[j].threshold <= usage) {
4394                        /*
4395                         * new->current_threshold will not be used
4396                         * until rcu_assign_pointer(), so it's safe to increment
4397                         * it here.
4398                         */
4399                        ++new->current_threshold;
4400                }
4401                j++;
4402        }
4403
4404swap_buffers:
4405        /* Swap primary and spare array */
4406        thresholds->spare = thresholds->primary;
4407
4408        rcu_assign_pointer(thresholds->primary, new);
4409
4410        /* To be sure that nobody uses thresholds */
4411        synchronize_rcu();
4412
4413        /* If all events are unregistered, free the spare array */
4414        if (!new) {
4415                kfree(thresholds->spare);
4416                thresholds->spare = NULL;
4417        }
4418unlock:
4419        mutex_unlock(&memcg->thresholds_lock);
4420}
4421
4422static void mem_cgroup_usage_unregister_event(struct mem_cgroup *memcg,
4423        struct eventfd_ctx *eventfd)
4424{
4425        return __mem_cgroup_usage_unregister_event(memcg, eventfd, _MEM);
4426}
4427
4428static void memsw_cgroup_usage_unregister_event(struct mem_cgroup *memcg,
4429        struct eventfd_ctx *eventfd)
4430{
4431        return __mem_cgroup_usage_unregister_event(memcg, eventfd, _MEMSWAP);
4432}
4433
4434static int mem_cgroup_oom_register_event(struct mem_cgroup *memcg,
4435        struct eventfd_ctx *eventfd, const char *args)
4436{
4437        struct mem_cgroup_eventfd_list *event;
4438
4439        event = kmalloc(sizeof(*event), GFP_KERNEL);
4440        if (!event)
4441                return -ENOMEM;
4442
4443        spin_lock(&memcg_oom_lock);
4444
4445        event->eventfd = eventfd;
4446        list_add(&event->list, &memcg->oom_notify);
4447
4448        /* already in OOM ? */
4449        if (memcg->under_oom)
4450                eventfd_signal(eventfd, 1);
4451        spin_unlock(&memcg_oom_lock);
4452
4453        return 0;
4454}
4455
4456static void mem_cgroup_oom_unregister_event(struct mem_cgroup *memcg,
4457        struct eventfd_ctx *eventfd)
4458{
4459        struct mem_cgroup_eventfd_list *ev, *tmp;
4460
4461        spin_lock(&memcg_oom_lock);
4462
4463        list_for_each_entry_safe(ev, tmp, &memcg->oom_notify, list) {
4464                if (ev->eventfd == eventfd) {
4465                        list_del(&ev->list);
4466                        kfree(ev);
4467                }
4468        }
4469
4470        spin_unlock(&memcg_oom_lock);
4471}
4472
4473static int mem_cgroup_oom_control_read(struct seq_file *sf, void *v)
4474{
4475        struct mem_cgroup *memcg = mem_cgroup_from_seq(sf);
4476
4477        seq_printf(sf, "oom_kill_disable %d\n", memcg->oom_kill_disable);
4478        seq_printf(sf, "under_oom %d\n", (bool)memcg->under_oom);
4479        seq_printf(sf, "oom_kill %lu\n",
4480                   atomic_long_read(&memcg->memory_events[MEMCG_OOM_KILL]));
4481        return 0;
4482}
4483
4484static int mem_cgroup_oom_control_write(struct cgroup_subsys_state *css,
4485        struct cftype *cft, u64 val)
4486{
4487        struct mem_cgroup *memcg = mem_cgroup_from_css(css);
4488
4489        /* cannot set to root cgroup and only 0 and 1 are allowed */
4490        if (!css->parent || !((val == 0) || (val == 1)))
4491                return -EINVAL;
4492
4493        memcg->oom_kill_disable = val;
4494        if (!val)
4495                memcg_oom_recover(memcg);
4496
4497        return 0;
4498}
4499
4500#ifdef CONFIG_CGROUP_WRITEBACK
4501
4502#include <trace/events/writeback.h>
4503
4504static int memcg_wb_domain_init(struct mem_cgroup *memcg, gfp_t gfp)
4505{
4506        return wb_domain_init(&memcg->cgwb_domain, gfp);
4507}
4508
4509static void memcg_wb_domain_exit(struct mem_cgroup *memcg)
4510{
4511        wb_domain_exit(&memcg->cgwb_domain);
4512}
4513
4514static void memcg_wb_domain_size_changed(struct mem_cgroup *memcg)
4515{
4516        wb_domain_size_changed(&memcg->cgwb_domain);
4517}
4518
4519struct wb_domain *mem_cgroup_wb_domain(struct bdi_writeback *wb)
4520{
4521        struct mem_cgroup *memcg = mem_cgroup_from_css(wb->memcg_css);
4522
4523        if (!memcg->css.parent)
4524                return NULL;
4525
4526        return &memcg->cgwb_domain;
4527}
4528
4529/*
4530 * idx can be of type enum memcg_stat_item or node_stat_item.
4531 * Keep in sync with memcg_exact_page().
4532 */
4533static unsigned long memcg_exact_page_state(struct mem_cgroup *memcg, int idx)
4534{
4535        long x = atomic_long_read(&memcg->vmstats[idx]);
4536        int cpu;
4537
4538        for_each_online_cpu(cpu)
4539                x += per_cpu_ptr(memcg->vmstats_percpu, cpu)->stat[idx];
4540        if (x < 0)
4541                x = 0;
4542        return x;
4543}
4544
4545/**
4546 * mem_cgroup_wb_stats - retrieve writeback related stats from its memcg
4547 * @wb: bdi_writeback in question
4548 * @pfilepages: out parameter for number of file pages
4549 * @pheadroom: out parameter for number of allocatable pages according to memcg
4550 * @pdirty: out parameter for number of dirty pages
4551 * @pwriteback: out parameter for number of pages under writeback
4552 *
4553 * Determine the numbers of file, headroom, dirty, and writeback pages in
4554 * @wb's memcg.  File, dirty and writeback are self-explanatory.  Headroom
4555 * is a bit more involved.
4556 *
4557 * A memcg's headroom is "min(max, high) - used".  In the hierarchy, the
4558 * headroom is calculated as the lowest headroom of itself and the
4559 * ancestors.  Note that this doesn't consider the actual amount of
4560 * available memory in the system.  The caller should further cap
4561 * *@pheadroom accordingly.
4562 */
4563void mem_cgroup_wb_stats(struct bdi_writeback *wb, unsigned long *pfilepages,
4564                         unsigned long *pheadroom, unsigned long *pdirty,
4565                         unsigned long *pwriteback)
4566{
4567        struct mem_cgroup *memcg = mem_cgroup_from_css(wb->memcg_css);
4568        struct mem_cgroup *parent;
4569
4570        *pdirty = memcg_exact_page_state(memcg, NR_FILE_DIRTY);
4571
4572        *pwriteback = memcg_exact_page_state(memcg, NR_WRITEBACK);
4573        *pfilepages = memcg_exact_page_state(memcg, NR_INACTIVE_FILE) +
4574                        memcg_exact_page_state(memcg, NR_ACTIVE_FILE);
4575        *pheadroom = PAGE_COUNTER_MAX;
4576
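            /*
             * e.g. with a local ceiling of 100M and 80M used, under an
             * ancestor ceiling of 1G with 950M used, the reported headroom
             * is min(20M, 74M) == 20M.
             */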
4577        while ((parent = parent_mem_cgroup(memcg))) {
4578                unsigned long ceiling = min(READ_ONCE(memcg->memory.max),
4579                                            READ_ONCE(memcg->memory.high));
4580                unsigned long used = page_counter_read(&memcg->memory);
4581
4582                *pheadroom = min(*pheadroom, ceiling - min(ceiling, used));
4583                memcg = parent;
4584        }
4585}
4586
4587/*
4588 * Foreign dirty flushing
4589 *
4590 * There's an inherent mismatch between memcg and writeback.  The former
4591 * tracks ownership per-page while the latter per-inode.  This was a
4592 * deliberate design decision because honoring per-page ownership in the
4593 * writeback path is complicated, may lead to higher CPU and IO overheads
4594 * and deemed unnecessary given that write-sharing an inode across
4595 * different cgroups isn't a common use-case.
4596 *
4597 * Combined with inode majority-writer ownership switching, this works well
4598 * enough in most cases but there are some pathological cases.  For
4599 * example, let's say there are two cgroups A and B which keep writing to
4600 * different but confined parts of the same inode.  B owns the inode and
4601 * A's memory is limited far below B's.  A's dirty ratio can rise enough to
4602 * trigger balance_dirty_pages() sleeps but B's can be low enough to avoid
4603 * triggering background writeback.  A will be slowed down without a way to
4604 * make writeback of the dirty pages happen.
4605 *
4606 * Conditions like the above can lead to a cgroup getting repeatedly and
4607 * severely throttled after making some progress after each
4608 * dirty_expire_interval while the underlying IO device is almost
4609 * completely idle.
4610 *
4611 * Solving this problem completely requires matching the ownership tracking
4612 * granularities between memcg and writeback in either direction.  However,
4613 * the more egregious behaviors can be avoided by simply remembering the
4614 * most recent foreign dirtying events and initiating remote flushes on
4615 * them when local writeback isn't enough to keep the memory clean enough.
4616 *
4617 * The following two functions implement such a mechanism.  When a foreign
4618 * page - a page whose memcg and writeback ownerships don't match - is
4619 * dirtied, mem_cgroup_track_foreign_dirty() records the inode owning
4620 * bdi_writeback on the page owning memcg.  When balance_dirty_pages()
4621 * decides that the memcg needs to sleep due to high dirty ratio, it calls
4622 * mem_cgroup_flush_foreign() which queues writeback on the recorded
4623 * foreign bdi_writebacks which haven't expired.  Both the numbers of
4624 * recorded bdi_writebacks and concurrent in-flight foreign writebacks are
4625 * limited to MEMCG_CGWB_FRN_CNT.
4626 *
4627 * The mechanism only remembers IDs and doesn't hold any object references.
4628 * As being wrong occasionally doesn't matter, updates and accesses to the
4629 * records are lockless and racy.
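     *
     * As a worked example with the default vm.dirty_expire_centisecs=3000:
     * a record is treated as expired 30s after its timestamp, and the lazy
     * timestamp refresh interval used in the slowpath below is
     * min(1s, 30s / 8) = 1s.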
4630 */
4631void mem_cgroup_track_foreign_dirty_slowpath(struct page *page,
4632                                             struct bdi_writeback *wb)
4633{
4634        struct mem_cgroup *memcg = page_memcg(page);
4635        struct memcg_cgwb_frn *frn;
4636        u64 now = get_jiffies_64();
4637        u64 oldest_at = now;
4638        int oldest = -1;
4639        int i;
4640
4641        trace_track_foreign_dirty(page, wb);
4642
4643        /*
4644         * Pick the slot to use.  If there is already a slot for @wb, keep
4645         * using it.  If not, replace the oldest one which isn't being
4646         * written out.
4647         */
4648        for (i = 0; i < MEMCG_CGWB_FRN_CNT; i++) {
4649                frn = &memcg->cgwb_frn[i];
4650                if (frn->bdi_id == wb->bdi->id &&
4651                    frn->memcg_id == wb->memcg_css->id)
4652                        break;
4653                if (time_before64(frn->at, oldest_at) &&
4654                    atomic_read(&frn->done.cnt) == 1) {
4655                        oldest = i;
4656                        oldest_at = frn->at;
4657                }
4658        }
4659
4660        if (i < MEMCG_CGWB_FRN_CNT) {
4661                /*
4662                 * Re-using an existing one.  Update timestamp lazily to
4663                 * avoid making the cacheline hot.  We want them to be
4664                 * reasonably up-to-date and significantly shorter than
4665                 * dirty_expire_interval as that's what expires the record.
4666                 * Use the shorter of 1s and dirty_expire_interval / 8
                     * (dirty_expire_interval is in centiseconds, hence the
                     * "* 10" below converting it to milliseconds).
4667                 */
4668                unsigned long update_intv =
4669                        min_t(unsigned long, HZ,
4670                              msecs_to_jiffies(dirty_expire_interval * 10) / 8);
4671
4672                if (time_before64(frn->at, now - update_intv))
4673                        frn->at = now;
4674        } else if (oldest >= 0) {
4675                /* replace the oldest free one */
4676                frn = &memcg->cgwb_frn[oldest];
4677                frn->bdi_id = wb->bdi->id;
4678                frn->memcg_id = wb->memcg_css->id;
4679                frn->at = now;
4680        }
4681}
4682
4683/* issue foreign writeback flushes for recorded foreign dirtying events */
4684void mem_cgroup_flush_foreign(struct bdi_writeback *wb)
4685{
4686        struct mem_cgroup *memcg = mem_cgroup_from_css(wb->memcg_css);
4687        unsigned long intv = msecs_to_jiffies(dirty_expire_interval * 10);
4688        u64 now = jiffies_64;
4689        int i;
4690
4691        for (i = 0; i < MEMCG_CGWB_FRN_CNT; i++) {
4692                struct memcg_cgwb_frn *frn = &memcg->cgwb_frn[i];
4693
4694                /*
4695                 * If the record is older than dirty_expire_interval,
4696                 * writeback on it has already started.  No need to kick it
4697                 * off again.  Also, don't start a new one if there's
4698                 * already one in flight.
4699                 */
4700                if (time_after64(frn->at, now - intv) &&
4701                    atomic_read(&frn->done.cnt) == 1) {
4702                        frn->at = 0;
4703                        trace_flush_foreign(wb, frn->bdi_id, frn->memcg_id);
4704                        cgroup_writeback_by_id(frn->bdi_id, frn->memcg_id, 0,
4705                                               WB_REASON_FOREIGN_FLUSH,
4706                                               &frn->done);
4707                }
4708        }
4709}
4710
4711#else   /* CONFIG_CGROUP_WRITEBACK */
4712
4713static int memcg_wb_domain_init(struct mem_cgroup *memcg, gfp_t gfp)
4714{
4715        return 0;
4716}
4717
4718static void memcg_wb_domain_exit(struct mem_cgroup *memcg)
4719{
4720}
4721
4722static void memcg_wb_domain_size_changed(struct mem_cgroup *memcg)
4723{
4724}
4725
4726#endif  /* CONFIG_CGROUP_WRITEBACK */
4727
4728/*
4729 * DO NOT USE IN NEW FILES.
4730 *
4731 * "cgroup.event_control" implementation.
4732 *
4733 * This is way over-engineered.  It tries to support fully configurable
4734 * events for each user.  Such level of flexibility is completely
4735 * unnecessary especially in the light of the planned unified hierarchy.
4736 *
4737 * Please deprecate this and replace with something simpler if at all
4738 * possible.
4739 */
4740
4741/*
4742 * Unregister event and free resources.
4743 *
4744 * Gets called from workqueue.
4745 */
4746static void memcg_event_remove(struct work_struct *work)
4747{
4748        struct mem_cgroup_event *event =
4749                container_of(work, struct mem_cgroup_event, remove);
4750        struct mem_cgroup *memcg = event->memcg;
4751
4752        remove_wait_queue(event->wqh, &event->wait);
4753
4754        event->unregister_event(memcg, event->eventfd);
4755
4756        /* Notify userspace the event is going away. */
4757        eventfd_signal(event->eventfd, 1);
4758
4759        eventfd_ctx_put(event->eventfd);
4760        kfree(event);
4761        css_put(&memcg->css);
4762}
4763
4764/*
4765 * Gets called on EPOLLHUP on eventfd when user closes it.
4766 *
4767 * Called with wqh->lock held and interrupts disabled.
4768 */
4769static int memcg_event_wake(wait_queue_entry_t *wait, unsigned mode,
4770                            int sync, void *key)
4771{
4772        struct mem_cgroup_event *event =
4773                container_of(wait, struct mem_cgroup_event, wait);
4774        struct mem_cgroup *memcg = event->memcg;
4775        __poll_t flags = key_to_poll(key);
4776
4777        if (flags & EPOLLHUP) {
4778                /*
4779                 * If the event has been detached at cgroup removal, we
4780                 * can simply return knowing the other side will clean up
4781                 * for us.
4782                 *
4783                 * We can't race against event freeing since the other
4784                 * side will acquire wqh->lock via remove_wait_queue(),
4785                 * which we hold.
4786                 */
4787                spin_lock(&memcg->event_list_lock);
4788                if (!list_empty(&event->list)) {
4789                        list_del_init(&event->list);
4790                        /*
4791                         * We are in atomic context, but memcg_event_remove()
4792                         * may sleep, so we have to call it via a workqueue.
4793                         */
4794                        schedule_work(&event->remove);
4795                }
4796                spin_unlock(&memcg->event_list_lock);
4797        }
4798
4799        return 0;
4800}
4801
4802static void memcg_event_ptable_queue_proc(struct file *file,
4803                wait_queue_head_t *wqh, poll_table *pt)
4804{
4805        struct mem_cgroup_event *event =
4806                container_of(pt, struct mem_cgroup_event, pt);
4807
4808        event->wqh = wqh;
4809        add_wait_queue(wqh, &event->wait);
4810}
4811
4812/*
4813 * DO NOT USE IN NEW FILES.
4814 *
4815 * Parse input and register new cgroup event handler.
4816 *
4817 * Input must be in format '<event_fd> <control_fd> <args>'.
4818 * Interpretation of args is defined by control file implementation.
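     *
     * A minimal userspace sketch (the cgroup path and the 1G threshold are
     * purely illustrative; assumes a v1 memory hierarchy mounted at
     * /sys/fs/cgroup/memory):
     *
     *         int efd = eventfd(0, 0);
     *         int cfd = open("/sys/fs/cgroup/memory/foo/memory.usage_in_bytes",
     *                        O_RDONLY);
     *         int ctl = open("/sys/fs/cgroup/memory/foo/cgroup.event_control",
     *                        O_WRONLY);
     *         uint64_t cnt;
     *         char buf[64];
     *
     *         snprintf(buf, sizeof(buf), "%d %d %llu", efd, cfd, 1ULL << 30);
     *         write(ctl, buf, strlen(buf));
     *         read(efd, &cnt, sizeof(cnt));
     *
     * The final read() blocks until usage crosses the 1G threshold, at
     * which point the eventfd is signalled.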
4819 */
4820static ssize_t memcg_write_event_control(struct kernfs_open_file *of,
4821                                         char *buf, size_t nbytes, loff_t off)
4822{
4823        struct cgroup_subsys_state *css = of_css(of);
4824        struct mem_cgroup *memcg = mem_cgroup_from_css(css);
4825        struct mem_cgroup_event *event;
4826        struct cgroup_subsys_state *cfile_css;
4827        unsigned int efd, cfd;
4828        struct fd efile;
4829        struct fd cfile;
4830        const char *name;
4831        char *endp;
4832        int ret;
4833
4834        buf = strstrip(buf);
4835
4836        efd = simple_strtoul(buf, &endp, 10);
4837        if (*endp != ' ')
4838                return -EINVAL;
4839        buf = endp + 1;
4840
4841        cfd = simple_strtoul(buf, &endp, 10);
4842        if ((*endp != ' ') && (*endp != '\0'))
4843                return -EINVAL;
4844        buf = endp + 1;
4845
4846        event = kzalloc(sizeof(*event), GFP_KERNEL);
4847        if (!event)
4848                return -ENOMEM;
4849
4850        event->memcg = memcg;
4851        INIT_LIST_HEAD(&event->list);
4852        init_poll_funcptr(&event->pt, memcg_event_ptable_queue_proc);
4853        init_waitqueue_func_entry(&event->wait, memcg_event_wake);
4854        INIT_WORK(&event->remove, memcg_event_remove);
4855
4856        efile = fdget(efd);
4857        if (!efile.file) {
4858                ret = -EBADF;
4859                goto out_kfree;
4860        }
4861
4862        event->eventfd = eventfd_ctx_fileget(efile.file);
4863        if (IS_ERR(event->eventfd)) {
4864                ret = PTR_ERR(event->eventfd);
4865                goto out_put_efile;
4866        }
4867
4868        cfile = fdget(cfd);
4869        if (!cfile.file) {
4870                ret = -EBADF;
4871                goto out_put_eventfd;
4872        }
4873
4874        /* the process needs read permission on the control file */
4875        /* AV: shouldn't we check that it's been opened for read instead? */
4876        ret = file_permission(cfile.file, MAY_READ);
4877        if (ret < 0)
4878                goto out_put_cfile;
4879
4880        /*
4881         * Determine the event callbacks and set them in @event.  This used
4882         * to be done via struct cftype but cgroup core no longer knows
4883         * about these events.  The following is crude but the whole thing
4884         * is for compatibility anyway.
4885         *
4886         * DO NOT ADD NEW FILES.
4887         */
4888        name = cfile.file->f_path.dentry->d_name.name;
4889
4890        if (!strcmp(name, "memory.usage_in_bytes")) {
4891                event->register_event = mem_cgroup_usage_register_event;
4892                event->unregister_event = mem_cgroup_usage_unregister_event;
4893        } else if (!strcmp(name, "memory.oom_control")) {
4894                event->register_event = mem_cgroup_oom_register_event;
4895                event->unregister_event = mem_cgroup_oom_unregister_event;
4896        } else if (!strcmp(name, "memory.pressure_level")) {
4897                event->register_event = vmpressure_register_event;
4898                event->unregister_event = vmpressure_unregister_event;
4899        } else if (!strcmp(name, "memory.memsw.usage_in_bytes")) {
4900                event->register_event = memsw_cgroup_usage_register_event;
4901                event->unregister_event = memsw_cgroup_usage_unregister_event;
4902        } else {
4903                ret = -EINVAL;
4904                goto out_put_cfile;
4905        }
4906
4907        /*
4908         * Verify that @cfile belongs to @css.  Also, remaining events are
4909         * automatically removed on cgroup destruction but the removal is
4910         * asynchronous, so take an extra ref on @css.
4911         */
4912        cfile_css = css_tryget_online_from_dir(cfile.file->f_path.dentry->d_parent,
4913                                               &memory_cgrp_subsys);
4914        ret = -EINVAL;
4915        if (IS_ERR(cfile_css))
4916                goto out_put_cfile;
4917        if (cfile_css != css) {
4918                css_put(cfile_css);
4919                goto out_put_cfile;
4920        }
4921
4922        ret = event->register_event(memcg, event->eventfd, buf);
4923        if (ret)
4924                goto out_put_css;
4925
4926        vfs_poll(efile.file, &event->pt);
4927
4928        spin_lock(&memcg->event_list_lock);
4929        list_add(&event->list, &memcg->event_list);
4930        spin_unlock(&memcg->event_list_lock);
4931
4932        fdput(cfile);
4933        fdput(efile);
4934
4935        return nbytes;
4936
4937out_put_css:
4938        css_put(css);
4939out_put_cfile:
4940        fdput(cfile);
4941out_put_eventfd:
4942        eventfd_ctx_put(event->eventfd);
4943out_put_efile:
4944        fdput(efile);
4945out_kfree:
4946        kfree(event);
4947
4948        return ret;
4949}
4950
4951static struct cftype mem_cgroup_legacy_files[] = {
4952        {
4953                .name = "usage_in_bytes",
4954                .private = MEMFILE_PRIVATE(_MEM, RES_USAGE),
4955                .read_u64 = mem_cgroup_read_u64,
4956        },
4957        {
4958                .name = "max_usage_in_bytes",
4959                .private = MEMFILE_PRIVATE(_MEM, RES_MAX_USAGE),
4960                .write = mem_cgroup_reset,
4961                .read_u64 = mem_cgroup_read_u64,
4962        },
4963        {
4964                .name = "limit_in_bytes",
4965                .private = MEMFILE_PRIVATE(_MEM, RES_LIMIT),
4966                .write = mem_cgroup_write,
4967                .read_u64 = mem_cgroup_read_u64,
4968        },
4969        {
4970                .name = "soft_limit_in_bytes",
4971                .private = MEMFILE_PRIVATE(_MEM, RES_SOFT_LIMIT),
4972                .write = mem_cgroup_write,
4973                .read_u64 = mem_cgroup_read_u64,
4974        },
4975        {
4976                .name = "failcnt",
4977                .private = MEMFILE_PRIVATE(_MEM, RES_FAILCNT),
4978                .write = mem_cgroup_reset,
4979                .read_u64 = mem_cgroup_read_u64,
4980        },
4981        {
4982                .name = "stat",
4983                .seq_show = memcg_stat_show,
4984        },
4985        {
4986                .name = "force_empty",
4987                .write = mem_cgroup_force_empty_write,
4988        },
4989        {
4990                .name = "use_hierarchy",
4991                .write_u64 = mem_cgroup_hierarchy_write,
4992                .read_u64 = mem_cgroup_hierarchy_read,
4993        },
4994        {
4995                .name = "cgroup.event_control",         /* XXX: for compat */
4996                .write = memcg_write_event_control,
4997                .flags = CFTYPE_NO_PREFIX | CFTYPE_WORLD_WRITABLE,
4998        },
4999        {
5000                .name = "swappiness",
5001                .read_u64 = mem_cgroup_swappiness_read,
5002                .write_u64 = mem_cgroup_swappiness_write,
5003        },
5004        {
5005                .name = "move_charge_at_immigrate",
5006                .read_u64 = mem_cgroup_move_charge_read,
5007                .write_u64 = mem_cgroup_move_charge_write,
5008        },
5009        {
5010                .name = "oom_control",
5011                .seq_show = mem_cgroup_oom_control_read,
5012                .write_u64 = mem_cgroup_oom_control_write,
5013                .private = MEMFILE_PRIVATE(_OOM_TYPE, OOM_CONTROL),
5014        },
5015        {
5016                .name = "pressure_level",
5017        },
5018#ifdef CONFIG_NUMA
5019        {
5020                .name = "numa_stat",
5021                .seq_show = memcg_numa_stat_show,
5022        },
5023#endif
5024        {
5025                .name = "kmem.limit_in_bytes",
5026                .private = MEMFILE_PRIVATE(_KMEM, RES_LIMIT),
5027                .write = mem_cgroup_write,
5028                .read_u64 = mem_cgroup_read_u64,
5029        },
5030        {
5031                .name = "kmem.usage_in_bytes",
5032                .private = MEMFILE_PRIVATE(_KMEM, RES_USAGE),
5033                .read_u64 = mem_cgroup_read_u64,
5034        },
5035        {
5036                .name = "kmem.failcnt",
5037                .private = MEMFILE_PRIVATE(_KMEM, RES_FAILCNT),
5038                .write = mem_cgroup_reset,
5039                .read_u64 = mem_cgroup_read_u64,
5040        },
5041        {
5042                .name = "kmem.max_usage_in_bytes",
5043                .private = MEMFILE_PRIVATE(_KMEM, RES_MAX_USAGE),
5044                .write = mem_cgroup_reset,
5045                .read_u64 = mem_cgroup_read_u64,
5046        },
5047#if defined(CONFIG_MEMCG_KMEM) && \
5048        (defined(CONFIG_SLAB) || defined(CONFIG_SLUB_DEBUG))
5049        {
5050                .name = "kmem.slabinfo",
5051                .seq_show = memcg_slab_show,
5052        },
5053#endif
5054        {
5055                .name = "kmem.tcp.limit_in_bytes",
5056                .private = MEMFILE_PRIVATE(_TCP, RES_LIMIT),
5057                .write = mem_cgroup_write,
5058                .read_u64 = mem_cgroup_read_u64,
5059        },
5060        {
5061                .name = "kmem.tcp.usage_in_bytes",
5062                .private = MEMFILE_PRIVATE(_TCP, RES_USAGE),
5063                .read_u64 = mem_cgroup_read_u64,
5064        },
5065        {
5066                .name = "kmem.tcp.failcnt",
5067                .private = MEMFILE_PRIVATE(_TCP, RES_FAILCNT),
5068                .write = mem_cgroup_reset,
5069                .read_u64 = mem_cgroup_read_u64,
5070        },
5071        {
5072                .name = "kmem.tcp.max_usage_in_bytes",
5073                .private = MEMFILE_PRIVATE(_TCP, RES_MAX_USAGE),
5074                .write = mem_cgroup_reset,
5075                .read_u64 = mem_cgroup_read_u64,
5076        },
5077        { },    /* terminate */
5078};
5079
5080/*
5081 * Private memory cgroup IDR
5082 *
5083 * Swap-out records and page cache shadow entries need to store memcg
5084 * references in constrained space, so we maintain an ID space that is
5085 * limited to 16 bits (MEM_CGROUP_ID_MAX), limiting the total number of
5086 * memory-controlled cgroups to 64k.
5087 *
5088 * However, there usually are many references to the offline CSS after
5089 * the cgroup has been destroyed, such as page cache or reclaimable
5090 * slab objects, that don't need to hang on to the ID. We want to keep
5091 * those dead CSSes from occupying IDs, or we might quickly exhaust the
5092 * relatively small ID space and prevent the creation of new cgroups
5093 * even when there are far fewer than 64k cgroups - possibly none.
5094 *
5095 * Maintain a private 16-bit ID space for memcg, and allow the ID to
5096 * be freed and recycled when it's no longer needed, which is usually
5097 * when the CSS is offlined.
5098 *
5099 * The only exceptions to that are records of swapped-out tmpfs/shmem
5100 * pages that need to be attributed to live ancestors on swapin. But
5101 * those references are manageable from userspace.
5102 */
5103
5104static DEFINE_IDR(mem_cgroup_idr);
5105
5106static void mem_cgroup_id_remove(struct mem_cgroup *memcg)
5107{
5108        if (memcg->id.id > 0) {
5109                idr_remove(&mem_cgroup_idr, memcg->id.id);
5110                memcg->id.id = 0;
5111        }
5112}
5113
5114static void __maybe_unused mem_cgroup_id_get_many(struct mem_cgroup *memcg,
5115                                                  unsigned int n)
5116{
5117        refcount_add(n, &memcg->id.ref);
5118}
5119
5120static void mem_cgroup_id_put_many(struct mem_cgroup *memcg, unsigned int n)
5121{
5122        if (refcount_sub_and_test(n, &memcg->id.ref)) {
5123                mem_cgroup_id_remove(memcg);
5124
5125                /* Memcg ID pins CSS */
5126                css_put(&memcg->css);
5127        }
5128}
5129
5130static inline void mem_cgroup_id_put(struct mem_cgroup *memcg)
5131{
5132        mem_cgroup_id_put_many(memcg, 1);
5133}
5134
5135/**
5136 * mem_cgroup_from_id - look up a memcg from a memcg id
5137 * @id: the memcg id to look up
5138 *
5139 * Caller must hold rcu_read_lock().
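     *
     * A minimal usage sketch (illustrative; callers that need the memcg
     * beyond the RCU section typically pin the css first):
     *
     *         rcu_read_lock();
     *         memcg = mem_cgroup_from_id(id);
     *         if (memcg && !css_tryget_online(&memcg->css))
     *                 memcg = NULL;
     *         rcu_read_unlock();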
5140 */
5141struct mem_cgroup *mem_cgroup_from_id(unsigned short id)
5142{
5143        WARN_ON_ONCE(!rcu_read_lock_held());
5144        return idr_find(&mem_cgroup_idr, id);
5145}
5146
5147static int alloc_mem_cgroup_per_node_info(struct mem_cgroup *memcg, int node)
5148{
5149        struct mem_cgroup_per_node *pn;
5150        int tmp = node;
5151        /*
5152         * This routine is called against all possible nodes, but it
5153         * is a BUG to call kmalloc() against an offline node.
5154         *
5155         * TODO: this routine can waste a lot of memory for nodes which
5156         *       will never be onlined. It would be better to use a
5157         *       memory hotplug callback function instead.
5158         */
5159        if (!node_state(node, N_NORMAL_MEMORY))
5160                tmp = -1;
5161        pn = kzalloc_node(sizeof(*pn), GFP_KERNEL, tmp);
5162        if (!pn)
5163                return 1;
5164
5165        pn->lruvec_stat_local = alloc_percpu_gfp(struct lruvec_stat,
5166                                                 GFP_KERNEL_ACCOUNT);
5167        if (!pn->lruvec_stat_local) {
5168                kfree(pn);
5169                return 1;
5170        }
5171
5172        pn->lruvec_stat_cpu = alloc_percpu_gfp(struct batched_lruvec_stat,
5173                                               GFP_KERNEL_ACCOUNT);
5174        if (!pn->lruvec_stat_cpu) {
5175                free_percpu(pn->lruvec_stat_local);
5176                kfree(pn);
5177                return 1;
5178        }
5179
5180        lruvec_init(&pn->lruvec);
5181        pn->usage_in_excess = 0;
5182        pn->on_tree = false;
5183        pn->memcg = memcg;
5184
5185        memcg->nodeinfo[node] = pn;
5186        return 0;
5187}
5188
5189static void free_mem_cgroup_per_node_info(struct mem_cgroup *memcg, int node)
5190{
5191        struct mem_cgroup_per_node *pn = memcg->nodeinfo[node];
5192
5193        if (!pn)
5194                return;
5195
5196        free_percpu(pn->lruvec_stat_cpu);
5197        free_percpu(pn->lruvec_stat_local);
5198        kfree(pn);
5199}
5200
5201static void __mem_cgroup_free(struct mem_cgroup *memcg)
5202{
5203        int node;
5204
5205        for_each_node(node)
5206                free_mem_cgroup_per_node_info(memcg, node);
5207        free_percpu(memcg->vmstats_percpu);
5208        free_percpu(memcg->vmstats_local);
5209        kfree(memcg);
5210}
5211
5212static void mem_cgroup_free(struct mem_cgroup *memcg)
5213{
5214        memcg_wb_domain_exit(memcg);
5215        /*
5216         * Flush percpu vmstats and vmevents to guarantee the value correctness
5217         * on parent's and all ancestor levels.
5218         */
5219        memcg_flush_percpu_vmstats(memcg);
5220        memcg_flush_percpu_vmevents(memcg);
5221        __mem_cgroup_free(memcg);
5222}
5223
5224static struct mem_cgroup *mem_cgroup_alloc(void)
5225{
5226        struct mem_cgroup *memcg;
5227        unsigned int size;
5228        int node;
5229        int __maybe_unused i;
5230        long error = -ENOMEM;
5231
5232        size = sizeof(struct mem_cgroup);
5233        size += nr_node_ids * sizeof(struct mem_cgroup_per_node *);
5234
5235        memcg = kzalloc(size, GFP_KERNEL);
5236        if (!memcg)
5237                return ERR_PTR(error);
5238
5239        memcg->id.id = idr_alloc(&mem_cgroup_idr, NULL,
5240                                 1, MEM_CGROUP_ID_MAX,
5241                                 GFP_KERNEL);
5242        if (memcg->id.id < 0) {
5243                error = memcg->id.id;
5244                goto fail;
5245        }
5246
5247        memcg->vmstats_local = alloc_percpu_gfp(struct memcg_vmstats_percpu,
5248                                                GFP_KERNEL_ACCOUNT);
5249        if (!memcg->vmstats_local)
5250                goto fail;
5251
5252        memcg->vmstats_percpu = alloc_percpu_gfp(struct memcg_vmstats_percpu,
5253                                                 GFP_KERNEL_ACCOUNT);
5254        if (!memcg->vmstats_percpu)
5255                goto fail;
5256
5257        for_each_node(node)
5258                if (alloc_mem_cgroup_per_node_info(memcg, node))
5259                        goto fail;
5260
5261        if (memcg_wb_domain_init(memcg, GFP_KERNEL))
5262                goto fail;
5263
5264        INIT_WORK(&memcg->high_work, high_work_func);
5265        INIT_LIST_HEAD(&memcg->oom_notify);
5266        mutex_init(&memcg->thresholds_lock);
5267        spin_lock_init(&memcg->move_lock);
5268        vmpressure_init(&memcg->vmpressure);
5269        INIT_LIST_HEAD(&memcg->event_list);
5270        spin_lock_init(&memcg->event_list_lock);
5271        memcg->socket_pressure = jiffies;
5272#ifdef CONFIG_MEMCG_KMEM
5273        memcg->kmemcg_id = -1;
5274        INIT_LIST_HEAD(&memcg->objcg_list);
5275#endif
5276#ifdef CONFIG_CGROUP_WRITEBACK
5277        INIT_LIST_HEAD(&memcg->cgwb_list);
5278        for (i = 0; i < MEMCG_CGWB_FRN_CNT; i++)
5279                memcg->cgwb_frn[i].done =
5280                        __WB_COMPLETION_INIT(&memcg_cgwb_frn_waitq);
5281#endif
5282#ifdef CONFIG_TRANSPARENT_HUGEPAGE
5283        spin_lock_init(&memcg->deferred_split_queue.split_queue_lock);
5284        INIT_LIST_HEAD(&memcg->deferred_split_queue.split_queue);
5285        memcg->deferred_split_queue.split_queue_len = 0;
5286#endif
5287        idr_replace(&mem_cgroup_idr, memcg, memcg->id.id);
5288        return memcg;
5289fail:
5290        mem_cgroup_id_remove(memcg);
5291        __mem_cgroup_free(memcg);
5292        return ERR_PTR(error);
5293}
5294
5295static struct cgroup_subsys_state * __ref
5296mem_cgroup_css_alloc(struct cgroup_subsys_state *parent_css)
5297{
5298        struct mem_cgroup *parent = mem_cgroup_from_css(parent_css);
5299        struct mem_cgroup *memcg, *old_memcg;
5300        long error = -ENOMEM;
5301
5302        old_memcg = set_active_memcg(parent);
5303        memcg = mem_cgroup_alloc();
5304        set_active_memcg(old_memcg);
5305        if (IS_ERR(memcg))
5306                return ERR_CAST(memcg);
5307
5308        page_counter_set_high(&memcg->memory, PAGE_COUNTER_MAX);
5309        memcg->soft_limit = PAGE_COUNTER_MAX;
5310        page_counter_set_high(&memcg->swap, PAGE_COUNTER_MAX);
5311        if (parent) {
5312                memcg->swappiness = mem_cgroup_swappiness(parent);
5313                memcg->oom_kill_disable = parent->oom_kill_disable;
5314
5315                page_counter_init(&memcg->memory, &parent->memory);
5316                page_counter_init(&memcg->swap, &parent->swap);
5317                page_counter_init(&memcg->kmem, &parent->kmem);
5318                page_counter_init(&memcg->tcpmem, &parent->tcpmem);
5319        } else {
5320                page_counter_init(&memcg->memory, NULL);
5321                page_counter_init(&memcg->swap, NULL);
5322                page_counter_init(&memcg->kmem, NULL);
5323                page_counter_init(&memcg->tcpmem, NULL);
5324
5325                root_mem_cgroup = memcg;
5326                return &memcg->css;
5327        }
5328
5329        /* The following stuff does not apply to the root */
5330        error = memcg_online_kmem(memcg);
5331        if (error)
5332                goto fail;
5333
5334        if (cgroup_subsys_on_dfl(memory_cgrp_subsys) && !cgroup_memory_nosocket)
5335                static_branch_inc(&memcg_sockets_enabled_key);
5336
5337        return &memcg->css;
5338fail:
5339        mem_cgroup_id_remove(memcg);
5340        mem_cgroup_free(memcg);
5341        return ERR_PTR(error);
5342}
5343
5344static int mem_cgroup_css_online(struct cgroup_subsys_state *css)
5345{
5346        struct mem_cgroup *memcg = mem_cgroup_from_css(css);
5347
5348        /*
5349         * A memcg must be visible to memcg_expand_shrinker_maps()
5350         * by the time the maps are allocated, so allocate the maps
5351         * here, where for_each_mem_cgroup() can no longer skip it.
5352         */
5353        if (memcg_alloc_shrinker_maps(memcg)) {
5354                mem_cgroup_id_remove(memcg);
5355                return -ENOMEM;
5356        }
5357
5358        /* Online state pins memcg ID, memcg ID pins CSS */
5359        refcount_set(&memcg->id.ref, 1);
5360        css_get(css);
5361        return 0;
5362}
5363
5364static void mem_cgroup_css_offline(struct cgroup_subsys_state *css)
5365{
5366        struct mem_cgroup *memcg = mem_cgroup_from_css(css);
5367        struct mem_cgroup_event *event, *tmp;
5368
5369        /*
5370         * Unregister events and notify userspace.
5371         * Notify userspace about cgroup removal only after rmdir of the
5372         * cgroup directory, to avoid races between userspace and kernelspace.
5373         */
5374        spin_lock(&memcg->event_list_lock);
5375        list_for_each_entry_safe(event, tmp, &memcg->event_list, list) {
5376                list_del_init(&event->list);
5377                schedule_work(&event->remove);
5378        }
5379        spin_unlock(&memcg->event_list_lock);
5380
5381        page_counter_set_min(&memcg->memory, 0);
5382        page_counter_set_low(&memcg->memory, 0);
5383
5384        memcg_offline_kmem(memcg);
5385        wb_memcg_offline(memcg);
5386
5387        drain_all_stock(memcg);
5388
5389        mem_cgroup_id_put(memcg);
5390}
5391
5392static void mem_cgroup_css_released(struct cgroup_subsys_state *css)
5393{
5394        struct mem_cgroup *memcg = mem_cgroup_from_css(css);
5395
5396        invalidate_reclaim_iterators(memcg);
5397}
5398
5399static void mem_cgroup_css_free(struct cgroup_subsys_state *css)
5400{
5401        struct mem_cgroup *memcg = mem_cgroup_from_css(css);
5402        int __maybe_unused i;
5403
5404#ifdef CONFIG_CGROUP_WRITEBACK
5405        for (i = 0; i < MEMCG_CGWB_FRN_CNT; i++)
5406                wb_wait_for_completion(&memcg->cgwb_frn[i].done);
5407#endif
5408        if (cgroup_subsys_on_dfl(memory_cgrp_subsys) && !cgroup_memory_nosocket)
5409                static_branch_dec(&memcg_sockets_enabled_key);
5410
5411        if (!cgroup_subsys_on_dfl(memory_cgrp_subsys) && memcg->tcpmem_active)
5412                static_branch_dec(&memcg_sockets_enabled_key);
5413
5414        vmpressure_cleanup(&memcg->vmpressure);
5415        cancel_work_sync(&memcg->high_work);
5416        mem_cgroup_remove_from_trees(memcg);
5417        memcg_free_shrinker_maps(memcg);
5418        memcg_free_kmem(memcg);
5419        mem_cgroup_free(memcg);
5420}
5421
5422/**
5423 * mem_cgroup_css_reset - reset the states of a mem_cgroup
5424 * @css: the target css
5425 *
5426 * Reset the states of the mem_cgroup associated with @css.  This is
5427 * invoked when the userland requests disabling on the default hierarchy
5428 * but the memcg is pinned through dependency.  The memcg should stop
5429 * applying policies and should revert to the vanilla state as it may be
5430 * made visible again.
5431 *
5432 * The current implementation only resets the essential configurations.
5433 * This needs to be expanded to cover all the visible parts.
5434 */
5435static void mem_cgroup_css_reset(struct cgroup_subsys_state *css)
5436{
5437        struct mem_cgroup *memcg = mem_cgroup_from_css(css);
5438
5439        page_counter_set_max(&memcg->memory, PAGE_COUNTER_MAX);
5440        page_counter_set_max(&memcg->swap, PAGE_COUNTER_MAX);
5441        page_counter_set_max(&memcg->kmem, PAGE_COUNTER_MAX);
5442        page_counter_set_max(&memcg->tcpmem, PAGE_COUNTER_MAX);
5443        page_counter_set_min(&memcg->memory, 0);
5444        page_counter_set_low(&memcg->memory, 0);
5445        page_counter_set_high(&memcg->memory, PAGE_COUNTER_MAX);
5446        memcg->soft_limit = PAGE_COUNTER_MAX;
5447        page_counter_set_high(&memcg->swap, PAGE_COUNTER_MAX);
5448        memcg_wb_domain_size_changed(memcg);
5449}
5450
5451#ifdef CONFIG_MMU
5452/* Handlers for move charge at task migration. */
5453static int mem_cgroup_do_precharge(unsigned long count)
5454{
5455        int ret;
5456
5457        /* Try a single bulk charge without reclaim first, kswapd may wake */
5458        ret = try_charge(mc.to, GFP_KERNEL & ~__GFP_DIRECT_RECLAIM, count);
5459        if (!ret) {
5460                mc.precharge += count;
5461                return ret;
5462        }
5463
5464        /* Try charges one by one with reclaim, but do not retry */
5465        while (count--) {
5466                ret = try_charge(mc.to, GFP_KERNEL | __GFP_NORETRY, 1);
5467                if (ret)
5468                        return ret;
5469                mc.precharge++;
5470                cond_resched();
5471        }
5472        return 0;
5473}
5474
5475union mc_target {
5476        struct page     *page;
5477        swp_entry_t     ent;
5478};
5479
5480enum mc_target_type {
5481        MC_TARGET_NONE = 0,
5482        MC_TARGET_PAGE,
5483        MC_TARGET_SWAP,
5484        MC_TARGET_DEVICE,
5485};
5486
5487static struct page *mc_handle_present_pte(struct vm_area_struct *vma,
5488                                                unsigned long addr, pte_t ptent)
5489{
5490        struct page *page = vm_normal_page(vma, addr, ptent);
5491
5492        if (!page || !page_mapped(page))
5493                return NULL;
5494        if (PageAnon(page)) {
5495                if (!(mc.flags & MOVE_ANON))
5496                        return NULL;
5497        } else {
5498                if (!(mc.flags & MOVE_FILE))
5499                        return NULL;
5500        }
5501        if (!get_page_unless_zero(page))
5502                return NULL;
5503
5504        return page;
5505}
5506
5507#if defined(CONFIG_SWAP) || defined(CONFIG_DEVICE_PRIVATE)
5508static struct page *mc_handle_swap_pte(struct vm_area_struct *vma,
5509                        pte_t ptent, swp_entry_t *entry)
5510{
5511        struct page *page = NULL;
5512        swp_entry_t ent = pte_to_swp_entry(ptent);
5513
5514        if (!(mc.flags & MOVE_ANON))
5515                return NULL;
5516
5517        /*
5518         * Handle MEMORY_DEVICE_PRIVATE pages, which are ZONE_DEVICE pages
5519         * belonging to a device; because they are not accessible by the
5520         * CPU, they are stored as special swap entries in the CPU page table.
5521         */
5522        if (is_device_private_entry(ent)) {
5523                page = device_private_entry_to_page(ent);
5524                /*
5525                 * MEMORY_DEVICE_PRIVATE means a ZONE_DEVICE page, which has
5526                 * a refcount of 1 when free (unlike a normal page).
5527                 */
5528                if (!page_ref_add_unless(page, 1, 1))
5529                        return NULL;
5530                return page;
5531        }
5532
5533        if (non_swap_entry(ent))
5534                return NULL;
5535
5536        /*
5537         * Because lookup_swap_cache() updates some statistics counters,
5538         * we call find_get_page() with swapper_space directly.
5539         */
5540        page = find_get_page(swap_address_space(ent), swp_offset(ent));
5541        entry->val = ent.val;
5542
5543        return page;
5544}
5545#else
5546static struct page *mc_handle_swap_pte(struct vm_area_struct *vma,
5547                        pte_t ptent, swp_entry_t *entry)
5548{
5549        return NULL;
5550}
5551#endif
5552
5553static struct page *mc_handle_file_pte(struct vm_area_struct *vma,
5554                        unsigned long addr, pte_t ptent, swp_entry_t *entry)
5555{
5556        if (!vma->vm_file) /* anonymous vma */
5557                return NULL;
5558        if (!(mc.flags & MOVE_FILE))
5559                return NULL;
5560
5561        /* the page is moved even if it's not RSS of this task (page-faulted). */
5562        /* shmem/tmpfs may report a page as swapped out: account for that too. */
5563        return find_get_incore_page(vma->vm_file->f_mapping,
5564                        linear_page_index(vma, addr));
5565}
5566
5567/**
5568 * mem_cgroup_move_account - move account of the page
5569 * @page: the page
5570 * @compound: charge the page as compound or small page
5571 * @from: mem_cgroup which the page is moved from.
5572 * @to: mem_cgroup which the page is moved to. @from != @to.
5573 *
5574 * The caller must make sure the page is not on the LRU (isolate_lru_page() is useful.)
5575 *
5576 * This function doesn't do "charge" to new cgroup and doesn't do "uncharge"
5577 * from old cgroup.
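     *
     * A usage sketch (illustrative; this mirrors how the move-charge walk
     * below uses it for a non-compound LRU page):
     *
     *         if (!isolate_lru_page(page)) {
     *                 ret = mem_cgroup_move_account(page, false, from, to);
     *                 putback_lru_page(page);
     *         }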
5578 */
5579static int mem_cgroup_move_account(struct page *page,
5580                                   bool compound,
5581                                   struct mem_cgroup *from,
5582                                   struct mem_cgroup *to)
5583{
5584        struct lruvec *from_vec, *to_vec;
5585        struct pglist_data *pgdat;
5586        unsigned int nr_pages = compound ? thp_nr_pages(page) : 1;
5587        int ret;
5588
5589        VM_BUG_ON(from == to);
5590        VM_BUG_ON_PAGE(PageLRU(page), page);
5591        VM_BUG_ON(compound && !PageTransHuge(page));
5592
5593        /*
5594         * Prevent mem_cgroup_migrate() from looking at the page's
5595         * memory cgroup while we change it.
5596         */
5597        ret = -EBUSY;
5598        if (!trylock_page(page))
5599                goto out;
5600
5601        ret = -EINVAL;
5602        if (page_memcg(page) != from)
5603                goto out_unlock;
5604
5605        pgdat = page_pgdat(page);
5606        from_vec = mem_cgroup_lruvec(from, pgdat);
5607        to_vec = mem_cgroup_lruvec(to, pgdat);
5608
5609        lock_page_memcg(page);
5610
5611        if (PageAnon(page)) {
5612                if (page_mapped(page)) {
5613                        __mod_lruvec_state(from_vec, NR_ANON_MAPPED, -nr_pages);
5614                        __mod_lruvec_state(to_vec, NR_ANON_MAPPED, nr_pages);
5615                        if (PageTransHuge(page)) {
5616                                __mod_lruvec_state(from_vec, NR_ANON_THPS,
5617                                                   -nr_pages);
5618                                __mod_lruvec_state(to_vec, NR_ANON_THPS,
5619                                                   nr_pages);
5620                        }
5621                }
5622        } else {
5623                __mod_lruvec_state(from_vec, NR_FILE_PAGES, -nr_pages);
5624                __mod_lruvec_state(to_vec, NR_FILE_PAGES, nr_pages);
5625
5626                if (PageSwapBacked(page)) {
5627                        __mod_lruvec_state(from_vec, NR_SHMEM, -nr_pages);
5628                        __mod_lruvec_state(to_vec, NR_SHMEM, nr_pages);
5629                }
5630
5631                if (page_mapped(page)) {
5632                        __mod_lruvec_state(from_vec, NR_FILE_MAPPED, -nr_pages);
5633                        __mod_lruvec_state(to_vec, NR_FILE_MAPPED, nr_pages);
5634                }
5635
5636                if (PageDirty(page)) {
5637                        struct address_space *mapping = page_mapping(page);
5638
5639                        if (mapping_can_writeback(mapping)) {
5640                                __mod_lruvec_state(from_vec, NR_FILE_DIRTY,
5641                                                   -nr_pages);
5642                                __mod_lruvec_state(to_vec, NR_FILE_DIRTY,
5643                                                   nr_pages);
5644                        }
5645                }
5646        }
5647
5648        if (PageWriteback(page)) {
5649                __mod_lruvec_state(from_vec, NR_WRITEBACK, -nr_pages);
5650                __mod_lruvec_state(to_vec, NR_WRITEBACK, nr_pages);
5651        }
5652
5653        /*
5654         * All state has been migrated, let's switch to the new memcg.
5655         *
5656         * It is safe to change page's memcg here because the page
5657         * is referenced, charged, isolated, and locked: we can't race
5658         * with (un)charging, migration, LRU putback, or anything else
5659         * that would rely on a stable page's memory cgroup.
5660         *
5661         * Note that lock_page_memcg is a memcg lock, not a page lock,
5662         * to save space. As soon as we switch page's memory cgroup to a
5663         * new memcg that isn't locked, the above state can change
5664         * concurrently again. Make sure we're truly done with it.
5665         */
5666        smp_mb();
5667
5668        css_get(&to->css);
5669        css_put(&from->css);
5670
5671        page->memcg_data = (unsigned long)to;
5672
5673        __unlock_page_memcg(from);
5674
5675        ret = 0;
5676
5677        local_irq_disable();
5678        mem_cgroup_charge_statistics(to, page, nr_pages);
5679        memcg_check_events(to, page);
5680        mem_cgroup_charge_statistics(from, page, -nr_pages);
5681        memcg_check_events(from, page);
5682        local_irq_enable();
5683out_unlock:
5684        unlock_page(page);
5685out:
5686        return ret;
5687}
5688
5689/**
5690 * get_mctgt_type - get target type of moving charge
5691 * @vma: the vma to which the pte to be checked belongs
5692 * @addr: the address corresponding to the pte to be checked
5693 * @ptent: the pte to be checked
5694 * @target: pointer where the target page or swap entry is stored (can be NULL)
5695 *
5696 * Returns
5697 *   0 (MC_TARGET_NONE): if the pte is not a target for move charge.
5698 *   1 (MC_TARGET_PAGE): if the page corresponding to this pte is a target for
5699 *     move charge. If @target is not NULL, the page is stored in target->page
5700 *     with an extra refcount taken (callers should handle it).
5701 *   2 (MC_TARGET_SWAP): if the swap entry corresponding to this pte is a
5702 *     target for charge migration. If @target is not NULL, the entry is
5703 *     stored in target->ent.
5704 *   3 (MC_TARGET_DEVICE): like MC_TARGET_PAGE but the page is
5705 *     MEMORY_DEVICE_PRIVATE (so a ZONE_DEVICE page and thus not on the lru).
5706 *     For now such pages are charged like regular pages would be, as for
5707 *     all intents and purposes they are just special memory taking the
5708 *     place of regular pages.
5709 *
5710 *     See Documentation/vm/hmm.rst and include/linux/hmm.h
5711 *
5712 * Called with pte lock held.
5713 */
5714
5715static enum mc_target_type get_mctgt_type(struct vm_area_struct *vma,
5716                unsigned long addr, pte_t ptent, union mc_target *target)
5717{
5718        struct page *page = NULL;
5719        enum mc_target_type ret = MC_TARGET_NONE;
5720        swp_entry_t ent = { .val = 0 };
5721
5722        if (pte_present(ptent))
5723                page = mc_handle_present_pte(vma, addr, ptent);
5724        else if (is_swap_pte(ptent))
5725                page = mc_handle_swap_pte(vma, ptent, &ent);
5726        else if (pte_none(ptent))
5727                page = mc_handle_file_pte(vma, addr, ptent, &ent);
5728
5729        if (!page && !ent.val)
5730                return ret;
5731        if (page) {
5732                /*
5733                 * Do only a loose check without serialization.
5734                 * mem_cgroup_move_account() checks whether the page is
5735                 * valid under LRU exclusion.
5736                 */
5737                if (page_memcg(page) == mc.from) {
5738                        ret = MC_TARGET_PAGE;
5739                        if (is_device_private_page(page))
5740                                ret = MC_TARGET_DEVICE;
5741                        if (target)
5742                                target->page = page;
5743                }
5744                if (!ret || !target)
5745                        put_page(page);
5746        }
5747        /*
5748         * There is a swap entry and the page doesn't exist or isn't charged.
5749         * But we cannot move a tail page of a THP.
5750         */
5751        if (ent.val && !ret && (!page || !PageTransCompound(page)) &&
5752            mem_cgroup_id(mc.from) == lookup_swap_cgroup_id(ent)) {
5753                ret = MC_TARGET_SWAP;
5754                if (target)
5755                        target->ent = ent;
5756        }
5757        return ret;
5758}
5759
5760#ifdef CONFIG_TRANSPARENT_HUGEPAGE
5761/*
5762 * We don't consider PMD-mapped swapping or file-mapped pages because THP
5763 * does not support them for now.
5764 * The caller should make sure that pmd_trans_huge(pmd) is true.
5765 */
5766static enum mc_target_type get_mctgt_type_thp(struct vm_area_struct *vma,
5767                unsigned long addr, pmd_t pmd, union mc_target *target)
5768{
5769        struct page *page = NULL;
5770        enum mc_target_type ret = MC_TARGET_NONE;
5771
5772        if (unlikely(is_swap_pmd(pmd))) {
5773                VM_BUG_ON(thp_migration_supported() &&
5774                                  !is_pmd_migration_entry(pmd));
5775                return ret;
5776        }
5777        page = pmd_page(pmd);
5778        VM_BUG_ON_PAGE(!page || !PageHead(page), page);
5779        if (!(mc.flags & MOVE_ANON))
5780                return ret;
5781        if (page_memcg(page) == mc.from) {
5782                ret = MC_TARGET_PAGE;
5783                if (target) {
5784                        get_page(page);
5785                        target->page = page;
5786                }
5787        }
5788        return ret;
5789}
5790#else
5791static inline enum mc_target_type get_mctgt_type_thp(struct vm_area_struct *vma,
5792                unsigned long addr, pmd_t pmd, union mc_target *target)
5793{
5794        return MC_TARGET_NONE;
5795}
5796#endif
5797
5798static int mem_cgroup_count_precharge_pte_range(pmd_t *pmd,
5799                                        unsigned long addr, unsigned long end,
5800                                        struct mm_walk *walk)
5801{
5802        struct vm_area_struct *vma = walk->vma;
5803        pte_t *pte;
5804        spinlock_t *ptl;
5805
5806        ptl = pmd_trans_huge_lock(pmd, vma);
5807        if (ptl) {
5808                /*
5809                 * Note there cannot be MC_TARGET_DEVICE for now, as we do
5810                 * not support transparent huge pages with
5811                 * MEMORY_DEVICE_PRIVATE, but this might change.
5812                 */
5813                if (get_mctgt_type_thp(vma, addr, *pmd, NULL) == MC_TARGET_PAGE)
5814                        mc.precharge += HPAGE_PMD_NR;
5815                spin_unlock(ptl);
5816                return 0;
5817        }
5818
5819        if (pmd_trans_unstable(pmd))
5820                return 0;
5821        pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl);
5822        for (; addr != end; pte++, addr += PAGE_SIZE)
5823                if (get_mctgt_type(vma, addr, *pte, NULL))
5824                        mc.precharge++; /* increment precharge temporarily */
5825        pte_unmap_unlock(pte - 1, ptl);
5826        cond_resched();
5827
5828        return 0;
5829}
5830
5831static const struct mm_walk_ops precharge_walk_ops = {
5832        .pmd_entry      = mem_cgroup_count_precharge_pte_range,
5833};
5834
5835static unsigned long mem_cgroup_count_precharge(struct mm_struct *mm)
5836{
5837        unsigned long precharge;
5838
5839        mmap_read_lock(mm);
5840        walk_page_range(mm, 0, mm->highest_vm_end, &precharge_walk_ops, NULL);
5841        mmap_read_unlock(mm);
5842
5843        precharge = mc.precharge;
5844        mc.precharge = 0;
5845
5846        return precharge;
5847}
5848
5849static int mem_cgroup_precharge_mc(struct mm_struct *mm)
5850{
5851        unsigned long precharge = mem_cgroup_count_precharge(mm);
5852
5853        VM_BUG_ON(mc.moving_task);
5854        mc.moving_task = current;
5855        return mem_cgroup_do_precharge(precharge);
5856}
5857
5858/* cancels all extra charges on mc.from and mc.to, and wakes up all waiters. */
5859static void __mem_cgroup_clear_mc(void)
5860{
5861        struct mem_cgroup *from = mc.from;
5862        struct mem_cgroup *to = mc.to;
5863
5864        /* we must uncharge all the leftover precharges from mc.to */
5865        if (mc.precharge) {
5866                cancel_charge(mc.to, mc.precharge);
5867                mc.precharge = 0;
5868        }
5869        /*
5870         * we didn't uncharge from mc.from at mem_cgroup_move_account(), so
5871         * we must uncharge here.
5872         */
5873        if (mc.moved_charge) {
5874                cancel_charge(mc.from, mc.moved_charge);
5875                mc.moved_charge = 0;
5876        }
5877        /* we must fix up refcounts and charges */
5878        if (mc.moved_swap) {
5879                /* uncharge swap account from the old cgroup */
5880                if (!mem_cgroup_is_root(mc.from))
5881                        page_counter_uncharge(&mc.from->memsw, mc.moved_swap);
5882
5883                mem_cgroup_id_put_many(mc.from, mc.moved_swap);
5884
5885                /*
5886                 * we charged both to->memory and to->memsw, so we
5887                 * should uncharge to->memory.
5888                 */
5889                if (!mem_cgroup_is_root(mc.to))
5890                        page_counter_uncharge(&mc.to->memory, mc.moved_swap);
5891
5892                mc.moved_swap = 0;
5893        }
5894        memcg_oom_recover(from);
5895        memcg_oom_recover(to);
5896        wake_up_all(&mc.waitq);
5897}
5898
5899static void mem_cgroup_clear_mc(void)
5900{
5901        struct mm_struct *mm = mc.mm;
5902
5903        /*
5904         * we must clear moving_task before waking up waiters at the end of
5905         * task migration.
5906         */
5907        mc.moving_task = NULL;
5908        __mem_cgroup_clear_mc();
5909        spin_lock(&mc.lock);
5910        mc.from = NULL;
5911        mc.to = NULL;
5912        mc.mm = NULL;
5913        spin_unlock(&mc.lock);
5914
5915        mmput(mm);
5916}
5917
5918static int mem_cgroup_can_attach(struct cgroup_taskset *tset)
5919{
5920        struct cgroup_subsys_state *css;
5921        struct mem_cgroup *memcg = NULL; /* unneeded init to make gcc happy */
5922        struct mem_cgroup *from;
5923        struct task_struct *leader, *p;
5924        struct mm_struct *mm;
5925        unsigned long move_flags;
5926        int ret = 0;
5927
5928        /* charge immigration isn't supported on the default hierarchy */
5929        if (cgroup_subsys_on_dfl(memory_cgrp_subsys))
5930                return 0;
5931
5932        /*
5933         * Multi-process migrations only happen on the default hierarchy
5934         * where charge immigration is not used.  Perform charge
5935         * immigration if @tset contains a leader and whine if there are
5936         * multiple.
5937         */
5938        p = NULL;
5939        cgroup_taskset_for_each_leader(leader, css, tset) {
5940                WARN_ON_ONCE(p);
5941                p = leader;
5942                memcg = mem_cgroup_from_css(css);
5943        }
5944        if (!p)
5945                return 0;
5946
5947        /*
5948         * We are now committed to this value, whatever it is. Changes in this
5949         * tunable will only affect upcoming migrations, not the current one.
5950         * So we need to save it, and keep it going.
5951         */
5952        move_flags = READ_ONCE(memcg->move_charge_at_immigrate);
5953        if (!move_flags)
5954                return 0;
5955
5956        from = mem_cgroup_from_task(p);
5957
5958        VM_BUG_ON(from == memcg);
5959
5960        mm = get_task_mm(p);
5961        if (!mm)
5962                return 0;
5963        /* We move charges only when we move an owner of the mm */
5964        if (mm->owner == p) {
5965                VM_BUG_ON(mc.from);
5966                VM_BUG_ON(mc.to);
5967                VM_BUG_ON(mc.precharge);
5968                VM_BUG_ON(mc.moved_charge);
5969                VM_BUG_ON(mc.moved_swap);
5970
5971                spin_lock(&mc.lock);
5972                mc.mm = mm;
5973                mc.from = from;
5974                mc.to = memcg;
5975                mc.flags = move_flags;
5976                spin_unlock(&mc.lock);
5977                /* We set mc.moving_task later */
5978
5979                ret = mem_cgroup_precharge_mc(mm);
5980                if (ret)
5981                        mem_cgroup_clear_mc();
5982        } else {
5983                mmput(mm);
5984        }
5985        return ret;
5986}
5987
5988static void mem_cgroup_cancel_attach(struct cgroup_taskset *tset)
5989{
5990        if (mc.to)
5991                mem_cgroup_clear_mc();
5992}
5993
5994static int mem_cgroup_move_charge_pte_range(pmd_t *pmd,
5995                                unsigned long addr, unsigned long end,
5996                                struct mm_walk *walk)
5997{
5998        int ret = 0;
5999        struct vm_area_struct *vma = walk->vma;
6000        pte_t *pte;
6001        spinlock_t *ptl;
6002        enum mc_target_type target_type;
6003        union mc_target target;
6004        struct page *page;
6005
6006        ptl = pmd_trans_huge_lock(pmd, vma);
6007        if (ptl) {
6008                if (mc.precharge < HPAGE_PMD_NR) {
6009                        spin_unlock(ptl);
6010                        return 0;
6011                }
6012                target_type = get_mctgt_type_thp(vma, addr, *pmd, &target);
6013                if (target_type == MC_TARGET_PAGE) {
6014                        page = target.page;
6015                        if (!isolate_lru_page(page)) {
6016                                if (!mem_cgroup_move_account(page, true,
6017                                                             mc.from, mc.to)) {
6018                                        mc.precharge -= HPAGE_PMD_NR;
6019                                        mc.moved_charge += HPAGE_PMD_NR;
6020                                }
6021                                putback_lru_page(page);
6022                        }
6023                        put_page(page);
6024                } else if (target_type == MC_TARGET_DEVICE) {
6025                        page = target.page;
6026                        if (!mem_cgroup_move_account(page, true,
6027                                                     mc.from, mc.to)) {
6028                                mc.precharge -= HPAGE_PMD_NR;
6029                                mc.moved_charge += HPAGE_PMD_NR;
6030                        }
6031                        put_page(page);
6032                }
6033                spin_unlock(ptl);
6034                return 0;
6035        }
6036
6037        if (pmd_trans_unstable(pmd))
6038                return 0;
6039retry:
6040        pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl);
6041        for (; addr != end; addr += PAGE_SIZE) {
6042                pte_t ptent = *(pte++);
6043                bool device = false;
6044                swp_entry_t ent;
6045
6046                if (!mc.precharge)
6047                        break;
6048
6049                switch (get_mctgt_type(vma, addr, ptent, &target)) {
6050                case MC_TARGET_DEVICE:
6051                        device = true;
6052                        fallthrough;
6053                case MC_TARGET_PAGE:
6054                        page = target.page;
6055                        /*
6056                         * We can have a part of a split pmd here. Moving it
6057                         * could be done but would be too convoluted, so simply
6058                         * ignore such a partial THP and keep it in the original
6059                         * memcg. There should be somebody mapping the head page.
6060                         */
6061                        if (PageTransCompound(page))
6062                                goto put;
6063                        if (!device && isolate_lru_page(page))
6064                                goto put;
6065                        if (!mem_cgroup_move_account(page, false,
6066                                                mc.from, mc.to)) {
6067                                mc.precharge--;
6068                                /* we uncharge from mc.from later. */
6069                                mc.moved_charge++;
6070                        }
6071                        if (!device)
6072                                putback_lru_page(page);
6073put:                    /* get_mctgt_type() gets the page */
6074                        put_page(page);
6075                        break;
6076                case MC_TARGET_SWAP:
6077                        ent = target.ent;
6078                        if (!mem_cgroup_move_swap_account(ent, mc.from, mc.to)) {
6079                                mc.precharge--;
6080                                mem_cgroup_id_get_many(mc.to, 1);
6081                                /* we fixup other refcnts and charges later. */
6082                                mc.moved_swap++;
6083                        }
6084                        break;
6085                default:
6086                        break;
6087                }
6088        }
6089        pte_unmap_unlock(pte - 1, ptl);
6090        cond_resched();
6091
6092        if (addr != end) {
6093                /*
6094                 * We have consumed all precharges we got in can_attach().
6095                 * We try to charge one by one, but don't do any additional
6096                 * charges to mc.to if charging has already failed once during
6097                 * the attach() phase.
6098                 */
6099                ret = mem_cgroup_do_precharge(1);
6100                if (!ret)
6101                        goto retry;
6102        }
6103
6104        return ret;
6105}
6106
6107static const struct mm_walk_ops charge_walk_ops = {
6108        .pmd_entry      = mem_cgroup_move_charge_pte_range,
6109};
6110
6111static void mem_cgroup_move_charge(void)
6112{
6113        lru_add_drain_all();
6114        /*
6115         * Signal lock_page_memcg() to take the memcg's move_lock
6116         * while we're moving its pages to another memcg. Then wait
6117         * for already started RCU-only updates to finish.
6118         */
6119        atomic_inc(&mc.from->moving_account);
6120        synchronize_rcu();
6121retry:
6122        if (unlikely(!mmap_read_trylock(mc.mm))) {
6123                /*
6124                 * Someone who is holding the mmap_lock might be waiting on the
6125                 * waitq. So we cancel all extra charges, wake up all waiters,
6126                 * and retry. Because we cancel precharges, we might not be able
6127                 * to move enough charges, but moving charge is a best-effort
6128                 * feature anyway, so it wouldn't be a big problem.
6129                 */
6130                __mem_cgroup_clear_mc();
6131                cond_resched();
6132                goto retry;
6133        }
6134        /*
6135         * When we have consumed all precharges and failed to get an
6136         * additional charge, the page walk just aborts.
6137         */
6138        walk_page_range(mc.mm, 0, mc.mm->highest_vm_end, &charge_walk_ops,
6139                        NULL);
6140
6141        mmap_read_unlock(mc.mm);
6142        atomic_dec(&mc.from->moving_account);
6143}
6144
6145static void mem_cgroup_move_task(void)
6146{
6147        if (mc.to) {
6148                mem_cgroup_move_charge();
6149                mem_cgroup_clear_mc();
6150        }
6151}
6152#else   /* !CONFIG_MMU */
6153static int mem_cgroup_can_attach(struct cgroup_taskset *tset)
6154{
6155        return 0;
6156}
6157static void mem_cgroup_cancel_attach(struct cgroup_taskset *tset)
6158{
6159}
6160static void mem_cgroup_move_task(void)
6161{
6162}
6163#endif
6164
6165static int seq_puts_memcg_tunable(struct seq_file *m, unsigned long value)
6166{
6167        if (value == PAGE_COUNTER_MAX)
6168                seq_puts(m, "max\n");
6169        else
6170                seq_printf(m, "%llu\n", (u64)value * PAGE_SIZE);
6171
6172        return 0;
6173}
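
/*
 * Illustrative note: with a 4KiB PAGE_SIZE, a tunable holding 25600
 * pages prints as "104857600\n", while an unset limit
 * (PAGE_COUNTER_MAX) prints as "max\n" - the same token that
 * page_counter_memparse() accepts on the write side.
 */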
6174
6175static u64 memory_current_read(struct cgroup_subsys_state *css,
6176                               struct cftype *cft)
6177{
6178        struct mem_cgroup *memcg = mem_cgroup_from_css(css);
6179
6180        return (u64)page_counter_read(&memcg->memory) * PAGE_SIZE;
6181}
6182
6183static int memory_min_show(struct seq_file *m, void *v)
6184{
6185        return seq_puts_memcg_tunable(m,
6186                READ_ONCE(mem_cgroup_from_seq(m)->memory.min));
6187}
6188
6189static ssize_t memory_min_write(struct kernfs_open_file *of,
6190                                char *buf, size_t nbytes, loff_t off)
6191{
6192        struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of));
6193        unsigned long min;
6194        int err;
6195
6196        buf = strstrip(buf);
6197        err = page_counter_memparse(buf, "max", &min);
6198        if (err)
6199                return err;
6200
6201        page_counter_set_min(&memcg->memory, min);
6202
6203        return nbytes;
6204}
6205
6206static int memory_low_show(struct seq_file *m, void *v)
6207{
6208        return seq_puts_memcg_tunable(m,
6209                READ_ONCE(mem_cgroup_from_seq(m)->memory.low));
6210}
6211
6212static ssize_t memory_low_write(struct kernfs_open_file *of,
6213                                char *buf, size_t nbytes, loff_t off)
6214{
6215        struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of));
6216        unsigned long low;
6217        int err;
6218
6219        buf = strstrip(buf);
6220        err = page_counter_memparse(buf, "max", &low);
6221        if (err)
6222                return err;
6223
6224        page_counter_set_low(&memcg->memory, low);
6225
6226        return nbytes;
6227}
6228
6229static int memory_high_show(struct seq_file *m, void *v)
6230{
6231        return seq_puts_memcg_tunable(m,
6232                READ_ONCE(mem_cgroup_from_seq(m)->memory.high));
6233}
6234
6235static ssize_t memory_high_write(struct kernfs_open_file *of,
6236                                 char *buf, size_t nbytes, loff_t off)
6237{
6238        struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of));
6239        unsigned int nr_retries = MAX_RECLAIM_RETRIES;
6240        bool drained = false;
6241        unsigned long high;
6242        int err;
6243
6244        buf = strstrip(buf);
6245        err = page_counter_memparse(buf, "max", &high);
6246        if (err)
6247                return err;
6248
6249        page_counter_set_high(&memcg->memory, high);
6250
6251        for (;;) {
6252                unsigned long nr_pages = page_counter_read(&memcg->memory);
6253                unsigned long reclaimed;
6254
6255                if (nr_pages <= high)
6256                        break;
6257
6258                if (signal_pending(current))
6259                        break;
6260
6261                if (!drained) {
6262                        drain_all_stock(memcg);
6263                        drained = true;
6264                        continue;
6265                }
6266
6267                reclaimed = try_to_free_mem_cgroup_pages(memcg, nr_pages - high,
6268                                                         GFP_KERNEL, true);
6269
6270                if (!reclaimed && !nr_retries--)
6271                        break;
6272        }
6273
6274        memcg_wb_domain_size_changed(memcg);
6275        return nbytes;
6276}
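
/*
 * Illustrative sketch (assumed usage): lowering memory.high from
 * userspace drives the synchronous reclaim loop above, e.g.:
 *
 *      # echo 512M > /sys/fs/cgroup/foo/memory.high
 *
 * If usage cannot be reclaimed below the new high within
 * MAX_RECLAIM_RETRIES attempts, the write still succeeds; subsequent
 * allocations are throttled at charge time instead.
 */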
6277
6278static int memory_max_show(struct seq_file *m, void *v)
6279{
6280        return seq_puts_memcg_tunable(m,
6281                READ_ONCE(mem_cgroup_from_seq(m)->memory.max));
6282}
6283
6284static ssize_t memory_max_write(struct kernfs_open_file *of,
6285                                char *buf, size_t nbytes, loff_t off)
6286{
6287        struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of));
6288        unsigned int nr_reclaims = MAX_RECLAIM_RETRIES;
6289        bool drained = false;
6290        unsigned long max;
6291        int err;
6292
6293        buf = strstrip(buf);
6294        err = page_counter_memparse(buf, "max", &max);
6295        if (err)
6296                return err;
6297
6298        xchg(&memcg->memory.max, max);
6299
6300        for (;;) {
6301                unsigned long nr_pages = page_counter_read(&memcg->memory);
6302
6303                if (nr_pages <= max)
6304                        break;
6305
6306                if (signal_pending(current))
6307                        break;
6308
6309                if (!drained) {
6310                        drain_all_stock(memcg);
6311                        drained = true;
6312                        continue;
6313                }
6314
6315                if (nr_reclaims) {
6316                        if (!try_to_free_mem_cgroup_pages(memcg, nr_pages - max,
6317                                                          GFP_KERNEL, true))
6318                                nr_reclaims--;
6319                        continue;
6320                }
6321
6322                memcg_memory_event(memcg, MEMCG_OOM);
6323                if (!mem_cgroup_out_of_memory(memcg, GFP_KERNEL, 0))
6324                        break;
6325        }
6326
6327        memcg_wb_domain_size_changed(memcg);
6328        return nbytes;
6329}
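
/*
 * Illustrative note: unlike the memory.high loop above, exhausting
 * the reclaim retries here falls through to
 * mem_cgroup_out_of_memory(), so shrinking memory.max below current
 * usage can OOM-kill tasks in the cgroup rather than merely
 * throttling them.
 */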
6330
6331static void __memory_events_show(struct seq_file *m, atomic_long_t *events)
6332{
6333        seq_printf(m, "low %lu\n", atomic_long_read(&events[MEMCG_LOW]));
6334        seq_printf(m, "high %lu\n", atomic_long_read(&events[MEMCG_HIGH]));
6335        seq_printf(m, "max %lu\n", atomic_long_read(&events[MEMCG_MAX]));
6336        seq_printf(m, "oom %lu\n", atomic_long_read(&events[MEMCG_OOM]));
6337        seq_printf(m, "oom_kill %lu\n",
6338                   atomic_long_read(&events[MEMCG_OOM_KILL]));
6339}
6340
6341static int memory_events_show(struct seq_file *m, void *v)
6342{
6343        struct mem_cgroup *memcg = mem_cgroup_from_seq(m);
6344
6345        __memory_events_show(m, memcg->memory_events);
6346        return 0;
6347}
6348
6349static int memory_events_local_show(struct seq_file *m, void *v)
6350{
6351        struct mem_cgroup *memcg = mem_cgroup_from_seq(m);
6352
6353        __memory_events_show(m, memcg->memory_events_local);
6354        return 0;
6355}
6356
6357static int memory_stat_show(struct seq_file *m, void *v)
6358{
6359        struct mem_cgroup *memcg = mem_cgroup_from_seq(m);
6360        char *buf;
6361
6362        buf = memory_stat_format(memcg);
6363        if (!buf)
6364                return -ENOMEM;
6365        seq_puts(m, buf);
6366        kfree(buf);
6367        return 0;
6368}
6369
6370#ifdef CONFIG_NUMA
6371static inline unsigned long lruvec_page_state_output(struct lruvec *lruvec,
6372                                                     int item)
6373{
6374        return lruvec_page_state(lruvec, item) * memcg_page_state_unit(item);
6375}
6376
6377static int memory_numa_stat_show(struct seq_file *m, void *v)
6378{
6379        int i;
6380        struct mem_cgroup *memcg = mem_cgroup_from_seq(m);
6381
6382        for (i = 0; i < ARRAY_SIZE(memory_stats); i++) {
6383                int nid;
6384
6385                if (memory_stats[i].idx >= NR_VM_NODE_STAT_ITEMS)
6386                        continue;
6387
6388                seq_printf(m, "%s", memory_stats[i].name);
6389                for_each_node_state(nid, N_MEMORY) {
6390                        u64 size;
6391                        struct lruvec *lruvec;
6392
6393                        lruvec = mem_cgroup_lruvec(memcg, NODE_DATA(nid));
6394                        size = lruvec_page_state_output(lruvec,
6395                                                        memory_stats[i].idx);
6396                        seq_printf(m, " N%d=%llu", nid, size);
6397                }
6398                seq_putc(m, '\n');
6399        }
6400
6401        return 0;
6402}
6403#endif
6404
6405static int memory_oom_group_show(struct seq_file *m, void *v)
6406{
6407        struct mem_cgroup *memcg = mem_cgroup_from_seq(m);
6408
6409        seq_printf(m, "%d\n", memcg->oom_group);
6410
6411        return 0;
6412}
6413
6414static ssize_t memory_oom_group_write(struct kernfs_open_file *of,
6415                                      char *buf, size_t nbytes, loff_t off)
6416{
6417        struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of));
6418        int ret, oom_group;
6419
6420        buf = strstrip(buf);
6421        if (!buf)
6422                return -EINVAL;
6423
6424        ret = kstrtoint(buf, 0, &oom_group);
6425        if (ret)
6426                return ret;
6427
6428        if (oom_group != 0 && oom_group != 1)
6429                return -EINVAL;
6430
6431        memcg->oom_group = oom_group;
6432
6433        return nbytes;
6434}
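
/*
 * Illustrative note: writing "1" to memory.oom.group marks the cgroup
 * as an indivisible workload, so an OOM kill attributed to it takes
 * down all tasks in the cgroup rather than a single victim.
 */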
6435
6436static struct cftype memory_files[] = {
6437        {
6438                .name = "current",
6439                .flags = CFTYPE_NOT_ON_ROOT,
6440                .read_u64 = memory_current_read,
6441        },
6442        {
6443                .name = "min",
6444                .flags = CFTYPE_NOT_ON_ROOT,
6445                .seq_show = memory_min_show,
6446                .write = memory_min_write,
6447        },
6448        {
6449                .name = "low",
6450                .flags = CFTYPE_NOT_ON_ROOT,
6451                .seq_show = memory_low_show,
6452                .write = memory_low_write,
6453        },
6454        {
6455                .name = "high",
6456                .flags = CFTYPE_NOT_ON_ROOT,
6457                .seq_show = memory_high_show,
6458                .write = memory_high_write,
6459        },
6460        {
6461                .name = "max",
6462                .flags = CFTYPE_NOT_ON_ROOT,
6463                .seq_show = memory_max_show,
6464                .write = memory_max_write,
6465        },
6466        {
6467                .name = "events",
6468                .flags = CFTYPE_NOT_ON_ROOT,
6469                .file_offset = offsetof(struct mem_cgroup, events_file),
6470                .seq_show = memory_events_show,
6471        },
6472        {
6473                .name = "events.local",
6474                .flags = CFTYPE_NOT_ON_ROOT,
6475                .file_offset = offsetof(struct mem_cgroup, events_local_file),
6476                .seq_show = memory_events_local_show,
6477        },
6478        {
6479                .name = "stat",
6480                .seq_show = memory_stat_show,
6481        },
6482#ifdef CONFIG_NUMA
6483        {
6484                .name = "numa_stat",
6485                .seq_show = memory_numa_stat_show,
6486        },
6487#endif
6488        {
6489                .name = "oom.group",
6490                .flags = CFTYPE_NOT_ON_ROOT | CFTYPE_NS_DELEGATABLE,
6491                .seq_show = memory_oom_group_show,
6492                .write = memory_oom_group_write,
6493        },
6494        { }     /* terminate */
6495};
6496
6497struct cgroup_subsys memory_cgrp_subsys = {
6498        .css_alloc = mem_cgroup_css_alloc,
6499        .css_online = mem_cgroup_css_online,
6500        .css_offline = mem_cgroup_css_offline,
6501        .css_released = mem_cgroup_css_released,
6502        .css_free = mem_cgroup_css_free,
6503        .css_reset = mem_cgroup_css_reset,
6504        .can_attach = mem_cgroup_can_attach,
6505        .cancel_attach = mem_cgroup_cancel_attach,
6506        .post_attach = mem_cgroup_move_task,
6507        .dfl_cftypes = memory_files,
6508        .legacy_cftypes = mem_cgroup_legacy_files,
6509        .early_init = 0,
6510};
6511
6512/*
6513 * This function calculates an individual cgroup's effective
6514 * protection which is derived from its own memory.min/low, its
6515 * parent's and siblings' settings, as well as the actual memory
6516 * distribution in the tree.
6517 *
6518 * The following rules apply to the effective protection values:
6519 *
6520 * 1. At the first level of reclaim, effective protection is equal to
6521 *    the declared protection in memory.min and memory.low.
6522 *
6523 * 2. To enable safe delegation of the protection configuration, at
6524 *    subsequent levels the effective protection is capped to the
6525 *    parent's effective protection.
6526 *
6527 * 3. To make complex and dynamic subtrees easier to configure, the
6528 *    user is allowed to overcommit the declared protection at a given
6529 *    level. If that is the case, the parent's effective protection is
6530 *    distributed to the children in proportion to how much protection
6531 *    they have declared and how much of it they are utilizing.
6532 *
6533 *    This makes distribution proportional, but also work-conserving:
6534 *    if one cgroup claims much more protection than the memory it uses,
6535 *    the unused remainder is available to its siblings.
6536 *
6537 * 4. Conversely, when the declared protection is undercommitted at a
6538 *    given level, the distribution of the larger parental protection
6539 *    budget is NOT proportional. A cgroup's protection from a sibling
6540 *    is capped to its own memory.min/low setting.
6541 *
6542 * 5. However, to allow protecting recursive subtrees from each other
6543 *    without having to declare each individual cgroup's fixed share
6544 *    of the ancestor's claim to protection, any unutilized -
6545 *    "floating" - protection from up the tree is distributed in
6546 *    proportion to each cgroup's *usage*. This makes the protection
6547 *    neutral wrt sibling cgroups and lets them compete freely over
6548 *    the shared parental protection budget, but it protects the
6549 *    subtree as a whole from neighboring subtrees.
6550 *
6551 * Note that 4. and 5. are not in conflict: 4. is about protecting
6552 * against immediate siblings whereas 5. is about protecting against
6553 * neighboring subtrees.
6554 */
6555static unsigned long effective_protection(unsigned long usage,
6556                                          unsigned long parent_usage,
6557                                          unsigned long setting,
6558                                          unsigned long parent_effective,
6559                                          unsigned long siblings_protected)
6560{
6561        unsigned long protected;
6562        unsigned long ep;
6563
6564        protected = min(usage, setting);
6565        /*
6566         * If all cgroups at this level combined claim and use more
6567         * protection than what the parent affords them, distribute
6568         * shares in proportion to utilization.
6569         *
6570         * We are using actual utilization rather than the statically
6571         * claimed protection in order to be work-conserving: claimed
6572         * but unused protection is available to siblings that would
6573         * otherwise get a smaller chunk than what they claimed.
6574         */
6575        if (siblings_protected > parent_effective)
6576                return protected * parent_effective / siblings_protected;
6577
6578        /*
6579         * Ok, utilized protection of all children is within what the
6580         * parent affords them, so we know whatever this child claims
6581         * and utilizes is effectively protected.
6582         *
6583         * If there is unprotected usage beyond this value, reclaim
6584         * will apply pressure in proportion to that amount.
6585         *
6586         * If there is unutilized protection, the cgroup will be fully
6587         * shielded from reclaim, but we do return a smaller value for
6588         * protection than what the group could enjoy in theory. This
6589         * is okay. With the overcommit distribution above, effective
6590         * protection is always dependent on how memory is actually
6591         * consumed among the siblings anyway.
6592         */
6593        ep = protected;
6594
6595        /*
6596         * If the children aren't claiming (all of) the protection
6597         * afforded to them by the parent, distribute the remainder in
6598         * proportion to the (unprotected) memory of each cgroup. That
6599         * way, cgroups that aren't explicitly prioritized wrt each
6600         * other compete freely over the allowance, but they are
6601         * collectively protected from neighboring trees.
6602         *
6603         * We're using unprotected memory for the weight so that if
6604         * some cgroups DO claim explicit protection, we don't protect
6605         * the same bytes twice.
6606         *
6607         * Check both usage and parent_usage against the respective
6608         * protected values. One should imply the other, but they
6609         * aren't read atomically - make sure the division is sane.
6610         */
6611        if (!(cgrp_dfl_root.flags & CGRP_ROOT_MEMORY_RECURSIVE_PROT))
6612                return ep;
6613        if (parent_effective > siblings_protected &&
6614            parent_usage > siblings_protected &&
6615            usage > protected) {
6616                unsigned long unclaimed;
6617
6618                unclaimed = parent_effective - siblings_protected;
6619                unclaimed *= usage - protected;
6620                unclaimed /= parent_usage - siblings_protected;
6621
6622                ep += unclaimed;
6623        }
6624
6625        return ep;
6626}
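
/*
 * Worked example (with made-up numbers): suppose the parent's
 * effective low protection is 1G, and its two children declare and
 * fully utilize 1500M and 500M of memory.low respectively. Their
 * combined claim of 2G (tracked in children_low_usage) overcommits
 * the parent's budget, so each child is scaled proportionally by the
 * first branch above:
 *
 *      ep(child1) = 1500M * 1G / 2G = 750M
 *      ep(child2) =  500M * 1G / 2G = 250M
 *
 * Had the combined claim fit within 1G, each child's claim would have
 * been fully effective, with any leftover parental budget distributed
 * by the recursive-protection branch when that mount flag is set.
 */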
6627
6628/**
6629 * mem_cgroup_calculate_protection - calculate effective memory protection
6630 * @root: the top ancestor of the sub-tree being checked
6631 * @memcg: the memory cgroup to check
6632 *
6633 * WARNING: This function is not stateless! It can only be used as part
6634 *          of a top-down tree iteration, not for isolated queries.
6635 */
6636void mem_cgroup_calculate_protection(struct mem_cgroup *root,
6637                                     struct mem_cgroup *memcg)
6638{
6639        unsigned long usage, parent_usage;
6640        struct mem_cgroup *parent;
6641
6642        if (mem_cgroup_disabled())
6643                return;
6644
6645        if (!root)
6646                root = root_mem_cgroup;
6647
6648        /*
6649         * Effective values of the reclaim targets are ignored so they
6650         * can be stale. See mem_cgroup_protection() for more
6651         * details.
6652         * TODO: the calculation should be made more robust so that we
6653         * do not need this special casing.
6654         */
6655        if (memcg == root)
6656                return;
6657
6658        usage = page_counter_read(&memcg->memory);
6659        if (!usage)
6660                return;
6661
6662        parent = parent_mem_cgroup(memcg);
6663        /* No parent means a non-hierarchical mode on v1 memcg */
6664        if (!parent)
6665                return;
6666
6667        if (parent == root) {
6668                memcg->memory.emin = READ_ONCE(memcg->memory.min);
6669                memcg->memory.elow = READ_ONCE(memcg->memory.low);
6670                return;
6671        }
6672
6673        parent_usage = page_counter_read(&parent->memory);
6674
6675        WRITE_ONCE(memcg->memory.emin, effective_protection(usage, parent_usage,
6676                        READ_ONCE(memcg->memory.min),
6677                        READ_ONCE(parent->memory.emin),
6678                        atomic_long_read(&parent->memory.children_min_usage)));
6679
6680        WRITE_ONCE(memcg->memory.elow, effective_protection(usage, parent_usage,
6681                        READ_ONCE(memcg->memory.low),
6682                        READ_ONCE(parent->memory.elow),
6683                        atomic_long_read(&parent->memory.children_low_usage)));
6684}
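
/*
 * Illustrative sketch (assumed caller, modeled on the reclaim code in
 * mm/vmscan.c): the effective values computed above are only valid
 * within a top-down tree walk such as:
 *
 *      memcg = mem_cgroup_iter(target_memcg, NULL, NULL);
 *      do {
 *              mem_cgroup_calculate_protection(target_memcg, memcg);
 *              if (mem_cgroup_below_min(memcg))
 *                      continue;       (hard protection, skip reclaim)
 *              ...
 *      } while ((memcg = mem_cgroup_iter(target_memcg, memcg, NULL)));
 */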
6685
6686/**
6687 * mem_cgroup_charge - charge a newly allocated page to a cgroup
6688 * @page: page to charge
6689 * @mm: mm context of the victim
6690 * @gfp_mask: reclaim mode
6691 *
6692 * Try to charge @page to the memcg that @mm belongs to, reclaiming
6693 * pages according to @gfp_mask if necessary.
6694 *
6695 * Returns 0 on success. Otherwise, an error code is returned.
6696 */
6697int mem_cgroup_charge(struct page *page, struct mm_struct *mm, gfp_t gfp_mask)
6698{
6699        unsigned int nr_pages = thp_nr_pages(page);
6700        struct mem_cgroup *memcg = NULL;
6701        int ret = 0;
6702
6703        if (mem_cgroup_disabled())
6704                goto out;
6705
6706        if (PageSwapCache(page)) {
6707                swp_entry_t ent = { .val = page_private(page), };
6708                unsigned short id;
6709
6710                /*
6711                 * Every swap fault against a single page tries to charge the
6712                 * page, so bail out as early as possible.  shmem_unuse()
6713                 * encounters already-charged pages, too.  The page-memcg
6714                 * binding is protected by the page lock, which serializes swap
6715                 * cache removal, which in turn serializes uncharging.
6716                 */
6717                VM_BUG_ON_PAGE(!PageLocked(page), page);
6718                if (page_memcg(compound_head(page)))
6719                        goto out;
6720
6721                id = lookup_swap_cgroup_id(ent);
6722                rcu_read_lock();
6723                memcg = mem_cgroup_from_id(id);
6724                if (memcg && !css_tryget_online(&memcg->css))
6725                        memcg = NULL;
6726                rcu_read_unlock();
6727        }
6728
6729        if (!memcg)
6730                memcg = get_mem_cgroup_from_mm(mm);
6731
6732        ret = try_charge(memcg, gfp_mask, nr_pages);
6733        if (ret)
6734                goto out_put;
6735
6736        css_get(&memcg->css);
6737        commit_charge(page, memcg);
6738
6739        local_irq_disable();
6740        mem_cgroup_charge_statistics(memcg, page, nr_pages);
6741        memcg_check_events(memcg, page);
6742        local_irq_enable();
6743
6744        /*
6745         * Cgroup1's unified memory+swap counter has been charged with the
6746         * new swapcache page, finish the transfer by uncharging the swap
6747         * slot. The swap slot would also get uncharged when it dies, but
6748         * it can stick around indefinitely and we'd count the page twice
6749         * the entire time.
6750         *
6751         * Cgroup2 has separate resource counters for memory and swap,
6752         * so this is a non-issue here. Memory and swap charge lifetimes
6753         * correspond 1:1 to page and swap slot lifetimes: we charge the
6754         * page to memory here, and uncharge swap when the slot is freed.
6755         */
6756        if (do_memsw_account() && PageSwapCache(page)) {
6757                swp_entry_t entry = { .val = page_private(page) };
6758                /*
6759                 * The swap entry might not get freed for a long time,
6760                 * let's not wait for it.  The page already received a
6761                 * memory+swap charge, drop the swap entry duplicate.
6762                 */
6763                mem_cgroup_uncharge_swap(entry, nr_pages);
6764        }
6765
6766out_put:
6767        css_put(&memcg->css);
6768out:
6769        return ret;
6770}
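
/*
 * Illustrative sketch (assumed caller, modeled on the anonymous fault
 * path): a newly allocated page is charged before it becomes visible,
 * and a failed charge is reported as an OOM fault:
 *
 *      page = alloc_zeroed_user_highpage_movable(vma, vmf->address);
 *      if (!page)
 *              return VM_FAULT_OOM;
 *      if (mem_cgroup_charge(page, vma->vm_mm, GFP_KERNEL)) {
 *              put_page(page);
 *              return VM_FAULT_OOM;
 *      }
 */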
6771
6772struct uncharge_gather {
6773        struct mem_cgroup *memcg;
6774        unsigned long nr_pages;
6775        unsigned long pgpgout;
6776        unsigned long nr_kmem;
6777        struct page *dummy_page;
6778};
6779
6780static inline void uncharge_gather_clear(struct uncharge_gather *ug)
6781{
6782        memset(ug, 0, sizeof(*ug));
6783}
6784
6785static void uncharge_batch(const struct uncharge_gather *ug)
6786{
6787        unsigned long flags;
6788
6789        if (!mem_cgroup_is_root(ug->memcg)) {
6790                page_counter_uncharge(&ug->memcg->memory, ug->nr_pages);
6791                if (do_memsw_account())
6792                        page_counter_uncharge(&ug->memcg->memsw, ug->nr_pages);
6793                if (!cgroup_subsys_on_dfl(memory_cgrp_subsys) && ug->nr_kmem)
6794                        page_counter_uncharge(&ug->memcg->kmem, ug->nr_kmem);
6795                memcg_oom_recover(ug->memcg);
6796        }
6797
6798        local_irq_save(flags);
6799        __count_memcg_events(ug->memcg, PGPGOUT, ug->pgpgout);
6800        __this_cpu_add(ug->memcg->vmstats_percpu->nr_page_events, ug->nr_pages);
6801        memcg_check_events(ug->memcg, ug->dummy_page);
6802        local_irq_restore(flags);
6803
6804        /* drop reference from uncharge_page */
6805        css_put(&ug->memcg->css);
6806}
6807
6808static void uncharge_page(struct page *page, struct uncharge_gather *ug)
6809{
6810        unsigned long nr_pages;
6811
6812        VM_BUG_ON_PAGE(PageLRU(page), page);
6813
6814        if (!page_memcg(page))
6815                return;
6816
6817        /*
6818         * Nobody should be changing or seriously looking at
6819         * page_memcg(page) at this point; we have fully
6820         * exclusive access to the page.
6821         */
6822
6823        if (ug->memcg != page_memcg(page)) {
6824                if (ug->memcg) {
6825                        uncharge_batch(ug);
6826                        uncharge_gather_clear(ug);
6827                }
6828                ug->memcg = page_memcg(page);
6829
6830                /* pairs with css_put in uncharge_batch */
6831                css_get(&ug->memcg->css);
6832        }
6833
6834        nr_pages = compound_nr(page);
6835        ug->nr_pages += nr_pages;
6836
6837        if (PageMemcgKmem(page))
6838                ug->nr_kmem += nr_pages;
6839        else
6840                ug->pgpgout++;
6841
6842        ug->dummy_page = page;
6843        page->memcg_data = 0;
6844        css_put(&ug->memcg->css);
6845}
6846
6847/**
6848 * mem_cgroup_uncharge - uncharge a page
6849 * @page: page to uncharge
6850 *
6851 * Uncharge a page previously charged with mem_cgroup_charge().
6852 */
6853void mem_cgroup_uncharge(struct page *page)
6854{
6855        struct uncharge_gather ug;
6856
6857        if (mem_cgroup_disabled())
6858                return;
6859
6860        /* Don't touch page->lru of any random page, pre-check: */
6861        if (!page_memcg(page))
6862                return;
6863
6864        uncharge_gather_clear(&ug);
6865        uncharge_page(page, &ug);
6866        uncharge_batch(&ug);
6867}
6868
6869/**
6870 * mem_cgroup_uncharge_list - uncharge a list of pages
6871 * @page_list: list of pages to uncharge
6872 *
6873 * Uncharge a list of pages previously charged with
6874 * mem_cgroup_charge().
6875 */
6876void mem_cgroup_uncharge_list(struct list_head *page_list)
6877{
6878        struct uncharge_gather ug;
6879        struct page *page;
6880
6881        if (mem_cgroup_disabled())
6882                return;
6883
6884        uncharge_gather_clear(&ug);
6885        list_for_each_entry(page, page_list, lru)
6886                uncharge_page(page, &ug);
6887        if (ug.memcg)
6888                uncharge_batch(&ug);
6889}
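
/*
 * Illustrative sketch (assumed caller, modeled on release_pages()):
 * batching pages on a list lets uncharge_batch() above issue one set
 * of counter updates per memcg rather than one per page:
 *
 *      list_add(&page->lru, &pages_to_free);
 *      ...
 *      mem_cgroup_uncharge_list(&pages_to_free);
 *      free_unref_page_list(&pages_to_free);
 */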
6890
6891/**
6892 * mem_cgroup_migrate - charge a page's replacement
6893 * @oldpage: currently circulating page
6894 * @newpage: replacement page
6895 *
6896 * Charge @newpage as a replacement page for @oldpage. @oldpage will
6897 * be uncharged upon free.
6898 *
6899 * Both pages must be locked, @newpage->mapping must be set up.
6900 */
6901void mem_cgroup_migrate(struct page *oldpage, struct page *newpage)
6902{
6903        struct mem_cgroup *memcg;
6904        unsigned int nr_pages;
6905        unsigned long flags;
6906
6907        VM_BUG_ON_PAGE(!PageLocked(oldpage), oldpage);
6908        VM_BUG_ON_PAGE(!PageLocked(newpage), newpage);
6909        VM_BUG_ON_PAGE(PageAnon(oldpage) != PageAnon(newpage), newpage);
6910        VM_BUG_ON_PAGE(PageTransHuge(oldpage) != PageTransHuge(newpage),
6911                       newpage);
6912
6913        if (mem_cgroup_disabled())
6914                return;
6915
6916        /* Page cache replacement: new page already charged? */
6917        if (page_memcg(newpage))
6918                return;
6919
6920        memcg = page_memcg(oldpage);
6921        VM_WARN_ON_ONCE_PAGE(!memcg, oldpage);
6922        if (!memcg)
6923                return;
6924
6925        /* Force-charge the new page. The old one will be freed soon */
6926        nr_pages = thp_nr_pages(newpage);
6927
6928        page_counter_charge(&memcg->memory, nr_pages);
6929        if (do_memsw_account())
6930                page_counter_charge(&memcg->memsw, nr_pages);
6931
6932        css_get(&memcg->css);
6933        commit_charge(newpage, memcg);
6934
6935        local_irq_save(flags);
6936        mem_cgroup_charge_statistics(memcg, newpage, nr_pages);
6937        memcg_check_events(memcg, newpage);
6938        local_irq_restore(flags);
6939}
6940
6941DEFINE_STATIC_KEY_FALSE(memcg_sockets_enabled_key);
6942EXPORT_SYMBOL(memcg_sockets_enabled_key);
6943
6944void mem_cgroup_sk_alloc(struct sock *sk)
6945{
6946        struct mem_cgroup *memcg;
6947
6948        if (!mem_cgroup_sockets_enabled)
6949                return;
6950
6951        /* Do not associate the sock with an unrelated interrupted task's memcg. */
6952        if (in_interrupt())
6953                return;
6954
6955        rcu_read_lock();
6956        memcg = mem_cgroup_from_task(current);
6957        if (memcg == root_mem_cgroup)
6958                goto out;
6959        if (!cgroup_subsys_on_dfl(memory_cgrp_subsys) && !memcg->tcpmem_active)
6960                goto out;
6961        if (css_tryget(&memcg->css))
6962                sk->sk_memcg = memcg;
6963out:
6964        rcu_read_unlock();
6965}
6966
6967void mem_cgroup_sk_free(struct sock *sk)
6968{
6969        if (sk->sk_memcg)
6970                css_put(&sk->sk_memcg->css);
6971}
6972
6973/**
6974 * mem_cgroup_charge_skmem - charge socket memory
6975 * @memcg: memcg to charge
6976 * @nr_pages: number of pages to charge
6977 *
6978 * Charges @nr_pages to @memcg. Returns %true if the charge fit within
6979 * @memcg's configured limit, %false if the charge had to be forced.
6980 */
6981bool mem_cgroup_charge_skmem(struct mem_cgroup *memcg, unsigned int nr_pages)
6982{
6983        gfp_t gfp_mask = GFP_KERNEL;
6984
6985        if (!cgroup_subsys_on_dfl(memory_cgrp_subsys)) {
6986                struct page_counter *fail;
6987
6988                if (page_counter_try_charge(&memcg->tcpmem, nr_pages, &fail)) {
6989                        memcg->tcpmem_pressure = 0;
6990                        return true;
6991                }
6992                page_counter_charge(&memcg->tcpmem, nr_pages);
6993                memcg->tcpmem_pressure = 1;
6994                return false;
6995        }
6996
6997        /* Don't block in the packet receive path */
6998        if (in_softirq())
6999                gfp_mask = GFP_NOWAIT;
7000
7001        mod_memcg_state(memcg, MEMCG_SOCK, nr_pages);
7002
7003        if (try_charge(memcg, gfp_mask, nr_pages) == 0)
7004                return true;
7005
7006        try_charge(memcg, gfp_mask|__GFP_NOFAIL, nr_pages);
7007        return false;
7008}
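
/*
 * Illustrative sketch (assumed caller, modeled on the networking
 * core): a false return means the charge only succeeded by force, so
 * the caller may suppress the allocation and give the charge back:
 *
 *      if (sk->sk_memcg &&
 *          !mem_cgroup_charge_skmem(sk->sk_memcg, nr_pages))
 *              goto suppress_allocation;
 */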
7009
7010/**
7011 * mem_cgroup_uncharge_skmem - uncharge socket memory
7012 * @memcg: memcg to uncharge
7013 * @nr_pages: number of pages to uncharge
7014 */
7015void mem_cgroup_uncharge_skmem(struct mem_cgroup *memcg, unsigned int nr_pages)
7016{
7017        if (!cgroup_subsys_on_dfl(memory_cgrp_subsys)) {
7018                page_counter_uncharge(&memcg->tcpmem, nr_pages);
7019                return;
7020        }
7021
7022        mod_memcg_state(memcg, MEMCG_SOCK, -nr_pages);
7023
7024        refill_stock(memcg, nr_pages);
7025}
7026
7027static int __init cgroup_memory(char *s)
7028{
7029        char *token;
7030
7031        while ((token = strsep(&s, ",")) != NULL) {
7032                if (!*token)
7033                        continue;
7034                if (!strcmp(token, "nosocket"))
7035                        cgroup_memory_nosocket = true;
7036                if (!strcmp(token, "nokmem"))
7037                        cgroup_memory_nokmem = true;
7038        }
7039        return 0;
7040}
7041__setup("cgroup.memory=", cgroup_memory);
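
/*
 * Illustrative note: the parser above takes a comma-separated list,
 * so booting with "cgroup.memory=nosocket,nokmem" disables both
 * socket and kernel memory accounting; unrecognized tokens are
 * silently ignored.
 */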
7042
7043/*
7044 * subsys_initcall() for memory controller.
7045 *
7046 * Some parts like memcg_hotplug_cpu_dead() have to be initialized from this
7047 * context because of lock dependencies (cgroup_lock -> cpu hotplug), but
7048 * basically everything that doesn't depend on a specific mem_cgroup structure
7049 * should be initialized from here.
7050 */
7051static int __init mem_cgroup_init(void)
7052{
7053        int cpu, node;
7054
7055        /*
7056         * An s32 type (see struct batched_lruvec_stat) is currently used
7057         * for per-memcg-per-cpu caching of per-node statistics. For this
7058         * to work correctly, we must make sure that the overfill threshold
7059         * can't exceed S32_MAX / PAGE_SIZE.
7060         */
7061        BUILD_BUG_ON(MEMCG_CHARGE_BATCH > S32_MAX / PAGE_SIZE);
7062
7063        cpuhp_setup_state_nocalls(CPUHP_MM_MEMCQ_DEAD, "mm/memctrl:dead", NULL,
7064                                  memcg_hotplug_cpu_dead);
7065
7066        for_each_possible_cpu(cpu)
7067                INIT_WORK(&per_cpu_ptr(&memcg_stock, cpu)->work,
7068                          drain_local_stock);
7069
7070        for_each_node(node) {
7071                struct mem_cgroup_tree_per_node *rtpn;
7072
7073                rtpn = kzalloc_node(sizeof(*rtpn), GFP_KERNEL,
7074                                    node_online(node) ? node : NUMA_NO_NODE);
7075
7076                rtpn->rb_root = RB_ROOT;
7077                rtpn->rb_rightmost = NULL;
7078                spin_lock_init(&rtpn->lock);
7079                soft_limit_tree.rb_tree_per_node[node] = rtpn;
7080        }
7081
7082        return 0;
7083}
7084subsys_initcall(mem_cgroup_init);
7085
7086#ifdef CONFIG_MEMCG_SWAP
7087static struct mem_cgroup *mem_cgroup_id_get_online(struct mem_cgroup *memcg)
7088{
7089        while (!refcount_inc_not_zero(&memcg->id.ref)) {
7090                /*
7091                 * The root cgroup cannot be destroyed, so its refcount must
7092                 * always be >= 1.
7093                 */
7094                if (WARN_ON_ONCE(memcg == root_mem_cgroup)) {
7095                        VM_BUG_ON(1);
7096                        break;
7097                }
7098                memcg = parent_mem_cgroup(memcg);
7099                if (!memcg)
7100                        memcg = root_mem_cgroup;
7101        }
7102        return memcg;
7103}
7104
7105/**
7106 * mem_cgroup_swapout - transfer a memsw charge to swap
7107 * @page: page whose memsw charge to transfer
7108 * @entry: swap entry to move the charge to
7109 *
7110 * Transfer the memsw charge of @page to @entry.
7111 */
7112void mem_cgroup_swapout(struct page *page, swp_entry_t entry)
7113{
7114        struct mem_cgroup *memcg, *swap_memcg;
7115        unsigned int nr_entries;
7116        unsigned short oldid;
7117
7118        VM_BUG_ON_PAGE(PageLRU(page), page);
7119        VM_BUG_ON_PAGE(page_count(page), page);
7120
7121        if (mem_cgroup_disabled())
7122                return;
7123
7124        if (cgroup_subsys_on_dfl(memory_cgrp_subsys))
7125                return;
7126
7127        memcg = page_memcg(page);
7128
7129        VM_WARN_ON_ONCE_PAGE(!memcg, page);
7130        if (!memcg)
7131                return;
7132
7133        /*
7134         * In case the memcg owning these pages has been offlined and doesn't
7135         * have an ID allocated to it anymore, charge the closest online
7136         * ancestor for the swap instead and transfer the memory+swap charge.
7137         */
7138        swap_memcg = mem_cgroup_id_get_online(memcg);
7139        nr_entries = thp_nr_pages(page);
7140        /* Get references for the tail pages, too */
7141        if (nr_entries > 1)
7142                mem_cgroup_id_get_many(swap_memcg, nr_entries - 1);
7143        oldid = swap_cgroup_record(entry, mem_cgroup_id(swap_memcg),
7144                                   nr_entries);
7145        VM_BUG_ON_PAGE(oldid, page);
7146        mod_memcg_state(swap_memcg, MEMCG_SWAP, nr_entries);
7147
7148        page->memcg_data = 0;
7149
7150        if (!mem_cgroup_is_root(memcg))
7151                page_counter_uncharge(&memcg->memory, nr_entries);
7152
7153        if (!cgroup_memory_noswap && memcg != swap_memcg) {
7154                if (!mem_cgroup_is_root(swap_memcg))
7155                        page_counter_charge(&swap_memcg->memsw, nr_entries);
7156                page_counter_uncharge(&memcg->memsw, nr_entries);
7157        }
7158
7159        /*
7160         * Interrupts should be disabled here because the caller holds the
7161         * i_pages lock, which is taken with interrupts off. Keeping them
7162         * disabled matters because it is the only synchronisation we have
7163         * for updating the per-CPU variables.
7164         */
7165        VM_BUG_ON(!irqs_disabled());
7166        mem_cgroup_charge_statistics(memcg, page, -nr_entries);
7167        memcg_check_events(memcg, page);
7168
7169        css_put(&memcg->css);
7170}
7171
7172/**
7173 * mem_cgroup_try_charge_swap - try charging swap space for a page
7174 * @page: page being added to swap
7175 * @entry: swap entry to charge
7176 *
7177 * Try to charge @page's memcg for the swap space at @entry.
7178 *
7179 * Returns 0 on success, -ENOMEM on failure.
7180 */
7181int mem_cgroup_try_charge_swap(struct page *page, swp_entry_t entry)
7182{
7183        unsigned int nr_pages = thp_nr_pages(page);
7184        struct page_counter *counter;
7185        struct mem_cgroup *memcg;
7186        unsigned short oldid;
7187
7188        if (mem_cgroup_disabled())
7189                return 0;
7190
7191        if (!cgroup_subsys_on_dfl(memory_cgrp_subsys))
7192                return 0;
7193
7194        memcg = page_memcg(page);
7195
7196        VM_WARN_ON_ONCE_PAGE(!memcg, page);
7197        if (!memcg)
7198                return 0;
7199
7200        if (!entry.val) {
7201                memcg_memory_event(memcg, MEMCG_SWAP_FAIL);
7202                return 0;
7203        }
7204
7205        memcg = mem_cgroup_id_get_online(memcg);
7206
7207        if (!cgroup_memory_noswap && !mem_cgroup_is_root(memcg) &&
7208            !page_counter_try_charge(&memcg->swap, nr_pages, &counter)) {
7209                memcg_memory_event(memcg, MEMCG_SWAP_MAX);
7210                memcg_memory_event(memcg, MEMCG_SWAP_FAIL);
7211                mem_cgroup_id_put(memcg);
7212                return -ENOMEM;
7213        }
7214
7215        /* Get references for the tail pages, too */
7216        if (nr_pages > 1)
7217                mem_cgroup_id_get_many(memcg, nr_pages - 1);
7218        oldid = swap_cgroup_record(entry, mem_cgroup_id(memcg), nr_pages);
7219        VM_BUG_ON_PAGE(oldid, page);
7220        mod_memcg_state(memcg, MEMCG_SWAP, nr_pages);
7221
7222        return 0;
7223}
7224
7225/**
7226 * mem_cgroup_uncharge_swap - uncharge swap space
7227 * @entry: swap entry to uncharge
7228 * @nr_pages: the amount of swap space to uncharge
7229 */
7230void mem_cgroup_uncharge_swap(swp_entry_t entry, unsigned int nr_pages)
7231{
7232        struct mem_cgroup *memcg;
7233        unsigned short id;
7234
7235        id = swap_cgroup_record(entry, 0, nr_pages);
7236        rcu_read_lock();
7237        memcg = mem_cgroup_from_id(id);
7238        if (memcg) {
7239                if (!cgroup_memory_noswap && !mem_cgroup_is_root(memcg)) {
7240                        if (cgroup_subsys_on_dfl(memory_cgrp_subsys))
7241                                page_counter_uncharge(&memcg->swap, nr_pages);
7242                        else
7243                                page_counter_uncharge(&memcg->memsw, nr_pages);
7244                }
7245                mod_memcg_state(memcg, MEMCG_SWAP, -nr_pages);
7246                mem_cgroup_id_put_many(memcg, nr_pages);
7247        }
7248        rcu_read_unlock();
7249}
7250
7251long mem_cgroup_get_nr_swap_pages(struct mem_cgroup *memcg)
7252{
7253        long nr_swap_pages = get_nr_swap_pages();
7254
7255        if (cgroup_memory_noswap || !cgroup_subsys_on_dfl(memory_cgrp_subsys))
7256                return nr_swap_pages;
7257        for (; memcg != root_mem_cgroup; memcg = parent_mem_cgroup(memcg))
7258                nr_swap_pages = min_t(long, nr_swap_pages,
7259                                      READ_ONCE(memcg->swap.max) -
7260                                      page_counter_read(&memcg->swap));
7261        return nr_swap_pages;
7262}
7263
7264bool mem_cgroup_swap_full(struct page *page)
7265{
7266        struct mem_cgroup *memcg;
7267
7268        VM_BUG_ON_PAGE(!PageLocked(page), page);
7269
7270        if (vm_swap_full())
7271                return true;
7272        if (cgroup_memory_noswap || !cgroup_subsys_on_dfl(memory_cgrp_subsys))
7273                return false;
7274
7275        memcg = page_memcg(page);
7276        if (!memcg)
7277                return false;
7278
7279        for (; memcg != root_mem_cgroup; memcg = parent_mem_cgroup(memcg)) {
7280                unsigned long usage = page_counter_read(&memcg->swap);
7281
7282                if (usage * 2 >= READ_ONCE(memcg->swap.high) ||
7283                    usage * 2 >= READ_ONCE(memcg->swap.max))
7284                        return true;
7285        }
7286
7287        return false;
7288}
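
/*
 * Worked example (with made-up numbers): with swap.max = 200M on some
 * ancestor and 100M of its swap already in use, usage * 2 >= max
 * holds, so the page is treated as if swap were full and callers such
 * as do_swap_page() free the swap slot early via try_to_free_swap().
 */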
7289
7290static int __init setup_swap_account(char *s)
7291{
7292        if (!strcmp(s, "1"))
7293                cgroup_memory_noswap = false;
7294        else if (!strcmp(s, "0"))
7295                cgroup_memory_noswap = true;
7296        return 1;
7297}
7298__setup("swapaccount=", setup_swap_account);
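
/*
 * Illustrative note: booting with "swapaccount=0" sets
 * cgroup_memory_noswap, which makes mem_cgroup_swap_init() below skip
 * registering the swap and memsw interface files entirely.
 */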
7299
7300static u64 swap_current_read(struct cgroup_subsys_state *css,
7301                             struct cftype *cft)
7302{
7303        struct mem_cgroup *memcg = mem_cgroup_from_css(css);
7304
7305        return (u64)page_counter_read(&memcg->swap) * PAGE_SIZE;
7306}
7307
7308static int swap_high_show(struct seq_file *m, void *v)
7309{
7310        return seq_puts_memcg_tunable(m,
7311                READ_ONCE(mem_cgroup_from_seq(m)->swap.high));
7312}
7313
7314static ssize_t swap_high_write(struct kernfs_open_file *of,
7315                               char *buf, size_t nbytes, loff_t off)
7316{
7317        struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of));
7318        unsigned long high;
7319        int err;
7320
7321        buf = strstrip(buf);
7322        err = page_counter_memparse(buf, "max", &high);
7323        if (err)
7324                return err;
7325
7326        page_counter_set_high(&memcg->swap, high);
7327
7328        return nbytes;
7329}
7330
7331static int swap_max_show(struct seq_file *m, void *v)
7332{
7333        return seq_puts_memcg_tunable(m,
7334                READ_ONCE(mem_cgroup_from_seq(m)->swap.max));
7335}
7336
7337static ssize_t swap_max_write(struct kernfs_open_file *of,
7338                              char *buf, size_t nbytes, loff_t off)
7339{
7340        struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of));
7341        unsigned long max;
7342        int err;
7343
7344        buf = strstrip(buf);
7345        err = page_counter_memparse(buf, "max", &max);
7346        if (err)
7347                return err;
7348
7349        xchg(&memcg->swap.max, max);
7350
7351        return nbytes;
7352}
7353
7354static int swap_events_show(struct seq_file *m, void *v)
7355{
7356        struct mem_cgroup *memcg = mem_cgroup_from_seq(m);
7357
7358        seq_printf(m, "high %lu\n",
7359                   atomic_long_read(&memcg->memory_events[MEMCG_SWAP_HIGH]));
7360        seq_printf(m, "max %lu\n",
7361                   atomic_long_read(&memcg->memory_events[MEMCG_SWAP_MAX]));
7362        seq_printf(m, "fail %lu\n",
7363                   atomic_long_read(&memcg->memory_events[MEMCG_SWAP_FAIL]));
7364
7365        return 0;
7366}
7367
7368static struct cftype swap_files[] = {
7369        {
7370                .name = "swap.current",
7371                .flags = CFTYPE_NOT_ON_ROOT,
7372                .read_u64 = swap_current_read,
7373        },
7374        {
7375                .name = "swap.high",
7376                .flags = CFTYPE_NOT_ON_ROOT,
7377                .seq_show = swap_high_show,
7378                .write = swap_high_write,
7379        },
7380        {
7381                .name = "swap.max",
7382                .flags = CFTYPE_NOT_ON_ROOT,
7383                .seq_show = swap_max_show,
7384                .write = swap_max_write,
7385        },
7386        {
7387                .name = "swap.events",
7388                .flags = CFTYPE_NOT_ON_ROOT,
7389                .file_offset = offsetof(struct mem_cgroup, swap_events_file),
7390                .seq_show = swap_events_show,
7391        },
7392        { }     /* terminate */
7393};
7394
7395static struct cftype memsw_files[] = {
7396        {
7397                .name = "memsw.usage_in_bytes",
7398                .private = MEMFILE_PRIVATE(_MEMSWAP, RES_USAGE),
7399                .read_u64 = mem_cgroup_read_u64,
7400        },
7401        {
7402                .name = "memsw.max_usage_in_bytes",
7403                .private = MEMFILE_PRIVATE(_MEMSWAP, RES_MAX_USAGE),
7404                .write = mem_cgroup_reset,
7405                .read_u64 = mem_cgroup_read_u64,
7406        },
7407        {
7408                .name = "memsw.limit_in_bytes",
7409                .private = MEMFILE_PRIVATE(_MEMSWAP, RES_LIMIT),
7410                .write = mem_cgroup_write,
7411                .read_u64 = mem_cgroup_read_u64,
7412        },
7413        {
7414                .name = "memsw.failcnt",
7415                .private = MEMFILE_PRIVATE(_MEMSWAP, RES_FAILCNT),
7416                .write = mem_cgroup_reset,
7417                .read_u64 = mem_cgroup_read_u64,
7418        },
7419        { },    /* terminate */
7420};
7421
7422/*
7423 * If mem_cgroup_swap_init() were implemented as a subsys_initcall()
7424 * instead of a core_initcall(), cgroup_memory_noswap could remain
7425 * false even when memcg is disabled via the "cgroup_disable=memory"
7426 * boot parameter. In corner cases this could result in a premature
7427 * OOPS inside mem_cgroup_get_nr_swap_pages().
7428 */
7429static int __init mem_cgroup_swap_init(void)
7430{
7431        /* No memory control -> no swap control */
7432        if (mem_cgroup_disabled())
7433                cgroup_memory_noswap = true;
7434
7435        if (cgroup_memory_noswap)
7436                return 0;
7437
7438        WARN_ON(cgroup_add_dfl_cftypes(&memory_cgrp_subsys, swap_files));
7439        WARN_ON(cgroup_add_legacy_cftypes(&memory_cgrp_subsys, memsw_files));
7440
7441        return 0;
7442}
7443core_initcall(mem_cgroup_swap_init);
7444
7445#endif /* CONFIG_MEMCG_SWAP */
7446