linux/include/linux/memcontrol.h
/* memcontrol.h - Memory Controller
 *
 * Copyright IBM Corporation, 2007
 * Author Balbir Singh <balbir@linux.vnet.ibm.com>
 *
 * Copyright 2007 OpenVZ SWsoft Inc
 * Author: Pavel Emelianov <xemul@openvz.org>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 */

#ifndef _LINUX_MEMCONTROL_H
#define _LINUX_MEMCONTROL_H
#include <linux/cgroup.h>
#include <linux/vm_event_item.h>
#include <linux/hardirq.h>
#include <linux/jump_label.h>

struct mem_cgroup;
struct page_cgroup;
struct page;
struct mm_struct;
struct kmem_cache;

/* Stats that can be updated by the kernel. */
enum mem_cgroup_page_stat_item {
        MEMCG_NR_FILE_MAPPED, /* # of pages charged as file rss */
};

struct mem_cgroup_reclaim_cookie {
        struct zone *zone;
        int priority;
        unsigned int generation;
};

#ifdef CONFIG_MEMCG
/*
 * All "charge" functions that take a gfp_mask should pass GFP_KERNEL or
 * (gfp_mask & GFP_RECLAIM_MASK). In the current implementation, memcg does
 * not allocate memory itself but reclaims memory from all available zones,
 * so the "where do I want memory from" bits of gfp_mask are meaningless.
 * Any bits could therefore be passed, but having a rule avoids ambiguity:
 * a charge function's gfp_mask should be either GFP_KERNEL or
 * gfp_mask & GFP_RECLAIM_MASK.
 * (Of course, if memcg does allocate memory in the future, GFP_KERNEL is
 * the sane choice.)
 */
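
/*
 * Illustrative sketch (not part of the original header): a page-cache
 * caller typically charges with the reclaim-relevant bits masked out of
 * its own gfp_mask, as the comment above recommends. Error handling is
 * elided and assumed to follow the caller's usual unwind path:
 *
 *	error = mem_cgroup_cache_charge(page, current->mm,
 *					gfp_mask & GFP_RECLAIM_MASK);
 *	if (error)
 *		goto unwind;
 */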

extern int mem_cgroup_newpage_charge(struct page *page, struct mm_struct *mm,
                                gfp_t gfp_mask);
/* for swap handling */
extern int mem_cgroup_try_charge_swapin(struct mm_struct *mm,
                struct page *page, gfp_t mask, struct mem_cgroup **memcgp);
extern void mem_cgroup_commit_charge_swapin(struct page *page,
                                        struct mem_cgroup *memcg);
extern void mem_cgroup_cancel_charge_swapin(struct mem_cgroup *memcg);
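
/*
 * Illustrative sketch (assumption, mirroring the swap-in fault path): the
 * three swap helpers above form a try/commit/cancel sequence around
 * installing the page into the page tables:
 *
 *	struct mem_cgroup *memcg;
 *
 *	if (mem_cgroup_try_charge_swapin(mm, page, GFP_KERNEL, &memcg))
 *		goto out_oom;
 *	... map the page ...
 *	mem_cgroup_commit_charge_swapin(page, memcg);
 *	... or, if mapping the page fails:
 *	mem_cgroup_cancel_charge_swapin(memcg);
 */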

extern int mem_cgroup_cache_charge(struct page *page, struct mm_struct *mm,
                                        gfp_t gfp_mask);

struct lruvec *mem_cgroup_zone_lruvec(struct zone *, struct mem_cgroup *);
struct lruvec *mem_cgroup_page_lruvec(struct page *, struct zone *);

/* For coalescing uncharges to reduce memcg overhead */
extern void mem_cgroup_uncharge_start(void);
extern void mem_cgroup_uncharge_end(void);
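
/*
 * Illustrative sketch (assumption, following e.g. the munmap/truncate
 * paths): callers that uncharge many pages in a row bracket the loop so
 * the charge updates can be batched:
 *
 *	mem_cgroup_uncharge_start();
 *	for each page in the batch:
 *		mem_cgroup_uncharge_page(page);
 *	mem_cgroup_uncharge_end();
 */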

extern void mem_cgroup_uncharge_page(struct page *page);
extern void mem_cgroup_uncharge_cache_page(struct page *page);

bool __mem_cgroup_same_or_subtree(const struct mem_cgroup *root_memcg,
                                  struct mem_cgroup *memcg);
bool task_in_mem_cgroup(struct task_struct *task,
                        const struct mem_cgroup *memcg);

extern struct mem_cgroup *try_get_mem_cgroup_from_page(struct page *page);
extern struct mem_cgroup *mem_cgroup_from_task(struct task_struct *p);
extern struct mem_cgroup *try_get_mem_cgroup_from_mm(struct mm_struct *mm);

extern struct mem_cgroup *parent_mem_cgroup(struct mem_cgroup *memcg);
extern struct mem_cgroup *mem_cgroup_from_cont(struct cgroup *cont);

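/*
 * Returns true if @mm's owner task belongs to @memcg or to one of its
 * descendants. Reads mm->owner under RCU since the owner may change
 * concurrently. (Descriptive comment added for clarity.)
 */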
static inline
bool mm_match_cgroup(const struct mm_struct *mm, const struct mem_cgroup *memcg)
{
        struct mem_cgroup *task_memcg;
        bool match;

        rcu_read_lock();
        task_memcg = mem_cgroup_from_task(rcu_dereference(mm->owner));
        match = __mem_cgroup_same_or_subtree(memcg, task_memcg);
        rcu_read_unlock();
        return match;
}

extern struct cgroup_subsys_state *mem_cgroup_css(struct mem_cgroup *memcg);

extern void
mem_cgroup_prepare_migration(struct page *page, struct page *newpage,
                             struct mem_cgroup **memcgp);
extern void mem_cgroup_end_migration(struct mem_cgroup *memcg,
        struct page *oldpage, struct page *newpage, bool migration_ok);
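
/*
 * Illustrative sketch (assumption, modeled on the page migration path):
 * the two migration hooks bracket the actual page copy, with the final
 * argument reporting whether migration succeeded:
 *
 *	struct mem_cgroup *memcg;
 *
 *	mem_cgroup_prepare_migration(page, newpage, &memcg);
 *	rc = ...;	(do the actual page copy/remap)
 *	mem_cgroup_end_migration(memcg, page, newpage,
 *				 rc == MIGRATEPAGE_SUCCESS);
 */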

struct mem_cgroup *mem_cgroup_iter(struct mem_cgroup *,
                                   struct mem_cgroup *,
                                   struct mem_cgroup_reclaim_cookie *);
void mem_cgroup_iter_break(struct mem_cgroup *, struct mem_cgroup *);
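
/*
 * Illustrative sketch (assumption): mem_cgroup_iter() walks the hierarchy
 * below @root, returning each memcg with a reference held. A full walk
 * looks like the loop below; breaking out early requires
 * mem_cgroup_iter_break() so the reference on the last returned memcg is
 * dropped ("should_stop" is a hypothetical caller-side condition):
 *
 *	struct mem_cgroup *iter;
 *
 *	for (iter = mem_cgroup_iter(root, NULL, NULL);
 *	     iter != NULL;
 *	     iter = mem_cgroup_iter(root, iter, NULL)) {
 *		if (should_stop) {
 *			mem_cgroup_iter_break(root, iter);
 *			break;
 *		}
 *	}
 */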

/*
 * For memory reclaim.
 */
int mem_cgroup_inactive_anon_is_low(struct lruvec *lruvec);
int mem_cgroup_select_victim_node(struct mem_cgroup *memcg);
unsigned long mem_cgroup_get_lru_size(struct lruvec *lruvec, enum lru_list);
void mem_cgroup_update_lru_size(struct lruvec *, enum lru_list, int);
extern void mem_cgroup_print_oom_info(struct mem_cgroup *memcg,
                                        struct task_struct *p);
extern void mem_cgroup_replace_page_cache(struct page *oldpage,
                                        struct page *newpage);

#ifdef CONFIG_MEMCG_SWAP
extern int do_swap_account;
#endif

static inline bool mem_cgroup_disabled(void)
{
        if (mem_cgroup_subsys.disabled)
                return true;
        return false;
}

void __mem_cgroup_begin_update_page_stat(struct page *page, bool *locked,
                                         unsigned long *flags);

extern atomic_t memcg_moving;

static inline void mem_cgroup_begin_update_page_stat(struct page *page,
                                        bool *locked, unsigned long *flags)
{
        if (mem_cgroup_disabled())
                return;
        rcu_read_lock();
        *locked = false;
        if (atomic_read(&memcg_moving))
                __mem_cgroup_begin_update_page_stat(page, locked, flags);
}

void __mem_cgroup_end_update_page_stat(struct page *page,
                                unsigned long *flags);
static inline void mem_cgroup_end_update_page_stat(struct page *page,
                                        bool *locked, unsigned long *flags)
{
        if (mem_cgroup_disabled())
                return;
        if (*locked)
                __mem_cgroup_end_update_page_stat(page, flags);
        rcu_read_unlock();
}

void mem_cgroup_update_page_stat(struct page *page,
                                 enum mem_cgroup_page_stat_item idx,
                                 int val);

static inline void mem_cgroup_inc_page_stat(struct page *page,
                                            enum mem_cgroup_page_stat_item idx)
{
        mem_cgroup_update_page_stat(page, idx, 1);
}

static inline void mem_cgroup_dec_page_stat(struct page *page,
                                            enum mem_cgroup_page_stat_item idx)
{
        mem_cgroup_update_page_stat(page, idx, -1);
}
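
/*
 * Illustrative sketch (assumption, modeled on the file-rmap accounting
 * path): page stat updates are bracketed by the begin/end helpers so that
 * the page cannot be moved to another memcg in the middle of the update:
 *
 *	bool locked;
 *	unsigned long flags;
 *
 *	mem_cgroup_begin_update_page_stat(page, &locked, &flags);
 *	mem_cgroup_inc_page_stat(page, MEMCG_NR_FILE_MAPPED);
 *	mem_cgroup_end_update_page_stat(page, &locked, &flags);
 */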

unsigned long mem_cgroup_soft_limit_reclaim(struct zone *zone, int order,
                                                gfp_t gfp_mask,
                                                unsigned long *total_scanned);

void __mem_cgroup_count_vm_event(struct mm_struct *mm, enum vm_event_item idx);
static inline void mem_cgroup_count_vm_event(struct mm_struct *mm,
                                             enum vm_event_item idx)
{
        if (mem_cgroup_disabled())
                return;
        __mem_cgroup_count_vm_event(mm, idx);
}
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
void mem_cgroup_split_huge_fixup(struct page *head);
#endif

#ifdef CONFIG_DEBUG_VM
bool mem_cgroup_bad_page_check(struct page *page);
void mem_cgroup_print_bad_page(struct page *page);
#endif
#else /* CONFIG_MEMCG */
struct mem_cgroup;

static inline int mem_cgroup_newpage_charge(struct page *page,
                                        struct mm_struct *mm, gfp_t gfp_mask)
{
        return 0;
}

static inline int mem_cgroup_cache_charge(struct page *page,
                                        struct mm_struct *mm, gfp_t gfp_mask)
{
        return 0;
}

static inline int mem_cgroup_try_charge_swapin(struct mm_struct *mm,
                struct page *page, gfp_t gfp_mask, struct mem_cgroup **memcgp)
{
        return 0;
}

static inline void mem_cgroup_commit_charge_swapin(struct page *page,
                                          struct mem_cgroup *memcg)
{
}

static inline void mem_cgroup_cancel_charge_swapin(struct mem_cgroup *memcg)
{
}

static inline void mem_cgroup_uncharge_start(void)
{
}

static inline void mem_cgroup_uncharge_end(void)
{
}

static inline void mem_cgroup_uncharge_page(struct page *page)
{
}

static inline void mem_cgroup_uncharge_cache_page(struct page *page)
{
}

static inline struct lruvec *mem_cgroup_zone_lruvec(struct zone *zone,
                                                    struct mem_cgroup *memcg)
{
        return &zone->lruvec;
}

static inline struct lruvec *mem_cgroup_page_lruvec(struct page *page,
                                                    struct zone *zone)
{
        return &zone->lruvec;
}

static inline struct mem_cgroup *try_get_mem_cgroup_from_page(struct page *page)
{
        return NULL;
}

static inline struct mem_cgroup *try_get_mem_cgroup_from_mm(struct mm_struct *mm)
{
        return NULL;
}

static inline bool mm_match_cgroup(struct mm_struct *mm,
                struct mem_cgroup *memcg)
{
        return true;
}

static inline bool task_in_mem_cgroup(struct task_struct *task,
                                      const struct mem_cgroup *memcg)
{
        return true;
}

static inline struct cgroup_subsys_state
                *mem_cgroup_css(struct mem_cgroup *memcg)
{
        return NULL;
}

static inline void
mem_cgroup_prepare_migration(struct page *page, struct page *newpage,
                             struct mem_cgroup **memcgp)
{
}

static inline void mem_cgroup_end_migration(struct mem_cgroup *memcg,
                struct page *oldpage, struct page *newpage, bool migration_ok)
{
}

static inline struct mem_cgroup *
mem_cgroup_iter(struct mem_cgroup *root,
                struct mem_cgroup *prev,
                struct mem_cgroup_reclaim_cookie *reclaim)
{
        return NULL;
}

static inline void mem_cgroup_iter_break(struct mem_cgroup *root,
                                         struct mem_cgroup *prev)
{
}

static inline bool mem_cgroup_disabled(void)
{
        return true;
}

static inline int
mem_cgroup_inactive_anon_is_low(struct lruvec *lruvec)
{
        return 1;
}

static inline unsigned long
mem_cgroup_get_lru_size(struct lruvec *lruvec, enum lru_list lru)
{
        return 0;
}

static inline void
mem_cgroup_update_lru_size(struct lruvec *lruvec, enum lru_list lru,
                              int increment)
{
}

static inline void
mem_cgroup_print_oom_info(struct mem_cgroup *memcg, struct task_struct *p)
{
}

static inline void mem_cgroup_begin_update_page_stat(struct page *page,
                                        bool *locked, unsigned long *flags)
{
}

static inline void mem_cgroup_end_update_page_stat(struct page *page,
                                        bool *locked, unsigned long *flags)
{
}

static inline void mem_cgroup_inc_page_stat(struct page *page,
                                            enum mem_cgroup_page_stat_item idx)
{
}

static inline void mem_cgroup_dec_page_stat(struct page *page,
                                            enum mem_cgroup_page_stat_item idx)
{
}

static inline
unsigned long mem_cgroup_soft_limit_reclaim(struct zone *zone, int order,
                                            gfp_t gfp_mask,
                                            unsigned long *total_scanned)
{
        return 0;
}

static inline void mem_cgroup_split_huge_fixup(struct page *head)
{
}

static inline
void mem_cgroup_count_vm_event(struct mm_struct *mm, enum vm_event_item idx)
{
}
static inline void mem_cgroup_replace_page_cache(struct page *oldpage,
                                struct page *newpage)
{
}
#endif /* CONFIG_MEMCG */

#if !defined(CONFIG_MEMCG) || !defined(CONFIG_DEBUG_VM)
static inline bool
mem_cgroup_bad_page_check(struct page *page)
{
        return false;
}

static inline void
mem_cgroup_print_bad_page(struct page *page)
{
}
#endif

enum {
        UNDER_LIMIT,
        SOFT_LIMIT,
        OVER_LIMIT,
};

struct sock;
#if defined(CONFIG_INET) && defined(CONFIG_MEMCG_KMEM)
void sock_update_memcg(struct sock *sk);
void sock_release_memcg(struct sock *sk);
#else
static inline void sock_update_memcg(struct sock *sk)
{
}
static inline void sock_release_memcg(struct sock *sk)
{
}
#endif /* CONFIG_INET && CONFIG_MEMCG_KMEM */

#ifdef CONFIG_MEMCG_KMEM
extern struct static_key memcg_kmem_enabled_key;

extern int memcg_limited_groups_array_size;

/*
 * Helper macro to loop through all memcg-specific caches. Callers must still
 * check whether the cache is valid (it is either valid or NULL).
 * slab_mutex must be held when looping through these caches.
 */
#define for_each_memcg_cache_index(_idx)        \
        for ((_idx) = 0; (_idx) < memcg_limited_groups_array_size; (_idx)++)
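
/*
 * Illustrative sketch (assumption): walking every per-memcg child of a
 * root kmem cache. The index is only a slot number; the accessor that maps
 * it to the child cache (called cache_from_memcg_idx() here) is
 * hypothetical and stands in for the slab-internal lookup:
 *
 *	struct kmem_cache *c;
 *	int i;
 *
 *	mutex_lock(&slab_mutex);
 *	for_each_memcg_cache_index(i) {
 *		c = cache_from_memcg_idx(s, i);
 *		if (!c)
 *			continue;
 *		... operate on the child cache c ...
 *	}
 *	mutex_unlock(&slab_mutex);
 */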

static inline bool memcg_kmem_enabled(void)
{
        return static_key_false(&memcg_kmem_enabled_key);
}

/*
 * In general, we do everything in our power not to incur any overhead in the
 * kmem functions for non-memcg users, not even a function call if we can
 * avoid it.
 *
 * Therefore, we inline all of these functions so that in the best case we
 * see that kmemcg is off for everybody and proceed quickly. If it is on, we
 * still do most of the flag checking inline. We check a lot of conditions,
 * but because they are pretty simple, they are expected to be fast.
 */
bool __memcg_kmem_newpage_charge(gfp_t gfp, struct mem_cgroup **memcg,
                                        int order);
void __memcg_kmem_commit_charge(struct page *page,
                                       struct mem_cgroup *memcg, int order);
void __memcg_kmem_uncharge_pages(struct page *page, int order);

int memcg_cache_id(struct mem_cgroup *memcg);
int memcg_register_cache(struct mem_cgroup *memcg, struct kmem_cache *s,
                         struct kmem_cache *root_cache);
void memcg_release_cache(struct kmem_cache *cachep);
void memcg_cache_list_add(struct mem_cgroup *memcg, struct kmem_cache *cachep);

int memcg_update_cache_size(struct kmem_cache *s, int num_groups);
void memcg_update_array_size(int num_groups);

struct kmem_cache *
__memcg_kmem_get_cache(struct kmem_cache *cachep, gfp_t gfp);

void mem_cgroup_destroy_cache(struct kmem_cache *cachep);
void kmem_cache_destroy_memcg_children(struct kmem_cache *s);

/**
 * memcg_kmem_newpage_charge: verify if a new kmem allocation is allowed.
 * @gfp: the gfp allocation flags.
 * @memcg: a pointer to the memcg this was charged against.
 * @order: allocation order.
 *
 * returns true if the memcg where the current task belongs can hold this
 * allocation.
 *
 * We return true automatically if this allocation is not to be accounted to
 * any memcg.
 */
static inline bool
memcg_kmem_newpage_charge(gfp_t gfp, struct mem_cgroup **memcg, int order)
{
        if (!memcg_kmem_enabled())
                return true;

        /*
         * __GFP_NOFAIL allocations will move on even if charging is not
         * possible. Therefore we don't even try, and have this allocation
         * unaccounted. We could in theory charge it with
         * res_counter_charge_nofail, but we hope those allocations are rare,
         * and won't be worth the trouble.
         */
        if (!(gfp & __GFP_KMEMCG) || (gfp & __GFP_NOFAIL))
                return true;
        if (in_interrupt() || (!current->mm) || (current->flags & PF_KTHREAD))
                return true;

        /* If the task is dying, just let it go. */
        if (unlikely(fatal_signal_pending(current)))
                return true;

        return __memcg_kmem_newpage_charge(gfp, memcg, order);
}

/**
 * memcg_kmem_uncharge_pages: uncharge pages from memcg
 * @page: pointer to struct page being freed
 * @order: allocation order.
 *
 * There is no need to specify memcg here, since it is embedded in page_cgroup.
 */
static inline void
memcg_kmem_uncharge_pages(struct page *page, int order)
{
        if (memcg_kmem_enabled())
                __memcg_kmem_uncharge_pages(page, order);
}

/**
 * memcg_kmem_commit_charge: embeds correct memcg in a page
 * @page: pointer to struct page recently allocated
 * @memcg: the memcg structure we charged against
 * @order: allocation order.
 *
 * Needs to be called after memcg_kmem_newpage_charge, regardless of success or
 * failure of the allocation. If @page is NULL, this function will revert the
 * charges. Otherwise, it will commit the memcg given by @memcg to the
 * corresponding page_cgroup.
 */
static inline void
memcg_kmem_commit_charge(struct page *page, struct mem_cgroup *memcg, int order)
{
        if (memcg_kmem_enabled() && memcg)
                __memcg_kmem_commit_charge(page, memcg, order);
}
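
/*
 * Illustrative sketch (assumption, following the page allocator's use of
 * these hooks): a __GFP_KMEMCG allocation is charged before the pages are
 * allocated and committed (or reverted) afterwards:
 *
 *	struct mem_cgroup *memcg = NULL;
 *	struct page *page;
 *
 *	if (!memcg_kmem_newpage_charge(gfp_mask, &memcg, order))
 *		return NULL;
 *	page = alloc_pages(gfp_mask, order);
 *	memcg_kmem_commit_charge(page, memcg, order);
 *	...
 *	memcg_kmem_uncharge_pages(page, order);	(when the pages are freed)
 */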

/**
 * memcg_kmem_get_cache: selects the correct per-memcg cache for allocation
 * @cachep: the original global kmem cache
 * @gfp: allocation flags.
 *
 * This function assumes that the task allocating, which determines the memcg
 * in the page allocator, belongs to the same cgroup throughout the whole
 * process.  Misaccounting can happen if the task calls memcg_kmem_get_cache()
 * while belonging to a cgroup, and later on changes. This is considered
 * acceptable, and should only happen upon task migration.
 *
 * Before the cache is created by the memcg core, there is also a possible
 * imbalance: the task belongs to a memcg, but the cache being allocated from
 * is the global cache, since the child cache is not yet guaranteed to be
 * ready. This case is also fine, since in this case the GFP_KMEMCG will not be
 * passed and the page allocator will not attempt any cgroup accounting.
 */
static __always_inline struct kmem_cache *
memcg_kmem_get_cache(struct kmem_cache *cachep, gfp_t gfp)
{
        if (!memcg_kmem_enabled())
                return cachep;
        if (gfp & __GFP_NOFAIL)
                return cachep;
        if (in_interrupt() || (!current->mm) || (current->flags & PF_KTHREAD))
                return cachep;
        if (unlikely(fatal_signal_pending(current)))
                return cachep;

        return __memcg_kmem_get_cache(cachep, gfp);
}
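
/*
 * Illustrative sketch (assumption, mirroring how the slab allocators hook
 * this in): the allocation entry point swaps the global cache for the
 * current task's per-memcg child cache before carving out an object:
 *
 *	cachep = memcg_kmem_get_cache(cachep, flags);
 *	objp = ... allocate an object from cachep as usual ...
 */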
#else
#define for_each_memcg_cache_index(_idx)        \
        for (; NULL; )

static inline bool memcg_kmem_enabled(void)
{
        return false;
}

static inline bool
memcg_kmem_newpage_charge(gfp_t gfp, struct mem_cgroup **memcg, int order)
{
        return true;
}

static inline void memcg_kmem_uncharge_pages(struct page *page, int order)
{
}

static inline void
memcg_kmem_commit_charge(struct page *page, struct mem_cgroup *memcg, int order)
{
}

static inline int memcg_cache_id(struct mem_cgroup *memcg)
{
        return -1;
}

static inline int
memcg_register_cache(struct mem_cgroup *memcg, struct kmem_cache *s,
                     struct kmem_cache *root_cache)
{
        return 0;
}

static inline void memcg_release_cache(struct kmem_cache *cachep)
{
}

static inline void memcg_cache_list_add(struct mem_cgroup *memcg,
                                        struct kmem_cache *s)
{
}

static inline struct kmem_cache *
memcg_kmem_get_cache(struct kmem_cache *cachep, gfp_t gfp)
{
        return cachep;
}

static inline void kmem_cache_destroy_memcg_children(struct kmem_cache *s)
{
}
#endif /* CONFIG_MEMCG_KMEM */
#endif /* _LINUX_MEMCONTROL_H */