linux/include/linux/page_cgroup.h
#ifndef __LINUX_PAGE_CGROUP_H
#define __LINUX_PAGE_CGROUP_H

#ifdef CONFIG_CGROUP_MEM_RES_CTLR
#include <linux/bit_spinlock.h>
/*
 * Page cgroup can be considered an extension of mem_map.
 * A page_cgroup is associated with every page descriptor; it records
 * which cgroup the page is charged to.
 * All page_cgroups are allocated at boot or at memory hotplug time,
 * so the page_cgroup for a pfn always exists.
 */
struct page_cgroup {
        unsigned long flags;
        struct mem_cgroup *mem_cgroup;
        struct page *page;
        struct list_head lru;           /* per cgroup LRU list */
};

void __meminit pgdat_page_cgroup_init(struct pglist_data *pgdat);

#ifdef CONFIG_SPARSEMEM
static inline void __init page_cgroup_init_flatmem(void)
{
}
extern void __init page_cgroup_init(void);
#else
void __init page_cgroup_init_flatmem(void);
static inline void __init page_cgroup_init(void)
{
}
#endif

struct page_cgroup *lookup_page_cgroup(struct page *page);

enum {
        /* flags for mem_cgroup */
        PCG_LOCK,  /* Lock for pc->mem_cgroup and following bits. */
        PCG_CACHE, /* charged as cache */
        PCG_USED, /* this object is in use. */
        PCG_MIGRATION, /* under page migration */
        /* flags for mem_cgroup and file/I/O status */
        PCG_MOVE_LOCK, /* For races between move_account and the following bits */
        PCG_FILE_MAPPED, /* page is accounted as "mapped" */
        /* No lock in page_cgroup */
        PCG_ACCT_LRU, /* page has been accounted for (under lru_lock) */
};

#define TESTPCGFLAG(uname, lname)                       \
static inline int PageCgroup##uname(struct page_cgroup *pc)     \
        { return test_bit(PCG_##lname, &pc->flags); }

#define SETPCGFLAG(uname, lname)                        \
static inline void SetPageCgroup##uname(struct page_cgroup *pc)\
        { set_bit(PCG_##lname, &pc->flags);  }

#define CLEARPCGFLAG(uname, lname)                      \
static inline void ClearPageCgroup##uname(struct page_cgroup *pc)       \
        { clear_bit(PCG_##lname, &pc->flags);  }

#define TESTCLEARPCGFLAG(uname, lname)                  \
static inline int TestClearPageCgroup##uname(struct page_cgroup *pc)    \
        { return test_and_clear_bit(PCG_##lname, &pc->flags);  }

/* Cache flag is set only once (at allocation) */
TESTPCGFLAG(Cache, CACHE)
CLEARPCGFLAG(Cache, CACHE)
SETPCGFLAG(Cache, CACHE)

TESTPCGFLAG(Used, USED)
CLEARPCGFLAG(Used, USED)
SETPCGFLAG(Used, USED)

SETPCGFLAG(AcctLRU, ACCT_LRU)
CLEARPCGFLAG(AcctLRU, ACCT_LRU)
TESTPCGFLAG(AcctLRU, ACCT_LRU)
TESTCLEARPCGFLAG(AcctLRU, ACCT_LRU)

SETPCGFLAG(FileMapped, FILE_MAPPED)
CLEARPCGFLAG(FileMapped, FILE_MAPPED)
TESTPCGFLAG(FileMapped, FILE_MAPPED)

SETPCGFLAG(Migration, MIGRATION)
CLEARPCGFLAG(Migration, MIGRATION)
TESTPCGFLAG(Migration, MIGRATION)
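
/*
 * Illustrative sketch (not part of the original header): the macros above
 * generate page-flags style accessors named after their "uname" argument,
 * e.g. TESTPCGFLAG(Used, USED) yields PageCgroupUsed() and
 * SETPCGFLAG(Cache, CACHE) yields SetPageCgroupCache(). A hypothetical
 * charge path could then use them like this ("charged_as_cache" is a
 * made-up condition used only for illustration):
 *
 *        if (!PageCgroupUsed(pc)) {
 *                SetPageCgroupUsed(pc);
 *                if (charged_as_cache)
 *                        SetPageCgroupCache(pc);
 *        }
 */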

static inline int page_cgroup_nid(struct page_cgroup *pc)
{
        return page_to_nid(pc->page);
}

static inline enum zone_type page_cgroup_zid(struct page_cgroup *pc)
{
        return page_zonenum(pc->page);
}
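
/*
 * Illustrative sketch (assumed caller, not defined in this header): because
 * a page_cgroup is tied to its struct page, the node and zone ids used for
 * per-zone accounting can be derived from the page_cgroup alone. Here
 * "page" stands for any struct page the caller already holds:
 *
 *        struct page_cgroup *pc = lookup_page_cgroup(page);
 *        int nid = page_cgroup_nid(pc);
 *        enum zone_type zid = page_cgroup_zid(pc);
 */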

static inline void lock_page_cgroup(struct page_cgroup *pc)
{
        /*
         * Don't take this lock in IRQ context.
         * This lock protects pc->mem_cgroup and the USED, CACHE and
         * MIGRATION bits.
         */
        bit_spin_lock(PCG_LOCK, &pc->flags);
}

static inline void unlock_page_cgroup(struct page_cgroup *pc)
{
        bit_spin_unlock(PCG_LOCK, &pc->flags);
}

static inline int page_is_cgroup_locked(struct page_cgroup *pc)
{
        return bit_spin_is_locked(PCG_LOCK, &pc->flags);
}
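
/*
 * Usage sketch (illustrative only, not a caller defined here): pc->mem_cgroup
 * and the USED/CACHE/MIGRATION bits may only be changed while holding the
 * PCG_LOCK bit spinlock. A hypothetical charge path might look like this,
 * where "page" and "memcg" are supplied by the caller:
 *
 *        struct page_cgroup *pc = lookup_page_cgroup(page);
 *
 *        lock_page_cgroup(pc);
 *        if (!PageCgroupUsed(pc)) {
 *                pc->mem_cgroup = memcg;
 *                SetPageCgroupUsed(pc);
 *        }
 *        unlock_page_cgroup(pc);
 *
 * As noted above, lock_page_cgroup() must not be taken from IRQ context.
 */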

static inline void move_lock_page_cgroup(struct page_cgroup *pc,
        unsigned long *flags)
{
        /*
         * Updates to the page-cache statistics bits in pc->flags can come
         * from both normal context and IRQ context. Disable IRQs to avoid
         * deadlock.
         */
        local_irq_save(*flags);
        bit_spin_lock(PCG_MOVE_LOCK, &pc->flags);
}

static inline void move_unlock_page_cgroup(struct page_cgroup *pc,
        unsigned long *flags)
{
        bit_spin_unlock(PCG_MOVE_LOCK, &pc->flags);
        local_irq_restore(*flags);
}
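
/*
 * Usage sketch (illustrative only): statistics updates that race with
 * move_account() take the move lock with IRQs disabled. A hypothetical
 * file-mapped accounting path could look like this, where "mapped" stands
 * for whatever state the real caller tracks:
 *
 *        unsigned long flags;
 *
 *        move_lock_page_cgroup(pc, &flags);
 *        if (mapped)
 *                SetPageCgroupFileMapped(pc);
 *        else
 *                ClearPageCgroupFileMapped(pc);
 *        move_unlock_page_cgroup(pc, &flags);
 */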

#else /* CONFIG_CGROUP_MEM_RES_CTLR */
struct page_cgroup;

static inline void __meminit pgdat_page_cgroup_init(struct pglist_data *pgdat)
{
}

static inline struct page_cgroup *lookup_page_cgroup(struct page *page)
{
        return NULL;
}

static inline void page_cgroup_init(void)
{
}

static inline void __init page_cgroup_init_flatmem(void)
{
}

#endif

#include <linux/swap.h>

#ifdef CONFIG_CGROUP_MEM_RES_CTLR_SWAP
extern unsigned short swap_cgroup_cmpxchg(swp_entry_t ent,
                                        unsigned short old, unsigned short new);
extern unsigned short swap_cgroup_record(swp_entry_t ent, unsigned short id);
extern unsigned short lookup_swap_cgroup(swp_entry_t ent);
extern int swap_cgroup_swapon(int type, unsigned long max_pages);
extern void swap_cgroup_swapoff(int type);
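
/*
 * Usage sketch (illustrative only, assumed call sites): the swap cgroup map
 * is sized at swapon time and then records which mem_cgroup id owns each
 * swap entry. "type", "maxpages", "ent" and "id" are placeholders for
 * values the real callers obtain elsewhere:
 *
 *        swap_cgroup_swapon(type, maxpages);
 *        old = swap_cgroup_record(ent, id);
 *        owner = lookup_swap_cgroup(ent);
 *        swap_cgroup_swapoff(type);
 */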
#else

static inline
unsigned short swap_cgroup_record(swp_entry_t ent, unsigned short id)
{
        return 0;
}

static inline
unsigned short lookup_swap_cgroup(swp_entry_t ent)
{
        return 0;
}

static inline int
swap_cgroup_swapon(int type, unsigned long max_pages)
{
        return 0;
}

static inline void swap_cgroup_swapoff(int type)
{
        return;
}

#endif
#endif