linux/include/linux/vmstat.h
#ifndef _LINUX_VMSTAT_H
#define _LINUX_VMSTAT_H

#include <linux/types.h>
#include <linux/percpu.h>
#include <linux/mm.h>
#include <linux/mmzone.h>
#include <asm/atomic.h>

#ifdef CONFIG_ZONE_DMA
#define DMA_ZONE(xx) xx##_DMA,
#else
#define DMA_ZONE(xx)
#endif

#ifdef CONFIG_ZONE_DMA32
#define DMA32_ZONE(xx) xx##_DMA32,
#else
#define DMA32_ZONE(xx)
#endif

#ifdef CONFIG_HIGHMEM
#define HIGHMEM_ZONE(xx) , xx##_HIGH
#else
#define HIGHMEM_ZONE(xx)
#endif


#define FOR_ALL_ZONES(xx) DMA_ZONE(xx) DMA32_ZONE(xx) xx##_NORMAL HIGHMEM_ZONE(xx) , xx##_MOVABLE
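
/*
 * Illustrative expansion (a sketch, not generated code): with
 * CONFIG_ZONE_DMA and CONFIG_HIGHMEM set but CONFIG_ZONE_DMA32 unset,
 *
 *        FOR_ALL_ZONES(PGALLOC)
 *
 * becomes
 *
 *        PGALLOC_DMA, PGALLOC_NORMAL , PGALLOC_HIGH , PGALLOC_MOVABLE
 *
 * i.e. one item per configured zone, in the same order as the zone
 * indices in enum zone_type. __count_zone_vm_events() below relies on
 * this ordering.
 */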

enum vm_event_item { PGPGIN, PGPGOUT, PSWPIN, PSWPOUT,
                FOR_ALL_ZONES(PGALLOC),
                PGFREE, PGACTIVATE, PGDEACTIVATE,
                PGFAULT, PGMAJFAULT,
                FOR_ALL_ZONES(PGREFILL),
                FOR_ALL_ZONES(PGSTEAL),
                FOR_ALL_ZONES(PGSCAN_KSWAPD),
                FOR_ALL_ZONES(PGSCAN_DIRECT),
#ifdef CONFIG_NUMA
                PGSCAN_ZONE_RECLAIM_FAILED,
#endif
                PGINODESTEAL, SLABS_SCANNED, KSWAPD_STEAL, KSWAPD_INODESTEAL,
                PAGEOUTRUN, ALLOCSTALL, PGROTATED,
#ifdef CONFIG_HUGETLB_PAGE
                HTLB_BUDDY_PGALLOC, HTLB_BUDDY_PGALLOC_FAIL,
#endif
                UNEVICTABLE_PGCULLED,   /* culled to noreclaim list */
                UNEVICTABLE_PGSCANNED,  /* scanned for reclaimability */
                UNEVICTABLE_PGRESCUED,  /* rescued from noreclaim list */
                UNEVICTABLE_PGMLOCKED,
                UNEVICTABLE_PGMUNLOCKED,
                UNEVICTABLE_PGCLEARED,  /* on COW, page truncate */
                UNEVICTABLE_PGSTRANDED, /* unable to isolate on unlock */
                UNEVICTABLE_MLOCKFREED,
                NR_VM_EVENT_ITEMS
};

extern int sysctl_stat_interval;

#ifdef CONFIG_VM_EVENT_COUNTERS
/*
 * Lightweight per-CPU counter implementation.
 *
 * Counters should only be incremented and no critical kernel component
 * should rely on the counter values.
 *
 * Counters are handled completely inline. On many platforms the code
 * generated will simply be the increment of a global address.
 */

struct vm_event_state {
        unsigned long event[NR_VM_EVENT_ITEMS];
};

DECLARE_PER_CPU(struct vm_event_state, vm_event_states);

static inline void __count_vm_event(enum vm_event_item item)
{
        __get_cpu_var(vm_event_states).event[item]++;
}

static inline void count_vm_event(enum vm_event_item item)
{
        get_cpu_var(vm_event_states).event[item]++;
        put_cpu();
}

static inline void __count_vm_events(enum vm_event_item item, long delta)
{
        __get_cpu_var(vm_event_states).event[item] += delta;
}

static inline void count_vm_events(enum vm_event_item item, long delta)
{
        get_cpu_var(vm_event_states).event[item] += delta;
        put_cpu();
}
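
/*
 * Usage sketch (illustrative only): a page fault path can account the
 * event on the current CPU with
 *
 *        count_vm_event(PGFAULT);
 *
 * or, when preemption is already disabled so the raw per-cpu access in
 * __get_cpu_var() is safe,
 *
 *        __count_vm_event(PGFAULT);
 *
 * count_vm_events(PGFREE, nr_pages) adds a larger delta in one step.
 * The totals are exported via /proc/vmstat and are purely informational.
 */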

extern void all_vm_events(unsigned long *);
#ifdef CONFIG_HOTPLUG
extern void vm_events_fold_cpu(int cpu);
#else
static inline void vm_events_fold_cpu(int cpu)
{
}
#endif

#else

/* Disable counters */
static inline void count_vm_event(enum vm_event_item item)
{
}
static inline void count_vm_events(enum vm_event_item item, long delta)
{
}
static inline void __count_vm_event(enum vm_event_item item)
{
}
static inline void __count_vm_events(enum vm_event_item item, long delta)
{
}
static inline void all_vm_events(unsigned long *ret)
{
}
static inline void vm_events_fold_cpu(int cpu)
{
}

#endif /* CONFIG_VM_EVENT_COUNTERS */

#define __count_zone_vm_events(item, zone, delta) \
                __count_vm_events(item##_NORMAL - ZONE_NORMAL + \
                zone_idx(zone), delta)
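
/*
 * How the arithmetic above selects the per-zone item: FOR_ALL_ZONES()
 * emits the per-zone event items in the same order as the zone indices,
 * so item##_NORMAL - ZONE_NORMAL is the item for zone index 0 and adding
 * zone_idx(zone) picks the entry for the given zone.  For example
 * (sketch),
 *
 *        __count_zone_vm_events(PGALLOC, zone, 1 << order);
 *
 * is counted against PGALLOC_DMA, PGALLOC_NORMAL, PGALLOC_HIGH, ...
 * depending on which zone the pages came from.
 */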

/*
 * Zone-based page accounting with per-CPU differentials.
 */
extern atomic_long_t vm_stat[NR_VM_ZONE_STAT_ITEMS];

static inline void zone_page_state_add(long x, struct zone *zone,
                                 enum zone_stat_item item)
{
        atomic_long_add(x, &zone->vm_stat[item]);
        atomic_long_add(x, &vm_stat[item]);
}

static inline unsigned long global_page_state(enum zone_stat_item item)
{
        long x = atomic_long_read(&vm_stat[item]);
#ifdef CONFIG_SMP
        if (x < 0)
                x = 0;
#endif
        return x;
}

static inline unsigned long zone_page_state(struct zone *zone,
                                        enum zone_stat_item item)
{
        long x = atomic_long_read(&zone->vm_stat[item]);
#ifdef CONFIG_SMP
        if (x < 0)
                x = 0;
#endif
        return x;
}
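
/*
 * Note on the SMP clamping above: with CONFIG_SMP the counters are
 * updated through small per-cpu differentials that are only folded into
 * the atomic_long_t values now and then (see refresh_cpu_vm_stats()), so
 * a reader can observe a transiently negative sum.  Reporting 0 in that
 * case is intentional, not an error.
 */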

extern unsigned long global_reclaimable_pages(void);
extern unsigned long zone_reclaimable_pages(struct zone *zone);

#ifdef CONFIG_NUMA
/*
 * Determine the per node value of a stat item. This function
 * is called frequently in a NUMA machine, so try to be as
 * frugal as possible.
 */
static inline unsigned long node_page_state(int node,
                                 enum zone_stat_item item)
{
        struct zone *zones = NODE_DATA(node)->node_zones;

        return
#ifdef CONFIG_ZONE_DMA
                zone_page_state(&zones[ZONE_DMA], item) +
#endif
#ifdef CONFIG_ZONE_DMA32
                zone_page_state(&zones[ZONE_DMA32], item) +
#endif
#ifdef CONFIG_HIGHMEM
                zone_page_state(&zones[ZONE_HIGHMEM], item) +
#endif
                zone_page_state(&zones[ZONE_NORMAL], item) +
                zone_page_state(&zones[ZONE_MOVABLE], item);
}
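
/*
 * For example (illustrative), node_page_state(node, NR_FREE_PAGES) sums
 * the free-page counts of every configured zone on that node; on
 * !CONFIG_NUMA builds the macro in the #else branch below collapses it
 * to the global counter instead.
 */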

extern void zone_statistics(struct zone *, struct zone *);

#else

#define node_page_state(node, item) global_page_state(item)
#define zone_statistics(_zl,_z) do { } while (0)

#endif /* CONFIG_NUMA */

#define add_zone_page_state(__z, __i, __d) mod_zone_page_state(__z, __i, __d)
#define sub_zone_page_state(__z, __i, __d) mod_zone_page_state(__z, __i, -(__d))
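
/*
 * These are thin sign helpers: add_zone_page_state(zone, item, nr) and
 * sub_zone_page_state(zone, item, nr) resolve to mod_zone_page_state()
 * with +nr and -nr respectively, and therefore follow whichever SMP or
 * !SMP implementation of mod_zone_page_state() is in effect below.
 */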

static inline void zap_zone_vm_stats(struct zone *zone)
{
        memset(zone->vm_stat, 0, sizeof(zone->vm_stat));
}

extern void inc_zone_state(struct zone *, enum zone_stat_item);

#ifdef CONFIG_SMP
void __mod_zone_page_state(struct zone *, enum zone_stat_item item, int);
void __inc_zone_page_state(struct page *, enum zone_stat_item);
void __dec_zone_page_state(struct page *, enum zone_stat_item);

void mod_zone_page_state(struct zone *, enum zone_stat_item, int);
void inc_zone_page_state(struct page *, enum zone_stat_item);
void dec_zone_page_state(struct page *, enum zone_stat_item);

extern void inc_zone_state(struct zone *, enum zone_stat_item);
extern void __inc_zone_state(struct zone *, enum zone_stat_item);
extern void dec_zone_state(struct zone *, enum zone_stat_item);
extern void __dec_zone_state(struct zone *, enum zone_stat_item);

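/*
 * Informal note: under SMP the functions above accumulate small per-cpu
 * deltas and only occasionally fold them into the atomic_long_t counters;
 * refresh_cpu_vm_stats() performs that folding for one CPU and is driven
 * periodically (roughly every sysctl_stat_interval jiffies) by the vmstat
 * update work in mm/vmstat.c.
 */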
void refresh_cpu_vm_stats(int);
#else /* CONFIG_SMP */

/*
 * We do not maintain differentials in a single processor configuration.
 * The functions directly modify the zone and global counters.
 */
static inline void __mod_zone_page_state(struct zone *zone,
                        enum zone_stat_item item, int delta)
{
        zone_page_state_add(delta, zone, item);
}

static inline void __inc_zone_state(struct zone *zone, enum zone_stat_item item)
{
        atomic_long_inc(&zone->vm_stat[item]);
        atomic_long_inc(&vm_stat[item]);
}

static inline void __inc_zone_page_state(struct page *page,
                        enum zone_stat_item item)
{
        __inc_zone_state(page_zone(page), item);
}

static inline void __dec_zone_state(struct zone *zone, enum zone_stat_item item)
{
        atomic_long_dec(&zone->vm_stat[item]);
        atomic_long_dec(&vm_stat[item]);
}

static inline void __dec_zone_page_state(struct page *page,
                        enum zone_stat_item item)
{
        __dec_zone_state(page_zone(page), item);
}

/*
 * We only use atomic operations to update counters. So there is no need to
 * disable interrupts.
 */
#define inc_zone_page_state __inc_zone_page_state
#define dec_zone_page_state __dec_zone_page_state
#define mod_zone_page_state __mod_zone_page_state

static inline void refresh_cpu_vm_stats(int cpu) { }
#endif

#endif /* _LINUX_VMSTAT_H */