linux/include/linux/vmstat.h
#ifndef _LINUX_VMSTAT_H
#define _LINUX_VMSTAT_H

#include <linux/types.h>
#include <linux/percpu.h>
#include <linux/mm.h>
#include <linux/mmzone.h>
#include <linux/vm_event_item.h>
#include <linux/atomic.h>

extern int sysctl_stat_interval;

#ifdef CONFIG_VM_EVENT_COUNTERS
/*
 * Lightweight per-CPU counter implementation.
 *
 * Counters should only be incremented, and no critical kernel component
 * should rely on the counter values.
 *
 * Counters are handled completely inline. On many platforms the code
 * generated will simply be the increment of a global address.
 */

struct vm_event_state {
        unsigned long event[NR_VM_EVENT_ITEMS];
};

DECLARE_PER_CPU(struct vm_event_state, vm_event_states);

/*
 * VM counters are allowed to be racy. Use raw_cpu_ops to avoid the
 * local_irq_disable overhead.
 */
static inline void __count_vm_event(enum vm_event_item item)
{
        raw_cpu_inc(vm_event_states.event[item]);
}

static inline void count_vm_event(enum vm_event_item item)
{
        this_cpu_inc(vm_event_states.event[item]);
}

static inline void __count_vm_events(enum vm_event_item item, long delta)
{
        raw_cpu_add(vm_event_states.event[item], delta);
}

static inline void count_vm_events(enum vm_event_item item, long delta)
{
        this_cpu_add(vm_event_states.event[item], delta);
}

extern void all_vm_events(unsigned long *);

extern void vm_events_fold_cpu(int cpu);

#else

/* Disable counters */
static inline void count_vm_event(enum vm_event_item item)
{
}
static inline void count_vm_events(enum vm_event_item item, long delta)
{
}
static inline void __count_vm_event(enum vm_event_item item)
{
}
static inline void __count_vm_events(enum vm_event_item item, long delta)
{
}
static inline void all_vm_events(unsigned long *ret)
{
}
static inline void vm_events_fold_cpu(int cpu)
{
}

#endif /* CONFIG_VM_EVENT_COUNTERS */

#ifdef CONFIG_NUMA_BALANCING
#define count_vm_numa_event(x)     count_vm_event(x)
#define count_vm_numa_events(x, y) count_vm_events(x, y)
#else
#define count_vm_numa_event(x) do {} while (0)
#define count_vm_numa_events(x, y) do { (void)(y); } while (0)
#endif /* CONFIG_NUMA_BALANCING */

#ifdef CONFIG_DEBUG_TLBFLUSH
#define count_vm_tlb_event(x)      count_vm_event(x)
#define count_vm_tlb_events(x, y)  count_vm_events(x, y)
#else
#define count_vm_tlb_event(x)     do {} while (0)
#define count_vm_tlb_events(x, y) do { (void)(y); } while (0)
#endif

#define __count_zone_vm_events(item, zone, delta) \
		__count_vm_events(item##_NORMAL - ZONE_NORMAL + \
		zone_idx(zone), delta)
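
/*
 * Illustrative expansion (an editorial sketch, assuming the per-zone
 * PGALLOC event items): the macro relies on each per-zone event item
 * group being laid out in zone order, so for example
 *
 *	__count_zone_vm_events(PGALLOC, zone, 1 << order);
 *
 * bumps PGALLOC_DMA, PGALLOC_NORMAL, PGALLOC_MOVABLE, ... depending on
 * zone_idx(zone), by computing the item as
 * PGALLOC_NORMAL - ZONE_NORMAL + zone_idx(zone).
 */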

/*
 * Zone-based page accounting with per-CPU differentials.
 */
extern atomic_long_t vm_stat[NR_VM_ZONE_STAT_ITEMS];

static inline void zone_page_state_add(long x, struct zone *zone,
                                 enum zone_stat_item item)
{
        atomic_long_add(x, &zone->vm_stat[item]);
        atomic_long_add(x, &vm_stat[item]);
}

static inline unsigned long global_page_state(enum zone_stat_item item)
{
        long x = atomic_long_read(&vm_stat[item]);
#ifdef CONFIG_SMP
        if (x < 0)
                x = 0;
#endif
        return x;
}

static inline unsigned long zone_page_state(struct zone *zone,
                                        enum zone_stat_item item)
{
        long x = atomic_long_read(&zone->vm_stat[item]);
#ifdef CONFIG_SMP
        if (x < 0)
                x = 0;
#endif
        return x;
}
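
/*
 * Illustrative usage (an editorial sketch, not part of the original
 * header):
 *
 *	unsigned long free  = global_page_state(NR_FREE_PAGES);
 *	unsigned long zfree = zone_page_state(zone, NR_FREE_PAGES);
 *
 * On SMP these counters can transiently read negative because per-CPU
 * deltas have not been folded back yet, hence the clamp to 0 above.
 */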

/*
 * A more accurate version that also accounts for the deltas still
 * pending in the per-CPU structures. For that we need to loop over
 * all online CPUs to collect the current deltas. There is no
 * synchronization, so the result is still not exact.
 */
static inline unsigned long zone_page_state_snapshot(struct zone *zone,
                                        enum zone_stat_item item)
{
        long x = atomic_long_read(&zone->vm_stat[item]);

#ifdef CONFIG_SMP
        int cpu;
        for_each_online_cpu(cpu)
                x += per_cpu_ptr(zone->pageset, cpu)->vm_stat_diff[item];

        if (x < 0)
                x = 0;
#endif
        return x;
}
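
/*
 * Illustrative trade-off (an editorial sketch): zone_page_state() is a
 * single atomic read and cheap; zone_page_state_snapshot() also walks
 * every online CPU, so it is reserved for readers that need a tighter
 * value when counter drift matters, e.g.
 *
 *	if (zone_page_state_snapshot(zone, NR_FREE_PAGES) < min_pages)
 *		...
 *
 * as in watermark checks under memory pressure.
 */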

#ifdef CONFIG_NUMA
/*
 * Determine the per-node value of a stat item. This function is
 * called frequently on NUMA machines, so it is kept as frugal as
 * possible.
 */
static inline unsigned long node_page_state(int node,
                                 enum zone_stat_item item)
{
        struct zone *zones = NODE_DATA(node)->node_zones;

        return
#ifdef CONFIG_ZONE_DMA
                zone_page_state(&zones[ZONE_DMA], item) +
#endif
#ifdef CONFIG_ZONE_DMA32
                zone_page_state(&zones[ZONE_DMA32], item) +
#endif
#ifdef CONFIG_HIGHMEM
                zone_page_state(&zones[ZONE_HIGHMEM], item) +
#endif
                zone_page_state(&zones[ZONE_NORMAL], item) +
                zone_page_state(&zones[ZONE_MOVABLE], item);
}
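
/*
 * Illustrative usage (an editorial sketch): summing one counter over a
 * single node's zones,
 *
 *	unsigned long file = node_page_state(numa_node_id(), NR_FILE_PAGES);
 *
 * On !CONFIG_NUMA builds this degenerates to global_page_state(), per
 * the #else branch below.
 */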

extern void zone_statistics(struct zone *, struct zone *, gfp_t gfp);

#else

#define node_page_state(node, item) global_page_state(item)
#define zone_statistics(_zl, _z, gfp) do { } while (0)

#endif /* CONFIG_NUMA */

#define add_zone_page_state(__z, __i, __d) mod_zone_page_state(__z, __i, __d)
#define sub_zone_page_state(__z, __i, __d) mod_zone_page_state(__z, __i, -(__d))

#ifdef CONFIG_SMP
void __mod_zone_page_state(struct zone *, enum zone_stat_item item, int);
void __inc_zone_page_state(struct page *, enum zone_stat_item);
void __dec_zone_page_state(struct page *, enum zone_stat_item);

void mod_zone_page_state(struct zone *, enum zone_stat_item, int);
void inc_zone_page_state(struct page *, enum zone_stat_item);
void dec_zone_page_state(struct page *, enum zone_stat_item);

extern void inc_zone_state(struct zone *, enum zone_stat_item);
extern void __inc_zone_state(struct zone *, enum zone_stat_item);
extern void dec_zone_state(struct zone *, enum zone_stat_item);
extern void __dec_zone_state(struct zone *, enum zone_stat_item);

void cpu_vm_stats_fold(int cpu);
void refresh_zone_stat_thresholds(void);

void drain_zonestat(struct zone *zone, struct per_cpu_pageset *);

int calculate_pressure_threshold(struct zone *zone);
int calculate_normal_threshold(struct zone *zone);
void set_pgdat_percpu_threshold(pg_data_t *pgdat,
                                int (*calculate_pressure)(struct zone *));
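
/*
 * Illustrative usage (an editorial sketch): kswapd-style reclaim code
 * can tighten the per-CPU drift thresholds while reclaim is active and
 * restore them afterwards:
 *
 *	set_pgdat_percpu_threshold(pgdat, calculate_pressure_threshold);
 *	... reclaim ...
 *	set_pgdat_percpu_threshold(pgdat, calculate_normal_threshold);
 *
 * Smaller thresholds mean more frequent folding of per-CPU deltas and
 * therefore more accurate NR_FREE_PAGES readings near the watermarks.
 */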
#else /* CONFIG_SMP */

/*
 * We do not maintain differentials in a single-processor configuration.
 * The functions directly modify the zone and global counters.
 */
static inline void __mod_zone_page_state(struct zone *zone,
                        enum zone_stat_item item, int delta)
{
        zone_page_state_add(delta, zone, item);
}

static inline void __inc_zone_state(struct zone *zone, enum zone_stat_item item)
{
        atomic_long_inc(&zone->vm_stat[item]);
        atomic_long_inc(&vm_stat[item]);
}

static inline void __dec_zone_state(struct zone *zone, enum zone_stat_item item)
{
        atomic_long_dec(&zone->vm_stat[item]);
        atomic_long_dec(&vm_stat[item]);
}

static inline void __inc_zone_page_state(struct page *page,
                        enum zone_stat_item item)
{
        __inc_zone_state(page_zone(page), item);
}

static inline void __dec_zone_page_state(struct page *page,
                        enum zone_stat_item item)
{
        __dec_zone_state(page_zone(page), item);
}

/*
 * We use only atomic operations to update counters, so there is no
 * need to disable interrupts.
 */
#define inc_zone_page_state __inc_zone_page_state
#define dec_zone_page_state __dec_zone_page_state
#define mod_zone_page_state __mod_zone_page_state

#define inc_zone_state __inc_zone_state
#define dec_zone_state __dec_zone_state

#define set_pgdat_percpu_threshold(pgdat, callback) do { } while (0)

static inline void refresh_cpu_vm_stats(int cpu) { }
static inline void refresh_zone_stat_thresholds(void) { }
static inline void cpu_vm_stats_fold(int cpu) { }

static inline void drain_zonestat(struct zone *zone,
                        struct per_cpu_pageset *pset) { }
#endif          /* CONFIG_SMP */

static inline void __mod_zone_freepage_state(struct zone *zone, int nr_pages,
                                             int migratetype)
{
        __mod_zone_page_state(zone, NR_FREE_PAGES, nr_pages);
        if (is_migrate_cma(migratetype))
                __mod_zone_page_state(zone, NR_FREE_CMA_PAGES, nr_pages);
}
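
/*
 * Illustrative usage (an editorial sketch): the page allocator adjusts
 * the free counters as pages enter or leave the free lists, e.g. when
 * removing 2^order pages from a free list:
 *
 *	__mod_zone_freepage_state(zone, -(1 << order), migratetype);
 *
 * CMA pages are additionally tracked via NR_FREE_CMA_PAGES so their
 * availability can be reported separately from ordinary free pages.
 */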

extern const char * const vmstat_text[];

#endif /* _LINUX_VMSTAT_H */