linux/include/linux/vmstat.h
#ifndef _LINUX_VMSTAT_H
#define _LINUX_VMSTAT_H

#include <linux/types.h>
#include <linux/percpu.h>
#include <linux/mm.h>
#include <linux/mmzone.h>
#include <linux/vm_event_item.h>
#include <linux/atomic.h>

extern int sysctl_stat_interval;

#ifdef CONFIG_VM_EVENT_COUNTERS
/*
 * Lightweight per-CPU counter implementation.
 *
 * Counters should only be incremented; no critical kernel component
 * should rely on the counter values.
 *
 * Counters are handled completely inline. On many platforms the code
 * generated will simply be an increment of a global address.
 */

struct vm_event_state {
        unsigned long event[NR_VM_EVENT_ITEMS];
};

DECLARE_PER_CPU(struct vm_event_state, vm_event_states);

/*
 * vm counters are allowed to be racy. Use raw_cpu_ops to avoid the
 * local_irq_disable overhead.
 */
static inline void __count_vm_event(enum vm_event_item item)
{
        raw_cpu_inc(vm_event_states.event[item]);
}

static inline void count_vm_event(enum vm_event_item item)
{
        this_cpu_inc(vm_event_states.event[item]);
}

static inline void __count_vm_events(enum vm_event_item item, long delta)
{
        raw_cpu_add(vm_event_states.event[item], delta);
}

static inline void count_vm_events(enum vm_event_item item, long delta)
{
        this_cpu_add(vm_event_states.event[item], delta);
}
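
/*
 * Illustrative usage (a sketch, not part of the original header): the
 * double-underscore variants use raw_cpu_ops and assume the caller is
 * already non-preemptible (e.g. interrupts disabled), while the plain
 * variants are safe from any context:
 *
 *        count_vm_event(PGFAULT);                        from any context
 *        __count_vm_events(PGALLOC_NORMAL, 1 << order);  preemption already off
 */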

extern void all_vm_events(unsigned long *);

extern void vm_events_fold_cpu(int cpu);

#else

/* Disable counters */
static inline void count_vm_event(enum vm_event_item item)
{
}
static inline void count_vm_events(enum vm_event_item item, long delta)
{
}
static inline void __count_vm_event(enum vm_event_item item)
{
}
static inline void __count_vm_events(enum vm_event_item item, long delta)
{
}
static inline void all_vm_events(unsigned long *ret)
{
}
static inline void vm_events_fold_cpu(int cpu)
{
}

#endif /* CONFIG_VM_EVENT_COUNTERS */

#ifdef CONFIG_NUMA_BALANCING
#define count_vm_numa_event(x)     count_vm_event(x)
#define count_vm_numa_events(x, y) count_vm_events(x, y)
#else
#define count_vm_numa_event(x) do {} while (0)
#define count_vm_numa_events(x, y) do { (void)(y); } while (0)
#endif /* CONFIG_NUMA_BALANCING */

#ifdef CONFIG_DEBUG_TLBFLUSH
#define count_vm_tlb_event(x)      count_vm_event(x)
#define count_vm_tlb_events(x, y)  count_vm_events(x, y)
#else
#define count_vm_tlb_event(x)     do {} while (0)
#define count_vm_tlb_events(x, y) do { (void)(y); } while (0)
#endif

#ifdef CONFIG_DEBUG_VM_VMACACHE
#define count_vm_vmacache_event(x) count_vm_event(x)
#else
#define count_vm_vmacache_event(x) do {} while (0)
#endif

#define __count_zone_vm_events(item, zone, delta) \
                __count_vm_events(item##_NORMAL - ZONE_NORMAL + \
                zone_idx(zone), delta)
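
/*
 * Example expansion (added for illustration): the per-zone event items
 * are laid out contiguously in zone order, so item##_NORMAL anchors the
 * lookup and zone_idx() selects the right one. For instance,
 *
 *        __count_zone_vm_events(PGALLOC, zone, 1 << order);
 *
 * expands to
 *
 *        __count_vm_events(PGALLOC_NORMAL - ZONE_NORMAL + zone_idx(zone),
 *                          1 << order);
 *
 * which hits PGALLOC_DMA, PGALLOC_NORMAL, ... depending on the zone.
 */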

/*
 * Zone-based page accounting with per-CPU differentials.
 */
extern atomic_long_t vm_stat[NR_VM_ZONE_STAT_ITEMS];

static inline void zone_page_state_add(long x, struct zone *zone,
                                 enum zone_stat_item item)
{
        atomic_long_add(x, &zone->vm_stat[item]);
        atomic_long_add(x, &vm_stat[item]);
}

static inline unsigned long global_page_state(enum zone_stat_item item)
{
        long x = atomic_long_read(&vm_stat[item]);
#ifdef CONFIG_SMP
        if (x < 0)
                x = 0;
#endif
        return x;
}

static inline unsigned long zone_page_state(struct zone *zone,
                                        enum zone_stat_item item)
{
        long x = atomic_long_read(&zone->vm_stat[item]);
#ifdef CONFIG_SMP
        if (x < 0)
                x = 0;
#endif
        return x;
}
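
/*
 * Added note: on SMP the per-CPU differentials may not have been folded
 * back yet, so the summed counter can transiently go negative. The
 * readers above clamp to 0 instead of returning a huge unsigned value.
 */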

/*
 * More accurate version that also considers the currently pending
 * deltas. For that we need to loop over all CPUs to find the current
 * deltas. There is no synchronization, so the result is still not
 * exact.
 */
static inline unsigned long zone_page_state_snapshot(struct zone *zone,
                                        enum zone_stat_item item)
{
        long x = atomic_long_read(&zone->vm_stat[item]);

#ifdef CONFIG_SMP
        int cpu;
        for_each_online_cpu(cpu)
                x += per_cpu_ptr(zone->pageset, cpu)->vm_stat_diff[item];

        if (x < 0)
                x = 0;
#endif
        return x;
}
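
/*
 * Illustrative comparison (a sketch): zone_page_state() is cheap but can
 * lag by up to the per-CPU stat threshold on every CPU, while the
 * snapshot variant walks all online CPUs to tighten the estimate:
 *
 *        nr = zone_page_state(zone, NR_FREE_PAGES);            fast path
 *        nr = zone_page_state_snapshot(zone, NR_FREE_PAGES);   slower, closer
 */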

#ifdef CONFIG_NUMA

extern unsigned long node_page_state(int node, enum zone_stat_item item);
extern void zone_statistics(struct zone *, struct zone *, gfp_t gfp);

#else

#define node_page_state(node, item) global_page_state(item)
#define zone_statistics(_zl, _z, gfp) do { } while (0)

#endif /* CONFIG_NUMA */

#define add_zone_page_state(__z, __i, __d) mod_zone_page_state(__z, __i, __d)
#define sub_zone_page_state(__z, __i, __d) mod_zone_page_state(__z, __i, -(__d))

#ifdef CONFIG_SMP
void __mod_zone_page_state(struct zone *, enum zone_stat_item item, long);
void __inc_zone_page_state(struct page *, enum zone_stat_item);
void __dec_zone_page_state(struct page *, enum zone_stat_item);

void mod_zone_page_state(struct zone *, enum zone_stat_item, long);
void inc_zone_page_state(struct page *, enum zone_stat_item);
void dec_zone_page_state(struct page *, enum zone_stat_item);

extern void inc_zone_state(struct zone *, enum zone_stat_item);
extern void __inc_zone_state(struct zone *, enum zone_stat_item);
extern void dec_zone_state(struct zone *, enum zone_stat_item);
extern void __dec_zone_state(struct zone *, enum zone_stat_item);

void quiet_vmstat(void);
void cpu_vm_stats_fold(int cpu);
void refresh_zone_stat_thresholds(void);

void drain_zonestat(struct zone *zone, struct per_cpu_pageset *);

int calculate_pressure_threshold(struct zone *zone);
int calculate_normal_threshold(struct zone *zone);
void set_pgdat_percpu_threshold(pg_data_t *pgdat,
                                int (*calculate_pressure)(struct zone *));
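
/*
 * Illustrative usage (a sketch): on SMP the double-underscore variants
 * expect the caller to have interrupts disabled or to be otherwise
 * protected from preemption, while the plain variants take care of that
 * themselves:
 *
 *        mod_zone_page_state(zone, NR_SHMEM, nr_pages);   any context
 *        __inc_zone_page_state(page, NR_MLOCK);           irqs already off
 */
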
#else /* CONFIG_SMP */

/*
 * We do not maintain differentials in a single processor configuration.
 * The functions directly modify the zone and global counters.
 */
static inline void __mod_zone_page_state(struct zone *zone,
                        enum zone_stat_item item, long delta)
{
        zone_page_state_add(delta, zone, item);
}

static inline void __inc_zone_state(struct zone *zone, enum zone_stat_item item)
{
        atomic_long_inc(&zone->vm_stat[item]);
        atomic_long_inc(&vm_stat[item]);
}

static inline void __dec_zone_state(struct zone *zone, enum zone_stat_item item)
{
        atomic_long_dec(&zone->vm_stat[item]);
        atomic_long_dec(&vm_stat[item]);
}

static inline void __inc_zone_page_state(struct page *page,
                        enum zone_stat_item item)
{
        __inc_zone_state(page_zone(page), item);
}

static inline void __dec_zone_page_state(struct page *page,
                        enum zone_stat_item item)
{
        __dec_zone_state(page_zone(page), item);
}

/*
 * We only use atomic operations to update counters, so there is no need
 * to disable interrupts.
 */
#define inc_zone_page_state __inc_zone_page_state
#define dec_zone_page_state __dec_zone_page_state
#define mod_zone_page_state __mod_zone_page_state

#define inc_zone_state __inc_zone_state
#define dec_zone_state __dec_zone_state

#define set_pgdat_percpu_threshold(pgdat, callback) { }

static inline void refresh_zone_stat_thresholds(void) { }
static inline void cpu_vm_stats_fold(int cpu) { }
static inline void quiet_vmstat(void) { }

static inline void drain_zonestat(struct zone *zone,
                        struct per_cpu_pageset *pset) { }
#endif          /* CONFIG_SMP */

static inline void __mod_zone_freepage_state(struct zone *zone, int nr_pages,
                                             int migratetype)
{
        __mod_zone_page_state(zone, NR_FREE_PAGES, nr_pages);
        if (is_migrate_cma(migratetype))
                __mod_zone_page_state(zone, NR_FREE_CMA_PAGES, nr_pages);
}

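/*
 * Illustrative usage (a sketch): when the buddy allocator frees pages of
 * a CMA pageblock, the CMA free count moves together with the global
 * free count; an allocation passes a negative nr_pages:
 *
 *        __mod_zone_freepage_state(zone, 1 << order, migratetype);
 *        __mod_zone_freepage_state(zone, -(1 << order), migratetype);
 */
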
extern const char * const vmstat_text[];

#endif /* _LINUX_VMSTAT_H */