linux/include/linux/vmstat.h

#ifndef _LINUX_VMSTAT_H
#define _LINUX_VMSTAT_H

#include <linux/types.h>
#include <linux/percpu.h>
#include <linux/mm.h>
#include <linux/mmzone.h>
#include <linux/vm_event_item.h>
#include <linux/atomic.h>

extern int sysctl_stat_interval;

#ifdef CONFIG_VM_EVENT_COUNTERS
/*
 * Lightweight per-CPU counter implementation.
 *
 * Counters should only be incremented; no critical kernel component
 * should rely on the counter values.
 *
 * Counters are handled completely inline. On many platforms the code
 * generated will simply be the increment of a global address.
 */

struct vm_event_state {
        unsigned long event[NR_VM_EVENT_ITEMS];
};

DECLARE_PER_CPU(struct vm_event_state, vm_event_states);

static inline void __count_vm_event(enum vm_event_item item)
{
        __this_cpu_inc(vm_event_states.event[item]);
}

static inline void count_vm_event(enum vm_event_item item)
{
        this_cpu_inc(vm_event_states.event[item]);
}

static inline void __count_vm_events(enum vm_event_item item, long delta)
{
        __this_cpu_add(vm_event_states.event[item], delta);
}

static inline void count_vm_events(enum vm_event_item item, long delta)
{
        this_cpu_add(vm_event_states.event[item], delta);
}

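/*
 * Example (illustrative, not part of the original header): bumping an
 * event counter from some kernel path.  PGFAULT is one of the items
 * defined in <linux/vm_event_item.h>; any other event item works the
 * same way.
 *
 *      count_vm_event(PGFAULT);         (safe in any context)
 *      __count_vm_event(PGFAULT);       (caller must already prevent
 *                                        preemption/reentrancy)
 *      count_vm_events(PGFAULT, 3);     (add an arbitrary delta)
 */
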
extern void all_vm_events(unsigned long *);
#ifdef CONFIG_HOTPLUG
extern void vm_events_fold_cpu(int cpu);
#else
static inline void vm_events_fold_cpu(int cpu)
{
}
#endif

#else

/* Disable counters */
static inline void count_vm_event(enum vm_event_item item)
{
}
static inline void count_vm_events(enum vm_event_item item, long delta)
{
}
static inline void __count_vm_event(enum vm_event_item item)
{
}
static inline void __count_vm_events(enum vm_event_item item, long delta)
{
}
static inline void all_vm_events(unsigned long *ret)
{
}
static inline void vm_events_fold_cpu(int cpu)
{
}

#endif /* CONFIG_VM_EVENT_COUNTERS */

#define __count_zone_vm_events(item, zone, delta) \
                __count_vm_events(item##_NORMAL - ZONE_NORMAL + \
                zone_idx(zone), delta)

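/*
 * Example (illustrative): the macro expects an event name that has
 * per-zone variants laid out in zone order (e.g. PGALLOC_DMA,
 * PGALLOC_NORMAL, ...) and picks the one matching the given zone:
 *
 *      __count_zone_vm_events(PGALLOC, zone, 1 << order);
 *
 * expands to
 *      __count_vm_events(PGALLOC_NORMAL - ZONE_NORMAL + zone_idx(zone),
 *                        1 << order);
 */
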
/*
 * Zone-based page accounting with per-CPU differentials.
 */
extern atomic_long_t vm_stat[NR_VM_ZONE_STAT_ITEMS];

static inline void zone_page_state_add(long x, struct zone *zone,
                                 enum zone_stat_item item)
{
        atomic_long_add(x, &zone->vm_stat[item]);
        atomic_long_add(x, &vm_stat[item]);
}

static inline unsigned long global_page_state(enum zone_stat_item item)
{
        long x = atomic_long_read(&vm_stat[item]);
#ifdef CONFIG_SMP
        if (x < 0)
                x = 0;
#endif
        return x;
}

static inline unsigned long zone_page_state(struct zone *zone,
                                        enum zone_stat_item item)
{
        long x = atomic_long_read(&zone->vm_stat[item]);
#ifdef CONFIG_SMP
        if (x < 0)
                x = 0;
#endif
        return x;
}

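/*
 * Example (illustrative): reading the approximate counters.  The values
 * are clamped at zero under SMP because pending per-CPU deltas can make
 * the summed counter transiently negative.
 *
 *      unsigned long free = global_page_state(NR_FREE_PAGES);
 *      unsigned long anon = zone_page_state(zone, NR_ANON_PAGES);
 */
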
/*
 * More accurate version that also considers the currently pending
 * deltas. For that we need to loop over all CPUs to find the current
 * deltas. There is no synchronization, so the result is still not
 * exactly accurate.
 */
static inline unsigned long zone_page_state_snapshot(struct zone *zone,
                                        enum zone_stat_item item)
{
        long x = atomic_long_read(&zone->vm_stat[item]);

#ifdef CONFIG_SMP
        int cpu;
        for_each_online_cpu(cpu)
                x += per_cpu_ptr(zone->pageset, cpu)->vm_stat_diff[item];

        if (x < 0)
                x = 0;
#endif
        return x;
}

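/*
 * Example (illustrative): a snapshot read is more expensive (it walks
 * all online CPUs), so it is only worth using where the cheaper,
 * clamped read above is not accurate enough:
 *
 *      unsigned long isolated =
 *              zone_page_state_snapshot(zone, NR_ISOLATED_FILE);
 */
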
extern unsigned long global_reclaimable_pages(void);
extern unsigned long zone_reclaimable_pages(struct zone *zone);

#ifdef CONFIG_NUMA
/*
 * Determine the per-node value of a stat item. This function
 * is called frequently on a NUMA machine, so try to be as
 * frugal as possible.
 */
static inline unsigned long node_page_state(int node,
                                 enum zone_stat_item item)
{
        struct zone *zones = NODE_DATA(node)->node_zones;

        return
#ifdef CONFIG_ZONE_DMA
                zone_page_state(&zones[ZONE_DMA], item) +
#endif
#ifdef CONFIG_ZONE_DMA32
                zone_page_state(&zones[ZONE_DMA32], item) +
#endif
#ifdef CONFIG_HIGHMEM
                zone_page_state(&zones[ZONE_HIGHMEM], item) +
#endif
                zone_page_state(&zones[ZONE_NORMAL], item) +
                zone_page_state(&zones[ZONE_MOVABLE], item);
}

extern void zone_statistics(struct zone *, struct zone *, gfp_t gfp);

#else

#define node_page_state(node, item) global_page_state(item)
#define zone_statistics(_zl, _z, gfp) do { } while (0)

#endif /* CONFIG_NUMA */

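/*
 * Example (illustrative): a per-node total is just the sum over that
 * node's zones; on !CONFIG_NUMA the macro above falls back to the
 * global counter.
 *
 *      unsigned long file = node_page_state(numa_node_id(), NR_FILE_PAGES);
 */
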
#define add_zone_page_state(__z, __i, __d) mod_zone_page_state(__z, __i, __d)
#define sub_zone_page_state(__z, __i, __d) mod_zone_page_state(__z, __i, -(__d))

extern void inc_zone_state(struct zone *, enum zone_stat_item);

#ifdef CONFIG_SMP
void __mod_zone_page_state(struct zone *, enum zone_stat_item item, int);
void __inc_zone_page_state(struct page *, enum zone_stat_item);
void __dec_zone_page_state(struct page *, enum zone_stat_item);

void mod_zone_page_state(struct zone *, enum zone_stat_item, int);
void inc_zone_page_state(struct page *, enum zone_stat_item);
void dec_zone_page_state(struct page *, enum zone_stat_item);

extern void inc_zone_state(struct zone *, enum zone_stat_item);
extern void __inc_zone_state(struct zone *, enum zone_stat_item);
extern void dec_zone_state(struct zone *, enum zone_stat_item);
extern void __dec_zone_state(struct zone *, enum zone_stat_item);

void refresh_cpu_vm_stats(int);
void refresh_zone_stat_thresholds(void);

void drain_zonestat(struct zone *zone, struct per_cpu_pageset *);

int calculate_pressure_threshold(struct zone *zone);
int calculate_normal_threshold(struct zone *zone);
void set_pgdat_percpu_threshold(pg_data_t *pgdat,
                                int (*calculate_pressure)(struct zone *));
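
/*
 * Usage note (illustrative): by convention the double-underscore
 * variants declared above assume the caller already prevents
 * reentrancy, typically because interrupts are disabled, while the
 * plain variants take care of that themselves in mm/vmstat.c and can
 * be called from any context:
 *
 *      inc_zone_page_state(page, NR_FILE_PAGES);       (any context)
 *      __inc_zone_page_state(page, NR_FILE_PAGES);     (irqs already off)
 */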
#else /* CONFIG_SMP */

/*
 * We do not maintain differentials in a single-processor configuration.
 * The functions directly modify the zone and global counters.
 */
static inline void __mod_zone_page_state(struct zone *zone,
                        enum zone_stat_item item, int delta)
{
        zone_page_state_add(delta, zone, item);
}

static inline void __inc_zone_state(struct zone *zone, enum zone_stat_item item)
{
        atomic_long_inc(&zone->vm_stat[item]);
        atomic_long_inc(&vm_stat[item]);
}

static inline void __inc_zone_page_state(struct page *page,
                        enum zone_stat_item item)
{
        __inc_zone_state(page_zone(page), item);
}

static inline void __dec_zone_state(struct zone *zone, enum zone_stat_item item)
{
        atomic_long_dec(&zone->vm_stat[item]);
        atomic_long_dec(&vm_stat[item]);
}

static inline void __dec_zone_page_state(struct page *page,
                        enum zone_stat_item item)
{
        __dec_zone_state(page_zone(page), item);
}

/*
 * We only use atomic operations to update counters, so there is no need
 * to disable interrupts.
 */
#define inc_zone_page_state __inc_zone_page_state
#define dec_zone_page_state __dec_zone_page_state
#define mod_zone_page_state __mod_zone_page_state

#define set_pgdat_percpu_threshold(pgdat, callback) do { } while (0)

static inline void refresh_cpu_vm_stats(int cpu) { }
static inline void refresh_zone_stat_thresholds(void) { }

static inline void drain_zonestat(struct zone *zone,
                        struct per_cpu_pageset *pset) { }
#endif          /* CONFIG_SMP */

static inline void __mod_zone_freepage_state(struct zone *zone, int nr_pages,
                                             int migratetype)
{
        __mod_zone_page_state(zone, NR_FREE_PAGES, nr_pages);
        if (is_migrate_cma(migratetype))
                __mod_zone_page_state(zone, NR_FREE_CMA_PAGES, nr_pages);
}

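/*
 * Example (illustrative): callers pass a signed page count, so freeing
 * and allocating use positive and negative deltas respectively, and CMA
 * pageblocks are additionally accounted under NR_FREE_CMA_PAGES:
 *
 *      __mod_zone_freepage_state(zone, -(1 << order),
 *                                get_pageblock_migratetype(page));
 */
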
extern const char * const vmstat_text[];

#endif /* _LINUX_VMSTAT_H */