linux/include/linux/vmstat.h
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_VMSTAT_H
#define _LINUX_VMSTAT_H

#include <linux/types.h>
#include <linux/percpu.h>
#include <linux/mmzone.h>
#include <linux/vm_event_item.h>
#include <linux/atomic.h>
#include <linux/static_key.h>

extern int sysctl_stat_interval;

#ifdef CONFIG_NUMA
#define ENABLE_NUMA_STAT   1
#define DISABLE_NUMA_STAT   0
extern int sysctl_vm_numa_stat;
DECLARE_STATIC_KEY_TRUE(vm_numa_stat_key);
extern int sysctl_vm_numa_stat_handler(struct ctl_table *table,
		int write, void __user *buffer, size_t *length, loff_t *ppos);
#endif
struct reclaim_stat {
	unsigned nr_dirty;		/* dirty pages encountered */
	unsigned nr_unqueued_dirty;	/* dirty pages not yet queued for writeback */
	unsigned nr_congested;		/* pages on a congested backing device */
	unsigned nr_writeback;		/* pages under writeback */
	unsigned nr_immediate;		/* pages needing immediate reclaim */
	unsigned nr_activate[2];	/* pages moved back to the active list (anon/file) */
	unsigned nr_ref_keep;		/* pages kept due to recent references */
	unsigned nr_unmap_fail;		/* pages that could not be unmapped */
};

#ifdef CONFIG_VM_EVENT_COUNTERS
/*
 * Lightweight per-cpu counter implementation.
 *
 * Counters should only be incremented, and no critical kernel
 * component should rely on the counter values.
 *
 * Counters are handled completely inline. On many platforms the
 * generated code is simply an increment of a global address.
 */

struct vm_event_state {
	unsigned long event[NR_VM_EVENT_ITEMS];
};

DECLARE_PER_CPU(struct vm_event_state, vm_event_states);

/*
 * vm counters are allowed to be racy. Use raw_cpu_ops to avoid the
 * local_irq_disable overhead.
 */
static inline void __count_vm_event(enum vm_event_item item)
{
	raw_cpu_inc(vm_event_states.event[item]);
}

static inline void count_vm_event(enum vm_event_item item)
{
	this_cpu_inc(vm_event_states.event[item]);
}

static inline void __count_vm_events(enum vm_event_item item, long delta)
{
	raw_cpu_add(vm_event_states.event[item], delta);
}

static inline void count_vm_events(enum vm_event_item item, long delta)
{
	this_cpu_add(vm_event_states.event[item], delta);
}
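
/*
 * Example (illustrative, not part of this header): on a path where
 * preemption is already disabled, the raw variant avoids the extra
 * cost of a this_cpu op:
 *
 *	__count_vm_event(PGFAULT);
 *
 * From a preemptible context, prefer count_vm_event(PGFAULT); the
 * this_cpu operation keeps the read-modify-write safe there.
 */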

extern void all_vm_events(unsigned long *);

extern void vm_events_fold_cpu(int cpu);

#else

/* Disable counters */
static inline void count_vm_event(enum vm_event_item item)
{
}
static inline void count_vm_events(enum vm_event_item item, long delta)
{
}
static inline void __count_vm_event(enum vm_event_item item)
{
}
static inline void __count_vm_events(enum vm_event_item item, long delta)
{
}
static inline void all_vm_events(unsigned long *ret)
{
}
static inline void vm_events_fold_cpu(int cpu)
{
}

#endif /* CONFIG_VM_EVENT_COUNTERS */

#ifdef CONFIG_NUMA_BALANCING
#define count_vm_numa_event(x)     count_vm_event(x)
#define count_vm_numa_events(x, y) count_vm_events(x, y)
#else
#define count_vm_numa_event(x) do {} while (0)
#define count_vm_numa_events(x, y) do { (void)(y); } while (0)
#endif /* CONFIG_NUMA_BALANCING */

#ifdef CONFIG_DEBUG_TLBFLUSH
#define count_vm_tlb_event(x)      count_vm_event(x)
#define count_vm_tlb_events(x, y)  count_vm_events(x, y)
#else
#define count_vm_tlb_event(x)     do {} while (0)
#define count_vm_tlb_events(x, y) do { (void)(y); } while (0)
#endif

#ifdef CONFIG_DEBUG_VM_VMACACHE
#define count_vm_vmacache_event(x) count_vm_event(x)
#else
#define count_vm_vmacache_event(x) do {} while (0)
#endif

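/*
 * Per-zone-id event helper: events such as PGALLOC have one item per
 * zone type, laid out in the same order as enum zone_type, so the
 * variant for zone id "zid" can be derived from the _NORMAL item.
 */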
#define __count_zid_vm_events(item, zid, delta) \
	__count_vm_events(item##_NORMAL - ZONE_NORMAL + zid, delta)

/*
 * Zone and node-based page accounting with per cpu differentials.
 */
extern atomic_long_t vm_zone_stat[NR_VM_ZONE_STAT_ITEMS];
extern atomic_long_t vm_numa_stat[NR_VM_NUMA_STAT_ITEMS];
extern atomic_long_t vm_node_stat[NR_VM_NODE_STAT_ITEMS];

#ifdef CONFIG_NUMA
static inline void zone_numa_state_add(long x, struct zone *zone,
				 enum numa_stat_item item)
{
	atomic_long_add(x, &zone->vm_numa_stat[item]);
	atomic_long_add(x, &vm_numa_stat[item]);
}

static inline unsigned long global_numa_state(enum numa_stat_item item)
{
	long x = atomic_long_read(&vm_numa_stat[item]);

	return x;
}

static inline unsigned long zone_numa_state_snapshot(struct zone *zone,
					enum numa_stat_item item)
{
	long x = atomic_long_read(&zone->vm_numa_stat[item]);
	int cpu;

	for_each_online_cpu(cpu)
		x += per_cpu_ptr(zone->pageset, cpu)->vm_numa_stat_diff[item];

	return x;
}
#endif /* CONFIG_NUMA */
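
/*
 * Example (illustrative): statistics reporting can read the global
 * NUMA hit counter without taking any locks, tolerating per-cpu drift:
 *
 *	unsigned long hits = global_numa_state(NUMA_HIT);
 */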

static inline void zone_page_state_add(long x, struct zone *zone,
				 enum zone_stat_item item)
{
	atomic_long_add(x, &zone->vm_stat[item]);
	atomic_long_add(x, &vm_zone_stat[item]);
}

static inline void node_page_state_add(long x, struct pglist_data *pgdat,
				 enum node_stat_item item)
{
	atomic_long_add(x, &pgdat->vm_stat[item]);
	atomic_long_add(x, &vm_node_stat[item]);
}

static inline unsigned long global_zone_page_state(enum zone_stat_item item)
{
	long x = atomic_long_read(&vm_zone_stat[item]);
#ifdef CONFIG_SMP
	if (x < 0)
		x = 0;
#endif
	return x;
}

static inline unsigned long global_node_page_state(enum node_stat_item item)
{
	long x = atomic_long_read(&vm_node_stat[item]);
#ifdef CONFIG_SMP
	if (x < 0)
		x = 0;
#endif
	return x;
}
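
/*
 * Example (illustrative): a shrinker or OOM heuristic might sample the
 * global counters, accepting drift of up to the per-cpu stat threshold
 * on each cpu:
 *
 *	unsigned long free = global_zone_page_state(NR_FREE_PAGES);
 *	unsigned long file = global_node_page_state(NR_FILE_PAGES);
 */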

static inline unsigned long zone_page_state(struct zone *zone,
					enum zone_stat_item item)
{
	long x = atomic_long_read(&zone->vm_stat[item]);
#ifdef CONFIG_SMP
	if (x < 0)
		x = 0;
#endif
	return x;
}

/*
 * More accurate version that also folds in the currently pending
 * per-cpu deltas, which requires looping over all cpus. There is
 * no synchronization, so the result is still only approximate.
 */
static inline unsigned long zone_page_state_snapshot(struct zone *zone,
					enum zone_stat_item item)
{
	long x = atomic_long_read(&zone->vm_stat[item]);

#ifdef CONFIG_SMP
	int cpu;

	for_each_online_cpu(cpu)
		x += per_cpu_ptr(zone->pageset, cpu)->vm_stat_diff[item];

	if (x < 0)
		x = 0;
#endif
	return x;
}
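
/*
 * Example (illustrative; min_wmark is a placeholder): a watermark check
 * that must not be fooled by pending per-cpu deltas can pay for the
 * snapshot instead of the cheap read:
 *
 *	if (zone_page_state_snapshot(zone, NR_FREE_PAGES) < min_wmark)
 *		...
 */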

#ifdef CONFIG_NUMA
extern void __inc_numa_state(struct zone *zone, enum numa_stat_item item);
extern unsigned long sum_zone_node_page_state(int node,
					      enum zone_stat_item item);
extern unsigned long sum_zone_numa_state(int node, enum numa_stat_item item);
extern unsigned long node_page_state(struct pglist_data *pgdat,
						enum node_stat_item item);
#else
#define sum_zone_node_page_state(node, item) global_zone_page_state(item)
#define node_page_state(node, item) global_node_page_state(item)
#endif /* CONFIG_NUMA */

#ifdef CONFIG_SMP
void __mod_zone_page_state(struct zone *, enum zone_stat_item item, long);
void __inc_zone_page_state(struct page *, enum zone_stat_item);
void __dec_zone_page_state(struct page *, enum zone_stat_item);

void __mod_node_page_state(struct pglist_data *, enum node_stat_item item, long);
void __inc_node_page_state(struct page *, enum node_stat_item);
void __dec_node_page_state(struct page *, enum node_stat_item);

void mod_zone_page_state(struct zone *, enum zone_stat_item, long);
void inc_zone_page_state(struct page *, enum zone_stat_item);
void dec_zone_page_state(struct page *, enum zone_stat_item);

void mod_node_page_state(struct pglist_data *, enum node_stat_item, long);
void inc_node_page_state(struct page *, enum node_stat_item);
void dec_node_page_state(struct page *, enum node_stat_item);

extern void inc_node_state(struct pglist_data *, enum node_stat_item);
extern void __inc_zone_state(struct zone *, enum zone_stat_item);
extern void __inc_node_state(struct pglist_data *, enum node_stat_item);
extern void dec_zone_state(struct zone *, enum zone_stat_item);
extern void __dec_zone_state(struct zone *, enum zone_stat_item);
extern void __dec_node_state(struct pglist_data *, enum node_stat_item);

void quiet_vmstat(void);
void cpu_vm_stats_fold(int cpu);
void refresh_zone_stat_thresholds(void);

struct ctl_table;
int vmstat_refresh(struct ctl_table *, int write,
		   void __user *buffer, size_t *lenp, loff_t *ppos);

void drain_zonestat(struct zone *zone, struct per_cpu_pageset *);

int calculate_pressure_threshold(struct zone *zone);
int calculate_normal_threshold(struct zone *zone);
void set_pgdat_percpu_threshold(pg_data_t *pgdat,
				int (*calculate_pressure)(struct zone *));
#else /* CONFIG_SMP */

/*
 * We do not maintain differentials in a single processor configuration.
 * The functions directly modify the zone and global counters.
 */
static inline void __mod_zone_page_state(struct zone *zone,
			enum zone_stat_item item, long delta)
{
	zone_page_state_add(delta, zone, item);
}

static inline void __mod_node_page_state(struct pglist_data *pgdat,
			enum node_stat_item item, long delta)
{
	node_page_state_add(delta, pgdat, item);
}

static inline void __inc_zone_state(struct zone *zone, enum zone_stat_item item)
{
	atomic_long_inc(&zone->vm_stat[item]);
	atomic_long_inc(&vm_zone_stat[item]);
}

static inline void __inc_node_state(struct pglist_data *pgdat, enum node_stat_item item)
{
	atomic_long_inc(&pgdat->vm_stat[item]);
	atomic_long_inc(&vm_node_stat[item]);
}

static inline void __dec_zone_state(struct zone *zone, enum zone_stat_item item)
{
	atomic_long_dec(&zone->vm_stat[item]);
	atomic_long_dec(&vm_zone_stat[item]);
}

static inline void __dec_node_state(struct pglist_data *pgdat, enum node_stat_item item)
{
	atomic_long_dec(&pgdat->vm_stat[item]);
	atomic_long_dec(&vm_node_stat[item]);
}

static inline void __inc_zone_page_state(struct page *page,
			enum zone_stat_item item)
{
	__inc_zone_state(page_zone(page), item);
}

static inline void __inc_node_page_state(struct page *page,
			enum node_stat_item item)
{
	__inc_node_state(page_pgdat(page), item);
}

static inline void __dec_zone_page_state(struct page *page,
			enum zone_stat_item item)
{
	__dec_zone_state(page_zone(page), item);
}

static inline void __dec_node_page_state(struct page *page,
			enum node_stat_item item)
{
	__dec_node_state(page_pgdat(page), item);
}

/*
 * We only use atomic operations to update counters. So there is no need to
 * disable interrupts.
 */
#define inc_zone_page_state __inc_zone_page_state
#define dec_zone_page_state __dec_zone_page_state
#define mod_zone_page_state __mod_zone_page_state

#define inc_node_page_state __inc_node_page_state
#define dec_node_page_state __dec_node_page_state
#define mod_node_page_state __mod_node_page_state

#define inc_zone_state __inc_zone_state
#define inc_node_state __inc_node_state
#define dec_zone_state __dec_zone_state

#define set_pgdat_percpu_threshold(pgdat, callback) do { } while (0)

static inline void refresh_zone_stat_thresholds(void) { }
static inline void cpu_vm_stats_fold(int cpu) { }
static inline void quiet_vmstat(void) { }

static inline void drain_zonestat(struct zone *zone,
			struct per_cpu_pageset *pset) { }
#endif /* CONFIG_SMP */

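/*
 * Free-page accounting helper: adjusts NR_FREE_PAGES and, when the
 * pages belong to a CMA pageblock, mirrors the adjustment into
 * NR_FREE_CMA_PAGES so the CMA free count tracks the overall one.
 */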
static inline void __mod_zone_freepage_state(struct zone *zone, int nr_pages,
					     int migratetype)
{
	__mod_zone_page_state(zone, NR_FREE_PAGES, nr_pages);
	if (is_migrate_cma(migratetype))
		__mod_zone_page_state(zone, NR_FREE_CMA_PAGES, nr_pages);
}

extern const char * const vmstat_text[];

#endif /* _LINUX_VMSTAT_H */