linux/include/linux/vmstat.h

/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_VMSTAT_H
#define _LINUX_VMSTAT_H

#include <linux/types.h>
#include <linux/percpu.h>
#include <linux/mmzone.h>
#include <linux/vm_event_item.h>
#include <linux/atomic.h>

extern int sysctl_stat_interval;

#ifdef CONFIG_VM_EVENT_COUNTERS
/*
 * Lightweight per-CPU counter implementation.
 *
 * Counters should only be incremented; no critical kernel component
 * should rely on the counter values.
 *
 * Counters are handled completely inline. On many platforms the code
 * generated will simply be the increment of a global address.
 */

struct vm_event_state {
	unsigned long event[NR_VM_EVENT_ITEMS];
};

DECLARE_PER_CPU(struct vm_event_state, vm_event_states);

/*
 * vm counters are allowed to be racy. Use raw_cpu_ops to avoid the
 * local_irq_disable overhead.
 */
static inline void __count_vm_event(enum vm_event_item item)
{
	raw_cpu_inc(vm_event_states.event[item]);
}

static inline void count_vm_event(enum vm_event_item item)
{
	this_cpu_inc(vm_event_states.event[item]);
}

static inline void __count_vm_events(enum vm_event_item item, long delta)
{
	raw_cpu_add(vm_event_states.event[item], delta);
}

static inline void count_vm_events(enum vm_event_item item, long delta)
{
	this_cpu_add(vm_event_states.event[item], delta);
}
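
/*
 * Example usage (editorial note; PGFAULT is one of the vm_event_item
 * values from <linux/vm_event_item.h>):
 *
 *	count_vm_event(PGFAULT);	safe from any context
 *	__count_vm_event(PGFAULT);	cheaper, for callers that already
 *					run with preemption or interrupts
 *					disabled
 *
 * Both variants may occasionally lose an increment under concurrent
 * updates; that is acceptable since these are statistics, not
 * correctness-critical state.
 */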

extern void all_vm_events(unsigned long *);

extern void vm_events_fold_cpu(int cpu);

#else

/* Disable counters */
static inline void count_vm_event(enum vm_event_item item)
{
}
static inline void count_vm_events(enum vm_event_item item, long delta)
{
}
static inline void __count_vm_event(enum vm_event_item item)
{
}
static inline void __count_vm_events(enum vm_event_item item, long delta)
{
}
static inline void all_vm_events(unsigned long *ret)
{
}
static inline void vm_events_fold_cpu(int cpu)
{
}

#endif /* CONFIG_VM_EVENT_COUNTERS */

#ifdef CONFIG_NUMA_BALANCING
#define count_vm_numa_event(x)     count_vm_event(x)
#define count_vm_numa_events(x, y) count_vm_events(x, y)
#else
#define count_vm_numa_event(x) do {} while (0)
#define count_vm_numa_events(x, y) do { (void)(y); } while (0)
#endif /* CONFIG_NUMA_BALANCING */

#ifdef CONFIG_DEBUG_TLBFLUSH
#define count_vm_tlb_event(x)      count_vm_event(x)
#define count_vm_tlb_events(x, y)  count_vm_events(x, y)
#else
#define count_vm_tlb_event(x)     do {} while (0)
#define count_vm_tlb_events(x, y) do { (void)(y); } while (0)
#endif

#ifdef CONFIG_DEBUG_VM_VMACACHE
#define count_vm_vmacache_event(x) count_vm_event(x)
#else
#define count_vm_vmacache_event(x) do {} while (0)
#endif

#define __count_zid_vm_events(item, zid, delta) \
	__count_vm_events(item##_NORMAL - ZONE_NORMAL + zid, delta)
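
/*
 * Editorial note on the zid arithmetic above: per-zone event items are
 * laid out consecutively in enum vm_event_item, one slot per zone in
 * zone order (e.g. PGALLOC_DMA, PGALLOC_DMA32, PGALLOC_NORMAL, ...).
 * Starting from item##_NORMAL and adding (zid - ZONE_NORMAL) therefore
 * selects the slot matching the zone index. For instance:
 *
 *	__count_zid_vm_events(PGALLOC, ZONE_DMA, 1 << order)
 *
 * expands to __count_vm_events(PGALLOC_NORMAL - ZONE_NORMAL + ZONE_DMA,
 * 1 << order), which resolves to the PGALLOC_DMA counter.
 */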

/*
 * Zone and node-based page accounting with per-CPU differentials.
 */
extern atomic_long_t vm_zone_stat[NR_VM_ZONE_STAT_ITEMS];
extern atomic_long_t vm_numa_stat[NR_VM_NUMA_STAT_ITEMS];
extern atomic_long_t vm_node_stat[NR_VM_NODE_STAT_ITEMS];
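
/*
 * Example (editorial note): these global arrays are not read directly;
 * use the accessors defined below, e.g.:
 *
 *	unsigned long free  = global_zone_page_state(NR_FREE_PAGES);
 *	unsigned long dirty = global_node_page_state(NR_FILE_DIRTY);
 *
 * On SMP the accessors clamp negative sums to 0, since unfolded
 * per-CPU differentials can leave the global atomic transiently
 * behind the true value.
 */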

#ifdef CONFIG_NUMA
static inline void zone_numa_state_add(long x, struct zone *zone,
				 enum numa_stat_item item)
{
	atomic_long_add(x, &zone->vm_numa_stat[item]);
	atomic_long_add(x, &vm_numa_stat[item]);
}

static inline unsigned long global_numa_state(enum numa_stat_item item)
{
	long x = atomic_long_read(&vm_numa_stat[item]);

	return x;
}

static inline unsigned long zone_numa_state_snapshot(struct zone *zone,
					enum numa_stat_item item)
{
	long x = atomic_long_read(&zone->vm_numa_stat[item]);
	int cpu;

	for_each_online_cpu(cpu)
		x += per_cpu_ptr(zone->pageset, cpu)->vm_numa_stat_diff[item];

	return x;
}
#endif /* CONFIG_NUMA */

static inline void zone_page_state_add(long x, struct zone *zone,
				 enum zone_stat_item item)
{
	atomic_long_add(x, &zone->vm_stat[item]);
	atomic_long_add(x, &vm_zone_stat[item]);
}

static inline void node_page_state_add(long x, struct pglist_data *pgdat,
				 enum node_stat_item item)
{
	atomic_long_add(x, &pgdat->vm_stat[item]);
	atomic_long_add(x, &vm_node_stat[item]);
}

static inline unsigned long global_zone_page_state(enum zone_stat_item item)
{
	long x = atomic_long_read(&vm_zone_stat[item]);
#ifdef CONFIG_SMP
	if (x < 0)
		x = 0;
#endif
	return x;
}

static inline unsigned long global_node_page_state(enum node_stat_item item)
{
	long x = atomic_long_read(&vm_node_stat[item]);
#ifdef CONFIG_SMP
	if (x < 0)
		x = 0;
#endif
	return x;
}

static inline unsigned long zone_page_state(struct zone *zone,
					enum zone_stat_item item)
{
	long x = atomic_long_read(&zone->vm_stat[item]);
#ifdef CONFIG_SMP
	if (x < 0)
		x = 0;
#endif
	return x;
}

/*
 * More accurate version that also considers the currently pending
 * deltas. For that we need to loop over all cpus to find the current
 * deltas. There is no synchronization so the result cannot be
 * exactly accurate either.
 */
static inline unsigned long zone_page_state_snapshot(struct zone *zone,
					enum zone_stat_item item)
{
	long x = atomic_long_read(&zone->vm_stat[item]);

#ifdef CONFIG_SMP
	int cpu;
	for_each_online_cpu(cpu)
		x += per_cpu_ptr(zone->pageset, cpu)->vm_stat_diff[item];

	if (x < 0)
		x = 0;
#endif
	return x;
}

static inline unsigned long node_page_state_snapshot(pg_data_t *pgdat,
					enum node_stat_item item)
{
	long x = atomic_long_read(&pgdat->vm_stat[item]);

#ifdef CONFIG_SMP
	int cpu;
	for_each_online_cpu(cpu)
		x += per_cpu_ptr(pgdat->per_cpu_nodestats, cpu)->vm_node_stat_diff[item];

	if (x < 0)
		x = 0;
#endif
	return x;
}
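
/*
 * Editorial note: the _snapshot variants trade speed for accuracy.
 * zone_page_state() costs a single atomic read but can lag by up to
 * the per-CPU threshold on every CPU; the snapshot additionally walks
 * each online CPU's pending differential. Reserve snapshots for slow
 * paths where near-exact values matter, e.g. zone_watermark_ok_safe()
 * uses:
 *
 *	free_pages = zone_page_state_snapshot(z, NR_FREE_PAGES);
 */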

#ifdef CONFIG_NUMA
extern void __inc_numa_state(struct zone *zone, enum numa_stat_item item);
extern unsigned long sum_zone_node_page_state(int node,
					      enum zone_stat_item item);
extern unsigned long sum_zone_numa_state(int node, enum numa_stat_item item);
extern unsigned long node_page_state(struct pglist_data *pgdat,
						enum node_stat_item item);
#else
#define sum_zone_node_page_state(node, item) global_zone_page_state(item)
#define node_page_state(node, item) global_node_page_state(item)
#endif /* CONFIG_NUMA */

#define add_zone_page_state(__z, __i, __d) mod_zone_page_state(__z, __i, __d)
#define sub_zone_page_state(__z, __i, __d) mod_zone_page_state(__z, __i, -(__d))
#define add_node_page_state(__p, __i, __d) mod_node_page_state(__p, __i, __d)
#define sub_node_page_state(__p, __i, __d) mod_node_page_state(__p, __i, -(__d))

#ifdef CONFIG_SMP
void __mod_zone_page_state(struct zone *, enum zone_stat_item item, long);
void __inc_zone_page_state(struct page *, enum zone_stat_item);
void __dec_zone_page_state(struct page *, enum zone_stat_item);

void __mod_node_page_state(struct pglist_data *, enum node_stat_item item, long);
void __inc_node_page_state(struct page *, enum node_stat_item);
void __dec_node_page_state(struct page *, enum node_stat_item);

void mod_zone_page_state(struct zone *, enum zone_stat_item, long);
void inc_zone_page_state(struct page *, enum zone_stat_item);
void dec_zone_page_state(struct page *, enum zone_stat_item);

void mod_node_page_state(struct pglist_data *, enum node_stat_item, long);
void inc_node_page_state(struct page *, enum node_stat_item);
void dec_node_page_state(struct page *, enum node_stat_item);

extern void inc_node_state(struct pglist_data *, enum node_stat_item);
extern void __inc_zone_state(struct zone *, enum zone_stat_item);
extern void __inc_node_state(struct pglist_data *, enum node_stat_item);
extern void dec_zone_state(struct zone *, enum zone_stat_item);
extern void __dec_zone_state(struct zone *, enum zone_stat_item);
extern void __dec_node_state(struct pglist_data *, enum node_stat_item);

void quiet_vmstat(void);
void cpu_vm_stats_fold(int cpu);
void refresh_zone_stat_thresholds(void);

struct ctl_table;
int vmstat_refresh(struct ctl_table *, int write,
		   void __user *buffer, size_t *lenp, loff_t *ppos);

void drain_zonestat(struct zone *zone, struct per_cpu_pageset *);

int calculate_pressure_threshold(struct zone *zone);
int calculate_normal_threshold(struct zone *zone);
void set_pgdat_percpu_threshold(pg_data_t *pgdat,
				int (*calculate_pressure)(struct zone *));
#else /* CONFIG_SMP */

/*
 * We do not maintain differentials in a single processor configuration.
 * The functions directly modify the zone and global counters.
 */
static inline void __mod_zone_page_state(struct zone *zone,
			enum zone_stat_item item, long delta)
{
	zone_page_state_add(delta, zone, item);
}

static inline void __mod_node_page_state(struct pglist_data *pgdat,
			enum node_stat_item item, long delta)
{
	node_page_state_add(delta, pgdat, item);
}

static inline void __inc_zone_state(struct zone *zone, enum zone_stat_item item)
{
	atomic_long_inc(&zone->vm_stat[item]);
	atomic_long_inc(&vm_zone_stat[item]);
}

static inline void __inc_node_state(struct pglist_data *pgdat, enum node_stat_item item)
{
	atomic_long_inc(&pgdat->vm_stat[item]);
	atomic_long_inc(&vm_node_stat[item]);
}

static inline void __dec_zone_state(struct zone *zone, enum zone_stat_item item)
{
	atomic_long_dec(&zone->vm_stat[item]);
	atomic_long_dec(&vm_zone_stat[item]);
}

static inline void __dec_node_state(struct pglist_data *pgdat, enum node_stat_item item)
{
	atomic_long_dec(&pgdat->vm_stat[item]);
	atomic_long_dec(&vm_node_stat[item]);
}

static inline void __inc_zone_page_state(struct page *page,
			enum zone_stat_item item)
{
	__inc_zone_state(page_zone(page), item);
}

static inline void __inc_node_page_state(struct page *page,
			enum node_stat_item item)
{
	__inc_node_state(page_pgdat(page), item);
}

static inline void __dec_zone_page_state(struct page *page,
			enum zone_stat_item item)
{
	__dec_zone_state(page_zone(page), item);
}

static inline void __dec_node_page_state(struct page *page,
			enum node_stat_item item)
{
	__dec_node_state(page_pgdat(page), item);
}

/*
 * We only use atomic operations to update counters. So there is no need to
 * disable interrupts.
 */
#define inc_zone_page_state __inc_zone_page_state
#define dec_zone_page_state __dec_zone_page_state
#define mod_zone_page_state __mod_zone_page_state

#define inc_node_page_state __inc_node_page_state
#define dec_node_page_state __dec_node_page_state
#define mod_node_page_state __mod_node_page_state

#define inc_zone_state __inc_zone_state
#define inc_node_state __inc_node_state
#define dec_zone_state __dec_zone_state

#define set_pgdat_percpu_threshold(pgdat, callback) { }

static inline void refresh_zone_stat_thresholds(void) { }
static inline void cpu_vm_stats_fold(int cpu) { }
static inline void quiet_vmstat(void) { }

static inline void drain_zonestat(struct zone *zone,
			struct per_cpu_pageset *pset) { }
#endif		/* CONFIG_SMP */

static inline void __mod_zone_freepage_state(struct zone *zone, int nr_pages,
					     int migratetype)
{
	__mod_zone_page_state(zone, NR_FREE_PAGES, nr_pages);
	if (is_migrate_cma(migratetype))
		__mod_zone_page_state(zone, NR_FREE_CMA_PAGES, nr_pages);
}
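
/*
 * Example (editorial note): the page allocator calls this helper as
 * pages enter and leave the buddy free lists, so that CMA free pages
 * stay separately accounted, e.g.:
 *
 *	__mod_zone_freepage_state(zone, 1 << order, migratetype);	freeing
 *	__mod_zone_freepage_state(zone, -(1 << order), migratetype);	allocating
 *
 * A positive nr_pages adds to NR_FREE_PAGES (and to NR_FREE_CMA_PAGES
 * for CMA pageblocks); a negative value subtracts.
 */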

extern const char * const vmstat_text[];

#endif /* _LINUX_VMSTAT_H */