/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_VMSTAT_H
#define _LINUX_VMSTAT_H

#include <linux/types.h>
#include <linux/percpu.h>
#include <linux/mmzone.h>
#include <linux/vm_event_item.h>
#include <linux/atomic.h>
#include <linux/static_key.h>

extern int sysctl_stat_interval;

#ifdef CONFIG_NUMA
#define ENABLE_NUMA_STAT 1
#define DISABLE_NUMA_STAT 0
extern int sysctl_vm_numa_stat;
DECLARE_STATIC_KEY_TRUE(vm_numa_stat_key);
int sysctl_vm_numa_stat_handler(struct ctl_table *table, int write,
		void *buffer, size_t *length, loff_t *ppos);
#endif

struct reclaim_stat {
	unsigned nr_dirty;
	unsigned nr_unqueued_dirty;
	unsigned nr_congested;
	unsigned nr_writeback;
	unsigned nr_immediate;
	unsigned nr_pageout;
	unsigned nr_activate[2];
	unsigned nr_ref_keep;
	unsigned nr_unmap_fail;
	unsigned nr_lazyfree_fail;
};

enum writeback_stat_item {
	NR_DIRTY_THRESHOLD,
	NR_DIRTY_BG_THRESHOLD,
	NR_VM_WRITEBACK_STAT_ITEMS,
};
#ifdef CONFIG_VM_EVENT_COUNTERS
/*
 * Light weight per cpu counter implementation.
 *
 * Counters should only be incremented and no critical kernel component
 * should rely on the counter values.
 *
 * Counters are handled completely inline. On many platforms the code
 * generated will simply be the increment of a global address.
 */

struct vm_event_state {
	unsigned long event[NR_VM_EVENT_ITEMS];
};

DECLARE_PER_CPU(struct vm_event_state, vm_event_states);

/*
 * vm counters are allowed to be racy. Use raw_cpu_ops to avoid the
 * local_irq_disable overhead.
 */
static inline void __count_vm_event(enum vm_event_item item)
{
	raw_cpu_inc(vm_event_states.event[item]);
}

static inline void count_vm_event(enum vm_event_item item)
{
	this_cpu_inc(vm_event_states.event[item]);
}

static inline void __count_vm_events(enum vm_event_item item, long delta)
{
	raw_cpu_add(vm_event_states.event[item], delta);
}

static inline void count_vm_events(enum vm_event_item item, long delta)
{
	this_cpu_add(vm_event_states.event[item], delta);
}
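
/*
 * Illustrative usage (not part of this header): a typical caller bumps
 * an event with
 *
 *	count_vm_event(PGFAULT);
 *
 * while code that already runs with preemption or interrupts disabled
 * can use the cheaper __count_vm_event(PGFAULT), since the raw_cpu op
 * does not protect against migration; the counters tolerate the race.
 */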

extern void all_vm_events(unsigned long *);

extern void vm_events_fold_cpu(int cpu);

#else

/* Disable counters */
static inline void count_vm_event(enum vm_event_item item)
{
}
static inline void count_vm_events(enum vm_event_item item, long delta)
{
}
static inline void __count_vm_event(enum vm_event_item item)
{
}
static inline void __count_vm_events(enum vm_event_item item, long delta)
{
}
static inline void all_vm_events(unsigned long *ret)
{
}
static inline void vm_events_fold_cpu(int cpu)
{
}

#endif		/* CONFIG_VM_EVENT_COUNTERS */

#ifdef CONFIG_NUMA_BALANCING
#define count_vm_numa_event(x) count_vm_event(x)
#define count_vm_numa_events(x, y) count_vm_events(x, y)
#else
#define count_vm_numa_event(x) do {} while (0)
#define count_vm_numa_events(x, y) do { (void)(y); } while (0)
#endif

#ifdef CONFIG_DEBUG_TLBFLUSH
#define count_vm_tlb_event(x) count_vm_event(x)
#define count_vm_tlb_events(x, y) count_vm_events(x, y)
#else
#define count_vm_tlb_event(x) do {} while (0)
#define count_vm_tlb_events(x, y) do { (void)(y); } while (0)
#endif

#ifdef CONFIG_DEBUG_VM_VMACACHE
#define count_vm_vmacache_event(x) count_vm_event(x)
#else
#define count_vm_vmacache_event(x) do {} while (0)
#endif

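/*
 * The zone-specific event items in enum vm_event_item are assumed to be
 * laid out contiguously in zone order, so the macro below derives the
 * item for a given zone id from the _NORMAL variant. For example,
 * __count_zid_vm_events(PGALLOC, ZONE_DMA, 1) resolves to
 * __count_vm_events(PGALLOC_DMA, 1).
 */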
#define __count_zid_vm_events(item, zid, delta) \
	__count_vm_events(item##_NORMAL - ZONE_NORMAL + zid, delta)

/*
 * Zone and node-based page accounting with per cpu differentials.
 */
extern atomic_long_t vm_zone_stat[NR_VM_ZONE_STAT_ITEMS];
extern atomic_long_t vm_numa_stat[NR_VM_NUMA_STAT_ITEMS];
extern atomic_long_t vm_node_stat[NR_VM_NODE_STAT_ITEMS];

#ifdef CONFIG_NUMA
static inline void zone_numa_state_add(long x, struct zone *zone,
				 enum numa_stat_item item)
{
	atomic_long_add(x, &zone->vm_numa_stat[item]);
	atomic_long_add(x, &vm_numa_stat[item]);
}

static inline unsigned long global_numa_state(enum numa_stat_item item)
{
	long x = atomic_long_read(&vm_numa_stat[item]);

	return x;
}

static inline unsigned long zone_numa_state_snapshot(struct zone *zone,
					enum numa_stat_item item)
{
	long x = atomic_long_read(&zone->vm_numa_stat[item]);
	int cpu;

	for_each_online_cpu(cpu)
		x += per_cpu_ptr(zone->pageset, cpu)->vm_numa_stat_diff[item];

	return x;
}
#endif

static inline void zone_page_state_add(long x, struct zone *zone,
				 enum zone_stat_item item)
{
	atomic_long_add(x, &zone->vm_stat[item]);
	atomic_long_add(x, &vm_zone_stat[item]);
}

static inline void node_page_state_add(long x, struct pglist_data *pgdat,
				 enum node_stat_item item)
{
	atomic_long_add(x, &pgdat->vm_stat[item]);
	atomic_long_add(x, &vm_node_stat[item]);
}

static inline unsigned long global_zone_page_state(enum zone_stat_item item)
{
	long x = atomic_long_read(&vm_zone_stat[item]);
#ifdef CONFIG_SMP
	if (x < 0)
		x = 0;
#endif
	return x;
}

static inline unsigned long global_node_page_state(enum node_stat_item item)
{
	long x = atomic_long_read(&vm_node_stat[item]);
#ifdef CONFIG_SMP
	if (x < 0)
		x = 0;
#endif
	return x;
}

static inline unsigned long zone_page_state(struct zone *zone,
					enum zone_stat_item item)
{
	long x = atomic_long_read(&zone->vm_stat[item]);
#ifdef CONFIG_SMP
	if (x < 0)
		x = 0;
#endif
	return x;
}

/*
 * More accurate version that also considers the currently pending
 * deltas. For that we need to loop over all cpus to find the current
 * deltas. There is no synchronization so the result cannot be
 * exactly accurate either.
 */
static inline unsigned long zone_page_state_snapshot(struct zone *zone,
					enum zone_stat_item item)
{
	long x = atomic_long_read(&zone->vm_stat[item]);

#ifdef CONFIG_SMP
	int cpu;
	for_each_online_cpu(cpu)
		x += per_cpu_ptr(zone->pageset, cpu)->vm_stat_diff[item];

	if (x < 0)
		x = 0;
#endif
	return x;
}
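
/*
 * Illustrative pattern (not defined here): callers that can tolerate a
 * stale value read the cheap counter first and only pay for the
 * snapshot when the result looks critical, e.g.
 *
 *	long free = zone_page_state(zone, NR_FREE_PAGES);
 *	if (free < min_wmark_pages(zone))
 *		free = zone_page_state_snapshot(zone, NR_FREE_PAGES);
 *
 * The snapshot folds in the pending per-cpu deltas but is still racy.
 */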

#ifdef CONFIG_NUMA
extern void __inc_numa_state(struct zone *zone, enum numa_stat_item item);
extern unsigned long sum_zone_node_page_state(int node,
					      enum zone_stat_item item);
extern unsigned long sum_zone_numa_state(int node, enum numa_stat_item item);
extern unsigned long node_page_state(struct pglist_data *pgdat,
						enum node_stat_item item);
#else
#define sum_zone_node_page_state(node, item) global_zone_page_state(item)
#define node_page_state(node, item) global_node_page_state(item)
#endif /* CONFIG_NUMA */

#ifdef CONFIG_SMP
void __mod_zone_page_state(struct zone *, enum zone_stat_item item, long);
void __inc_zone_page_state(struct page *, enum zone_stat_item);
void __dec_zone_page_state(struct page *, enum zone_stat_item);

void __mod_node_page_state(struct pglist_data *, enum node_stat_item item, long);
void __inc_node_page_state(struct page *, enum node_stat_item);
void __dec_node_page_state(struct page *, enum node_stat_item);

void mod_zone_page_state(struct zone *, enum zone_stat_item, long);
void inc_zone_page_state(struct page *, enum zone_stat_item);
void dec_zone_page_state(struct page *, enum zone_stat_item);

void mod_node_page_state(struct pglist_data *, enum node_stat_item, long);
void inc_node_page_state(struct page *, enum node_stat_item);
void dec_node_page_state(struct page *, enum node_stat_item);

extern void inc_node_state(struct pglist_data *, enum node_stat_item);
extern void __inc_zone_state(struct zone *, enum zone_stat_item);
extern void __inc_node_state(struct pglist_data *, enum node_stat_item);
extern void dec_zone_state(struct zone *, enum zone_stat_item);
extern void __dec_zone_state(struct zone *, enum zone_stat_item);
extern void __dec_node_state(struct pglist_data *, enum node_stat_item);

void quiet_vmstat(void);
void cpu_vm_stats_fold(int cpu);
void refresh_zone_stat_thresholds(void);

struct ctl_table;
int vmstat_refresh(struct ctl_table *, int write, void *buffer, size_t *lenp,
		loff_t *ppos);

void drain_zonestat(struct zone *zone, struct per_cpu_pageset *);

int calculate_pressure_threshold(struct zone *zone);
int calculate_normal_threshold(struct zone *zone);
void set_pgdat_percpu_threshold(pg_data_t *pgdat,
				int (*calculate_pressure)(struct zone *));
#else

/*
 * We do not maintain differentials in a single processor configuration.
 * The functions directly modify the zone and global counters.
 */
static inline void __mod_zone_page_state(struct zone *zone,
			enum zone_stat_item item, long delta)
{
	zone_page_state_add(delta, zone, item);
}

static inline void __mod_node_page_state(struct pglist_data *pgdat,
			enum node_stat_item item, int delta)
{
	node_page_state_add(delta, pgdat, item);
}

static inline void __inc_zone_state(struct zone *zone, enum zone_stat_item item)
{
	atomic_long_inc(&zone->vm_stat[item]);
	atomic_long_inc(&vm_zone_stat[item]);
}

static inline void __inc_node_state(struct pglist_data *pgdat, enum node_stat_item item)
{
	atomic_long_inc(&pgdat->vm_stat[item]);
	atomic_long_inc(&vm_node_stat[item]);
}

static inline void __dec_zone_state(struct zone *zone, enum zone_stat_item item)
{
	atomic_long_dec(&zone->vm_stat[item]);
	atomic_long_dec(&vm_zone_stat[item]);
}

static inline void __dec_node_state(struct pglist_data *pgdat, enum node_stat_item item)
{
	atomic_long_dec(&pgdat->vm_stat[item]);
	atomic_long_dec(&vm_node_stat[item]);
}

static inline void __inc_zone_page_state(struct page *page,
			enum zone_stat_item item)
{
	__inc_zone_state(page_zone(page), item);
}

static inline void __inc_node_page_state(struct page *page,
			enum node_stat_item item)
{
	__inc_node_state(page_pgdat(page), item);
}

static inline void __dec_zone_page_state(struct page *page,
			enum zone_stat_item item)
{
	__dec_zone_state(page_zone(page), item);
}

static inline void __dec_node_page_state(struct page *page,
			enum node_stat_item item)
{
	__dec_node_state(page_pgdat(page), item);
}

/*
 * We only use atomic operations to update counters. So there is no need to
 * disable interrupts.
 */
#define inc_zone_page_state __inc_zone_page_state
#define dec_zone_page_state __dec_zone_page_state
#define mod_zone_page_state __mod_zone_page_state

#define inc_node_page_state __inc_node_page_state
#define dec_node_page_state __dec_node_page_state
#define mod_node_page_state __mod_node_page_state

#define inc_zone_state __inc_zone_state
#define inc_node_state __inc_node_state
#define dec_zone_state __dec_zone_state

#define set_pgdat_percpu_threshold(pgdat, callback) { }

static inline void refresh_zone_stat_thresholds(void) { }
static inline void cpu_vm_stats_fold(int cpu) { }
static inline void quiet_vmstat(void) { }

static inline void drain_zonestat(struct zone *zone,
			struct per_cpu_pageset *pset) { }
#endif		/* CONFIG_SMP */

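/*
 * Free-page accounting keeps NR_FREE_CMA_PAGES in sync with
 * NR_FREE_PAGES for pages on CMA pageblocks. For example, assuming
 * CONFIG_CMA (where is_migrate_cma() can be true), freeing one CMA page
 * would be accounted with __mod_zone_freepage_state(zone, 1,
 * MIGRATE_CMA), which bumps both counters.
 */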
static inline void __mod_zone_freepage_state(struct zone *zone, int nr_pages,
					     int migratetype)
{
	__mod_zone_page_state(zone, NR_FREE_PAGES, nr_pages);
	if (is_migrate_cma(migratetype))
		__mod_zone_page_state(zone, NR_FREE_CMA_PAGES, nr_pages);
}

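/*
 * vmstat_text is ordered to match the counter layout above: the zone
 * stat items first, then (on NUMA) the numa stat items, the node stat
 * items, the writeback stat items and finally the vm event names. The
 * *_name() helpers below index into it with the matching offsets.
 */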
extern const char * const vmstat_text[];

static inline const char *zone_stat_name(enum zone_stat_item item)
{
	return vmstat_text[item];
}

#ifdef CONFIG_NUMA
static inline const char *numa_stat_name(enum numa_stat_item item)
{
	return vmstat_text[NR_VM_ZONE_STAT_ITEMS +
			   item];
}
#endif /* CONFIG_NUMA */

static inline const char *node_stat_name(enum node_stat_item item)
{
	return vmstat_text[NR_VM_ZONE_STAT_ITEMS +
			   NR_VM_NUMA_STAT_ITEMS +
			   item];
}

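/*
 * Example: node_stat_name(NR_INACTIVE_ANON) is "nr_inactive_anon", so
 * skipping the three-character "nr_" prefix below yields
 * "inactive_anon" as the LRU list name.
 */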
static inline const char *lru_list_name(enum lru_list lru)
{
	return node_stat_name(NR_LRU_BASE + lru) + 3; /* skip "nr_" */
}

static inline const char *writeback_stat_name(enum writeback_stat_item item)
{
	return vmstat_text[NR_VM_ZONE_STAT_ITEMS +
			   NR_VM_NUMA_STAT_ITEMS +
			   NR_VM_NODE_STAT_ITEMS +
			   item];
}

#if defined(CONFIG_VM_EVENT_COUNTERS) || defined(CONFIG_MEMCG)
static inline const char *vm_event_name(enum vm_event_item item)
{
	return vmstat_text[NR_VM_ZONE_STAT_ITEMS +
			   NR_VM_NUMA_STAT_ITEMS +
			   NR_VM_NODE_STAT_ITEMS +
			   NR_VM_WRITEBACK_STAT_ITEMS +
			   item];
}
#endif /* CONFIG_VM_EVENT_COUNTERS || CONFIG_MEMCG */

#endif /* _LINUX_VMSTAT_H */