#ifndef _LINUX_VMSTAT_H
#define _LINUX_VMSTAT_H

#include <linux/types.h>
#include <linux/percpu.h>
#include <linux/mmzone.h>
#include <linux/vm_event_item.h>
#include <linux/atomic.h>
#include <linux/static_key.h>

extern int sysctl_stat_interval;

#ifdef CONFIG_NUMA
#define ENABLE_NUMA_STAT 1
#define DISABLE_NUMA_STAT 0
extern int sysctl_vm_numa_stat;
DECLARE_STATIC_KEY_TRUE(vm_numa_stat_key);
extern int sysctl_vm_numa_stat_handler(struct ctl_table *table,
		int write, void __user *buffer, size_t *length, loff_t *ppos);
#endif

struct reclaim_stat {
	unsigned nr_dirty;
	unsigned nr_unqueued_dirty;
	unsigned nr_congested;
	unsigned nr_writeback;
	unsigned nr_immediate;
	unsigned nr_activate[2];
	unsigned nr_ref_keep;
	unsigned nr_unmap_fail;
};

enum writeback_stat_item {
	NR_DIRTY_THRESHOLD,
	NR_DIRTY_BG_THRESHOLD,
	NR_VM_WRITEBACK_STAT_ITEMS,
};
#ifdef CONFIG_VM_EVENT_COUNTERS
/*
 * Lightweight per-cpu counter implementation for VM events.
 *
 * Counters should only be incremented; no critical kernel component may
 * rely on exact counter values.  They are handled completely inline: on
 * many platforms the generated code is simply an increment of a per-cpu
 * address.
 */

struct vm_event_state {
	unsigned long event[NR_VM_EVENT_ITEMS];
};

DECLARE_PER_CPU(struct vm_event_state, vm_event_states);

/*
 * VM event counters are allowed to be racy.  The raw_cpu variants avoid
 * the overhead of disabling interrupts or preemption.
 */
static inline void __count_vm_event(enum vm_event_item item)
{
	raw_cpu_inc(vm_event_states.event[item]);
}

static inline void count_vm_event(enum vm_event_item item)
{
	this_cpu_inc(vm_event_states.event[item]);
}

static inline void __count_vm_events(enum vm_event_item item, long delta)
{
	raw_cpu_add(vm_event_states.event[item], delta);
}

static inline void count_vm_events(enum vm_event_item item, long delta)
{
	this_cpu_add(vm_event_states.event[item], delta);
}
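
/*
 * Illustrative usage only (a sketch, not something defined by this header):
 * a fast path typically bumps an event with the this_cpu variants, e.g.
 *
 *	count_vm_event(PGFAULT);
 *	count_vm_events(PGACTIVATE, nr_activated);
 *
 * where "nr_activated" stands for a caller-provided count.  The
 * __count_vm_event()/__count_vm_events() forms use raw_cpu ops and are
 * intended for callers that already run with preemption or interrupts
 * disabled (a rare lost update is tolerated either way).
 */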

extern void all_vm_events(unsigned long *);

extern void vm_events_fold_cpu(int cpu);

#else

/* VM event counters are configured out: provide no-op stubs. */
static inline void count_vm_event(enum vm_event_item item)
{
}
static inline void count_vm_events(enum vm_event_item item, long delta)
{
}
static inline void __count_vm_event(enum vm_event_item item)
{
}
static inline void __count_vm_events(enum vm_event_item item, long delta)
{
}
static inline void all_vm_events(unsigned long *ret)
{
}
static inline void vm_events_fold_cpu(int cpu)
{
}

#endif /* CONFIG_VM_EVENT_COUNTERS */

#ifdef CONFIG_NUMA_BALANCING
#define count_vm_numa_event(x) count_vm_event(x)
#define count_vm_numa_events(x, y) count_vm_events(x, y)
#else
#define count_vm_numa_event(x) do {} while (0)
#define count_vm_numa_events(x, y) do { (void)(y); } while (0)
#endif

#ifdef CONFIG_DEBUG_TLBFLUSH
#define count_vm_tlb_event(x) count_vm_event(x)
#define count_vm_tlb_events(x, y) count_vm_events(x, y)
#else
#define count_vm_tlb_event(x) do {} while (0)
#define count_vm_tlb_events(x, y) do { (void)(y); } while (0)
#endif

#ifdef CONFIG_DEBUG_VM_VMACACHE
#define count_vm_vmacache_event(x) count_vm_event(x)
#else
#define count_vm_vmacache_event(x) do {} while (0)
#endif

#define __count_zid_vm_events(item, zid, delta) \
	__count_vm_events(item##_NORMAL - ZONE_NORMAL + zid, delta)

/*
 * Zone and node based page accounting with per-cpu differentials.
 */
extern atomic_long_t vm_zone_stat[NR_VM_ZONE_STAT_ITEMS];
extern atomic_long_t vm_numa_stat[NR_VM_NUMA_STAT_ITEMS];
extern atomic_long_t vm_node_stat[NR_VM_NODE_STAT_ITEMS];

#ifdef CONFIG_NUMA
static inline void zone_numa_state_add(long x, struct zone *zone,
				 enum numa_stat_item item)
{
	atomic_long_add(x, &zone->vm_numa_stat[item]);
	atomic_long_add(x, &vm_numa_stat[item]);
}

static inline unsigned long global_numa_state(enum numa_stat_item item)
{
	long x = atomic_long_read(&vm_numa_stat[item]);

	return x;
}

static inline unsigned long zone_numa_state_snapshot(struct zone *zone,
					enum numa_stat_item item)
{
	long x = atomic_long_read(&zone->vm_numa_stat[item]);
	int cpu;

	for_each_online_cpu(cpu)
		x += per_cpu_ptr(zone->pageset, cpu)->vm_numa_stat_diff[item];

	return x;
}
#endif /* CONFIG_NUMA */

static inline void zone_page_state_add(long x, struct zone *zone,
				 enum zone_stat_item item)
{
	atomic_long_add(x, &zone->vm_stat[item]);
	atomic_long_add(x, &vm_zone_stat[item]);
}

static inline void node_page_state_add(long x, struct pglist_data *pgdat,
				 enum node_stat_item item)
{
	atomic_long_add(x, &pgdat->vm_stat[item]);
	atomic_long_add(x, &vm_node_stat[item]);
}

static inline unsigned long global_zone_page_state(enum zone_stat_item item)
{
	long x = atomic_long_read(&vm_zone_stat[item]);
#ifdef CONFIG_SMP
	if (x < 0)
		x = 0;
#endif
	return x;
}

static inline unsigned long global_node_page_state(enum node_stat_item item)
{
	long x = atomic_long_read(&vm_node_stat[item]);
#ifdef CONFIG_SMP
	if (x < 0)
		x = 0;
#endif
	return x;
}

static inline unsigned long zone_page_state(struct zone *zone,
					enum zone_stat_item item)
{
	long x = atomic_long_read(&zone->vm_stat[item]);
#ifdef CONFIG_SMP
	if (x < 0)
		x = 0;
#endif
	return x;
}

/*
 * More accurate version of zone_page_state() that also folds in the
 * per-cpu deltas still pending for the zone.  This requires walking all
 * online cpus and, since there is no synchronization, the result is not
 * exact either.
 */
static inline unsigned long zone_page_state_snapshot(struct zone *zone,
					enum zone_stat_item item)
{
	long x = atomic_long_read(&zone->vm_stat[item]);

#ifdef CONFIG_SMP
	int cpu;
	for_each_online_cpu(cpu)
		x += per_cpu_ptr(zone->pageset, cpu)->vm_stat_diff[item];

	if (x < 0)
		x = 0;
#endif
	return x;
}
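
/*
 * Illustrative comparison (a sketch, not code defined here): callers that
 * can tolerate a slightly stale value use zone_page_state(), while callers
 * that must not act on stale counters pay for the snapshot, e.g.
 *
 *	if (zone_page_state_snapshot(zone, NR_FREE_PAGES) < watermark)
 *		...
 *
 * where "watermark" stands for whatever threshold the caller computes.
 */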

#ifdef CONFIG_NUMA
extern void __inc_numa_state(struct zone *zone, enum numa_stat_item item);
extern unsigned long sum_zone_node_page_state(int node,
					      enum zone_stat_item item);
extern unsigned long sum_zone_numa_state(int node, enum numa_stat_item item);
extern unsigned long node_page_state(struct pglist_data *pgdat,
				     enum node_stat_item item);
#else
#define sum_zone_node_page_state(node, item) global_zone_page_state(item)
#define node_page_state(node, item) global_node_page_state(item)
#endif /* CONFIG_NUMA */

#ifdef CONFIG_SMP
void __mod_zone_page_state(struct zone *, enum zone_stat_item item, long);
void __inc_zone_page_state(struct page *, enum zone_stat_item);
void __dec_zone_page_state(struct page *, enum zone_stat_item);

void __mod_node_page_state(struct pglist_data *, enum node_stat_item item, long);
void __inc_node_page_state(struct page *, enum node_stat_item);
void __dec_node_page_state(struct page *, enum node_stat_item);

void mod_zone_page_state(struct zone *, enum zone_stat_item, long);
void inc_zone_page_state(struct page *, enum zone_stat_item);
void dec_zone_page_state(struct page *, enum zone_stat_item);

void mod_node_page_state(struct pglist_data *, enum node_stat_item, long);
void inc_node_page_state(struct page *, enum node_stat_item);
void dec_node_page_state(struct page *, enum node_stat_item);
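
/*
 * Illustrative usage only (a sketch): the double-underscore variants assume
 * the caller already has interrupts (or at least preemption) disabled; the
 * plain variants handle that themselves, e.g.
 *
 *	__mod_node_page_state(page_pgdat(page), NR_FILE_PAGES, nr_pages);
 *	mod_zone_page_state(page_zone(page), NR_MLOCK, -1);
 *
 * NR_FILE_PAGES and NR_MLOCK are merely examples of node and zone stat
 * items; "nr_pages" stands for a caller-provided count.
 */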

extern void inc_node_state(struct pglist_data *, enum node_stat_item);
extern void __inc_zone_state(struct zone *, enum zone_stat_item);
extern void __inc_node_state(struct pglist_data *, enum node_stat_item);
extern void dec_zone_state(struct zone *, enum zone_stat_item);
extern void __dec_zone_state(struct zone *, enum zone_stat_item);
extern void __dec_node_state(struct pglist_data *, enum node_stat_item);

void quiet_vmstat(void);
void cpu_vm_stats_fold(int cpu);
void refresh_zone_stat_thresholds(void);

struct ctl_table;
int vmstat_refresh(struct ctl_table *, int write,
		   void __user *buffer, size_t *lenp, loff_t *ppos);

void drain_zonestat(struct zone *zone, struct per_cpu_pageset *);

int calculate_pressure_threshold(struct zone *zone);
int calculate_normal_threshold(struct zone *zone);
void set_pgdat_percpu_threshold(pg_data_t *pgdat,
				int (*calculate_pressure)(struct zone *));
#else

/*
 * We do not maintain differentials in a single processor configuration.
 * The functions directly modify the zone and global counters.
 */
static inline void __mod_zone_page_state(struct zone *zone,
			enum zone_stat_item item, long delta)
{
	zone_page_state_add(delta, zone, item);
}

static inline void __mod_node_page_state(struct pglist_data *pgdat,
			enum node_stat_item item, long delta)
{
	node_page_state_add(delta, pgdat, item);
}

static inline void __inc_zone_state(struct zone *zone, enum zone_stat_item item)
{
	atomic_long_inc(&zone->vm_stat[item]);
	atomic_long_inc(&vm_zone_stat[item]);
}

static inline void __inc_node_state(struct pglist_data *pgdat, enum node_stat_item item)
{
	atomic_long_inc(&pgdat->vm_stat[item]);
	atomic_long_inc(&vm_node_stat[item]);
}

static inline void __dec_zone_state(struct zone *zone, enum zone_stat_item item)
{
	atomic_long_dec(&zone->vm_stat[item]);
	atomic_long_dec(&vm_zone_stat[item]);
}

static inline void __dec_node_state(struct pglist_data *pgdat, enum node_stat_item item)
{
	atomic_long_dec(&pgdat->vm_stat[item]);
	atomic_long_dec(&vm_node_stat[item]);
}

static inline void __inc_zone_page_state(struct page *page,
			enum zone_stat_item item)
{
	__inc_zone_state(page_zone(page), item);
}

static inline void __inc_node_page_state(struct page *page,
			enum node_stat_item item)
{
	__inc_node_state(page_pgdat(page), item);
}

static inline void __dec_zone_page_state(struct page *page,
			enum zone_stat_item item)
{
	__dec_zone_state(page_zone(page), item);
}

static inline void __dec_node_page_state(struct page *page,
			enum node_stat_item item)
{
	__dec_node_state(page_pgdat(page), item);
}

/*
 * We only use atomic operations to update counters, so there is no need to
 * disable interrupts.
 */
#define inc_zone_page_state __inc_zone_page_state
#define dec_zone_page_state __dec_zone_page_state
#define mod_zone_page_state __mod_zone_page_state

#define inc_node_page_state __inc_node_page_state
#define dec_node_page_state __dec_node_page_state
#define mod_node_page_state __mod_node_page_state

#define inc_zone_state __inc_zone_state
#define inc_node_state __inc_node_state
#define dec_zone_state __dec_zone_state

#define set_pgdat_percpu_threshold(pgdat, callback) { }

static inline void refresh_zone_stat_thresholds(void) { }
static inline void cpu_vm_stats_fold(int cpu) { }
static inline void quiet_vmstat(void) { }

static inline void drain_zonestat(struct zone *zone,
			struct per_cpu_pageset *pset) { }
#endif /* CONFIG_SMP */

static inline void __mod_zone_freepage_state(struct zone *zone, int nr_pages,
					     int migratetype)
{
	__mod_zone_page_state(zone, NR_FREE_PAGES, nr_pages);
	if (is_migrate_cma(migratetype))
		__mod_zone_page_state(zone, NR_FREE_CMA_PAGES, nr_pages);
}
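
/*
 * Illustrative usage only (a sketch): the page allocator adjusts the free
 * page counters when a block of pages enters or leaves the free lists, e.g.
 *
 *	__mod_zone_freepage_state(zone, 1 << order, migratetype);
 *	__mod_zone_freepage_state(zone, -(1 << order), migratetype);
 *
 * A negative nr_pages removes pages; CMA pageblocks additionally adjust
 * NR_FREE_CMA_PAGES.
 */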

extern const char * const vmstat_text[];

static inline const char *zone_stat_name(enum zone_stat_item item)
{
	return vmstat_text[item];
}

#ifdef CONFIG_NUMA
static inline const char *numa_stat_name(enum numa_stat_item item)
{
	return vmstat_text[NR_VM_ZONE_STAT_ITEMS +
			   item];
}
#endif /* CONFIG_NUMA */

static inline const char *node_stat_name(enum node_stat_item item)
{
	return vmstat_text[NR_VM_ZONE_STAT_ITEMS +
			   NR_VM_NUMA_STAT_ITEMS +
			   item];
}

static inline const char *lru_list_name(enum lru_list lru)
{
	return node_stat_name(NR_LRU_BASE + lru) + 3; /* skip "nr_" */
}

static inline const char *writeback_stat_name(enum writeback_stat_item item)
{
	return vmstat_text[NR_VM_ZONE_STAT_ITEMS +
			   NR_VM_NUMA_STAT_ITEMS +
			   NR_VM_NODE_STAT_ITEMS +
			   item];
}

#if defined(CONFIG_VM_EVENT_COUNTERS) || defined(CONFIG_MEMCG)
static inline const char *vm_event_name(enum vm_event_item item)
{
	return vmstat_text[NR_VM_ZONE_STAT_ITEMS +
			   NR_VM_NUMA_STAT_ITEMS +
			   NR_VM_NODE_STAT_ITEMS +
			   NR_VM_WRITEBACK_STAT_ITEMS +
			   item];
}
#endif

#endif /* _LINUX_VMSTAT_H */