#ifndef _LINUX_VMSTAT_H
#define _LINUX_VMSTAT_H

#include <linux/types.h>
#include <linux/percpu.h>
#include <linux/mm.h>
#include <linux/mmzone.h>
#include <linux/vm_event_item.h>
#include <linux/atomic.h>

extern int sysctl_stat_interval;

#ifdef CONFIG_VM_EVENT_COUNTERS
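/*
 * Lightweight per-cpu counters for VM events (page faults, reclaim
 * activity, ...).  The counters live in per-cpu storage and are meant to
 * be incremented only; readers sum all cpus without locking, so the
 * totals are approximate.  vm_events_fold_cpu() moves the events of a
 * cpu that goes offline into an online cpu so that nothing is lost.
 */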
struct vm_event_state {
	unsigned long event[NR_VM_EVENT_ITEMS];
};

DECLARE_PER_CPU(struct vm_event_state, vm_event_states);
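
/*
 * The __count_vm_event()/__count_vm_events() variants use the non-atomic
 * __this_cpu operations and may only be used when the caller has already
 * made the per-cpu access safe (preemption or interrupts disabled).
 * count_vm_event()/count_vm_events() use this_cpu operations and are safe
 * from any context.
 */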
static inline void __count_vm_event(enum vm_event_item item)
{
	__this_cpu_inc(vm_event_states.event[item]);
}

static inline void count_vm_event(enum vm_event_item item)
{
	this_cpu_inc(vm_event_states.event[item]);
}

static inline void __count_vm_events(enum vm_event_item item, long delta)
{
	__this_cpu_add(vm_event_states.event[item], delta);
}

static inline void count_vm_events(enum vm_event_item item, long delta)
{
	this_cpu_add(vm_event_states.event[item], delta);
}

extern void all_vm_events(unsigned long *);

extern void vm_events_fold_cpu(int cpu);

#else
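
/* Disable counters: the event accounting API compiles away to empty stubs. */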
static inline void count_vm_event(enum vm_event_item item)
{
}
static inline void count_vm_events(enum vm_event_item item, long delta)
{
}
static inline void __count_vm_event(enum vm_event_item item)
{
}
static inline void __count_vm_events(enum vm_event_item item, long delta)
{
}
static inline void all_vm_events(unsigned long *ret)
{
}
static inline void vm_events_fold_cpu(int cpu)
{
}

#endif		/* CONFIG_VM_EVENT_COUNTERS */
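
/*
 * NUMA balancing events are only accounted when CONFIG_NUMA_BALANCING is
 * enabled; otherwise the macros expand to nothing.  The (void)(y) cast
 * marks the delta argument as used so that disabled builds do not trip
 * "set but not used" warnings in callers.
 */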
#ifdef CONFIG_NUMA_BALANCING
#define count_vm_numa_event(x) count_vm_event(x)
#define count_vm_numa_events(x, y) count_vm_events(x, y)
#else
#define count_vm_numa_event(x) do {} while (0)
#define count_vm_numa_events(x, y) do { (void)(y); } while (0)
#endif		/* CONFIG_NUMA_BALANCING */
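
/*
 * Count a zone-differentiated VM event (e.g. the PGALLOC_* family) against
 * the item that matches @zone.  The per-zone items are laid out in the same
 * order as the zone types, so offsetting item##_NORMAL by
 * zone_idx(zone) - ZONE_NORMAL selects the right entry.
 */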
#define __count_zone_vm_events(item, zone, delta) \
		__count_vm_events(item##_NORMAL - ZONE_NORMAL + \
		zone_idx(zone), delta)
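
/*
 * Zone based page accounting with per cpu differentials.
 */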
extern atomic_long_t vm_stat[NR_VM_ZONE_STAT_ITEMS];

static inline void zone_page_state_add(long x, struct zone *zone,
				 enum zone_stat_item item)
{
	atomic_long_add(x, &zone->vm_stat[item]);
	atomic_long_add(x, &vm_stat[item]);
}
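
/*
 * Readers clamp negative values to zero: on SMP the per-cpu differentials
 * may not have been folded back into the atomic counters yet, so a counter
 * can be transiently negative or otherwise slightly off.
 */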
static inline unsigned long global_page_state(enum zone_stat_item item)
{
	long x = atomic_long_read(&vm_stat[item]);
#ifdef CONFIG_SMP
	if (x < 0)
		x = 0;
#endif
	return x;
}

static inline unsigned long zone_page_state(struct zone *zone,
					enum zone_stat_item item)
{
	long x = atomic_long_read(&zone->vm_stat[item]);
#ifdef CONFIG_SMP
	if (x < 0)
		x = 0;
#endif
	return x;
}
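
/*
 * More accurate version that also considers the currently pending deltas:
 * the per-cpu vm_stat_diff values are added in by looping over all online
 * cpus.  There is no synchronization, so the result is still not exact.
 */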
static inline unsigned long zone_page_state_snapshot(struct zone *zone,
					enum zone_stat_item item)
{
	long x = atomic_long_read(&zone->vm_stat[item]);

#ifdef CONFIG_SMP
	int cpu;
	for_each_online_cpu(cpu)
		x += per_cpu_ptr(zone->pageset, cpu)->vm_stat_diff[item];

	if (x < 0)
		x = 0;
#endif
	return x;
}

extern unsigned long global_reclaimable_pages(void);
extern unsigned long zone_reclaimable_pages(struct zone *zone);

#ifdef CONFIG_NUMA
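/*
 * Determine the per-node value of a stat item by summing the item over
 * all zones of the node.
 */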
static inline unsigned long node_page_state(int node,
				 enum zone_stat_item item)
{
	struct zone *zones = NODE_DATA(node)->node_zones;

	return
#ifdef CONFIG_ZONE_DMA
		zone_page_state(&zones[ZONE_DMA], item) +
#endif
#ifdef CONFIG_ZONE_DMA32
		zone_page_state(&zones[ZONE_DMA32], item) +
#endif
#ifdef CONFIG_HIGHMEM
		zone_page_state(&zones[ZONE_HIGHMEM], item) +
#endif
		zone_page_state(&zones[ZONE_NORMAL], item) +
		zone_page_state(&zones[ZONE_MOVABLE], item);
}

extern void zone_statistics(struct zone *, struct zone *, gfp_t gfp);

#else

#define node_page_state(node, item) global_page_state(item)
#define zone_statistics(_zl, _z, gfp) do { } while (0)

#endif		/* CONFIG_NUMA */

#define add_zone_page_state(__z, __i, __d) mod_zone_page_state(__z, __i, __d)
#define sub_zone_page_state(__z, __i, __d) mod_zone_page_state(__z, __i, -(__d))

extern void inc_zone_state(struct zone *, enum zone_stat_item);

#ifdef CONFIG_SMP
void __mod_zone_page_state(struct zone *, enum zone_stat_item item, int);
void __inc_zone_page_state(struct page *, enum zone_stat_item);
void __dec_zone_page_state(struct page *, enum zone_stat_item);

void mod_zone_page_state(struct zone *, enum zone_stat_item, int);
void inc_zone_page_state(struct page *, enum zone_stat_item);
void dec_zone_page_state(struct page *, enum zone_stat_item);

extern void inc_zone_state(struct zone *, enum zone_stat_item);
extern void __inc_zone_state(struct zone *, enum zone_stat_item);
extern void dec_zone_state(struct zone *, enum zone_stat_item);
extern void __dec_zone_state(struct zone *, enum zone_stat_item);

void refresh_cpu_vm_stats(int);
void refresh_zone_stat_thresholds(void);

void drain_zonestat(struct zone *zone, struct per_cpu_pageset *);

int calculate_pressure_threshold(struct zone *zone);
int calculate_normal_threshold(struct zone *zone);
void set_pgdat_percpu_threshold(pg_data_t *pgdat,
				int (*calculate_pressure)(struct zone *));
#else
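
/*
 * Single processor configuration: no per-cpu differentials are maintained,
 * so the functions below modify the zone and global atomic counters
 * directly.
 */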
static inline void __mod_zone_page_state(struct zone *zone,
			enum zone_stat_item item, int delta)
{
	zone_page_state_add(delta, zone, item);
}

static inline void __inc_zone_state(struct zone *zone, enum zone_stat_item item)
{
	atomic_long_inc(&zone->vm_stat[item]);
	atomic_long_inc(&vm_stat[item]);
}

static inline void __inc_zone_page_state(struct page *page,
			enum zone_stat_item item)
{
	__inc_zone_state(page_zone(page), item);
}

static inline void __dec_zone_state(struct zone *zone, enum zone_stat_item item)
{
	atomic_long_dec(&zone->vm_stat[item]);
	atomic_long_dec(&vm_stat[item]);
}

static inline void __dec_zone_page_state(struct page *page,
			enum zone_stat_item item)
{
	__dec_zone_state(page_zone(page), item);
}
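
/*
 * Only atomic operations are used to update the counters, so the locked
 * and unlocked variants are identical and there is no need to disable
 * interrupts.
 */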
#define inc_zone_page_state __inc_zone_page_state
#define dec_zone_page_state __dec_zone_page_state
#define mod_zone_page_state __mod_zone_page_state

#define set_pgdat_percpu_threshold(pgdat, callback) { }

static inline void refresh_cpu_vm_stats(int cpu) { }
static inline void refresh_zone_stat_thresholds(void) { }

static inline void drain_zonestat(struct zone *zone,
			struct per_cpu_pageset *pset) { }
#endif		/* CONFIG_SMP */
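
/*
 * Adjust the free page counters for @zone: NR_FREE_PAGES is always updated,
 * and NR_FREE_CMA_PAGES is updated as well when the pages sit in a CMA
 * pageblock (is_migrate_cma() migratetype).
 */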
static inline void __mod_zone_freepage_state(struct zone *zone, int nr_pages,
					     int migratetype)
{
	__mod_zone_page_state(zone, NR_FREE_PAGES, nr_pages);
	if (is_migrate_cma(migratetype))
		__mod_zone_page_state(zone, NR_FREE_CMA_PAGES, nr_pages);
}

extern const char * const vmstat_text[];

#endif /* _LINUX_VMSTAT_H */