1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16#include <asm/uaccess.h>
17#include <linux/bitops.h>
18#include <linux/module.h>
19#include <linux/types.h>
20#include <linux/kernel.h>
21#include <linux/jiffies.h>
22#include <linux/string.h>
23#include <linux/mm.h>
24#include <linux/socket.h>
25#include <linux/sockios.h>
26#include <linux/in.h>
27#include <linux/errno.h>
28#include <linux/interrupt.h>
29#include <linux/netdevice.h>
30#include <linux/skbuff.h>
31#include <linux/rtnetlink.h>
32#include <linux/init.h>
33#include <linux/rbtree.h>
34#include <linux/slab.h>
35#include <net/sock.h>
36#include <net/gen_stats.h>
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
/*
 * Estimators are grouped by sampling interval: parm->interval in [-2, 3]
 * maps to list index 0..EST_MAX_INTERVAL; the idx timer fires every
 * (HZ/4)<<idx jiffies (see est_timer()).
 */
#define EST_MAX_INTERVAL	5

struct gen_estimator
{
	struct list_head		list;		/* entry on elist[idx].list (RCU-traversed) */
	struct gnet_stats_basic_packed	*bstats;	/* source counters; NULLed under est_lock on kill */
	struct gnet_stats_rate_est64	*rate_est;	/* destination for smoothed bps/pps */
	spinlock_t			*stats_lock;	/* caller's lock protecting *bstats updates */
	int				ewma_log;	/* EWMA smoothing shift (larger = slower) */
	u64				last_bytes;	/* byte counter at previous sample */
	u64				avbps;		/* EWMA of bytes/sec, scaled by 2^5 */
	u32				last_packets;	/* packet counter at previous sample */
	u32				avpps;		/* EWMA of packets/sec, scaled by 2^10 */
	struct rcu_head			e_rcu;		/* deferred free via kfree_rcu() */
	struct rb_node			node;		/* entry in est_root, keyed by bstats pointer */
};

struct gen_estimator_head
{
	struct timer_list	timer;	/* re-armed while the list below is non-empty */
	struct list_head	list;	/* all estimators sharing this interval index */
};

/* One head per sampling-interval index 0..EST_MAX_INTERVAL */
static struct gen_estimator_head elist[EST_MAX_INTERVAL+1];

/* Protects est->bstats against being cleared while est_timer() reads it */
static DEFINE_RWLOCK(est_lock);

/* rbtree of all active estimators, keyed by bstats pointer; both the
 * tree and the elist/timers are serialized by est_tree_lock.
 */
static struct rb_root est_root = RB_ROOT;
static DEFINE_SPINLOCK(est_tree_lock);
110
/*
 * est_timer - per-interval timer callback refreshing EWMA rate estimates
 * @arg: interval index into elist[] (0..EST_MAX_INTERVAL)
 *
 * Walks every estimator registered on this interval under RCU, samples
 * the caller-owned counters and updates the exponentially weighted
 * moving averages.  Lock order per entry: e->stats_lock (guards the
 * counters) then est_lock (guards e->bstats against a concurrent
 * gen_kill_estimator() clearing it).
 */
static void est_timer(unsigned long arg)
{
	int idx = (int)arg;
	struct gen_estimator *e;

	rcu_read_lock();
	list_for_each_entry_rcu(e, &elist[idx].list, list) {
		u64 nbytes;
		u64 brate;
		u32 npackets;
		u32 rate;

		spin_lock(e->stats_lock);
		read_lock(&est_lock);
		if (e->bstats == NULL)
			goto skip;	/* estimator is being torn down */

		nbytes = e->bstats->bytes;
		npackets = e->bstats->packets;
		/* Interval length is (HZ/4)<<idx jiffies = 2^idx/4 seconds,
		 * so delta<<(7-idx) is bytes/sec scaled by 2^5.
		 */
		brate = (nbytes - e->last_bytes)<<(7 - idx);
		e->last_bytes = nbytes;
		/* EWMA step: av += (sample - av) >> ewma_log */
		e->avbps += (brate >> e->ewma_log) - (e->avbps >> e->ewma_log);
		/* Round and drop the 2^5 scaling for the exported value */
		e->rate_est->bps = (e->avbps+0xF)>>5;

		/* Same scheme for packets, scaled by 2^10 */
		rate = (npackets - e->last_packets)<<(12 - idx);
		e->last_packets = npackets;
		e->avpps += (rate >> e->ewma_log) - (e->avpps >> e->ewma_log);
		e->rate_est->pps = (e->avpps+0x1FF)>>10;
skip:
		read_unlock(&est_lock);
		spin_unlock(e->stats_lock);
	}

	/* Re-arm only while at least one estimator remains on this interval */
	if (!list_empty(&elist[idx].list))
		mod_timer(&elist[idx].timer, jiffies + ((HZ/4) << idx));
	rcu_read_unlock();
}
148
149static void gen_add_node(struct gen_estimator *est)
150{
151 struct rb_node **p = &est_root.rb_node, *parent = NULL;
152
153 while (*p) {
154 struct gen_estimator *e;
155
156 parent = *p;
157 e = rb_entry(parent, struct gen_estimator, node);
158
159 if (est->bstats > e->bstats)
160 p = &parent->rb_right;
161 else
162 p = &parent->rb_left;
163 }
164 rb_link_node(&est->node, parent, p);
165 rb_insert_color(&est->node, &est_root);
166}
167
168static
169struct gen_estimator *gen_find_node(const struct gnet_stats_basic_packed *bstats,
170 const struct gnet_stats_rate_est64 *rate_est)
171{
172 struct rb_node *p = est_root.rb_node;
173
174 while (p) {
175 struct gen_estimator *e;
176
177 e = rb_entry(p, struct gen_estimator, node);
178
179 if (bstats > e->bstats)
180 p = p->rb_right;
181 else if (bstats < e->bstats || rate_est != e->rate_est)
182 p = p->rb_left;
183 else
184 return e;
185 }
186 return NULL;
187}
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205int gen_new_estimator(struct gnet_stats_basic_packed *bstats,
206 struct gnet_stats_rate_est64 *rate_est,
207 spinlock_t *stats_lock,
208 struct nlattr *opt)
209{
210 struct gen_estimator *est;
211 struct gnet_estimator *parm = nla_data(opt);
212 int idx;
213
214 if (nla_len(opt) < sizeof(*parm))
215 return -EINVAL;
216
217 if (parm->interval < -2 || parm->interval > 3)
218 return -EINVAL;
219
220 est = kzalloc(sizeof(*est), GFP_KERNEL);
221 if (est == NULL)
222 return -ENOBUFS;
223
224 idx = parm->interval + 2;
225 est->bstats = bstats;
226 est->rate_est = rate_est;
227 est->stats_lock = stats_lock;
228 est->ewma_log = parm->ewma_log;
229 est->last_bytes = bstats->bytes;
230 est->avbps = rate_est->bps<<5;
231 est->last_packets = bstats->packets;
232 est->avpps = rate_est->pps<<10;
233
234 spin_lock_bh(&est_tree_lock);
235 if (!elist[idx].timer.function) {
236 INIT_LIST_HEAD(&elist[idx].list);
237 setup_timer(&elist[idx].timer, est_timer, idx);
238 }
239
240 if (list_empty(&elist[idx].list))
241 mod_timer(&elist[idx].timer, jiffies + ((HZ/4) << idx));
242
243 list_add_rcu(&est->list, &elist[idx].list);
244 gen_add_node(est);
245 spin_unlock_bh(&est_tree_lock);
246
247 return 0;
248}
249EXPORT_SYMBOL(gen_new_estimator);
250
251
252
253
254
255
256
257
258
259
/**
 * gen_kill_estimator - remove rate estimator(s)
 * @bstats: basic statistics the estimator was created with
 * @rate_est: rate estimator statistics the estimator was created with
 *
 * Removes every estimator registered for the (@bstats, @rate_est)
 * pair.  Each entry's bstats pointer is cleared under est_lock first,
 * so a concurrent est_timer() run observes NULL and skips the dying
 * entry instead of touching counters that may be about to go away.
 * The estimator itself is freed only after an RCU grace period.
 */
void gen_kill_estimator(struct gnet_stats_basic_packed *bstats,
			struct gnet_stats_rate_est64 *rate_est)
{
	struct gen_estimator *e;

	spin_lock_bh(&est_tree_lock);
	/* Loop: duplicates may exist for the same (bstats, rate_est) pair */
	while ((e = gen_find_node(bstats, rate_est))) {
		rb_erase(&e->node, &est_root);

		/* Mark dead for est_timer() before unlinking */
		write_lock(&est_lock);
		e->bstats = NULL;
		write_unlock(&est_lock);

		list_del_rcu(&e->list);
		kfree_rcu(e, e_rcu);
	}
	spin_unlock_bh(&est_tree_lock);
}
EXPORT_SYMBOL(gen_kill_estimator);
279
280
281
282
283
284
285
286
287
288
289
290
291
292int gen_replace_estimator(struct gnet_stats_basic_packed *bstats,
293 struct gnet_stats_rate_est64 *rate_est,
294 spinlock_t *stats_lock, struct nlattr *opt)
295{
296 gen_kill_estimator(bstats, rate_est);
297 return gen_new_estimator(bstats, rate_est, stats_lock, opt);
298}
299EXPORT_SYMBOL(gen_replace_estimator);
300
301
302
303
304
305
306
307
308bool gen_estimator_active(const struct gnet_stats_basic_packed *bstats,
309 const struct gnet_stats_rate_est64 *rate_est)
310{
311 bool res;
312
313 ASSERT_RTNL();
314
315 spin_lock_bh(&est_tree_lock);
316 res = gen_find_node(bstats, rate_est) != NULL;
317 spin_unlock_bh(&est_tree_lock);
318
319 return res;
320}
321EXPORT_SYMBOL(gen_estimator_active);
322