// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * net/core/gen_stats.c
 *
 * Authors:  Thomas Graf <tgraf@suug.ch>
 *           Jamal Hadi Salim
 *           Alexey Kuznetsov, <kuznet@ms2.inr.ac.ru>
 *
 * See Documentation/networking/gen_stats.rst
 */

#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/interrupt.h>
#include <linux/socket.h>
#include <linux/rtnetlink.h>
#include <linux/gen_stats.h>
#include <net/netlink.h>
#include <net/gen_stats.h>

static inline int
gnet_stats_copy(struct gnet_dump *d, int type, void *buf, int size, int padattr)
{
	if (nla_put_64bit(d->skb, type, size, buf, padattr))
		goto nla_put_failure;
	return 0;

nla_put_failure:
	if (d->lock)
		spin_unlock_bh(d->lock);
	kfree(d->xstats);
	d->xstats = NULL;
	d->xstats_len = 0;
	return -1;
}

/**
 * gnet_stats_start_copy_compat - start dumping procedure in compatibility mode
 * @skb: socket buffer to put statistics TLVs into
 * @type: TLV type for top level statistic TLV
 * @tc_stats_type: TLV type for backward compatibility struct tc_stats TLV
 * @xstats_type: TLV type for backward compatibility xstats TLV
 * @lock: statistics lock
 * @d: dumping handle
 * @padattr: padding attribute
 *
 * Initializes the dumping handle, grabs the statistic lock and appends
 * an empty TLV header to the socket buffer for use as a container for
 * all other statistic TLVs.
 *
 * The dumping handle is marked to be in backward compatibility mode telling
 * all gnet_stats_copy_XXX() functions to fill a local copy of struct tc_stats.
 *
 * Returns 0 on success or -1 if the room in the socket buffer was not sufficient.
 */
int
gnet_stats_start_copy_compat(struct sk_buff *skb, int type, int tc_stats_type,
			     int xstats_type, spinlock_t *lock,
			     struct gnet_dump *d, int padattr)
	__acquires(lock)
{
	memset(d, 0, sizeof(*d));

	if (type)
		d->tail = (struct nlattr *)skb_tail_pointer(skb);
	d->skb = skb;
	d->compat_tc_stats = tc_stats_type;
	d->compat_xstats = xstats_type;
	d->padattr = padattr;
	if (lock) {
		d->lock = lock;
		spin_lock_bh(lock);
	}
	if (d->tail) {
		int ret = gnet_stats_copy(d, type, NULL, 0, padattr);

		/* The initial attribute added in gnet_stats_copy() may be
		 * preceded by a padding attribute, in which case d->tail will
		 * point to the padding attribute instead of the real attribute.
		 * Fix this so further attributes are nested inside d->tail.
		 */
		if (ret == 0 && d->tail->nla_type == padattr)
			d->tail = (struct nlattr *)((char *)d->tail +
						    NLA_ALIGN(d->tail->nla_len));
		return ret;
	}

	return 0;
}
EXPORT_SYMBOL(gnet_stats_start_copy_compat);
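
/* Example (a sketch modeled on the qdisc dump path; attribute types are
 * from <linux/rtnetlink.h>): start a dump that fills the TCA_STATS2 nest
 * while also emitting the legacy TCA_STATS/TCA_XSTATS attributes for old
 * userspace. A NULL lock is valid; serialization is then up to the caller.
 *
 *	struct gnet_dump d;
 *
 *	if (gnet_stats_start_copy_compat(skb, TCA_STATS2, TCA_STATS,
 *					 TCA_XSTATS, NULL, &d, TCA_PAD) < 0)
 *		goto nla_put_failure;
 */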

/**
 * gnet_stats_start_copy - start dumping procedure
 * @skb: socket buffer to put statistics TLVs into
 * @type: TLV type for top level statistic TLV
 * @lock: statistics lock
 * @d: dumping handle
 * @padattr: padding attribute
 *
 * Initializes the dumping handle, grabs the statistic lock and appends
 * an empty TLV header to the socket buffer for use as a container for
 * all other statistic TLVs.
 *
 * Returns 0 on success or -1 if the room in the socket buffer was not sufficient.
 */
int
gnet_stats_start_copy(struct sk_buff *skb, int type, spinlock_t *lock,
		      struct gnet_dump *d, int padattr)
{
	return gnet_stats_start_copy_compat(skb, type, 0, 0, lock, d, padattr);
}
EXPORT_SYMBOL(gnet_stats_start_copy);

static void
__gnet_stats_copy_basic_cpu(struct gnet_stats_basic_packed *bstats,
			    struct gnet_stats_basic_cpu __percpu *cpu)
{
	int i;

	for_each_possible_cpu(i) {
		struct gnet_stats_basic_cpu *bcpu = per_cpu_ptr(cpu, i);
		unsigned int start;
		u64 bytes;
		u32 packets;

		do {
			start = u64_stats_fetch_begin_irq(&bcpu->syncp);
			bytes = bcpu->bstats.bytes;
			packets = bcpu->bstats.packets;
		} while (u64_stats_fetch_retry_irq(&bcpu->syncp, start));

		bstats->bytes += bytes;
		bstats->packets += packets;
	}
}

void
__gnet_stats_copy_basic(const seqcount_t *running,
			struct gnet_stats_basic_packed *bstats,
			struct gnet_stats_basic_cpu __percpu *cpu,
			struct gnet_stats_basic_packed *b)
{
	unsigned int seq;

	if (cpu) {
		__gnet_stats_copy_basic_cpu(bstats, cpu);
		return;
	}
	do {
		if (running)
			seq = read_seqcount_begin(running);
		bstats->bytes = b->bytes;
		bstats->packets = b->packets;
	} while (running && read_seqcount_retry(running, seq));
}
EXPORT_SYMBOL(__gnet_stats_copy_basic);

static int
___gnet_stats_copy_basic(const seqcount_t *running,
			 struct gnet_dump *d,
			 struct gnet_stats_basic_cpu __percpu *cpu,
			 struct gnet_stats_basic_packed *b,
			 int type)
{
	struct gnet_stats_basic_packed bstats = {0};

	__gnet_stats_copy_basic(running, &bstats, cpu, b);

	if (d->compat_tc_stats && type == TCA_STATS_BASIC) {
		d->tc_stats.bytes = bstats.bytes;
		d->tc_stats.packets = bstats.packets;
	}

	if (d->tail) {
		struct gnet_stats_basic sb;

		memset(&sb, 0, sizeof(sb));
		sb.bytes = bstats.bytes;
		sb.packets = bstats.packets;
		return gnet_stats_copy(d, type, &sb, sizeof(sb),
				       TCA_STATS_PAD);
	}
	return 0;
}

/**
 * gnet_stats_copy_basic - copy basic statistics into statistic TLV
 * @running: seqcount_t pointer (may be NULL)
 * @d: dumping handle
 * @cpu: per-CPU statistics, used instead of @b when non-NULL
 * @b: basic statistics
 *
 * Appends the basic statistics to the top level TLV created by
 * gnet_stats_start_copy().
 *
 * Returns 0 on success or -1 with the statistic lock released
 * if the room in the socket buffer was not sufficient.
 */
int
gnet_stats_copy_basic(const seqcount_t *running,
		      struct gnet_dump *d,
		      struct gnet_stats_basic_cpu __percpu *cpu,
		      struct gnet_stats_basic_packed *b)
{
	return ___gnet_stats_copy_basic(running, d, cpu, b,
					TCA_STATS_BASIC);
}
EXPORT_SYMBOL(gnet_stats_copy_basic);

/**
 * gnet_stats_copy_basic_hw - copy basic hardware statistics into statistic TLV
 * @running: seqcount_t pointer (may be NULL)
 * @d: dumping handle
 * @cpu: per-CPU statistics, used instead of @b when non-NULL
 * @b: basic statistics
 *
 * Appends the basic statistics to the top level TLV created by
 * gnet_stats_start_copy().
 *
 * Returns 0 on success or -1 with the statistic lock released
 * if the room in the socket buffer was not sufficient.
 */
int
gnet_stats_copy_basic_hw(const seqcount_t *running,
			 struct gnet_dump *d,
			 struct gnet_stats_basic_cpu __percpu *cpu,
			 struct gnet_stats_basic_packed *b)
{
	return ___gnet_stats_copy_basic(running, d, cpu, b,
					TCA_STATS_BASIC_HW);
}
EXPORT_SYMBOL(gnet_stats_copy_basic_hw);

/**
 * gnet_stats_copy_rate_est - copy rate estimator statistics into statistics TLV
 * @d: dumping handle
 * @rate_est: rate estimator
 *
 * Appends the rate estimator statistics to the top level TLV created by
 * gnet_stats_start_copy().
 *
 * Returns 0 on success or -1 with the statistic lock released
 * if the room in the socket buffer was not sufficient.
 */
int
gnet_stats_copy_rate_est(struct gnet_dump *d,
			 struct net_rate_estimator __rcu **rate_est)
{
	struct gnet_stats_rate_est64 sample;
	struct gnet_stats_rate_est est;
	int res;

	if (!gen_estimator_read(rate_est, &sample))
		return 0;
	est.bps = min_t(u64, UINT_MAX, sample.bps);
	/* we have some time before reaching 2^32 packets */
	est.pps = sample.pps;

	if (d->compat_tc_stats) {
		d->tc_stats.bps = est.bps;
		d->tc_stats.pps = est.pps;
	}

	if (d->tail) {
		res = gnet_stats_copy(d, TCA_STATS_RATE_EST, &est, sizeof(est),
				      TCA_STATS_PAD);
		if (res < 0 || est.bps == sample.bps)
			return res;
		/* emit 64bit stats only if needed */
		return gnet_stats_copy(d, TCA_STATS_RATE_EST64, &sample,
				       sizeof(sample), TCA_STATS_PAD);
	}

	return 0;
}
EXPORT_SYMBOL(gnet_stats_copy_rate_est);

static void
__gnet_stats_copy_queue_cpu(struct gnet_stats_queue *qstats,
			    const struct gnet_stats_queue __percpu *q)
{
	int i;

	for_each_possible_cpu(i) {
		const struct gnet_stats_queue *qcpu = per_cpu_ptr(q, i);

		qstats->qlen = 0;
		qstats->backlog += qcpu->backlog;
		qstats->drops += qcpu->drops;
		qstats->requeues += qcpu->requeues;
		qstats->overlimits += qcpu->overlimits;
	}
}

void __gnet_stats_copy_queue(struct gnet_stats_queue *qstats,
			     const struct gnet_stats_queue __percpu *cpu,
			     const struct gnet_stats_queue *q,
			     __u32 qlen)
{
	if (cpu) {
		__gnet_stats_copy_queue_cpu(qstats, cpu);
	} else {
		qstats->qlen = q->qlen;
		qstats->backlog = q->backlog;
		qstats->drops = q->drops;
		qstats->requeues = q->requeues;
		qstats->overlimits = q->overlimits;
	}

	/* qlen is not aggregated per CPU; the caller-supplied value wins */
	qstats->qlen = qlen;
}
EXPORT_SYMBOL(__gnet_stats_copy_queue);

/**
 * gnet_stats_copy_queue - copy queue statistics into statistics TLV
 * @d: dumping handle
 * @cpu_q: per-CPU queue statistics, used instead of @q when non-NULL
 * @q: queue statistics
 * @qlen: queue length statistics
 *
 * Appends the queue statistics to the top level TLV created by
 * gnet_stats_start_copy(), using per-CPU queue statistics if they
 * are available.
 *
 * Returns 0 on success or -1 with the statistic lock released
 * if the room in the socket buffer was not sufficient.
 */
int
gnet_stats_copy_queue(struct gnet_dump *d,
		      struct gnet_stats_queue __percpu *cpu_q,
		      struct gnet_stats_queue *q, __u32 qlen)
{
	struct gnet_stats_queue qstats = {0};

	__gnet_stats_copy_queue(&qstats, cpu_q, q, qlen);

	if (d->compat_tc_stats) {
		d->tc_stats.drops = qstats.drops;
		d->tc_stats.qlen = qstats.qlen;
		d->tc_stats.backlog = qstats.backlog;
		d->tc_stats.overlimits = qstats.overlimits;
	}

	if (d->tail)
		return gnet_stats_copy(d, TCA_STATS_QUEUE,
				       &qstats, sizeof(qstats),
				       TCA_STATS_PAD);

	return 0;
}
EXPORT_SYMBOL(gnet_stats_copy_queue);

/**
 * gnet_stats_copy_app - copy application specific statistics into statistics TLV
 * @d: dumping handle
 * @st: application specific statistics data
 * @len: length of data
 *
 * Appends the application specific statistics to the top level TLV created by
 * gnet_stats_start_copy() and remembers the data for XSTATS if the dumping
 * handle is in backward compatibility mode.
 *
 * Returns 0 on success or -1 with the statistic lock released
 * if the room in the socket buffer was not sufficient.
 */
int
gnet_stats_copy_app(struct gnet_dump *d, void *st, int len)
{
	if (d->compat_xstats) {
		d->xstats = kmemdup(st, len, GFP_ATOMIC);
		if (!d->xstats)
			goto err_out;
		d->xstats_len = len;
	}

	if (d->tail)
		return gnet_stats_copy(d, TCA_STATS_APP, st, len,
				       TCA_STATS_PAD);

	return 0;

err_out:
	if (d->lock)
		spin_unlock_bh(d->lock);
	d->xstats_len = 0;
	return -1;
}
EXPORT_SYMBOL(gnet_stats_copy_app);

/**
 * gnet_stats_finish_copy - finish dumping procedure
 * @d: dumping handle
 *
 * Corrects the length of the top level TLV to include all TLVs added
 * by gnet_stats_copy_XXX() calls. Adds the backward compatibility TLVs
 * if gnet_stats_start_copy_compat() was used and releases the statistics
 * lock.
 *
 * Returns 0 on success or -1 with the statistic lock released
 * if the room in the socket buffer was not sufficient.
 */
int
gnet_stats_finish_copy(struct gnet_dump *d)
{
	if (d->tail)
		d->tail->nla_len = skb_tail_pointer(d->skb) - (u8 *)d->tail;

	if (d->compat_tc_stats)
		if (gnet_stats_copy(d, d->compat_tc_stats, &d->tc_stats,
				    sizeof(d->tc_stats), d->padattr) < 0)
			return -1;

	if (d->compat_xstats && d->xstats) {
		if (gnet_stats_copy(d, d->compat_xstats, d->xstats,
				    d->xstats_len, d->padattr) < 0)
			return -1;
	}

	if (d->lock)
		spin_unlock_bh(d->lock);
	kfree(d->xstats);
	d->xstats = NULL;
	d->xstats_len = 0;
	return 0;
}
EXPORT_SYMBOL(gnet_stats_finish_copy);
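
/* Typical dump sequence (a sketch; "q" and its fields are illustrative,
 * not a structure defined in this file):
 *
 *	struct gnet_dump d;
 *
 *	if (gnet_stats_start_copy(skb, TCA_STATS2, &q->lock, &d,
 *				  TCA_PAD) < 0)
 *		goto failure;
 *
 *	if (gnet_stats_copy_basic(NULL, &d, q->cpu_bstats, &q->bstats) < 0 ||
 *	    gnet_stats_copy_rate_est(&d, &q->rate_est) < 0 ||
 *	    gnet_stats_copy_queue(&d, q->cpu_qstats, &q->qstats,
 *				  q->qstats.qlen) < 0 ||
 *	    gnet_stats_copy_app(&d, &q->xstats, sizeof(q->xstats)) < 0)
 *		goto failure;
 *
 *	if (gnet_stats_finish_copy(&d) < 0)
 *		goto failure;
 */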