/* net/sched/sch_netem.c	Network emulator
 *
 * This qdisc emulates wide-area network behaviour: configurable delay,
 * jitter, packet loss, duplication, corruption and re-ordering, plus an
 * optional rate limit.  Many of the algorithms and ideas for this came
 * from NIST Net, which is not the same but based on similar ideas.
 */
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/skbuff.h>
#include <linux/vmalloc.h>
#include <linux/rtnetlink.h>
#include <linux/reciprocal_div.h>
#include <linux/rbtree.h>

#include <net/netlink.h>
#include <net/pkt_sched.h>
#include <net/inet_ecn.h>

#define VERSION "1.3"

/*	Network Emulator
 *
 *	This scheduler can alter the spacing and order of packets: it can
 *	delay packets, add random jitter (optionally correlated with the
 *	previous value), and drop, duplicate, corrupt or re-order them.
 *	The delay distribution is either uniform or taken from a table
 *	loaded from user space (see get_dist_table()).
 *
 *	Correlated Loss Generator models
 *
 *	Besides simple random loss, losses can be generated from a
 *	4-state Markov chain (the "GI" model) or from the Gilbert-Elliot
 *	model and its special cases, selected via the nested
 *	TCA_NETEM_LOSS attribute.
 */
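
/* For illustration only (not part of the kernel interface): typical
 * iproute2 invocations that exercise the features above look roughly
 * like the following; exact option spelling depends on the tc version.
 *
 *	tc qdisc add dev eth0 root netem delay 100ms 20ms 25%
 *	tc qdisc change dev eth0 root netem loss 1% 25% duplicate 1%
 *	tc qdisc change dev eth0 root netem corrupt 0.1% reorder 25% 50%
 *	tc qdisc change dev eth0 root netem rate 1mbit
 */
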
struct netem_sched_data {
	/* internal t(ime)fifo qdisc uses t_root and sch->limit */
	struct rb_root t_root;

	/* optional qdisc for classful handling (NULL at netem init) */
	struct Qdisc	*qdisc;

	struct qdisc_watchdog watchdog;

	s64 latency;
	s64 jitter;

	u32 loss;
	u32 ecn;
	u32 limit;
	u32 counter;
	u32 gap;
	u32 duplicate;
	u32 reorder;
	u32 corrupt;
	u64 rate;
	s32 packet_overhead;
	u32 cell_size;
	struct reciprocal_value cell_size_reciprocal;
	s32 cell_overhead;

	struct crndstate {
		u32 last;
		u32 rho;
	} delay_cor, loss_cor, dup_cor, reorder_cor, corrupt_cor;

	struct disttable {
		u32  size;
		s16 table[0];
	} *delay_dist;

	enum  {
		CLG_RANDOM,
		CLG_4_STATES,
		CLG_GILB_ELL,
	} loss_model;

	enum {
		TX_IN_GAP_PERIOD = 1,
		TX_IN_BURST_PERIOD,
		LOST_IN_GAP_PERIOD,
		LOST_IN_BURST_PERIOD,
	} _4_state_model;

	enum {
		GOOD_STATE = 1,
		BAD_STATE,
	} GE_state_model;

	/* Correlated Loss Generation models */
	struct clgstate {
		/* state of the Markov chain */
		u8 state;

		/* 4-states and Gilbert-Elliot models */
		u32 a1;	/* p13 for 4-states or p for GE */
		u32 a2;	/* p31 for 4-states or r for GE */
		u32 a3;	/* p32 for 4-states or h for GE */
		u32 a4;	/* p14 for 4-states or 1-k for GE */
		u32 a5; /* p23 used only in 4-states */
	} clg;

	struct tc_netem_slot slot_config;
	struct slotstate {
		u64 slot_next;
		s32 packets_left;
		s32 bytes_left;
	} slot;

};

/* Time stamp put into socket buffer control block
 * Only valid when skbs are in our internal t(ime)fifo queue.
 *
 * As skb->rbnode uses same storage than skb->next, skb->prev and skb->tstamp,
 * and skb->next & skb->prev are scratch space for a qdisc,
 * we save skb->tstamp value in skb->cb[] before destroying it.
 */
struct netem_skb_cb {
	u64	        time_to_send;
};

static inline struct netem_skb_cb *netem_skb_cb(struct sk_buff *skb)
{
	/* we assume we can use skb next/prev/tstamp as storage for rb_node */
	qdisc_cb_private_validate(skb, sizeof(struct netem_skb_cb));
	return (struct netem_skb_cb *)qdisc_skb_cb(skb)->data;
}

/* init_crandom - initialize correlated random number generator
 * Use entropy source for initial seed.
 */
static void init_crandom(struct crndstate *state, unsigned long rho)
{
	state->rho = rho;
	state->last = prandom_u32();
}

/* get_crandom - correlated random number generator
 * Next number depends on last value.
 * rho is scaled to avoid floating point.
 */
static u32 get_crandom(struct crndstate *state)
{
	u64 value, rho;
	unsigned long answer;

	if (state->rho == 0)	/* no correlation */
		return prandom_u32();

	value = prandom_u32();
	rho = (u64)state->rho + 1;
	answer = (value * ((1ull<<32) - rho) + state->last * rho) >> 32;
	state->last = answer;
	return answer;
}
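
/* A worked view of the arithmetic above: with U uniform in [0, 2^32),
 * get_crandom() returns roughly
 *
 *	((2^32 - rho) * U + rho * last) >> 32
 *
 * so rho == 0 yields independent uniform values, while rho near
 * 2^32 - 1 makes each value track the previous one almost exactly.
 */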

/* loss_4state - 4-state model loss generator
 * Generates losses according to the 4-state Markov chain adopted in
 * the GI (General and Intuitive) loss model.
 */
static bool loss_4state(struct netem_sched_data *q)
{
	struct clgstate *clg = &q->clg;
	u32 rnd = prandom_u32();

	/*
	 * Makes a comparison between rnd and the transition
	 * probabilities outgoing from the current state, then decides the
	 * next state and if the next packet has to be transmitted or lost.
	 * The four states correspond to:
	 *   TX_IN_GAP_PERIOD => successfully transmitted packets within a gap period
	 *   LOST_IN_BURST_PERIOD => isolated losses within a gap period
	 *   LOST_IN_GAP_PERIOD => lost packets within a burst period
	 *   TX_IN_BURST_PERIOD => successfully transmitted packets within a burst period
	 */
	switch (clg->state) {
	case TX_IN_GAP_PERIOD:
		if (rnd < clg->a4) {
			clg->state = LOST_IN_BURST_PERIOD;
			return true;
		} else if (clg->a4 < rnd && rnd < clg->a1 + clg->a4) {
			clg->state = LOST_IN_GAP_PERIOD;
			return true;
		} else if (clg->a1 + clg->a4 < rnd) {
			clg->state = TX_IN_GAP_PERIOD;
		}

		break;
	case TX_IN_BURST_PERIOD:
		if (rnd < clg->a5) {
			clg->state = LOST_IN_GAP_PERIOD;
			return true;
		} else {
			clg->state = TX_IN_BURST_PERIOD;
		}

		break;
	case LOST_IN_GAP_PERIOD:
		if (rnd < clg->a3)
			clg->state = TX_IN_BURST_PERIOD;
		else if (clg->a3 < rnd && rnd < clg->a2 + clg->a3) {
			clg->state = TX_IN_GAP_PERIOD;
		} else if (clg->a2 + clg->a3 < rnd) {
			clg->state = LOST_IN_GAP_PERIOD;
			return true;
		}
		break;
	case LOST_IN_BURST_PERIOD:
		clg->state = TX_IN_GAP_PERIOD;
		break;
	}

	return false;
}
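
/* For reference when reading the cases above: get_loss_clg() fills the
 * generic a1..a5 slots from the tc_netem_gimodel attribute as
 *	a1 = p13, a2 = p31, a3 = p32, a4 = p14, a5 = p23,
 * i.e. the transition probabilities supplied over netlink, scaled by
 * user space to the full u32 range so they can be compared against rnd.
 */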

/* loss_gilb_ell - Gilbert-Elliot model loss generator
 * Generates losses according to the Gilbert-Elliot loss model or
 * its special cases (Gilbert or Simple Gilbert)
 *
 * Makes a comparison between random number and the transition
 * probabilities outgoing from the current state, then decides the
 * next state. A second random number is extracted and the comparison
 * with the loss probability of the current state decides if the next
 * packet will be transmitted or lost.
 */
static bool loss_gilb_ell(struct netem_sched_data *q)
{
	struct clgstate *clg = &q->clg;

	switch (clg->state) {
	case GOOD_STATE:
		if (prandom_u32() < clg->a1)
			clg->state = BAD_STATE;
		if (prandom_u32() < clg->a4)
			return true;
		break;
	case BAD_STATE:
		if (prandom_u32() < clg->a2)
			clg->state = GOOD_STATE;
		if (prandom_u32() > clg->a3)
			return true;
	}

	return false;
}

static bool loss_event(struct netem_sched_data *q)
{
	switch (q->loss_model) {
	case CLG_RANDOM:
		/* Random packet drop 0 => none, ~0 => all */
		return q->loss && q->loss >= get_crandom(&q->loss_cor);

	case CLG_4_STATES:
		/* 4state loss model algorithm (used also for GI model)
		 * Extracts a value from the markov 4 state loss generator,
		 * if it is 1 drops a packet and if needed writes the event in
		 * the kernel logs
		 */
		return loss_4state(q);

	case CLG_GILB_ELL:
		/* Gilbert-Elliot loss model algorithm
		 * Extracts a value from the Gilbert-Elliot loss generator,
		 * if it is 1 drops a packet and if needed writes the event in
		 * the kernel logs
		 */
		return loss_gilb_ell(q);
	}

	return false;	/* not reached */
}

/* tabledist - return a pseudo-randomly distributed value with mean mu and
 * std deviation sigma.  Uses table lookup to approximate the desired
 * distribution, and a uniformly-distributed pseudo-random source.
 */
static s64 tabledist(s64 mu, s32 sigma,
		     struct crndstate *state,
		     const struct disttable *dist)
{
	s64 x;
	long t;
	u32 rnd;

	if (sigma == 0)
		return mu;

	rnd = get_crandom(state);

	/* default uniform distribution */
	if (dist == NULL)
		return (rnd % (2 * sigma)) - sigma + mu;

	t = dist->table[rnd % dist->size];
	x = (sigma % NETEM_DIST_SCALE) * t;
	if (x >= 0)
		x += NETEM_DIST_SCALE/2;
	else
		x -= NETEM_DIST_SCALE/2;

	return x / NETEM_DIST_SCALE + (sigma / NETEM_DIST_SCALE) * t + mu;
}
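
/* Numeric sketch of the table case above (illustrative values only):
 * with mu = 100 ms, sigma = 10 ms and a table entry t = 4096
 * (NETEM_DIST_SCALE is 8192), the result works out to
 *	mu + sigma * t / NETEM_DIST_SCALE = 100 ms + 5 ms = 105 ms.
 * The quotient/remainder split keeps precision when sigma, expressed
 * in nanoseconds, is much larger than NETEM_DIST_SCALE.
 */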

static u64 packet_time_ns(u64 len, const struct netem_sched_data *q)
{
	len += q->packet_overhead;

	if (q->cell_size) {
		u32 cells = reciprocal_divide(len, q->cell_size_reciprocal);

		if (len > cells * q->cell_size) /* extra cell needed for remainder */
			cells++;
		len = cells * (q->cell_size + q->cell_overhead);
	}

	return div64_u64(len * NSEC_PER_SEC, q->rate);
}
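
/* Illustration of the transmission-time model above (made-up numbers):
 * at q->rate = 125000 bytes/s (1 Mbit/s) a 1500 byte packet occupies
 * 1500 * 10^9 / 125000 = 12,000,000 ns = 12 ms on the emulated link;
 * packet_overhead, cell_size and cell_overhead allow this to account
 * for per-packet framing and cell padding such as ATM.
 */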

static void tfifo_reset(struct Qdisc *sch)
{
	struct netem_sched_data *q = qdisc_priv(sch);
	struct rb_node *p = rb_first(&q->t_root);

	while (p) {
		struct sk_buff *skb = rb_to_skb(p);

		p = rb_next(p);
		rb_erase(&skb->rbnode, &q->t_root);
		rtnl_kfree_skbs(skb, skb);
	}
}

static void tfifo_enqueue(struct sk_buff *nskb, struct Qdisc *sch)
{
	struct netem_sched_data *q = qdisc_priv(sch);
	u64 tnext = netem_skb_cb(nskb)->time_to_send;
	struct rb_node **p = &q->t_root.rb_node, *parent = NULL;

	while (*p) {
		struct sk_buff *skb;

		parent = *p;
		skb = rb_to_skb(parent);
		if (tnext >= netem_skb_cb(skb)->time_to_send)
			p = &parent->rb_right;
		else
			p = &parent->rb_left;
	}
	rb_link_node(&nskb->rbnode, parent, p);
	rb_insert_color(&nskb->rbnode, &q->t_root);
	sch->q.qlen++;
}
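
/* Note on the insertion above: packets with an equal time_to_send go to
 * the right of existing entries, so skbs that share a timestamp are
 * dequeued in their original arrival order.
 */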

/* netem can't properly corrupt a megapacket (like we get from GSO), so instead
 * when we statistically choose to corrupt one, we instead segment it, returning
 * the first packet to be corrupted, and re-enqueue the remaining frames
 */
static struct sk_buff *netem_segment(struct sk_buff *skb, struct Qdisc *sch,
				     struct sk_buff **to_free)
{
	struct sk_buff *segs;
	netdev_features_t features = netif_skb_features(skb);

	segs = skb_gso_segment(skb, features & ~NETIF_F_GSO_MASK);

	if (IS_ERR_OR_NULL(segs)) {
		qdisc_drop(skb, sch, to_free);
		return NULL;
	}
	consume_skb(skb);
	return segs;
}

static void netem_enqueue_skb_head(struct qdisc_skb_head *qh, struct sk_buff *skb)
{
	skb->next = qh->head;

	if (!qh->head)
		qh->tail = skb;
	qh->head = skb;
	qh->qlen++;
}

/*
 * Insert one skb into qdisc.
 * Note: parent depends on return value to account for queue length.
 *	NET_XMIT_DROP: queue length didn't change.
 *	NET_XMIT_SUCCESS: one skb was queued.
 */
static int netem_enqueue(struct sk_buff *skb, struct Qdisc *sch,
			 struct sk_buff **to_free)
{
	struct netem_sched_data *q = qdisc_priv(sch);
	/* We don't fill cb now as skb_unshare() may invalidate it */
	struct netem_skb_cb *cb;
	struct sk_buff *skb2;
	struct sk_buff *segs = NULL;
	unsigned int len = 0, last_len, prev_len = qdisc_pkt_len(skb);
	int nb = 0;
	int count = 1;
	int rc = NET_XMIT_SUCCESS;

	/* Random duplication */
	if (q->duplicate && q->duplicate >= get_crandom(&q->dup_cor))
		++count;

	/* Drop packet? */
	if (loss_event(q)) {
		if (q->ecn && INET_ECN_set_ce(skb))
			qdisc_qstats_drop(sch); /* mark packet */
		else
			--count;
	}
	if (count == 0) {
		qdisc_qstats_drop(sch);
		__qdisc_drop(skb, to_free);
		return NET_XMIT_SUCCESS | __NET_XMIT_BYPASS;
	}

	/* If a delay is expected, orphan the skb. (orphaning usually takes
	 * place at TX completion time, so _before_ the link transit delay)
	 */
	if (q->latency || q->jitter || q->rate)
		skb_orphan_partial(skb);

	/*
	 * If we need to duplicate packet, then re-insert at top of the
	 * qdisc tree, since parent queuer expects that only one
	 * skb will be queued.
	 */
	if (count > 1 && (skb2 = skb_clone(skb, GFP_ATOMIC)) != NULL) {
		struct Qdisc *rootq = qdisc_root(sch);
		u32 dupsave = q->duplicate; /* prevent duplicating a dup... */

		q->duplicate = 0;
		rootq->enqueue(skb2, rootq, to_free);
		q->duplicate = dupsave;
	}

	/*
	 * Randomized packet corruption.
	 * Make copy if needed since we are modifying
	 * If packet is going to be hardware checksummed, then
	 * do it now in software before we mangle it.
	 */
	if (q->corrupt && q->corrupt >= get_crandom(&q->corrupt_cor)) {
		if (skb_is_gso(skb)) {
			segs = netem_segment(skb, sch, to_free);
			if (!segs)
				return NET_XMIT_DROP;
		} else {
			segs = skb;
		}

		skb = segs;
		segs = segs->next;

		skb = skb_unshare(skb, GFP_ATOMIC);
		if (unlikely(!skb)) {
			qdisc_qstats_drop(sch);
			goto finish_segs;
		}
		if (skb->ip_summed == CHECKSUM_PARTIAL &&
		    skb_checksum_help(skb)) {
			qdisc_drop(skb, sch, to_free);
			goto finish_segs;
		}

		skb->data[prandom_u32() % skb_headlen(skb)] ^=
			1<<(prandom_u32() % 8);
	}

	if (unlikely(sch->q.qlen >= sch->limit))
		return qdisc_drop(skb, sch, to_free);

	qdisc_qstats_backlog_inc(sch, skb);

	cb = netem_skb_cb(skb);
	if (q->gap == 0 ||		/* not doing reordering */
	    q->counter < q->gap - 1 ||	/* inside last reordering gap */
	    q->reorder < get_crandom(&q->reorder_cor)) {
		u64 now;
		s64 delay;

		delay = tabledist(q->latency, q->jitter,
				  &q->delay_cor, q->delay_dist);

		now = ktime_get_ns();

		if (q->rate) {
			struct netem_skb_cb *last = NULL;

			if (sch->q.tail)
				last = netem_skb_cb(sch->q.tail);
			if (q->t_root.rb_node) {
				struct sk_buff *t_skb;
				struct netem_skb_cb *t_last;

				t_skb = skb_rb_last(&q->t_root);
				t_last = netem_skb_cb(t_skb);
				if (!last ||
				    t_last->time_to_send > last->time_to_send) {
					last = t_last;
				}
			}

			if (last) {
				/*
				 * Last packet in queue is reference point (now),
				 * calculate this time bonus and subtract
				 * from delay.
				 */
				delay -= last->time_to_send - now;
				delay = max_t(s64, 0, delay);
				now = last->time_to_send;
			}

			delay += packet_time_ns(qdisc_pkt_len(skb), q);
		}

		cb->time_to_send = now + delay;
		++q->counter;
		tfifo_enqueue(skb, sch);
	} else {
		/*
		 * Do re-ordering.
		 */
		cb->time_to_send = ktime_get_ns();
		q->counter = 0;

		netem_enqueue_skb_head(&sch->q, skb);
		sch->qstats.requeues++;
	}

finish_segs:
	if (segs) {
		while (segs) {
			skb2 = segs->next;
			segs->next = NULL;
			qdisc_skb_cb(segs)->pkt_len = segs->len;
			last_len = segs->len;
			rc = qdisc_enqueue(segs, sch, to_free);
			if (rc != NET_XMIT_SUCCESS) {
				if (net_xmit_drop_count(rc))
					qdisc_qstats_drop(sch);
			} else {
				nb++;
				len += last_len;
			}
			segs = skb2;
		}
		sch->q.qlen += nb;
		if (nb > 1)
			qdisc_tree_reduce_backlog(sch, 1 - nb, prev_len - len);
	}
	return NET_XMIT_SUCCESS;
}

/* Open a new delivery slot: pick the next slot start uniformly between
 * min_delay and max_delay after @now and refill the per-slot packet and
 * byte budgets.
 */
static void get_slot_next(struct netem_sched_data *q, u64 now)
{
	q->slot.slot_next = now + q->slot_config.min_delay +
		(prandom_u32() *
			(q->slot_config.max_delay -
				q->slot_config.min_delay) >> 32);
	q->slot.packets_left = q->slot_config.max_packets;
	q->slot.bytes_left = q->slot_config.max_bytes;
}
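
/* Sketch of the slot machinery (illustrative numbers): with
 * min_delay = 800us and max_delay = 1ms, each new slot starts a
 * uniformly chosen 0.8-1.0 ms after "now", and up to max_packets or
 * max_bytes may be released within it before the next slot is
 * scheduled; this approximates media that deliver in bursts, such as
 * WiFi aggregation or cellular scheduling grants.
 */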

static struct sk_buff *netem_dequeue(struct Qdisc *sch)
{
	struct netem_sched_data *q = qdisc_priv(sch);
	struct sk_buff *skb;
	struct rb_node *p;

tfifo_dequeue:
	skb = __qdisc_dequeue_head(&sch->q);
	if (skb) {
		qdisc_qstats_backlog_dec(sch, skb);
deliver:
		qdisc_bstats_update(sch, skb);
		return skb;
	}
	p = rb_first(&q->t_root);
	if (p) {
		u64 time_to_send;
		u64 now = ktime_get_ns();

		skb = rb_to_skb(p);

		/* if more time remaining? */
		time_to_send = netem_skb_cb(skb)->time_to_send;
		if (q->slot.slot_next && q->slot.slot_next < time_to_send)
			get_slot_next(q, now);

		if (time_to_send <= now && q->slot.slot_next <= now) {
			rb_erase(p, &q->t_root);
			sch->q.qlen--;
			qdisc_qstats_backlog_dec(sch, skb);
			skb->next = NULL;
			skb->prev = NULL;
			/* skb->dev shares skb->rbnode area,
			 * we need to restore its value.
			 */
			skb->dev = qdisc_dev(sch);

#ifdef CONFIG_NET_CLS_ACT
			/*
			 * If it's at ingress let's pretend the delay is
			 * from the network (tstamp will be updated).
			 */
			if (skb->tc_redirected && skb->tc_from_ingress)
				skb->tstamp = 0;
#endif

			if (q->slot.slot_next) {
				q->slot.packets_left--;
				q->slot.bytes_left -= qdisc_pkt_len(skb);
				if (q->slot.packets_left <= 0 ||
				    q->slot.bytes_left <= 0)
					get_slot_next(q, now);
			}

			if (q->qdisc) {
				unsigned int pkt_len = qdisc_pkt_len(skb);
				struct sk_buff *to_free = NULL;
				int err;

				err = qdisc_enqueue(skb, q->qdisc, &to_free);
				kfree_skb_list(to_free);
				if (err != NET_XMIT_SUCCESS &&
				    net_xmit_drop_count(err)) {
					qdisc_qstats_drop(sch);
					qdisc_tree_reduce_backlog(sch, 1,
								  pkt_len);
				}
				goto tfifo_dequeue;
			}
			goto deliver;
		}

		if (q->qdisc) {
			skb = q->qdisc->ops->dequeue(q->qdisc);
			if (skb)
				goto deliver;
		}

		qdisc_watchdog_schedule_ns(&q->watchdog,
					   max(time_to_send,
					       q->slot.slot_next));
	}

	if (q->qdisc) {
		skb = q->qdisc->ops->dequeue(q->qdisc);
		if (skb)
			goto deliver;
	}
	return NULL;
}

static void netem_reset(struct Qdisc *sch)
{
	struct netem_sched_data *q = qdisc_priv(sch);

	qdisc_reset_queue(sch);
	tfifo_reset(sch);
	if (q->qdisc)
		qdisc_reset(q->qdisc);
	qdisc_watchdog_cancel(&q->watchdog);
}

static void dist_free(struct disttable *d)
{
	kvfree(d);
}

/*
 * Distribution data is a variable size payload containing
 * signed 16 bit values.
 */
static int get_dist_table(struct Qdisc *sch, const struct nlattr *attr)
{
	struct netem_sched_data *q = qdisc_priv(sch);
	size_t n = nla_len(attr)/sizeof(__s16);
	const __s16 *data = nla_data(attr);
	spinlock_t *root_lock;
	struct disttable *d;
	int i;

	if (n > NETEM_DIST_MAX)
		return -EINVAL;

	d = kvmalloc(sizeof(struct disttable) + n * sizeof(s16), GFP_KERNEL);
	if (!d)
		return -ENOMEM;

	d->size = n;
	for (i = 0; i < n; i++)
		d->table[i] = data[i];

	root_lock = qdisc_root_sleeping_lock(sch);

	spin_lock_bh(root_lock);
	swap(q->delay_dist, d);
	spin_unlock_bh(root_lock);

	dist_free(d);
	return 0;
}

static void get_slot(struct netem_sched_data *q, const struct nlattr *attr)
{
	const struct tc_netem_slot *c = nla_data(attr);

	q->slot_config = *c;
	if (q->slot_config.max_packets == 0)
		q->slot_config.max_packets = INT_MAX;
	if (q->slot_config.max_bytes == 0)
		q->slot_config.max_bytes = INT_MAX;
	q->slot.packets_left = q->slot_config.max_packets;
	q->slot.bytes_left = q->slot_config.max_bytes;
	if (q->slot_config.min_delay | q->slot_config.max_delay)
		q->slot.slot_next = ktime_get_ns();
	else
		q->slot.slot_next = 0;
}

static void get_correlation(struct netem_sched_data *q, const struct nlattr *attr)
{
	const struct tc_netem_corr *c = nla_data(attr);

	init_crandom(&q->delay_cor, c->delay_corr);
	init_crandom(&q->loss_cor, c->loss_corr);
	init_crandom(&q->dup_cor, c->dup_corr);
}

static void get_reorder(struct netem_sched_data *q, const struct nlattr *attr)
{
	const struct tc_netem_reorder *r = nla_data(attr);

	q->reorder = r->probability;
	init_crandom(&q->reorder_cor, r->correlation);
}

static void get_corrupt(struct netem_sched_data *q, const struct nlattr *attr)
{
	const struct tc_netem_corrupt *r = nla_data(attr);

	q->corrupt = r->probability;
	init_crandom(&q->corrupt_cor, r->correlation);
}

static void get_rate(struct netem_sched_data *q, const struct nlattr *attr)
{
	const struct tc_netem_rate *r = nla_data(attr);

	q->rate = r->rate;
	q->packet_overhead = r->packet_overhead;
	q->cell_size = r->cell_size;
	q->cell_overhead = r->cell_overhead;
	if (q->cell_size)
		q->cell_size_reciprocal = reciprocal_value(q->cell_size);
	else
		q->cell_size_reciprocal = (struct reciprocal_value) { 0 };
}

static int get_loss_clg(struct netem_sched_data *q, const struct nlattr *attr)
{
	const struct nlattr *la;
	int rem;

	nla_for_each_nested(la, attr, rem) {
		u16 type = nla_type(la);

		switch (type) {
		case NETEM_LOSS_GI: {
			const struct tc_netem_gimodel *gi = nla_data(la);

			if (nla_len(la) < sizeof(struct tc_netem_gimodel)) {
				pr_info("netem: incorrect gi model size\n");
				return -EINVAL;
			}

			q->loss_model = CLG_4_STATES;

			q->clg.state = TX_IN_GAP_PERIOD;
			q->clg.a1 = gi->p13;
			q->clg.a2 = gi->p31;
			q->clg.a3 = gi->p32;
			q->clg.a4 = gi->p14;
			q->clg.a5 = gi->p23;
			break;
		}

		case NETEM_LOSS_GE: {
			const struct tc_netem_gemodel *ge = nla_data(la);

			if (nla_len(la) < sizeof(struct tc_netem_gemodel)) {
				pr_info("netem: incorrect ge model size\n");
				return -EINVAL;
			}

			q->loss_model = CLG_GILB_ELL;
			q->clg.state = GOOD_STATE;
			q->clg.a1 = ge->p;
			q->clg.a2 = ge->r;
			q->clg.a3 = ge->h;
			q->clg.a4 = ge->k1;
			break;
		}

		default:
			pr_info("netem: unknown loss type %u\n", type);
			return -EINVAL;
		}
	}

	return 0;
}

static const struct nla_policy netem_policy[TCA_NETEM_MAX + 1] = {
	[TCA_NETEM_CORR]	= { .len = sizeof(struct tc_netem_corr) },
	[TCA_NETEM_REORDER]	= { .len = sizeof(struct tc_netem_reorder) },
	[TCA_NETEM_CORRUPT]	= { .len = sizeof(struct tc_netem_corrupt) },
	[TCA_NETEM_RATE]	= { .len = sizeof(struct tc_netem_rate) },
	[TCA_NETEM_LOSS]	= { .type = NLA_NESTED },
	[TCA_NETEM_ECN]		= { .type = NLA_U32 },
	[TCA_NETEM_RATE64]	= { .type = NLA_U64 },
	[TCA_NETEM_LATENCY64]	= { .type = NLA_S64 },
	[TCA_NETEM_JITTER64]	= { .type = NLA_S64 },
	[TCA_NETEM_SLOT]	= { .len = sizeof(struct tc_netem_slot) },
};

static int parse_attr(struct nlattr *tb[], int maxtype, struct nlattr *nla,
		      const struct nla_policy *policy, int len)
{
	int nested_len = nla_len(nla) - NLA_ALIGN(len);

	if (nested_len < 0) {
		pr_info("netem: invalid attributes len %d\n", nested_len);
		return -EINVAL;
	}

	if (nested_len >= nla_attr_size(0))
		return nla_parse(tb, maxtype, nla_data(nla) + NLA_ALIGN(len),
				 nested_len, policy, NULL);

	memset(tb, 0, sizeof(struct nlattr *) * (maxtype + 1));
	return 0;
}
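
/* Layout handled by parse_attr() above: netem's TCA_OPTIONS blob starts
 * with the legacy fixed struct tc_netem_qopt, optionally followed by
 * ordinary netlink attributes (TCA_NETEM_CORR, TCA_NETEM_RATE64, ...),
 * so the attribute stream is parsed starting NLA_ALIGN(sizeof(qopt))
 * bytes into the option payload.
 */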

/* Parse netlink message to set options */
static int netem_change(struct Qdisc *sch, struct nlattr *opt)
{
	struct netem_sched_data *q = qdisc_priv(sch);
	struct nlattr *tb[TCA_NETEM_MAX + 1];
	struct tc_netem_qopt *qopt;
	struct clgstate old_clg;
	int old_loss_model = CLG_RANDOM;
	int ret;

	if (opt == NULL)
		return -EINVAL;

	qopt = nla_data(opt);
	ret = parse_attr(tb, TCA_NETEM_MAX, opt, netem_policy, sizeof(*qopt));
	if (ret < 0)
		return ret;

	/* backup q->clg and q->loss_model */
	old_clg = q->clg;
	old_loss_model = q->loss_model;

	if (tb[TCA_NETEM_LOSS]) {
		ret = get_loss_clg(q, tb[TCA_NETEM_LOSS]);
		if (ret) {
			q->loss_model = old_loss_model;
			return ret;
		}
	} else {
		q->loss_model = CLG_RANDOM;
	}

	if (tb[TCA_NETEM_DELAY_DIST]) {
		ret = get_dist_table(sch, tb[TCA_NETEM_DELAY_DIST]);
		if (ret) {
			/* recover clg and loss_model, in case of
			 * q->clg and q->loss_model were modified
			 * in get_loss_clg()
			 */
			q->clg = old_clg;
			q->loss_model = old_loss_model;
			return ret;
		}
	}

	sch->limit = qopt->limit;

	q->latency = PSCHED_TICKS2NS(qopt->latency);
	q->jitter = PSCHED_TICKS2NS(qopt->jitter);
	q->limit = qopt->limit;
	q->gap = qopt->gap;
	q->counter = 0;
	q->loss = qopt->loss;
	q->duplicate = qopt->duplicate;

	/* for compatibility with earlier versions.
	 * if gap is set, need to assume 100% probability
	 */
	if (q->gap)
		q->reorder = ~0;

	if (tb[TCA_NETEM_CORR])
		get_correlation(q, tb[TCA_NETEM_CORR]);

	if (tb[TCA_NETEM_REORDER])
		get_reorder(q, tb[TCA_NETEM_REORDER]);

	if (tb[TCA_NETEM_CORRUPT])
		get_corrupt(q, tb[TCA_NETEM_CORRUPT]);

	if (tb[TCA_NETEM_RATE])
		get_rate(q, tb[TCA_NETEM_RATE]);

	if (tb[TCA_NETEM_RATE64])
		q->rate = max_t(u64, q->rate,
				nla_get_u64(tb[TCA_NETEM_RATE64]));

	if (tb[TCA_NETEM_LATENCY64])
		q->latency = nla_get_s64(tb[TCA_NETEM_LATENCY64]);

	if (tb[TCA_NETEM_JITTER64])
		q->jitter = nla_get_s64(tb[TCA_NETEM_JITTER64]);

	if (tb[TCA_NETEM_ECN])
		q->ecn = nla_get_u32(tb[TCA_NETEM_ECN]);

	if (tb[TCA_NETEM_SLOT])
		get_slot(q, tb[TCA_NETEM_SLOT]);

	return ret;
}

static int netem_init(struct Qdisc *sch, struct nlattr *opt)
{
	struct netem_sched_data *q = qdisc_priv(sch);
	int ret;

	qdisc_watchdog_init(&q->watchdog, sch);

	if (!opt)
		return -EINVAL;

	q->loss_model = CLG_RANDOM;
	ret = netem_change(sch, opt);
	if (ret)
		pr_info("netem: change failed\n");
	return ret;
}

static void netem_destroy(struct Qdisc *sch)
{
	struct netem_sched_data *q = qdisc_priv(sch);

	qdisc_watchdog_cancel(&q->watchdog);
	if (q->qdisc)
		qdisc_destroy(q->qdisc);
	dist_free(q->delay_dist);
}

static int dump_loss_model(const struct netem_sched_data *q,
			   struct sk_buff *skb)
{
	struct nlattr *nest;

	nest = nla_nest_start(skb, TCA_NETEM_LOSS);
	if (nest == NULL)
		goto nla_put_failure;

	switch (q->loss_model) {
	case CLG_RANDOM:
		/* legacy loss model */
		nla_nest_cancel(skb, nest);
		return 0;	/* no data */

	case CLG_4_STATES: {
		struct tc_netem_gimodel gi = {
			.p13 = q->clg.a1,
			.p31 = q->clg.a2,
			.p32 = q->clg.a3,
			.p14 = q->clg.a4,
			.p23 = q->clg.a5,
		};

		if (nla_put(skb, NETEM_LOSS_GI, sizeof(gi), &gi))
			goto nla_put_failure;
		break;
	}
	case CLG_GILB_ELL: {
		struct tc_netem_gemodel ge = {
			.p = q->clg.a1,
			.r = q->clg.a2,
			.h = q->clg.a3,
			.k1 = q->clg.a4,
		};

		if (nla_put(skb, NETEM_LOSS_GE, sizeof(ge), &ge))
			goto nla_put_failure;
		break;
	}
	}

	nla_nest_end(skb, nest);
	return 0;

nla_put_failure:
	nla_nest_cancel(skb, nest);
	return -1;
}

static int netem_dump(struct Qdisc *sch, struct sk_buff *skb)
{
	const struct netem_sched_data *q = qdisc_priv(sch);
	struct nlattr *nla = (struct nlattr *) skb_tail_pointer(skb);
	struct tc_netem_qopt qopt;
	struct tc_netem_corr cor;
	struct tc_netem_reorder reorder;
	struct tc_netem_corrupt corrupt;
	struct tc_netem_rate rate;
	struct tc_netem_slot slot;

	qopt.latency = min_t(psched_tdiff_t, PSCHED_NS2TICKS(q->latency),
			     UINT_MAX);
	qopt.jitter = min_t(psched_tdiff_t, PSCHED_NS2TICKS(q->jitter),
			    UINT_MAX);
	qopt.limit = q->limit;
	qopt.loss = q->loss;
	qopt.gap = q->gap;
	qopt.duplicate = q->duplicate;
	if (nla_put(skb, TCA_OPTIONS, sizeof(qopt), &qopt))
		goto nla_put_failure;

	if (nla_put(skb, TCA_NETEM_LATENCY64, sizeof(q->latency), &q->latency))
		goto nla_put_failure;

	if (nla_put(skb, TCA_NETEM_JITTER64, sizeof(q->jitter), &q->jitter))
		goto nla_put_failure;

	cor.delay_corr = q->delay_cor.rho;
	cor.loss_corr = q->loss_cor.rho;
	cor.dup_corr = q->dup_cor.rho;
	if (nla_put(skb, TCA_NETEM_CORR, sizeof(cor), &cor))
		goto nla_put_failure;

	reorder.probability = q->reorder;
	reorder.correlation = q->reorder_cor.rho;
	if (nla_put(skb, TCA_NETEM_REORDER, sizeof(reorder), &reorder))
		goto nla_put_failure;

	corrupt.probability = q->corrupt;
	corrupt.correlation = q->corrupt_cor.rho;
	if (nla_put(skb, TCA_NETEM_CORRUPT, sizeof(corrupt), &corrupt))
		goto nla_put_failure;

	if (q->rate >= (1ULL << 32)) {
		if (nla_put_u64_64bit(skb, TCA_NETEM_RATE64, q->rate,
				      TCA_NETEM_PAD))
			goto nla_put_failure;
		rate.rate = ~0U;
	} else {
		rate.rate = q->rate;
	}
	rate.packet_overhead = q->packet_overhead;
	rate.cell_size = q->cell_size;
	rate.cell_overhead = q->cell_overhead;
	if (nla_put(skb, TCA_NETEM_RATE, sizeof(rate), &rate))
		goto nla_put_failure;

	if (q->ecn && nla_put_u32(skb, TCA_NETEM_ECN, q->ecn))
		goto nla_put_failure;

	if (dump_loss_model(q, skb) != 0)
		goto nla_put_failure;

	if (q->slot_config.min_delay | q->slot_config.max_delay) {
		slot = q->slot_config;
		if (slot.max_packets == INT_MAX)
			slot.max_packets = 0;
		if (slot.max_bytes == INT_MAX)
			slot.max_bytes = 0;
		if (nla_put(skb, TCA_NETEM_SLOT, sizeof(slot), &slot))
			goto nla_put_failure;
	}

	return nla_nest_end(skb, nla);

nla_put_failure:
	nlmsg_trim(skb, nla);
	return -1;
}

static int netem_dump_class(struct Qdisc *sch, unsigned long cl,
			    struct sk_buff *skb, struct tcmsg *tcm)
{
	struct netem_sched_data *q = qdisc_priv(sch);

	if (cl != 1 || !q->qdisc)	/* only one class */
		return -ENOENT;

	tcm->tcm_handle |= TC_H_MIN(1);
	tcm->tcm_info = q->qdisc->handle;

	return 0;
}

static int netem_graft(struct Qdisc *sch, unsigned long arg, struct Qdisc *new,
		       struct Qdisc **old)
{
	struct netem_sched_data *q = qdisc_priv(sch);

	*old = qdisc_replace(sch, new, &q->qdisc);
	return 0;
}

static struct Qdisc *netem_leaf(struct Qdisc *sch, unsigned long arg)
{
	struct netem_sched_data *q = qdisc_priv(sch);

	return q->qdisc;
}

static unsigned long netem_find(struct Qdisc *sch, u32 classid)
{
	return 1;
}

static void netem_walk(struct Qdisc *sch, struct qdisc_walker *walker)
{
	if (!walker->stop) {
		if (walker->count >= walker->skip)
			if (walker->fn(sch, 1, walker) < 0) {
				walker->stop = 1;
				return;
			}
		walker->count++;
	}
}

static const struct Qdisc_class_ops netem_class_ops = {
	.graft		=	netem_graft,
	.leaf		=	netem_leaf,
	.find		=	netem_find,
	.walk		=	netem_walk,
	.dump		=	netem_dump_class,
};

static struct Qdisc_ops netem_qdisc_ops __read_mostly = {
	.id		=	"netem",
	.cl_ops		=	&netem_class_ops,
	.priv_size	=	sizeof(struct netem_sched_data),
	.enqueue	=	netem_enqueue,
	.dequeue	=	netem_dequeue,
	.peek		=	qdisc_peek_dequeued,
	.init		=	netem_init,
	.reset		=	netem_reset,
	.destroy	=	netem_destroy,
	.change		=	netem_change,
	.dump		=	netem_dump,
	.owner		=	THIS_MODULE,
};


static int __init netem_module_init(void)
{
	pr_info("netem: version " VERSION "\n");
	return register_qdisc(&netem_qdisc_ops);
}
static void __exit netem_module_exit(void)
{
	unregister_qdisc(&netem_qdisc_ops);
}
module_init(netem_module_init)
module_exit(netem_module_exit)
MODULE_LICENSE("GPL");