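/*
 * net/sched/sch_htb.c	Hierarchical Token Bucket (HTB) packet scheduler.
 *
 * HTB shapes traffic with a tree of classes: leaf classes hold the
 * actual packet queues, inner classes only distribute excess bandwidth,
 * and every class may borrow spare bandwidth from its parent up to its
 * configured ceiling.
 *
 * Author: Martin Devera, <devik@cdi.cz>
 */
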
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/skbuff.h>
#include <linux/list.h>
#include <linux/compiler.h>
#include <linux/rbtree.h>
#include <linux/workqueue.h>
#include <linux/slab.h>
#include <net/netlink.h>
#include <net/pkt_sched.h>
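
/* HTB algorithm.
 *
 * HTB is like TBF with multiple classes. It is also similar to CBQ
 * because it allows each class in the hierarchy to be assigned a
 * priority. A class is guaranteed its configured rate and may borrow
 * unused bandwidth from its parent, up to its ceil rate.
 *
 * Levels: each class sits at one level. Leaves are always at level 0;
 * a root class is at TC_HTB_MAXDEPTH - 1; an inner node is one level
 * below its parent.
 *
 * Illustrative setup (a sketch only; device name and handles are
 * placeholders, not taken from this file):
 *
 *	tc qdisc add dev eth0 root handle 1: htb default 10
 *	tc class add dev eth0 parent 1: classid 1:10 htb rate 1mbit ceil 2mbit
 */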
static int htb_hysteresis __read_mostly = 0;
#define HTB_VER 0x30011		/* major must be matched with number supplied by TC as version */

#if HTB_VER >> 16 != TC_HTB_PROTOVER
#error "Mismatched sch_htb.c and pkt_sch.h"
#endif

module_param(htb_hysteresis, int, 0640);
MODULE_PARM_DESC(htb_hysteresis, "Hysteresis mode, less CPU load, less accurate");

/* used internally to keep status of single class */
enum htb_cmode {
	HTB_CANT_SEND,		/* class can't send and can't borrow */
	HTB_MAY_BORROW,		/* class can't send but may borrow */
	HTB_CAN_SEND		/* class can send */
};

/* interior & leaf nodes; props specific to leaves are marked L: */
struct htb_class {
	struct Qdisc_class_common common;
	/* general class parameters */
	struct gnet_stats_basic_packed bstats;
	struct gnet_stats_queue qstats;
	struct gnet_stats_rate_est rate_est;
	struct tc_htb_xstats xstats;	/* our special stats */
	int refcnt;		/* usage count of this class */

	/* topology */
	int level;		/* our level (see overview above) */
	unsigned int children;
	struct htb_class *parent;	/* parent class */

	int prio;		/* these two are used only by leaves... */
	int quantum;		/* ...but stored for parent-to-leaf return */

	union {
		struct htb_class_leaf {
			struct Qdisc *q;
			int deficit[TC_HTB_MAXDEPTH];
			struct list_head drop_list;
		} leaf;
		struct htb_class_inner {
			struct rb_root feed[TC_HTB_NUMPRIO];	/* feed trees */
			struct rb_node *ptr[TC_HTB_NUMPRIO];	/* current class ptr */
			/* When a class disconnects from its parent's feed
			   the ptr value is lost and we would start from the
			   first child again. Here we store the classid of the
			   last valid ptr (used when ptr is NULL). */
			u32 last_ptr_id[TC_HTB_NUMPRIO];
		} inner;
	} un;
	struct rb_node node[TC_HTB_NUMPRIO];	/* node for self or feed tree */
	struct rb_node pq_node;	/* node for event queue */
	psched_time_t pq_key;

	int prio_activity;	/* for which prios are we active */
	enum htb_cmode cmode;	/* current mode of the class */

	/* class attached filters */
	struct tcf_proto *filter_list;
	int filter_cnt;

	/* token bucket parameters */
	struct qdisc_rate_table *rate;	/* rate table of the class itself */
	struct qdisc_rate_table *ceil;	/* ceiling rate (limits borrows too) */
	long buffer, cbuffer;	/* token bucket depth/rate */
	psched_tdiff_t mbuffer;	/* max wait time */
	long tokens, ctokens;	/* current number of tokens */
	psched_time_t t_c;	/* checkpoint time */
};

struct htb_sched {
	struct Qdisc_class_hash clhash;
	struct list_head drops[TC_HTB_NUMPRIO];	/* active leaves (for drops) */

	/* self list - roots of self generating tree */
	struct rb_root row[TC_HTB_MAXDEPTH][TC_HTB_NUMPRIO];
	int row_mask[TC_HTB_MAXDEPTH];
	struct rb_node *ptr[TC_HTB_MAXDEPTH][TC_HTB_NUMPRIO];
	u32 last_ptr_id[TC_HTB_MAXDEPTH][TC_HTB_NUMPRIO];

	/* self wait list - roots of wait PQs per row */
	struct rb_root wait_pq[TC_HTB_MAXDEPTH];

	/* time of nearest event per level (row) */
	psched_time_t near_ev_cache[TC_HTB_MAXDEPTH];

	int defcls;		/* class where unclassified flows go to */

	/* filters for qdisc itself */
	struct tcf_proto *filter_list;

	int rate2quantum;	/* quant = rate / rate2quantum */
	psched_time_t now;	/* cached dequeue time */
	struct qdisc_watchdog watchdog;

	/* non shaped skbs; let them go directly thru */
	struct sk_buff_head direct_queue;
	int direct_qlen;	/* max qlen of above */

	long direct_pkts;

#define HTB_WARN_TOOMANYEVENTS	0x1
	unsigned int warned;	/* only one warning */
	struct work_struct work;
};
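
/* find class in global hash table using given handle */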
static inline struct htb_class *htb_find(u32 handle, struct Qdisc *sch)
{
	struct htb_sched *q = qdisc_priv(sch);
	struct Qdisc_class_common *clc;

	clc = qdisc_class_find(&q->clhash, handle);
	if (clc == NULL)
		return NULL;
	return container_of(clc, struct htb_class, common);
}
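
/**
 * htb_classify - classify a packet into class
 *
 * Returns NULL if the packet should be dropped, or HTB_DIRECT (-1) if
 * the packet should be passed directly thru. In all other cases a leaf
 * class is returned. We allow direct class selection by classid in
 * skb->priority. Then we examine filters in the qdisc and in inner
 * nodes (if a higher filter points to an inner node). If we end up with
 * classid MAJ:0 the skb goes into a special internal fifo (direct) and
 * passes thru unshaped. If we still have no valid leaf we fall back to
 * the MAJ:default leaf.
 */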
#define HTB_DIRECT ((struct htb_class *)-1)

static struct htb_class *htb_classify(struct sk_buff *skb, struct Qdisc *sch,
				      int *qerr)
{
	struct htb_sched *q = qdisc_priv(sch);
	struct htb_class *cl;
	struct tcf_result res;
	struct tcf_proto *tcf;
	int result;

	/* allow direct class selection by a valid classid in skb->priority;
	   nfmark can be used too, by attaching an fw filter that maps the
	   mark to a classid */
	if (skb->priority == sch->handle)
		return HTB_DIRECT;	/* X:0 (direct flow) selected */
	if ((cl = htb_find(skb->priority, sch)) != NULL && cl->level == 0)
		return cl;

	*qerr = NET_XMIT_SUCCESS | __NET_XMIT_BYPASS;
	tcf = q->filter_list;
	while (tcf && (result = tc_classify(skb, tcf, &res)) >= 0) {
#ifdef CONFIG_NET_CLS_ACT
		switch (result) {
		case TC_ACT_QUEUED:
		case TC_ACT_STOLEN:
			*qerr = NET_XMIT_SUCCESS | __NET_XMIT_STOLEN;
			/* fall through */
		case TC_ACT_SHOT:
			return NULL;
		}
#endif
		if ((cl = (void *)res.class) == NULL) {
			if (res.classid == sch->handle)
				return HTB_DIRECT;	/* X:0 (direct flow) */
			if ((cl = htb_find(res.classid, sch)) == NULL)
				break;	/* filter selected invalid classid */
		}
		if (!cl->level)
			return cl;	/* we hit leaf; return it */

		/* we have got inner class; apply inner filter chain */
		tcf = cl->filter_list;
	}
	/* classification failed; try to use default class */
	cl = htb_find(TC_H_MAKE(TC_H_MAJ(sch->handle), q->defcls), sch);
	if (!cl || cl->level)
		return HTB_DIRECT;	/* bad default .. this is safe bet */
	return cl;
}
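
/**
 * htb_add_to_id_tree - adds class to the round robin list
 *
 * Routine adds class to the list (actually tree) sorted by classid.
 * Make sure that class is not already on such list for given prio.
 */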
static void htb_add_to_id_tree(struct rb_root *root,
			       struct htb_class *cl, int prio)
{
	struct rb_node **p = &root->rb_node, *parent = NULL;

	while (*p) {
		struct htb_class *c;
		parent = *p;
		c = rb_entry(parent, struct htb_class, node[prio]);

		if (cl->common.classid > c->common.classid)
			p = &parent->rb_right;
		else
			p = &parent->rb_left;
	}
	rb_link_node(&cl->node[prio], parent, p);
	rb_insert_color(&cl->node[prio], root);
}
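
/**
 * htb_add_to_wait_tree - adds class to the event queue with delay
 *
 * The class is added to the priority event queue to indicate that the
 * class will change its mode in cl->pq_key (psched) time. Make sure
 * that the class is not already in the queue.
 */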
static void htb_add_to_wait_tree(struct htb_sched *q,
				 struct htb_class *cl, long delay)
{
	struct rb_node **p = &q->wait_pq[cl->level].rb_node, *parent = NULL;

	cl->pq_key = q->now + delay;
	if (cl->pq_key == q->now)
		cl->pq_key++;

	/* update the nearest event cache */
	if (q->near_ev_cache[cl->level] > cl->pq_key)
		q->near_ev_cache[cl->level] = cl->pq_key;

	while (*p) {
		struct htb_class *c;
		parent = *p;
		c = rb_entry(parent, struct htb_class, pq_node);
		if (cl->pq_key >= c->pq_key)
			p = &parent->rb_right;
		else
			p = &parent->rb_left;
	}
	rb_link_node(&cl->pq_node, parent, p);
	rb_insert_color(&cl->pq_node, &q->wait_pq[cl->level]);
}
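
/**
 * htb_next_rb_node - finds next node in binary tree
 *
 * When we are past the last key we set the pointer to NULL.
 * Average complexity is 2 steps per call.
 */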
static inline void htb_next_rb_node(struct rb_node **n)
{
	*n = rb_next(*n);
}
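
/**
 * htb_add_class_to_row - add class to its row
 *
 * The class is added to row at priorities marked in mask.
 * It does nothing if mask == 0.
 */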
static inline void htb_add_class_to_row(struct htb_sched *q,
					struct htb_class *cl, int mask)
{
	q->row_mask[cl->level] |= mask;
	while (mask) {
		int prio = ffz(~mask);
		mask &= ~(1 << prio);
		htb_add_to_id_tree(q->row[cl->level] + prio, cl, prio);
	}
}
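
/* If this triggers, it is a bug in this code, but it need not be fatal */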
static void htb_safe_rb_erase(struct rb_node *rb, struct rb_root *root)
{
	if (RB_EMPTY_NODE(rb)) {
		WARN_ON(1);
	} else {
		rb_erase(rb, root);
		RB_CLEAR_NODE(rb);
	}
}
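
/**
 * htb_remove_class_from_row - removes class from its row
 *
 * The class is removed from row at priorities marked in mask.
 * It does nothing if mask == 0.
 */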
static inline void htb_remove_class_from_row(struct htb_sched *q,
					     struct htb_class *cl, int mask)
{
	int m = 0;

	while (mask) {
		int prio = ffz(~mask);

		mask &= ~(1 << prio);
		if (q->ptr[cl->level][prio] == cl->node + prio)
			htb_next_rb_node(q->ptr[cl->level] + prio);

		htb_safe_rb_erase(cl->node + prio, q->row[cl->level] + prio);
		if (!q->row[cl->level][prio].rb_node)
			m |= 1 << prio;
	}
	q->row_mask[cl->level] &= ~m;
}
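
/**
 * htb_activate_prios - creates active class's feed chain
 *
 * The class is connected to ancestors and/or appropriate rows
 * for priorities it is participating on. cl->cmode must be new
 * (activated) mode. It does nothing if cl->prio_activity == 0.
 */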
static void htb_activate_prios(struct htb_sched *q, struct htb_class *cl)
{
	struct htb_class *p = cl->parent;
	long m, mask = cl->prio_activity;

	while (cl->cmode == HTB_MAY_BORROW && p && mask) {
		m = mask;
		while (m) {
			int prio = ffz(~m);
			m &= ~(1 << prio);

			if (p->un.inner.feed[prio].rb_node)
				/* parent already has its feed in use so that
				   reset bit in mask as parent is already ok */
				mask &= ~(1 << prio);

			htb_add_to_id_tree(p->un.inner.feed + prio, cl, prio);
		}
		p->prio_activity |= mask;
		cl = p;
		p = cl->parent;
	}
	if (cl->cmode == HTB_CAN_SEND && mask)
		htb_add_class_to_row(q, cl, mask);
}
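
/**
 * htb_deactivate_prios - remove class from feed chain
 *
 * cl->cmode must represent old mode (before deactivation). It does
 * nothing if cl->prio_activity == 0. Class is removed from all feed
 * chains and rows.
 */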
static void htb_deactivate_prios(struct htb_sched *q, struct htb_class *cl)
{
	struct htb_class *p = cl->parent;
	long m, mask = cl->prio_activity;

	while (cl->cmode == HTB_MAY_BORROW && p && mask) {
		m = mask;
		mask = 0;
		while (m) {
			int prio = ffz(~m);
			m &= ~(1 << prio);

			if (p->un.inner.ptr[prio] == cl->node + prio) {
				/* we are removing child which is pointed to
				   from parent feed - forget the pointer but
				   remember classid */
				p->un.inner.last_ptr_id[prio] = cl->common.classid;
				p->un.inner.ptr[prio] = NULL;
			}

			htb_safe_rb_erase(cl->node + prio, p->un.inner.feed + prio);

			if (!p->un.inner.feed[prio].rb_node)
				mask |= 1 << prio;
		}

		p->prio_activity &= ~mask;
		cl = p;
		p = cl->parent;
	}
	if (cl->cmode == HTB_CAN_SEND && mask)
		htb_remove_class_from_row(q, cl, mask);
}

static inline long htb_lowater(const struct htb_class *cl)
{
	if (htb_hysteresis)
		return cl->cmode != HTB_CANT_SEND ? -cl->cbuffer : 0;
	else
		return 0;
}

static inline long htb_hiwater(const struct htb_class *cl)
{
	if (htb_hysteresis)
		return cl->cmode == HTB_CAN_SEND ? -cl->buffer : 0;
	else
		return 0;
}
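
/**
 * htb_class_mode - computes and returns current class mode
 *
 * It computes cl's mode at time cl->t_c+diff and returns it. If mode
 * is not HTB_CAN_SEND then *diff is set to the amount of time the class
 * must wait before changing state (the caller uses it to set cl->pq_key).
 * Also note that class mode doesn't change simply at cl->{c,}tokens == 0;
 * with hysteresis enabled there is rather a 0 .. -cl->{c,}buffer range,
 * meant to limit the number of mode transitions per time unit.
 */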
static inline enum htb_cmode
htb_class_mode(struct htb_class *cl, long *diff)
{
	long toks;

	if ((toks = (cl->ctokens + *diff)) < htb_lowater(cl)) {
		*diff = -toks;
		return HTB_CANT_SEND;
	}

	if ((toks = (cl->tokens + *diff)) >= htb_hiwater(cl))
		return HTB_CAN_SEND;

	*diff = -toks;
	return HTB_MAY_BORROW;
}
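
/**
 * htb_change_class_mode - changes class's mode
 *
 * This should be the only way how to change class's mode under normal
 * circumstances. Routine will update feed lists linkage, change mode
 * and add class to the wait event queue if appropriate. New mode should
 * be different from old one and cl->pq_key has to be valid if changing
 * to mode other than HTB_CAN_SEND (see htb_add_to_wait_tree).
 */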
static void
htb_change_class_mode(struct htb_sched *q, struct htb_class *cl, long *diff)
{
	enum htb_cmode new_mode = htb_class_mode(cl, diff);

	if (new_mode == cl->cmode)
		return;

	if (cl->prio_activity) {	/* not necessary: speed optimization */
		if (cl->cmode != HTB_CANT_SEND)
			htb_deactivate_prios(q, cl);
		cl->cmode = new_mode;
		if (new_mode != HTB_CANT_SEND)
			htb_activate_prios(q, cl);
	} else
		cl->cmode = new_mode;
}
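
/**
 * htb_activate - inserts leaf cl into appropriate active feeds
 *
 * Routine learns (new) priority of leaf and activates feed chain
 * for the prio. It can be called on already active leaf safely.
 * It also adds leaf into droplist.
 */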
static inline void htb_activate(struct htb_sched *q, struct htb_class *cl)
{
	WARN_ON(cl->level || !cl->un.leaf.q || !cl->un.leaf.q->q.qlen);

	if (!cl->prio_activity) {
		cl->prio_activity = 1 << cl->prio;
		htb_activate_prios(q, cl);
		list_add_tail(&cl->un.leaf.drop_list,
			      q->drops + cl->prio);
	}
}
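
/**
 * htb_deactivate - remove leaf cl from active feeds
 *
 * Make sure that leaf is active. In other words it can't
 * be called with a non-active leaf. It also removes class from the drop list.
 */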
static inline void htb_deactivate(struct htb_sched *q, struct htb_class *cl)
{
	WARN_ON(!cl->prio_activity);

	htb_deactivate_prios(q, cl);
	cl->prio_activity = 0;
	list_del_init(&cl->un.leaf.drop_list);
}

static int htb_enqueue(struct sk_buff *skb, struct Qdisc *sch)
{
	int uninitialized_var(ret);
	struct htb_sched *q = qdisc_priv(sch);
	struct htb_class *cl = htb_classify(skb, sch, &ret);

	if (cl == HTB_DIRECT) {
		/* enqueue to helper queue */
		if (q->direct_queue.qlen < q->direct_qlen) {
			__skb_queue_tail(&q->direct_queue, skb);
			q->direct_pkts++;
		} else {
			kfree_skb(skb);
			sch->qstats.drops++;
			return NET_XMIT_DROP;
		}
#ifdef CONFIG_NET_CLS_ACT
	} else if (!cl) {
		if (ret & __NET_XMIT_BYPASS)
			sch->qstats.drops++;
		kfree_skb(skb);
		return ret;
#endif
	} else if ((ret = qdisc_enqueue(skb, cl->un.leaf.q)) != NET_XMIT_SUCCESS) {
		if (net_xmit_drop_count(ret)) {
			sch->qstats.drops++;
			cl->qstats.drops++;
		}
		return ret;
	} else {
		bstats_update(&cl->bstats, skb);
		htb_activate(q, cl);
	}

	sch->q.qlen++;
	return NET_XMIT_SUCCESS;
}

static inline void htb_accnt_tokens(struct htb_class *cl, int bytes, long diff)
{
	long toks = diff + cl->tokens;

	if (toks > cl->buffer)
		toks = cl->buffer;
	toks -= (long) qdisc_l2t(cl->rate, bytes);
	if (toks <= -cl->mbuffer)
		toks = 1 - cl->mbuffer;

	cl->tokens = toks;
}

static inline void htb_accnt_ctokens(struct htb_class *cl, int bytes, long diff)
{
	long toks = diff + cl->ctokens;

	if (toks > cl->cbuffer)
		toks = cl->cbuffer;
	toks -= (long) qdisc_l2t(cl->ceil, bytes);
	if (toks <= -cl->mbuffer)
		toks = 1 - cl->mbuffer;

	cl->ctokens = toks;
}
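
/**
 * htb_charge_class - charges amount "bytes" to leaf and ancestors
 *
 * Routine assumes that packet "bytes" long was dequeued from leaf cl
 * borrowing from "level". It accounts bytes to ceil leaky bucket for
 * leaf and all ancestors and to rate bucket for ancestors at levels
 * "level" and higher. It also handles possible change of mode resulting
 * from the update. Note that mode can also increase here (MAY_BORROW to
 * CAN_SEND) because we can use a more precise clock than the event queue
 * here. In such a case we remove the class from the event queue first.
 */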
static void htb_charge_class(struct htb_sched *q, struct htb_class *cl,
			     int level, struct sk_buff *skb)
{
	int bytes = qdisc_pkt_len(skb);
	enum htb_cmode old_mode;
	long diff;

	while (cl) {
		diff = psched_tdiff_bounded(q->now, cl->t_c, cl->mbuffer);
		if (cl->level >= level) {
			if (cl->level == level)
				cl->xstats.lends++;
			htb_accnt_tokens(cl, bytes, diff);
		} else {
			cl->xstats.borrows++;
			cl->tokens += diff;	/* we moved t_c; update tokens */
		}
		htb_accnt_ctokens(cl, bytes, diff);
		cl->t_c = q->now;

		old_mode = cl->cmode;
		diff = 0;
		htb_change_class_mode(q, cl, &diff);
		if (old_mode != cl->cmode) {
			if (old_mode != HTB_CAN_SEND)
				htb_safe_rb_erase(&cl->pq_node, q->wait_pq + cl->level);
			if (cl->cmode != HTB_CAN_SEND)
				htb_add_to_wait_tree(q, cl, diff);
		}

		/* update basic stats except for leaves which are already updated */
		if (cl->level)
			bstats_update(&cl->bstats, skb);

		cl = cl->parent;
	}
}
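
/**
 * htb_do_events - make mode changes to classes at the level
 *
 * Scans event queue for pending events and applies them. Returns time of
 * next pending event (0 for no event in pq, q->now for too many events).
 * Note: Applied are events which have cl->pq_key <= q->now.
 */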
static psched_time_t htb_do_events(struct htb_sched *q, int level,
				   unsigned long start)
{
	/* don't run for longer than 2 jiffies; 2 is used instead of
	   1 to simplify things when jiffy is going to be incremented
	   too soon */
	unsigned long stop_at = start + 2;
	while (time_before(jiffies, stop_at)) {
		struct htb_class *cl;
		long diff;
		struct rb_node *p = rb_first(&q->wait_pq[level]);

		if (!p)
			return 0;

		cl = rb_entry(p, struct htb_class, pq_node);
		if (cl->pq_key > q->now)
			return cl->pq_key;

		htb_safe_rb_erase(p, q->wait_pq + level);
		diff = psched_tdiff_bounded(q->now, cl->t_c, cl->mbuffer);
		htb_change_class_mode(q, cl, &diff);
		if (cl->cmode != HTB_CAN_SEND)
			htb_add_to_wait_tree(q, cl, diff);
	}

	/* too much load - let's continue after a break for scheduling */
	if (!(q->warned & HTB_WARN_TOOMANYEVENTS)) {
		printk(KERN_WARNING "htb: too many events!\n");
		q->warned |= HTB_WARN_TOOMANYEVENTS;
	}

	return q->now;
}
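
/* Returns the node from the id-tree with the smallest classid >= id;
   NULL is returned if no such class exists. */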
static struct rb_node *htb_id_find_next_upper(int prio, struct rb_node *n,
					      u32 id)
{
	struct rb_node *r = NULL;
	while (n) {
		struct htb_class *cl =
		    rb_entry(n, struct htb_class, node[prio]);

		if (id > cl->common.classid) {
			n = n->rb_right;
		} else if (id < cl->common.classid) {
			r = n;
			n = n->rb_left;
		} else {
			return n;
		}
	}
	return r;
}
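
/**
 * htb_lookup_leaf - returns next leaf class in DRR order
 *
 * Find leaf where current feed pointers points to.
 */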
static struct htb_class *htb_lookup_leaf(struct rb_root *tree, int prio,
					 struct rb_node **pptr, u32 *pid)
{
	int i;
	struct {
		struct rb_node *root;
		struct rb_node **pptr;
		u32 *pid;
	} stk[TC_HTB_MAXDEPTH], *sp = stk;

	BUG_ON(!tree->rb_node);
	sp->root = tree->rb_node;
	sp->pptr = pptr;
	sp->pid = pid;

	for (i = 0; i < 65535; i++) {
		if (!*sp->pptr && *sp->pid) {
			/* ptr was invalidated but id is valid - try to recover
			   the original or next ptr */
			*sp->pptr =
			    htb_id_find_next_upper(prio, sp->root, *sp->pid);
		}
		*sp->pid = 0;	/* ptr is valid now so that remove this hint as it
				   can become out of date quickly */
		if (!*sp->pptr) {	/* we are at right end; rewind & go up */
			*sp->pptr = sp->root;
			while ((*sp->pptr)->rb_left)
				*sp->pptr = (*sp->pptr)->rb_left;
			if (sp > stk) {
				sp--;
				if (!*sp->pptr) {
					WARN_ON(1);
					return NULL;
				}
				htb_next_rb_node(sp->pptr);
			}
		} else {
			struct htb_class *cl;
			cl = rb_entry(*sp->pptr, struct htb_class, node[prio]);
			if (!cl->level)
				return cl;
			(++sp)->root = cl->un.inner.feed[prio].rb_node;
			sp->pptr = cl->un.inner.ptr + prio;
			sp->pid = cl->un.inner.last_ptr_id + prio;
		}
	}
	WARN_ON(1);
	return NULL;
}
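
/* dequeues packet at given priority and level; call only if
   you are sure that there is active class at prio/level */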
static struct sk_buff *htb_dequeue_tree(struct htb_sched *q, int prio,
					int level)
{
	struct sk_buff *skb = NULL;
	struct htb_class *cl, *start;
	/* look initial class up in the row */
	start = cl = htb_lookup_leaf(q->row[level] + prio, prio,
				     q->ptr[level] + prio,
				     q->last_ptr_id[level] + prio);

	do {
next:
		if (unlikely(!cl))
			return NULL;

		/* class can be empty - it is unlikely but can be true if leaf
		   qdisc drops packets in enqueue routine or if someone used
		   graft operation on the leaf since last dequeue;
		   simply deactivate and skip such class */
		if (unlikely(cl->un.leaf.q->q.qlen == 0)) {
			struct htb_class *next;
			htb_deactivate(q, cl);

			/* row/level might become empty */
			if ((q->row_mask[level] & (1 << prio)) == 0)
				return NULL;

			next = htb_lookup_leaf(q->row[level] + prio,
					       prio, q->ptr[level] + prio,
					       q->last_ptr_id[level] + prio);

			if (cl == start)	/* fix start if we just deleted it */
				start = next;
			cl = next;
			goto next;
		}

		skb = cl->un.leaf.q->dequeue(cl->un.leaf.q);
		if (likely(skb != NULL))
			break;

		qdisc_warn_nonwc("htb", cl->un.leaf.q);
		htb_next_rb_node((level ? cl->parent->un.inner.ptr :
				  q->ptr[0]) + prio);
		cl = htb_lookup_leaf(q->row[level] + prio, prio,
				     q->ptr[level] + prio,
				     q->last_ptr_id[level] + prio);

	} while (cl != start);

	if (likely(skb != NULL)) {
		cl->un.leaf.deficit[level] -= qdisc_pkt_len(skb);
		if (cl->un.leaf.deficit[level] < 0) {
			cl->un.leaf.deficit[level] += cl->quantum;
			htb_next_rb_node((level ? cl->parent->un.inner.ptr :
					  q->ptr[0]) + prio);
		}
		/* this used to be after charge_class but this constellation
		   gives us slightly better performance */
		if (!cl->un.leaf.q->q.qlen)
			htb_deactivate(q, cl);
		htb_charge_class(q, cl, level, skb);
	}
	return skb;
}

static struct sk_buff *htb_dequeue(struct Qdisc *sch)
{
	struct sk_buff *skb;
	struct htb_sched *q = qdisc_priv(sch);
	int level;
	psched_time_t next_event;
	unsigned long start_at;

	/* try to dequeue direct packets as high prio (!) to minimize cpu work */
	skb = __skb_dequeue(&q->direct_queue);
	if (skb != NULL) {
ok:
		qdisc_bstats_update(sch, skb);
		sch->flags &= ~TCQ_F_THROTTLED;
		sch->q.qlen--;
		return skb;
	}

	if (!sch->q.qlen)
		goto fin;
	q->now = psched_get_time();
	start_at = jiffies;

	next_event = q->now + 5 * PSCHED_TICKS_PER_SEC;

	for (level = 0; level < TC_HTB_MAXDEPTH; level++) {
		/* common case optimization - skip event handler quickly */
		int m;
		psched_time_t event;

		if (q->now >= q->near_ev_cache[level]) {
			event = htb_do_events(q, level, start_at);
			if (!event)
				event = q->now + PSCHED_TICKS_PER_SEC;
			q->near_ev_cache[level] = event;
		} else
			event = q->near_ev_cache[level];

		if (next_event > event)
			next_event = event;

		m = ~q->row_mask[level];
		while (m != (int)(-1)) {
			int prio = ffz(m);
			m |= 1 << prio;
			skb = htb_dequeue_tree(q, prio, level);
			if (likely(skb != NULL))
				goto ok;
		}
	}
	sch->qstats.overlimits++;
	if (likely(next_event > q->now))
		qdisc_watchdog_schedule(&q->watchdog, next_event);
	else
		schedule_work(&q->work);
fin:
	return skb;
}
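
/* try to drop from each class (by prio) until one succeeds */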
static unsigned int htb_drop(struct Qdisc *sch)
{
	struct htb_sched *q = qdisc_priv(sch);
	int prio;

	for (prio = TC_HTB_NUMPRIO - 1; prio >= 0; prio--) {
		struct list_head *p;
		list_for_each(p, q->drops + prio) {
			struct htb_class *cl = list_entry(p, struct htb_class,
							  un.leaf.drop_list);
			unsigned int len;
			if (cl->un.leaf.q->ops->drop &&
			    (len = cl->un.leaf.q->ops->drop(cl->un.leaf.q))) {
				sch->q.qlen--;
				if (!cl->un.leaf.q->q.qlen)
					htb_deactivate(q, cl);
				return len;
			}
		}
	}
	return 0;
}
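
/* reset all classes; always called under BH & queue lock */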
static void htb_reset(struct Qdisc *sch)
{
	struct htb_sched *q = qdisc_priv(sch);
	struct htb_class *cl;
	struct hlist_node *n;
	unsigned int i;

	for (i = 0; i < q->clhash.hashsize; i++) {
		hlist_for_each_entry(cl, n, &q->clhash.hash[i], common.hnode) {
			if (cl->level)
				memset(&cl->un.inner, 0, sizeof(cl->un.inner));
			else {
				if (cl->un.leaf.q)
					qdisc_reset(cl->un.leaf.q);
				INIT_LIST_HEAD(&cl->un.leaf.drop_list);
			}
			cl->prio_activity = 0;
			cl->cmode = HTB_CAN_SEND;
		}
	}
	qdisc_watchdog_cancel(&q->watchdog);
	__skb_queue_purge(&q->direct_queue);
	sch->q.qlen = 0;
	memset(q->row, 0, sizeof(q->row));
	memset(q->row_mask, 0, sizeof(q->row_mask));
	memset(q->wait_pq, 0, sizeof(q->wait_pq));
	memset(q->ptr, 0, sizeof(q->ptr));
	for (i = 0; i < TC_HTB_NUMPRIO; i++)
		INIT_LIST_HEAD(q->drops + i);
}

static const struct nla_policy htb_policy[TCA_HTB_MAX + 1] = {
	[TCA_HTB_PARMS]	= { .len = sizeof(struct tc_htb_opt) },
	[TCA_HTB_INIT]	= { .len = sizeof(struct tc_htb_glob) },
	[TCA_HTB_CTAB]	= { .type = NLA_BINARY, .len = TC_RTAB_SIZE },
	[TCA_HTB_RTAB]	= { .type = NLA_BINARY, .len = TC_RTAB_SIZE },
};

static void htb_work_func(struct work_struct *work)
{
	struct htb_sched *q = container_of(work, struct htb_sched, work);
	struct Qdisc *sch = q->watchdog.qdisc;

	__netif_schedule(qdisc_root(sch));
}

static int htb_init(struct Qdisc *sch, struct nlattr *opt)
{
	struct htb_sched *q = qdisc_priv(sch);
	struct nlattr *tb[TCA_HTB_INIT + 1];
	struct tc_htb_glob *gopt;
	int err;
	int i;

	if (!opt)
		return -EINVAL;

	err = nla_parse_nested(tb, TCA_HTB_INIT, opt, htb_policy);
	if (err < 0)
		return err;

	if (tb[TCA_HTB_INIT] == NULL) {
		printk(KERN_ERR "HTB: hey, probably you have a bad tc tool?\n");
		return -EINVAL;
	}
	gopt = nla_data(tb[TCA_HTB_INIT]);
	if (gopt->version != HTB_VER >> 16) {
		printk(KERN_ERR
		       "HTB: need tc/htb version %d (minor is %d), you have %d\n",
		       HTB_VER >> 16, HTB_VER & 0xffff, gopt->version);
		return -EINVAL;
	}

	err = qdisc_class_hash_init(&q->clhash);
	if (err < 0)
		return err;
	for (i = 0; i < TC_HTB_NUMPRIO; i++)
		INIT_LIST_HEAD(q->drops + i);

	qdisc_watchdog_init(&q->watchdog, sch);
	INIT_WORK(&q->work, htb_work_func);
	skb_queue_head_init(&q->direct_queue);

	q->direct_qlen = qdisc_dev(sch)->tx_queue_len;
	if (q->direct_qlen < 2)	/* some devices have zero tx_queue_len */
		q->direct_qlen = 2;

	if ((q->rate2quantum = gopt->rate2quantum) < 1)
		q->rate2quantum = 1;
	q->defcls = gopt->defcls;

	return 0;
}

static int htb_dump(struct Qdisc *sch, struct sk_buff *skb)
{
	spinlock_t *root_lock = qdisc_root_sleeping_lock(sch);
	struct htb_sched *q = qdisc_priv(sch);
	struct nlattr *nest;
	struct tc_htb_glob gopt;

	spin_lock_bh(root_lock);

	gopt.direct_pkts = q->direct_pkts;
	gopt.version = HTB_VER;
	gopt.rate2quantum = q->rate2quantum;
	gopt.defcls = q->defcls;
	gopt.debug = 0;

	nest = nla_nest_start(skb, TCA_OPTIONS);
	if (nest == NULL)
		goto nla_put_failure;
	NLA_PUT(skb, TCA_HTB_INIT, sizeof(gopt), &gopt);
	nla_nest_end(skb, nest);

	spin_unlock_bh(root_lock);
	return skb->len;

nla_put_failure:
	spin_unlock_bh(root_lock);
	nla_nest_cancel(skb, nest);
	return -1;
}

static int htb_dump_class(struct Qdisc *sch, unsigned long arg,
			  struct sk_buff *skb, struct tcmsg *tcm)
{
	struct htb_class *cl = (struct htb_class *)arg;
	spinlock_t *root_lock = qdisc_root_sleeping_lock(sch);
	struct nlattr *nest;
	struct tc_htb_opt opt;

	spin_lock_bh(root_lock);
	tcm->tcm_parent = cl->parent ? cl->parent->common.classid : TC_H_ROOT;
	tcm->tcm_handle = cl->common.classid;
	if (!cl->level && cl->un.leaf.q)
		tcm->tcm_info = cl->un.leaf.q->handle;

	nest = nla_nest_start(skb, TCA_OPTIONS);
	if (nest == NULL)
		goto nla_put_failure;

	memset(&opt, 0, sizeof(opt));

	opt.rate = cl->rate->rate;
	opt.buffer = cl->buffer;
	opt.ceil = cl->ceil->rate;
	opt.cbuffer = cl->cbuffer;
	opt.quantum = cl->quantum;
	opt.prio = cl->prio;
	opt.level = cl->level;
	NLA_PUT(skb, TCA_HTB_PARMS, sizeof(opt), &opt);

	nla_nest_end(skb, nest);
	spin_unlock_bh(root_lock);
	return skb->len;

nla_put_failure:
	spin_unlock_bh(root_lock);
	nla_nest_cancel(skb, nest);
	return -1;
}

static int
htb_dump_class_stats(struct Qdisc *sch, unsigned long arg, struct gnet_dump *d)
{
	struct htb_class *cl = (struct htb_class *)arg;

	if (!cl->level && cl->un.leaf.q)
		cl->qstats.qlen = cl->un.leaf.q->q.qlen;
	cl->xstats.tokens = cl->tokens;
	cl->xstats.ctokens = cl->ctokens;

	if (gnet_stats_copy_basic(d, &cl->bstats) < 0 ||
	    gnet_stats_copy_rate_est(d, NULL, &cl->rate_est) < 0 ||
	    gnet_stats_copy_queue(d, &cl->qstats) < 0)
		return -1;

	return gnet_stats_copy_app(d, &cl->xstats, sizeof(cl->xstats));
}

static int htb_graft(struct Qdisc *sch, unsigned long arg, struct Qdisc *new,
		     struct Qdisc **old)
{
	struct htb_class *cl = (struct htb_class *)arg;

	if (cl->level)
		return -EINVAL;
	if (new == NULL &&
	    (new = qdisc_create_dflt(sch->dev_queue, &pfifo_qdisc_ops,
				     cl->common.classid)) == NULL)
		return -ENOBUFS;

	sch_tree_lock(sch);
	*old = cl->un.leaf.q;
	cl->un.leaf.q = new;
	if (*old != NULL) {
		qdisc_tree_decrease_qlen(*old, (*old)->q.qlen);
		qdisc_reset(*old);
	}
	sch_tree_unlock(sch);
	return 0;
}

static struct Qdisc *htb_leaf(struct Qdisc *sch, unsigned long arg)
{
	struct htb_class *cl = (struct htb_class *)arg;
	return !cl->level ? cl->un.leaf.q : NULL;
}

static void htb_qlen_notify(struct Qdisc *sch, unsigned long arg)
{
	struct htb_class *cl = (struct htb_class *)arg;

	if (cl->un.leaf.q->q.qlen == 0)
		htb_deactivate(qdisc_priv(sch), cl);
}

static unsigned long htb_get(struct Qdisc *sch, u32 classid)
{
	struct htb_class *cl = htb_find(classid, sch);
	if (cl)
		cl->refcnt++;
	return (unsigned long)cl;
}

static inline int htb_parent_last_child(struct htb_class *cl)
{
	if (!cl->parent)
		/* the root class */
		return 0;
	if (cl->parent->children > 1)
		/* not the last child */
		return 0;
	return 1;
}

static void htb_parent_to_leaf(struct htb_sched *q, struct htb_class *cl,
			       struct Qdisc *new_q)
{
	struct htb_class *parent = cl->parent;

	WARN_ON(cl->level || !cl->un.leaf.q || cl->prio_activity);

	if (parent->cmode != HTB_CAN_SEND)
		htb_safe_rb_erase(&parent->pq_node, q->wait_pq + parent->level);

	parent->level = 0;
	memset(&parent->un.inner, 0, sizeof(parent->un.inner));
	INIT_LIST_HEAD(&parent->un.leaf.drop_list);
	parent->un.leaf.q = new_q ? new_q : &noop_qdisc;
	parent->tokens = parent->buffer;
	parent->ctokens = parent->cbuffer;
	parent->t_c = psched_get_time();
	parent->cmode = HTB_CAN_SEND;
}

static void htb_destroy_class(struct Qdisc *sch, struct htb_class *cl)
{
	if (!cl->level) {
		WARN_ON(!cl->un.leaf.q);
		qdisc_destroy(cl->un.leaf.q);
	}
	gen_kill_estimator(&cl->bstats, &cl->rate_est);
	qdisc_put_rtab(cl->rate);
	qdisc_put_rtab(cl->ceil);

	tcf_destroy_chain(&cl->filter_list);
	kfree(cl);
}

static void htb_destroy(struct Qdisc *sch)
{
	struct htb_sched *q = qdisc_priv(sch);
	struct hlist_node *n, *next;
	struct htb_class *cl;
	unsigned int i;

	cancel_work_sync(&q->work);
	qdisc_watchdog_cancel(&q->watchdog);
	/* This line used to be after htb_destroy_class call below
	   and surprisingly it worked in 2.4. But it must precede it
	   because filters need their target class alive to be able to
	   call unbind_filter on it (without Oops). */
	tcf_destroy_chain(&q->filter_list);

	for (i = 0; i < q->clhash.hashsize; i++) {
		hlist_for_each_entry(cl, n, &q->clhash.hash[i], common.hnode)
			tcf_destroy_chain(&cl->filter_list);
	}
	for (i = 0; i < q->clhash.hashsize; i++) {
		hlist_for_each_entry_safe(cl, n, next, &q->clhash.hash[i],
					  common.hnode)
			htb_destroy_class(sch, cl);
	}
	qdisc_class_hash_destroy(&q->clhash);
	__skb_queue_purge(&q->direct_queue);
}

static int htb_delete(struct Qdisc *sch, unsigned long arg)
{
	struct htb_sched *q = qdisc_priv(sch);
	struct htb_class *cl = (struct htb_class *)arg;
	unsigned int qlen;
	struct Qdisc *new_q = NULL;
	int last_child = 0;

	/* TODO: why don't we allow deleting a subtree? references? does
	   tc subsys guarantee us that in htb_destroy it holds no class
	   refcounts? */
	if (cl->children || cl->filter_cnt)
		return -EBUSY;

	if (!cl->level && htb_parent_last_child(cl)) {
		new_q = qdisc_create_dflt(sch->dev_queue, &pfifo_qdisc_ops,
					  cl->parent->common.classid);
		last_child = 1;
	}

	sch_tree_lock(sch);

	if (!cl->level) {
		qlen = cl->un.leaf.q->q.qlen;
		qdisc_reset(cl->un.leaf.q);
		qdisc_tree_decrease_qlen(cl->un.leaf.q, qlen);
	}

	/* delete from hash and active; remainder in destroy_class */
	qdisc_class_hash_remove(&q->clhash, &cl->common);
	if (cl->parent)
		cl->parent->children--;

	if (cl->prio_activity)
		htb_deactivate(q, cl);

	if (cl->cmode != HTB_CAN_SEND)
		htb_safe_rb_erase(&cl->pq_node, q->wait_pq + cl->level);

	if (last_child)
		htb_parent_to_leaf(q, cl, new_q);

	BUG_ON(--cl->refcnt == 0);
	/*
	 * This shouldn't happen: we "hold" one cops->get() when called
	 * from tc_ctl_tclass; the destroy method is done from cops->put().
	 */

	sch_tree_unlock(sch);
	return 0;
}

static void htb_put(struct Qdisc *sch, unsigned long arg)
{
	struct htb_class *cl = (struct htb_class *)arg;

	if (--cl->refcnt == 0)
		htb_destroy_class(sch, cl);
}

static int htb_change_class(struct Qdisc *sch, u32 classid,
			    u32 parentid, struct nlattr **tca,
			    unsigned long *arg)
{
	int err = -EINVAL;
	struct htb_sched *q = qdisc_priv(sch);
	struct htb_class *cl = (struct htb_class *)*arg, *parent;
	struct nlattr *opt = tca[TCA_OPTIONS];
	struct qdisc_rate_table *rtab = NULL, *ctab = NULL;
	struct nlattr *tb[__TCA_HTB_MAX];
	struct tc_htb_opt *hopt;

	/* extract all subattrs from opt attr */
	if (!opt)
		goto failure;

	err = nla_parse_nested(tb, TCA_HTB_MAX, opt, htb_policy);
	if (err < 0)
		goto failure;

	err = -EINVAL;
	if (tb[TCA_HTB_PARMS] == NULL)
		goto failure;

	parent = parentid == TC_H_ROOT ? NULL : htb_find(parentid, sch);

	hopt = nla_data(tb[TCA_HTB_PARMS]);

	rtab = qdisc_get_rtab(&hopt->rate, tb[TCA_HTB_RTAB]);
	ctab = qdisc_get_rtab(&hopt->ceil, tb[TCA_HTB_CTAB]);
	if (!rtab || !ctab)
		goto failure;
	if (!cl) {		/* new class */
		struct Qdisc *new_q;
		int prio;
		struct {
			struct nlattr		nla;
			struct gnet_estimator	opt;
		} est = {
			.nla = {
				.nla_len	= nla_attr_size(sizeof(est.opt)),
				.nla_type	= TCA_RATE,
			},
			.opt = {
				/* 4s interval, 16s averaging constant */
				.interval	= 2,
				.ewma_log	= 2,
			},
		};

		/* check for valid classid */
		if (!classid || TC_H_MAJ(classid ^ sch->handle) ||
		    htb_find(classid, sch))
			goto failure;

		/* check maximal depth */
		if (parent && parent->parent && parent->parent->level < 2) {
			printk(KERN_ERR "htb: tree is too deep\n");
			goto failure;
		}
		err = -ENOBUFS;
		if ((cl = kzalloc(sizeof(*cl), GFP_KERNEL)) == NULL)
			goto failure;

		err = gen_new_estimator(&cl->bstats, &cl->rate_est,
					qdisc_root_sleeping_lock(sch),
					tca[TCA_RATE] ? : &est.nla);
		if (err) {
			kfree(cl);
			goto failure;
		}

		cl->refcnt = 1;
		cl->children = 0;
		INIT_LIST_HEAD(&cl->un.leaf.drop_list);
		RB_CLEAR_NODE(&cl->pq_node);

		for (prio = 0; prio < TC_HTB_NUMPRIO; prio++)
			RB_CLEAR_NODE(&cl->node[prio]);

		/* create leaf qdisc early because it uses kmalloc(GFP_KERNEL)
		   so that it can't be used inside of sch_tree_lock
		   -- thanks to Karlis Peisenieks */
		new_q = qdisc_create_dflt(sch->dev_queue,
					  &pfifo_qdisc_ops, classid);
		sch_tree_lock(sch);
		if (parent && !parent->level) {
			unsigned int qlen = parent->un.leaf.q->q.qlen;

			/* turn parent into inner node */
			qdisc_reset(parent->un.leaf.q);
			qdisc_tree_decrease_qlen(parent->un.leaf.q, qlen);
			qdisc_destroy(parent->un.leaf.q);
			if (parent->prio_activity)
				htb_deactivate(q, parent);

			/* remove from evt list because of level change */
			if (parent->cmode != HTB_CAN_SEND) {
				htb_safe_rb_erase(&parent->pq_node, q->wait_pq);
				parent->cmode = HTB_CAN_SEND;
			}
			parent->level = (parent->parent ? parent->parent->level
					 : TC_HTB_MAXDEPTH) - 1;
			memset(&parent->un.inner, 0, sizeof(parent->un.inner));
		}
		/* leaf (we) needs elementary qdisc */
		cl->un.leaf.q = new_q ? new_q : &noop_qdisc;

		cl->common.classid = classid;
		cl->parent = parent;

		/* set class to be in HTB_CAN_SEND state */
		cl->tokens = hopt->buffer;
		cl->ctokens = hopt->cbuffer;
		cl->mbuffer = 60 * PSCHED_TICKS_PER_SEC;	/* 1min */
		cl->t_c = psched_get_time();
		cl->cmode = HTB_CAN_SEND;

		/* attach to the hash list and parent's family */
		qdisc_class_hash_insert(&q->clhash, &cl->common);
		if (parent)
			parent->children++;
	} else {
		if (tca[TCA_RATE]) {
			err = gen_replace_estimator(&cl->bstats, &cl->rate_est,
						    qdisc_root_sleeping_lock(sch),
						    tca[TCA_RATE]);
			if (err)
				return err;
		}
		sch_tree_lock(sch);
	}

	/* it used to be a nasty bug here, we have to check that node
	   is really a leaf before changing cl->un.leaf! */
	if (!cl->level) {
		cl->quantum = rtab->rate.rate / q->rate2quantum;
		if (!hopt->quantum && cl->quantum < 1000) {
			printk(KERN_WARNING
			       "HTB: quantum of class %X is small. Consider r2q change.\n",
			       cl->common.classid);
			cl->quantum = 1000;
		}
		if (!hopt->quantum && cl->quantum > 200000) {
			printk(KERN_WARNING
			       "HTB: quantum of class %X is big. Consider r2q change.\n",
			       cl->common.classid);
			cl->quantum = 200000;
		}
		if (hopt->quantum)
			cl->quantum = hopt->quantum;
		if ((cl->prio = hopt->prio) >= TC_HTB_NUMPRIO)
			cl->prio = TC_HTB_NUMPRIO - 1;
	}

	cl->buffer = hopt->buffer;
	cl->cbuffer = hopt->cbuffer;
	if (cl->rate)
		qdisc_put_rtab(cl->rate);
	cl->rate = rtab;
	if (cl->ceil)
		qdisc_put_rtab(cl->ceil);
	cl->ceil = ctab;
	sch_tree_unlock(sch);

	qdisc_class_hash_grow(sch, &q->clhash);

	*arg = (unsigned long)cl;
	return 0;

failure:
	if (rtab)
		qdisc_put_rtab(rtab);
	if (ctab)
		qdisc_put_rtab(ctab);
	return err;
}

static struct tcf_proto **htb_find_tcf(struct Qdisc *sch, unsigned long arg)
{
	struct htb_sched *q = qdisc_priv(sch);
	struct htb_class *cl = (struct htb_class *)arg;
	struct tcf_proto **fl = cl ? &cl->filter_list : &q->filter_list;

	return fl;
}

static unsigned long htb_bind_filter(struct Qdisc *sch, unsigned long parent,
				     u32 classid)
{
	struct htb_class *cl = htb_find(classid, sch);

	/*if (cl && !cl->level) return 0;
	   The line above used to be there to prevent attaching filters to
	   leaves. But at least tc_index filter uses this just to get class
	   for other reasons so that we have to allow for it.
	   ----
	   19.6.2002 As Werner explained it is ok - bind filter is just
	   another way to "lock" the class - unlike "get" this lock can
	   be broken by class during destroy IIUC.
	 */
	if (cl)
		cl->filter_cnt++;
	return (unsigned long)cl;
}

static void htb_unbind_filter(struct Qdisc *sch, unsigned long arg)
{
	struct htb_class *cl = (struct htb_class *)arg;

	if (cl)
		cl->filter_cnt--;
}

static void htb_walk(struct Qdisc *sch, struct qdisc_walker *arg)
{
	struct htb_sched *q = qdisc_priv(sch);
	struct htb_class *cl;
	struct hlist_node *n;
	unsigned int i;

	if (arg->stop)
		return;

	for (i = 0; i < q->clhash.hashsize; i++) {
		hlist_for_each_entry(cl, n, &q->clhash.hash[i], common.hnode) {
			if (arg->count < arg->skip) {
				arg->count++;
				continue;
			}
			if (arg->fn(sch, (unsigned long)cl, arg) < 0) {
				arg->stop = 1;
				return;
			}
			arg->count++;
		}
	}
}

static const struct Qdisc_class_ops htb_class_ops = {
	.graft		=	htb_graft,
	.leaf		=	htb_leaf,
	.qlen_notify	=	htb_qlen_notify,
	.get		=	htb_get,
	.put		=	htb_put,
	.change		=	htb_change_class,
	.delete		=	htb_delete,
	.walk		=	htb_walk,
	.tcf_chain	=	htb_find_tcf,
	.bind_tcf	=	htb_bind_filter,
	.unbind_tcf	=	htb_unbind_filter,
	.dump		=	htb_dump_class,
	.dump_stats	=	htb_dump_class_stats,
};

static struct Qdisc_ops htb_qdisc_ops __read_mostly = {
	.cl_ops		=	&htb_class_ops,
	.id		=	"htb",
	.priv_size	=	sizeof(struct htb_sched),
	.enqueue	=	htb_enqueue,
	.dequeue	=	htb_dequeue,
	.peek		=	qdisc_peek_dequeued,
	.drop		=	htb_drop,
	.init		=	htb_init,
	.reset		=	htb_reset,
	.destroy	=	htb_destroy,
	.dump		=	htb_dump,
	.owner		=	THIS_MODULE,
};

static int __init htb_module_init(void)
{
	return register_qdisc(&htb_qdisc_ops);
}

static void __exit htb_module_exit(void)
{
	unregister_qdisc(&htb_qdisc_ops);
}

module_init(htb_module_init)
module_exit(htb_module_exit)
MODULE_LICENSE("GPL");