/* net/sched/sch_hfsc.c -- Hierarchical Fair Service Curve (HFSC) scheduler. */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/types.h>
#include <linux/errno.h>
#include <linux/compiler.h>
#include <linux/spinlock.h>
#include <linux/skbuff.h>
#include <linux/string.h>
#include <linux/slab.h>
#include <linux/list.h>
#include <linux/rbtree.h>
#include <linux/init.h>
#include <linux/rtnetlink.h>
#include <linux/pkt_sched.h>
#include <net/netlink.h>
#include <net/pkt_sched.h>
#include <net/pkt_cls.h>
#include <asm/div64.h>
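
/*
 * A service curve is modelled as a two-piece linear function of time:
 * slope sm1 for the first dx time units (rising by dy bytes of
 * service), then slope sm2 afterwards.  Slopes are kept in a scaled
 * fixed-point form (see SM_SHIFT/ISM_SHIFT below); ism1/ism2 are the
 * inverse slopes, used to map an amount of service back to a point in
 * time.  A runtime_sc additionally records (x, y), the point the curve
 * currently starts from.
 */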
struct internal_sc {
	u64	sm1;	/* scaled slope of the 1st segment */
	u64	ism1;	/* scaled inverse-slope of the 1st segment */
	u64	dx;	/* the x-projection of the 1st segment */
	u64	dy;	/* the y-projection of the 1st segment */
	u64	sm2;	/* scaled slope of the 2nd segment */
	u64	ism2;	/* scaled inverse-slope of the 2nd segment */
};

/* runtime service curve */
struct runtime_sc {
	u64	x;	/* current starting position on x-axis */
	u64	y;	/* current starting position on y-axis */
	u64	sm1;	/* scaled slope of the 1st segment */
	u64	ism1;	/* scaled inverse-slope of the 1st segment */
	u64	dx;	/* the x-projection of the 1st segment */
	u64	dy;	/* the y-projection of the 1st segment */
	u64	sm2;	/* scaled slope of the 2nd segment */
	u64	ism2;	/* scaled inverse-slope of the 2nd segment */
};

enum hfsc_class_flags {
	HFSC_RSC = 0x1,		/* real-time service curve */
	HFSC_FSC = 0x2,		/* fair (link-sharing) service curve */
	HFSC_USC = 0x4		/* upper-limit service curve */
};

struct hfsc_class {
	struct Qdisc_class_common cl_common;

	struct gnet_stats_basic_packed bstats;
	struct gnet_stats_queue qstats;
	struct net_rate_estimator __rcu *rate_est;
	struct tcf_proto __rcu *filter_list;	/* filter list */
	struct tcf_block *block;
	unsigned int	filter_cnt;	/* filter count */
	unsigned int	level;		/* class level in hierarchy */

	struct hfsc_sched *sched;	/* scheduler data */
	struct hfsc_class *cl_parent;	/* parent class */
	struct list_head siblings;	/* sibling classes */
	struct list_head children;	/* child classes */
	struct Qdisc	*qdisc;		/* leaf qdisc */

	struct rb_node el_node;		/* qdisc's eligible tree member */
	struct rb_root vt_tree;		/* active children sorted by cl_vt */
	struct rb_node vt_node;		/* parent's vt_tree member */
	struct rb_root cf_tree;		/* active children sorted by cl_f */
	struct rb_node cf_node;		/* parent's cf_tree member */

	u64	cl_total;		/* total work in bytes */
	u64	cl_cumul;		/* cumulative work in bytes done by
					   real-time criteria */

	u64	cl_d;			/* deadline */
	u64	cl_e;			/* eligible time */
	u64	cl_vt;			/* virtual time */
	u64	cl_f;			/* time when this class will fit for
					   link-sharing, max(myf, cfmin) */
	u64	cl_myf;			/* my fit-time (calculated from this
					   class's own upperlimit curve) */
	u64	cl_cfmin;		/* earliest children's fit-time (used
					   with cl_myf to obtain cl_f) */
	u64	cl_cvtmin;		/* minimal virtual time among the
					   children fit for link-sharing
					   (monotonic within a period) */
	u64	cl_vtadj;		/* intra-period cumulative vt
					   adjustment */
	u64	cl_cvtoff;		/* largest virtual time seen among
					   the children */

	struct internal_sc cl_rsc;	/* internal real-time service curve */
	struct internal_sc cl_fsc;	/* internal fair service curve */
	struct internal_sc cl_usc;	/* internal upperlimit service curve */
	struct runtime_sc cl_deadline;	/* deadline curve */
	struct runtime_sc cl_eligible;	/* eligible curve */
	struct runtime_sc cl_virtual;	/* virtual curve */
	struct runtime_sc cl_ulimit;	/* upperlimit curve */

	u8		cl_flags;	/* which curves are valid */
	u32		cl_vtperiod;	/* vt period sequence number */
	u32		cl_parentperiod;/* parent's vt period sequence number */
	u32		cl_nactive;	/* number of active children */
};

struct hfsc_sched {
	u16	defcls;				/* default class id */
	struct hfsc_class root;			/* root class */
	struct Qdisc_class_hash clhash;		/* class hash */
	struct rb_root eligible;		/* eligible tree */
	struct qdisc_watchdog watchdog;		/* watchdog timer */
};

#define	HT_INFINITY	0xffffffffffffffffULL	/* infinite time value */
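
/*
 * The eligible tree holds backlogged classes with a real-time service
 * curve, sorted by their eligible times (cl_e).  There is one eligible
 * tree per hfsc instance; it drives the real-time criterion in
 * hfsc_dequeue().
 */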
static void
eltree_insert(struct hfsc_class *cl)
{
	struct rb_node **p = &cl->sched->eligible.rb_node;
	struct rb_node *parent = NULL;
	struct hfsc_class *cl1;

	while (*p != NULL) {
		parent = *p;
		cl1 = rb_entry(parent, struct hfsc_class, el_node);
		if (cl->cl_e >= cl1->cl_e)
			p = &parent->rb_right;
		else
			p = &parent->rb_left;
	}
	rb_link_node(&cl->el_node, parent, p);
	rb_insert_color(&cl->el_node, &cl->sched->eligible);
}

static inline void
eltree_remove(struct hfsc_class *cl)
{
	rb_erase(&cl->el_node, &cl->sched->eligible);
}

static inline void
eltree_update(struct hfsc_class *cl)
{
	eltree_remove(cl);
	eltree_insert(cl);
}

/* find the class with the minimum deadline among the eligible classes */
static inline struct hfsc_class *
eltree_get_mindl(struct hfsc_sched *q, u64 cur_time)
{
	struct hfsc_class *p, *cl = NULL;
	struct rb_node *n;

	for (n = rb_first(&q->eligible); n != NULL; n = rb_next(n)) {
		p = rb_entry(n, struct hfsc_class, el_node);
		if (p->cl_e > cur_time)
			break;
		if (cl == NULL || p->cl_d < cl->cl_d)
			cl = p;
	}
	return cl;
}

/* find the class with the minimum eligible time among eligible classes */
static inline struct hfsc_class *
eltree_get_minel(struct hfsc_sched *q)
{
	struct rb_node *n;

	n = rb_first(&q->eligible);
	if (n == NULL)
		return NULL;
	return rb_entry(n, struct hfsc_class, el_node);
}
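
/*
 * Each intermediate class keeps a vt_tree of its backlogged children,
 * sorted by virtual time (cl_vt).  It drives the link-sharing
 * criterion: dequeue follows the minimum-vt child whose fit time has
 * arrived.
 */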
static void
vttree_insert(struct hfsc_class *cl)
{
	struct rb_node **p = &cl->cl_parent->vt_tree.rb_node;
	struct rb_node *parent = NULL;
	struct hfsc_class *cl1;

	while (*p != NULL) {
		parent = *p;
		cl1 = rb_entry(parent, struct hfsc_class, vt_node);
		if (cl->cl_vt >= cl1->cl_vt)
			p = &parent->rb_right;
		else
			p = &parent->rb_left;
	}
	rb_link_node(&cl->vt_node, parent, p);
	rb_insert_color(&cl->vt_node, &cl->cl_parent->vt_tree);
}

static inline void
vttree_remove(struct hfsc_class *cl)
{
	rb_erase(&cl->vt_node, &cl->cl_parent->vt_tree);
}

static inline void
vttree_update(struct hfsc_class *cl)
{
	vttree_remove(cl);
	vttree_insert(cl);
}

static inline struct hfsc_class *
vttree_firstfit(struct hfsc_class *cl, u64 cur_time)
{
	struct hfsc_class *p;
	struct rb_node *n;

	for (n = rb_first(&cl->vt_tree); n != NULL; n = rb_next(n)) {
		p = rb_entry(n, struct hfsc_class, vt_node);
		if (p->cl_f <= cur_time)
			return p;
	}
	return NULL;
}
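
/*
 * get the leaf class with the minimum vt in the hierarchy
 */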
static struct hfsc_class *
vttree_get_minvt(struct hfsc_class *cl, u64 cur_time)
{
	/* if root-class's cfmin is bigger than cur_time nothing to do */
	if (cl->cl_cfmin > cur_time)
		return NULL;

	while (cl->level > 0) {
		cl = vttree_firstfit(cl, cur_time);
		if (cl == NULL)
			return NULL;

		/* update parent's cl_cvtmin */
		if (cl->cl_parent->cl_cvtmin < cl->cl_vt)
			cl->cl_parent->cl_cvtmin = cl->cl_vt;
	}
	return cl;
}
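
/*
 * cf_tree keeps the backlogged children of a class sorted by their fit
 * time (cl_f).  update_cfmin() reads its minimum to propagate the
 * earliest fit time up the hierarchy.
 */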
static void
cftree_insert(struct hfsc_class *cl)
{
	struct rb_node **p = &cl->cl_parent->cf_tree.rb_node;
	struct rb_node *parent = NULL;
	struct hfsc_class *cl1;

	while (*p != NULL) {
		parent = *p;
		cl1 = rb_entry(parent, struct hfsc_class, cf_node);
		if (cl->cl_f >= cl1->cl_f)
			p = &parent->rb_right;
		else
			p = &parent->rb_left;
	}
	rb_link_node(&cl->cf_node, parent, p);
	rb_insert_color(&cl->cf_node, &cl->cl_parent->cf_tree);
}

static inline void
cftree_remove(struct hfsc_class *cl)
{
	rb_erase(&cl->cf_node, &cl->cl_parent->cf_tree);
}

static inline void
cftree_update(struct hfsc_class *cl)
{
	cftree_remove(cl);
	cftree_insert(cl);
}
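
/*
 * service curve support functions
 *
 * external service curve parameters
 *	m: bps
 *	d: us
 * internal service curve parameters
 *	sm: (bytes/psched_us) << SM_SHIFT
 *	ism: (psched_us/byte) << ISM_SHIFT
 *	dx: psched_us
 *
 * sm and ism are scaled so that both slopes and inverse slopes retain
 * enough effective digits across the practical range of link rates;
 * SM_SHIFT and ISM_SHIFT are defined relative to PSCHED_SHIFT so the
 * scaling tracks the clock resolution.
 */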
#define	SM_SHIFT	(30 - PSCHED_SHIFT)
#define	ISM_SHIFT	(8 + PSCHED_SHIFT)

#define	SM_MASK		((1ULL << SM_SHIFT) - 1)
#define	ISM_MASK	((1ULL << ISM_SHIFT) - 1)

static inline u64
seg_x2y(u64 x, u64 sm)
{
	u64 y;

	/*
	 * compute
	 *	y = x * sm >> SM_SHIFT
	 * but divide it for the upper and lower bits to avoid overflow
	 */
	y = (x >> SM_SHIFT) * sm + (((x & SM_MASK) * sm) >> SM_SHIFT);
	return y;
}

static inline u64
seg_y2x(u64 y, u64 ism)
{
	u64 x;

	if (y == 0)
		x = 0;
	else if (ism == HT_INFINITY)
		x = HT_INFINITY;
	else {
		x = (y >> ISM_SHIFT) * ism
		    + (((y & ISM_MASK) * ism) >> ISM_SHIFT);
	}
	return x;
}

/* convert m (bps) into sm (bytes/psched us) */
static u64
m2sm(u32 m)
{
	u64 sm;

	sm = ((u64)m << SM_SHIFT);
	sm += PSCHED_TICKS_PER_SEC - 1;
	do_div(sm, PSCHED_TICKS_PER_SEC);
	return sm;
}

/* convert m (bps) into ism (psched us/byte) */
static u64
m2ism(u32 m)
{
	u64 ism;

	if (m == 0)
		ism = HT_INFINITY;
	else {
		ism = ((u64)PSCHED_TICKS_PER_SEC << ISM_SHIFT);
		ism += m - 1;
		do_div(ism, m);
	}
	return ism;
}

/* convert d (us) into dx (psched us) */
static u64
d2dx(u32 d)
{
	u64 dx;

	dx = ((u64)d * PSCHED_TICKS_PER_SEC);
	dx += USEC_PER_SEC - 1;
	do_div(dx, USEC_PER_SEC);
	return dx;
}

/* convert sm (bytes/psched us) into m (bps) */
static u32
sm2m(u64 sm)
{
	u64 m;

	m = (sm * PSCHED_TICKS_PER_SEC) >> SM_SHIFT;
	return (u32)m;
}

/* convert dx (psched us) into d (us) */
static u32
dx2d(u64 dx)
{
	u64 d;

	d = dx * USEC_PER_SEC;
	do_div(d, PSCHED_TICKS_PER_SEC);
	return (u32)d;
}

/* convert a tc service curve (m1, d, m2) into its internal form */
static void
sc2isc(struct tc_service_curve *sc, struct internal_sc *isc)
{
	isc->sm1  = m2sm(sc->m1);
	isc->ism1 = m2ism(sc->m1);
	isc->dx   = d2dx(sc->d);
	isc->dy   = seg_x2y(isc->dx, isc->sm1);
	isc->sm2  = m2sm(sc->m2);
	isc->ism2 = m2ism(sc->m2);
}

/*
 * initialize the runtime service curve with the given internal
 * service curve starting at (x, y).
 */
static void
rtsc_init(struct runtime_sc *rtsc, struct internal_sc *isc, u64 x, u64 y)
{
	rtsc->x	   = x;
	rtsc->y    = y;
	rtsc->sm1  = isc->sm1;
	rtsc->ism1 = isc->ism1;
	rtsc->dx   = isc->dx;
	rtsc->dy   = isc->dy;
	rtsc->sm2  = isc->sm2;
	rtsc->ism2 = isc->ism2;
}

/*
 * given an amount of service y, return the earliest time x at which
 * the runtime service curve reaches it
 */
static u64
rtsc_y2x(struct runtime_sc *rtsc, u64 y)
{
	u64 x;

	if (y < rtsc->y)
		x = rtsc->x;
	else if (y <= rtsc->y + rtsc->dy) {
		/* x belongs to the 1st segment */
		if (rtsc->dy == 0)
			x = rtsc->x + rtsc->dx;
		else
			x = rtsc->x + seg_y2x(y - rtsc->y, rtsc->ism1);
	} else {
		/* x belongs to the 2nd segment */
		x = rtsc->x + rtsc->dx
		    + seg_y2x(y - rtsc->y - rtsc->dy, rtsc->ism2);
	}
	return x;
}

/* given a time x, return the amount of service y the curve provides by x */
static u64
rtsc_x2y(struct runtime_sc *rtsc, u64 x)
{
	u64 y;

	if (x <= rtsc->x)
		y = rtsc->y;
	else if (x <= rtsc->x + rtsc->dx)
		/* y belongs to the 1st segment */
		y = rtsc->y + seg_x2y(x - rtsc->x, rtsc->sm1);
	else
		/* y belongs to the 2nd segment */
		y = rtsc->y + rtsc->dy
		    + seg_x2y(x - rtsc->x - rtsc->dx, rtsc->sm2);
	return y;
}
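
/*
 * update the runtime service curve by taking the minimum of the current
 * runtime service curve and the service curve starting at (x, y).
 */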
static void
rtsc_min(struct runtime_sc *rtsc, struct internal_sc *isc, u64 x, u64 y)
{
	u64 y1, y2, dx, dy;
	u32 dsm;

	if (isc->sm1 <= isc->sm2) {
		/* service curve is convex */
		y1 = rtsc_x2y(rtsc, x);
		if (y1 < y)
			/* the current rtsc is smaller */
			return;
		rtsc->x = x;
		rtsc->y = y;
		return;
	}

	/*
	 * service curve is concave
	 * compute the two y values of the current rtsc
	 *	y1: at x
	 *	y2: at (x + dx)
	 */
	y1 = rtsc_x2y(rtsc, x);
	if (y1 <= y) {
		/* rtsc is below isc, no change to rtsc */
		return;
	}

	y2 = rtsc_x2y(rtsc, x + isc->dx);
	if (y2 >= y + isc->dy) {
		/* rtsc is above isc, replace rtsc by isc */
		rtsc->x = x;
		rtsc->y = y;
		rtsc->dx = isc->dx;
		rtsc->dy = isc->dy;
		return;
	}

	/*
	 * the two curves intersect
	 * compute the offsets (dx, dy) using the reverse
	 * function of seg_x2y()
	 *	seg_x2y(dx, sm1) == seg_x2y(dx, sm2) + (y1 - y)
	 */
	dx = (y1 - y) << SM_SHIFT;
	dsm = isc->sm1 - isc->sm2;
	do_div(dx, dsm);
	/*
	 * check if (x, y1) belongs to the 1st segment of rtsc.
	 * if so, add the offset.
	 */
	if (rtsc->x + rtsc->dx > x)
		dx += rtsc->x + rtsc->dx - x;
	dy = seg_x2y(dx, isc->sm1);

	rtsc->x = x;
	rtsc->y = y;
	rtsc->dx = dx;
	rtsc->dy = dy;
}
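
/*
 * init_ed()/update_ed() compute the eligible time (cl_e) and deadline
 * (cl_d) of a class from its eligible and deadline curves and keep the
 * class positioned in the eligible tree.
 */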
static void
init_ed(struct hfsc_class *cl, unsigned int next_len)
{
	u64 cur_time = psched_get_time();

	/* update the deadline curve */
	rtsc_min(&cl->cl_deadline, &cl->cl_rsc, cur_time, cl->cl_cumul);

	/*
	 * update the eligible curve.
	 * for concave, it is equal to the deadline curve.
	 * for convex, it is a linear curve with slope m2.
	 */
	cl->cl_eligible = cl->cl_deadline;
	if (cl->cl_rsc.sm1 <= cl->cl_rsc.sm2) {
		cl->cl_eligible.dx = 0;
		cl->cl_eligible.dy = 0;
	}

	/* compute e and d */
	cl->cl_e = rtsc_y2x(&cl->cl_eligible, cl->cl_cumul);
	cl->cl_d = rtsc_y2x(&cl->cl_deadline, cl->cl_cumul + next_len);

	eltree_insert(cl);
}

static void
update_ed(struct hfsc_class *cl, unsigned int next_len)
{
	cl->cl_e = rtsc_y2x(&cl->cl_eligible, cl->cl_cumul);
	cl->cl_d = rtsc_y2x(&cl->cl_deadline, cl->cl_cumul + next_len);

	eltree_update(cl);
}

static inline void
update_d(struct hfsc_class *cl, unsigned int next_len)
{
	cl->cl_d = rtsc_y2x(&cl->cl_deadline, cl->cl_cumul + next_len);
}

static inline void
update_cfmin(struct hfsc_class *cl)
{
	struct rb_node *n = rb_first(&cl->cf_tree);
	struct hfsc_class *p;

	if (n == NULL) {
		cl->cl_cfmin = 0;
		return;
	}
	p = rb_entry(n, struct hfsc_class, cf_node);
	cl->cl_cfmin = p->cl_f;
}
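
/*
 * init_vf() activates a class that has just become backlogged: walking
 * from the class toward the root, it picks a starting virtual time for
 * each newly active ancestor, updates the virtual (and, with HFSC_USC,
 * upper-limit) curves, and inserts the class into its parent's vt and
 * cf trees.
 */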
static void
init_vf(struct hfsc_class *cl, unsigned int len)
{
	struct hfsc_class *max_cl;
	struct rb_node *n;
	u64 vt, f, cur_time;
	int go_active;

	cur_time = 0;
	go_active = 1;
	for (; cl->cl_parent != NULL; cl = cl->cl_parent) {
		if (go_active && cl->cl_nactive++ == 0)
			go_active = 1;
		else
			go_active = 0;

		if (go_active) {
			n = rb_last(&cl->cl_parent->vt_tree);
			if (n != NULL) {
				max_cl = rb_entry(n, struct hfsc_class, vt_node);
				/*
				 * set vt to the average of the min and max
				 * classes.  if the parent's period didn't
				 * change, don't decrease vt of the class.
				 */
				vt = max_cl->cl_vt;
				if (cl->cl_parent->cl_cvtmin != 0)
					vt = (cl->cl_parent->cl_cvtmin + vt)/2;

				if (cl->cl_parent->cl_vtperiod !=
				    cl->cl_parentperiod || vt > cl->cl_vt)
					cl->cl_vt = vt;
			} else {
				/*
				 * first child for a new parent backlog
				 * period.  initialize cl_vt to the highest
				 * virtual time seen among the siblings so
				 * far (cl_cvtoff).
				 */
				cl->cl_vt = cl->cl_parent->cl_cvtoff;
				cl->cl_parent->cl_cvtmin = 0;
			}

			/* update the virtual curve */
			rtsc_min(&cl->cl_virtual, &cl->cl_fsc, cl->cl_vt, cl->cl_total);
			cl->cl_vtadj = 0;

			cl->cl_vtperiod++;  /* increment vt period */
			cl->cl_parentperiod = cl->cl_parent->cl_vtperiod;
			if (cl->cl_parent->cl_nactive == 0)
				cl->cl_parentperiod++;
			cl->cl_f = 0;

			vttree_insert(cl);
			cftree_insert(cl);

			if (cl->cl_flags & HFSC_USC) {
				/* class has upper limit curve */
				if (cur_time == 0)
					cur_time = psched_get_time();

				/* update the ulimit curve */
				rtsc_min(&cl->cl_ulimit, &cl->cl_usc, cur_time,
					 cl->cl_total);
				/* compute myf */
				cl->cl_myf = rtsc_y2x(&cl->cl_ulimit,
						      cl->cl_total);
			}
		}

		f = max(cl->cl_myf, cl->cl_cfmin);
		if (f != cl->cl_f) {
			cl->cl_f = f;
			cftree_update(cl);
		}
		update_cfmin(cl->cl_parent);
	}
}
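
/*
 * update_vf() charges len bytes of work to the class and each of its
 * ancestors after a dequeue, recomputes their virtual and fit times,
 * and detaches a subtree that has gone passive (no more backlog).
 */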
static void
update_vf(struct hfsc_class *cl, unsigned int len, u64 cur_time)
{
	u64 f;
	int go_passive = 0;

	if (cl->qdisc->q.qlen == 0 && cl->cl_flags & HFSC_FSC)
		go_passive = 1;

	for (; cl->cl_parent != NULL; cl = cl->cl_parent) {
		cl->cl_total += len;

		if (!(cl->cl_flags & HFSC_FSC) || cl->cl_nactive == 0)
			continue;

		if (go_passive && --cl->cl_nactive == 0)
			go_passive = 1;
		else
			go_passive = 0;

		/* update vt */
		cl->cl_vt = rtsc_y2x(&cl->cl_virtual, cl->cl_total) + cl->cl_vtadj;

		/*
		 * if vt of the class is smaller than cvtmin,
		 * the class was skipped in the past due to non-fit.
		 * if so, we need to adjust vtadj.
		 */
		if (cl->cl_vt < cl->cl_parent->cl_cvtmin) {
			cl->cl_vtadj += cl->cl_parent->cl_cvtmin - cl->cl_vt;
			cl->cl_vt = cl->cl_parent->cl_cvtmin;
		}

		if (go_passive) {
			/* no more active child, going passive */

			/* update cvtoff of the parent class */
			if (cl->cl_vt > cl->cl_parent->cl_cvtoff)
				cl->cl_parent->cl_cvtoff = cl->cl_vt;

			/* remove this class from the vt tree */
			vttree_remove(cl);

			cftree_remove(cl);
			update_cfmin(cl->cl_parent);

			continue;
		}

		/* update the vt tree */
		vttree_update(cl);

		/* update f */
		if (cl->cl_flags & HFSC_USC) {
			cl->cl_myf = rtsc_y2x(&cl->cl_ulimit, cl->cl_total);
#if 0
			/*
			 * Disabled alternative: also track a cumulative
			 * myf adjustment so that a class whose fit time
			 * lags more than one clock tick behind cur_time
			 * is pulled forward instead of going greedy after
			 * an idle period.  Note cl_myfadj is not a field
			 * of hfsc_class here; this block is compiled out.
			 */
			cl->cl_myf = cl->cl_myfadj + rtsc_y2x(&cl->cl_ulimit,
							      cl->cl_total);
			myf_bound = cur_time - PSCHED_JIFFIE2US(1);
			if (cl->cl_myf < myf_bound) {
				delta = cur_time - cl->cl_myf;
				cl->cl_myfadj += delta;
				cl->cl_myf += delta;
			}
#endif
		}

		f = max(cl->cl_myf, cl->cl_cfmin);
		if (f != cl->cl_f) {
			cl->cl_f = f;
			cftree_update(cl);
			update_cfmin(cl->cl_parent);
		}
	}
}

static unsigned int
qdisc_peek_len(struct Qdisc *sch)
{
	struct sk_buff *skb;
	unsigned int len;

	skb = sch->ops->peek(sch);
	if (unlikely(skb == NULL)) {
		qdisc_warn_nonwc("qdisc_peek_len", sch);
		return 0;
	}
	len = qdisc_pkt_len(skb);

	return len;
}

static void
hfsc_purge_queue(struct Qdisc *sch, struct hfsc_class *cl)
{
	unsigned int len = cl->qdisc->q.qlen;
	unsigned int backlog = cl->qdisc->qstats.backlog;

	qdisc_reset(cl->qdisc);
	qdisc_tree_reduce_backlog(cl->qdisc, len, backlog);
}

static void
hfsc_adjust_levels(struct hfsc_class *cl)
{
	struct hfsc_class *p;
	unsigned int level;

	do {
		level = 0;
		list_for_each_entry(p, &cl->children, siblings) {
			if (p->level >= level)
				level = p->level + 1;
		}
		cl->level = level;
	} while ((cl = cl->cl_parent) != NULL);
}

static inline struct hfsc_class *
hfsc_find_class(u32 classid, struct Qdisc *sch)
{
	struct hfsc_sched *q = qdisc_priv(sch);
	struct Qdisc_class_common *clc;

	clc = qdisc_class_find(&q->clhash, classid);
	if (clc == NULL)
		return NULL;
	return container_of(clc, struct hfsc_class, cl_common);
}

static void
hfsc_change_rsc(struct hfsc_class *cl, struct tc_service_curve *rsc,
		u64 cur_time)
{
	sc2isc(rsc, &cl->cl_rsc);
	rtsc_init(&cl->cl_deadline, &cl->cl_rsc, cur_time, cl->cl_cumul);
	cl->cl_eligible = cl->cl_deadline;
	if (cl->cl_rsc.sm1 <= cl->cl_rsc.sm2) {
		cl->cl_eligible.dx = 0;
		cl->cl_eligible.dy = 0;
	}
	cl->cl_flags |= HFSC_RSC;
}

static void
hfsc_change_fsc(struct hfsc_class *cl, struct tc_service_curve *fsc)
{
	sc2isc(fsc, &cl->cl_fsc);
	rtsc_init(&cl->cl_virtual, &cl->cl_fsc, cl->cl_vt, cl->cl_total);
	cl->cl_flags |= HFSC_FSC;
}

static void
hfsc_change_usc(struct hfsc_class *cl, struct tc_service_curve *usc,
		u64 cur_time)
{
	sc2isc(usc, &cl->cl_usc);
	rtsc_init(&cl->cl_ulimit, &cl->cl_usc, cur_time, cl->cl_total);
	cl->cl_flags |= HFSC_USC;
}

static const struct nla_policy hfsc_policy[TCA_HFSC_MAX + 1] = {
	[TCA_HFSC_RSC]	= { .len = sizeof(struct tc_service_curve) },
	[TCA_HFSC_FSC]	= { .len = sizeof(struct tc_service_curve) },
	[TCA_HFSC_USC]	= { .len = sizeof(struct tc_service_curve) },
};
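
/*
 * hfsc_change_class() validates the netlink attributes, then either
 * updates the curves of an existing class in place or allocates and
 * links a new class under the requested parent.  A curve given with
 * m1 == m2 == 0 is treated as absent.
 */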
static int
hfsc_change_class(struct Qdisc *sch, u32 classid, u32 parentid,
		  struct nlattr **tca, unsigned long *arg)
{
	struct hfsc_sched *q = qdisc_priv(sch);
	struct hfsc_class *cl = (struct hfsc_class *)*arg;
	struct hfsc_class *parent = NULL;
	struct nlattr *opt = tca[TCA_OPTIONS];
	struct nlattr *tb[TCA_HFSC_MAX + 1];
	struct tc_service_curve *rsc = NULL, *fsc = NULL, *usc = NULL;
	u64 cur_time;
	int err;

	if (opt == NULL)
		return -EINVAL;

	err = nla_parse_nested(tb, TCA_HFSC_MAX, opt, hfsc_policy, NULL);
	if (err < 0)
		return err;

	if (tb[TCA_HFSC_RSC]) {
		rsc = nla_data(tb[TCA_HFSC_RSC]);
		if (rsc->m1 == 0 && rsc->m2 == 0)
			rsc = NULL;
	}

	if (tb[TCA_HFSC_FSC]) {
		fsc = nla_data(tb[TCA_HFSC_FSC]);
		if (fsc->m1 == 0 && fsc->m2 == 0)
			fsc = NULL;
	}

	if (tb[TCA_HFSC_USC]) {
		usc = nla_data(tb[TCA_HFSC_USC]);
		if (usc->m1 == 0 && usc->m2 == 0)
			usc = NULL;
	}

	if (cl != NULL) {
		int old_flags;

		if (parentid) {
			if (cl->cl_parent &&
			    cl->cl_parent->cl_common.classid != parentid)
				return -EINVAL;
			if (cl->cl_parent == NULL && parentid != TC_H_ROOT)
				return -EINVAL;
		}
		cur_time = psched_get_time();

		if (tca[TCA_RATE]) {
			err = gen_replace_estimator(&cl->bstats, NULL,
						    &cl->rate_est,
						    NULL,
						    qdisc_root_sleeping_running(sch),
						    tca[TCA_RATE]);
			if (err)
				return err;
		}

		sch_tree_lock(sch);
		old_flags = cl->cl_flags;

		if (rsc != NULL)
			hfsc_change_rsc(cl, rsc, cur_time);
		if (fsc != NULL)
			hfsc_change_fsc(cl, fsc);
		if (usc != NULL)
			hfsc_change_usc(cl, usc, cur_time);

		if (cl->qdisc->q.qlen != 0) {
			int len = qdisc_peek_len(cl->qdisc);

			if (cl->cl_flags & HFSC_RSC) {
				if (old_flags & HFSC_RSC)
					update_ed(cl, len);
				else
					init_ed(cl, len);
			}

			if (cl->cl_flags & HFSC_FSC) {
				if (old_flags & HFSC_FSC)
					update_vf(cl, 0, cur_time);
				else
					init_vf(cl, len);
			}
		}
		sch_tree_unlock(sch);

		return 0;
	}

	if (parentid == TC_H_ROOT)
		return -EEXIST;

	parent = &q->root;
	if (parentid) {
		parent = hfsc_find_class(parentid, sch);
		if (parent == NULL)
			return -ENOENT;
	}

	if (classid == 0 || TC_H_MAJ(classid ^ sch->handle) != 0)
		return -EINVAL;
	if (hfsc_find_class(classid, sch))
		return -EEXIST;

	if (rsc == NULL && fsc == NULL)
		return -EINVAL;

	cl = kzalloc(sizeof(struct hfsc_class), GFP_KERNEL);
	if (cl == NULL)
		return -ENOBUFS;

	err = tcf_block_get(&cl->block, &cl->filter_list, sch);
	if (err) {
		kfree(cl);
		return err;
	}

	if (tca[TCA_RATE]) {
		err = gen_new_estimator(&cl->bstats, NULL, &cl->rate_est,
					NULL,
					qdisc_root_sleeping_running(sch),
					tca[TCA_RATE]);
		if (err) {
			tcf_block_put(cl->block);
			kfree(cl);
			return err;
		}
	}

	if (rsc != NULL)
		hfsc_change_rsc(cl, rsc, 0);
	if (fsc != NULL)
		hfsc_change_fsc(cl, fsc);
	if (usc != NULL)
		hfsc_change_usc(cl, usc, 0);

	cl->cl_common.classid = classid;
	cl->sched     = q;
	cl->cl_parent = parent;
	cl->qdisc = qdisc_create_dflt(sch->dev_queue,
				      &pfifo_qdisc_ops, classid);
	if (cl->qdisc == NULL)
		cl->qdisc = &noop_qdisc;
	else
		qdisc_hash_add(cl->qdisc, true);
	INIT_LIST_HEAD(&cl->children);
	cl->vt_tree = RB_ROOT;
	cl->cf_tree = RB_ROOT;

	sch_tree_lock(sch);
	qdisc_class_hash_insert(&q->clhash, &cl->cl_common);
	list_add_tail(&cl->siblings, &parent->children);
	if (parent->level == 0)
		hfsc_purge_queue(sch, parent);
	hfsc_adjust_levels(parent);
	sch_tree_unlock(sch);

	qdisc_class_hash_grow(sch, &q->clhash);

	*arg = (unsigned long)cl;
	return 0;
}

static void
hfsc_destroy_class(struct Qdisc *sch, struct hfsc_class *cl)
{
	struct hfsc_sched *q = qdisc_priv(sch);

	tcf_block_put(cl->block);
	qdisc_destroy(cl->qdisc);
	gen_kill_estimator(&cl->rate_est);
	if (cl != &q->root)
		kfree(cl);
}

static int
hfsc_delete_class(struct Qdisc *sch, unsigned long arg)
{
	struct hfsc_sched *q = qdisc_priv(sch);
	struct hfsc_class *cl = (struct hfsc_class *)arg;

	if (cl->level > 0 || cl->filter_cnt > 0 || cl == &q->root)
		return -EBUSY;

	sch_tree_lock(sch);

	list_del(&cl->siblings);
	hfsc_adjust_levels(cl->cl_parent);

	hfsc_purge_queue(sch, cl);
	qdisc_class_hash_remove(&q->clhash, &cl->cl_common);

	sch_tree_unlock(sch);

	hfsc_destroy_class(sch, cl);
	return 0;
}
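
/*
 * classify a packet: a leaf class matching skb->priority wins outright;
 * otherwise the filter chains are walked from the root down, and on
 * failure the default class (q->defcls) is used.
 */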
static struct hfsc_class *
hfsc_classify(struct sk_buff *skb, struct Qdisc *sch, int *qerr)
{
	struct hfsc_sched *q = qdisc_priv(sch);
	struct hfsc_class *head, *cl;
	struct tcf_result res;
	struct tcf_proto *tcf;
	int result;

	if (TC_H_MAJ(skb->priority ^ sch->handle) == 0 &&
	    (cl = hfsc_find_class(skb->priority, sch)) != NULL)
		if (cl->level == 0)
			return cl;

	*qerr = NET_XMIT_SUCCESS | __NET_XMIT_BYPASS;
	head = &q->root;
	tcf = rcu_dereference_bh(q->root.filter_list);
	while (tcf && (result = tcf_classify(skb, tcf, &res, false)) >= 0) {
#ifdef CONFIG_NET_CLS_ACT
		switch (result) {
		case TC_ACT_QUEUED:
		case TC_ACT_STOLEN:
		case TC_ACT_TRAP:
			*qerr = NET_XMIT_SUCCESS | __NET_XMIT_STOLEN;
			/* fall through */
		case TC_ACT_SHOT:
			return NULL;
		}
#endif
		cl = (struct hfsc_class *)res.class;
		if (!cl) {
			cl = hfsc_find_class(res.classid, sch);
			if (!cl)
				break; /* filter selected invalid classid */
			if (cl->level >= head->level)
				break; /* filter may only point downwards */
		}

		if (cl->level == 0)
			return cl; /* hit leaf class */

		/* apply inner filter chain */
		tcf = rcu_dereference_bh(cl->filter_list);
		head = cl;
	}

	/* classification failed, try default class */
	cl = hfsc_find_class(TC_H_MAKE(TC_H_MAJ(sch->handle), q->defcls), sch);
	if (cl == NULL || cl->level > 0)
		return NULL;

	return cl;
}

static int
hfsc_graft_class(struct Qdisc *sch, unsigned long arg, struct Qdisc *new,
		 struct Qdisc **old)
{
	struct hfsc_class *cl = (struct hfsc_class *)arg;

	if (cl->level > 0)
		return -EINVAL;
	if (new == NULL) {
		new = qdisc_create_dflt(sch->dev_queue, &pfifo_qdisc_ops,
					cl->cl_common.classid);
		if (new == NULL)
			new = &noop_qdisc;
	}

	*old = qdisc_replace(sch, new, &cl->qdisc);
	return 0;
}

static struct Qdisc *
hfsc_class_leaf(struct Qdisc *sch, unsigned long arg)
{
	struct hfsc_class *cl = (struct hfsc_class *)arg;

	if (cl->level == 0)
		return cl->qdisc;

	return NULL;
}

static void
hfsc_qlen_notify(struct Qdisc *sch, unsigned long arg)
{
	struct hfsc_class *cl = (struct hfsc_class *)arg;

	/* vttree is now handled in update_vf() so that update_vf(cl, 0, 0)
	 * needs to be called explicitly to remove a class from vttree.
	 */
	update_vf(cl, 0, 0);
	if (cl->cl_flags & HFSC_RSC)
		eltree_remove(cl);
}

static unsigned long
hfsc_search_class(struct Qdisc *sch, u32 classid)
{
	return (unsigned long)hfsc_find_class(classid, sch);
}

static unsigned long
hfsc_bind_tcf(struct Qdisc *sch, unsigned long parent, u32 classid)
{
	struct hfsc_class *p = (struct hfsc_class *)parent;
	struct hfsc_class *cl = hfsc_find_class(classid, sch);

	if (cl != NULL) {
		if (p != NULL && p->level <= cl->level)
			return 0;
		cl->filter_cnt++;
	}

	return (unsigned long)cl;
}

static void
hfsc_unbind_tcf(struct Qdisc *sch, unsigned long arg)
{
	struct hfsc_class *cl = (struct hfsc_class *)arg;

	cl->filter_cnt--;
}

static struct tcf_block *hfsc_tcf_block(struct Qdisc *sch, unsigned long arg)
{
	struct hfsc_sched *q = qdisc_priv(sch);
	struct hfsc_class *cl = (struct hfsc_class *)arg;

	if (cl == NULL)
		cl = &q->root;

	return cl->block;
}

static int
hfsc_dump_sc(struct sk_buff *skb, int attr, struct internal_sc *sc)
{
	struct tc_service_curve tsc;

	tsc.m1 = sm2m(sc->sm1);
	tsc.d  = dx2d(sc->dx);
	tsc.m2 = sm2m(sc->sm2);
	if (nla_put(skb, attr, sizeof(tsc), &tsc))
		goto nla_put_failure;

	return skb->len;

 nla_put_failure:
	return -1;
}

static int
hfsc_dump_curves(struct sk_buff *skb, struct hfsc_class *cl)
{
	if ((cl->cl_flags & HFSC_RSC) &&
	    (hfsc_dump_sc(skb, TCA_HFSC_RSC, &cl->cl_rsc) < 0))
		goto nla_put_failure;

	if ((cl->cl_flags & HFSC_FSC) &&
	    (hfsc_dump_sc(skb, TCA_HFSC_FSC, &cl->cl_fsc) < 0))
		goto nla_put_failure;

	if ((cl->cl_flags & HFSC_USC) &&
	    (hfsc_dump_sc(skb, TCA_HFSC_USC, &cl->cl_usc) < 0))
		goto nla_put_failure;

	return skb->len;

 nla_put_failure:
	return -1;
}

static int
hfsc_dump_class(struct Qdisc *sch, unsigned long arg, struct sk_buff *skb,
		struct tcmsg *tcm)
{
	struct hfsc_class *cl = (struct hfsc_class *)arg;
	struct nlattr *nest;

	tcm->tcm_parent = cl->cl_parent ? cl->cl_parent->cl_common.classid :
					  TC_H_ROOT;
	tcm->tcm_handle = cl->cl_common.classid;
	if (cl->level == 0)
		tcm->tcm_info = cl->qdisc->handle;

	nest = nla_nest_start(skb, TCA_OPTIONS);
	if (nest == NULL)
		goto nla_put_failure;
	if (hfsc_dump_curves(skb, cl) < 0)
		goto nla_put_failure;
	return nla_nest_end(skb, nest);

 nla_put_failure:
	nla_nest_cancel(skb, nest);
	return -EMSGSIZE;
}

static int
hfsc_dump_class_stats(struct Qdisc *sch, unsigned long arg,
		      struct gnet_dump *d)
{
	struct hfsc_class *cl = (struct hfsc_class *)arg;
	struct tc_hfsc_stats xstats;

	cl->qstats.backlog = cl->qdisc->qstats.backlog;
	xstats.level  = cl->level;
	xstats.period = cl->cl_vtperiod;
	xstats.work   = cl->cl_total;
	xstats.rtwork = cl->cl_cumul;

	if (gnet_stats_copy_basic(qdisc_root_sleeping_running(sch), d, NULL, &cl->bstats) < 0 ||
	    gnet_stats_copy_rate_est(d, &cl->rate_est) < 0 ||
	    gnet_stats_copy_queue(d, NULL, &cl->qstats, cl->qdisc->q.qlen) < 0)
		return -1;

	return gnet_stats_copy_app(d, &xstats, sizeof(xstats));
}

static void
hfsc_walk(struct Qdisc *sch, struct qdisc_walker *arg)
{
	struct hfsc_sched *q = qdisc_priv(sch);
	struct hfsc_class *cl;
	unsigned int i;

	if (arg->stop)
		return;

	for (i = 0; i < q->clhash.hashsize; i++) {
		hlist_for_each_entry(cl, &q->clhash.hash[i],
				     cl_common.hnode) {
			if (arg->count < arg->skip) {
				arg->count++;
				continue;
			}
			if (arg->fn(sch, (unsigned long)cl, arg) < 0) {
				arg->stop = 1;
				return;
			}
			arg->count++;
		}
	}
}

static void
hfsc_schedule_watchdog(struct Qdisc *sch)
{
	struct hfsc_sched *q = qdisc_priv(sch);
	struct hfsc_class *cl;
	u64 next_time = 0;

	cl = eltree_get_minel(q);
	if (cl)
		next_time = cl->cl_e;
	if (q->root.cl_cfmin != 0) {
		if (next_time == 0 || next_time > q->root.cl_cfmin)
			next_time = q->root.cl_cfmin;
	}
	WARN_ON(next_time == 0);
	qdisc_watchdog_schedule(&q->watchdog, next_time);
}

static int
hfsc_init_qdisc(struct Qdisc *sch, struct nlattr *opt)
{
	struct hfsc_sched *q = qdisc_priv(sch);
	struct tc_hfsc_qopt *qopt;
	int err;

	qdisc_watchdog_init(&q->watchdog, sch);

	if (opt == NULL || nla_len(opt) < sizeof(*qopt))
		return -EINVAL;
	qopt = nla_data(opt);

	q->defcls = qopt->defcls;
	err = qdisc_class_hash_init(&q->clhash);
	if (err < 0)
		return err;
	q->eligible = RB_ROOT;

	err = tcf_block_get(&q->root.block, &q->root.filter_list, sch);
	if (err)
		return err;

	q->root.cl_common.classid = sch->handle;
	q->root.sched = q;
	q->root.qdisc = qdisc_create_dflt(sch->dev_queue, &pfifo_qdisc_ops,
					  sch->handle);
	if (q->root.qdisc == NULL)
		q->root.qdisc = &noop_qdisc;
	else
		qdisc_hash_add(q->root.qdisc, true);
	INIT_LIST_HEAD(&q->root.children);
	q->root.vt_tree = RB_ROOT;
	q->root.cf_tree = RB_ROOT;

	qdisc_class_hash_insert(&q->clhash, &q->root.cl_common);
	qdisc_class_hash_grow(sch, &q->clhash);

	return 0;
}

static int
hfsc_change_qdisc(struct Qdisc *sch, struct nlattr *opt)
{
	struct hfsc_sched *q = qdisc_priv(sch);
	struct tc_hfsc_qopt *qopt;

	if (opt == NULL || nla_len(opt) < sizeof(*qopt))
		return -EINVAL;
	qopt = nla_data(opt);

	sch_tree_lock(sch);
	q->defcls = qopt->defcls;
	sch_tree_unlock(sch);

	return 0;
}

static void
hfsc_reset_class(struct hfsc_class *cl)
{
	cl->cl_total        = 0;
	cl->cl_cumul        = 0;
	cl->cl_d            = 0;
	cl->cl_e            = 0;
	cl->cl_vt           = 0;
	cl->cl_vtadj        = 0;
	cl->cl_cvtmin       = 0;
	cl->cl_cvtoff       = 0;
	cl->cl_vtperiod     = 0;
	cl->cl_parentperiod = 0;
	cl->cl_f            = 0;
	cl->cl_myf          = 0;
	cl->cl_cfmin        = 0;
	cl->cl_nactive      = 0;

	cl->vt_tree = RB_ROOT;
	cl->cf_tree = RB_ROOT;
	qdisc_reset(cl->qdisc);

	if (cl->cl_flags & HFSC_RSC)
		rtsc_init(&cl->cl_deadline, &cl->cl_rsc, 0, 0);
	if (cl->cl_flags & HFSC_FSC)
		rtsc_init(&cl->cl_virtual, &cl->cl_fsc, 0, 0);
	if (cl->cl_flags & HFSC_USC)
		rtsc_init(&cl->cl_ulimit, &cl->cl_usc, 0, 0);
}

static void
hfsc_reset_qdisc(struct Qdisc *sch)
{
	struct hfsc_sched *q = qdisc_priv(sch);
	struct hfsc_class *cl;
	unsigned int i;

	for (i = 0; i < q->clhash.hashsize; i++) {
		hlist_for_each_entry(cl, &q->clhash.hash[i], cl_common.hnode)
			hfsc_reset_class(cl);
	}
	q->eligible = RB_ROOT;
	qdisc_watchdog_cancel(&q->watchdog);
	sch->qstats.backlog = 0;
	sch->q.qlen = 0;
}

static void
hfsc_destroy_qdisc(struct Qdisc *sch)
{
	struct hfsc_sched *q = qdisc_priv(sch);
	struct hlist_node *next;
	struct hfsc_class *cl;
	unsigned int i;

	/* put all filter blocks first so no filter can reference a class
	 * while the classes are being destroyed below
	 */
	for (i = 0; i < q->clhash.hashsize; i++) {
		hlist_for_each_entry(cl, &q->clhash.hash[i], cl_common.hnode) {
			tcf_block_put(cl->block);
			cl->block = NULL;
		}
	}
	for (i = 0; i < q->clhash.hashsize; i++) {
		hlist_for_each_entry_safe(cl, next, &q->clhash.hash[i],
					  cl_common.hnode)
			hfsc_destroy_class(sch, cl);
	}
	qdisc_class_hash_destroy(&q->clhash);
	qdisc_watchdog_cancel(&q->watchdog);
}

static int
hfsc_dump_qdisc(struct Qdisc *sch, struct sk_buff *skb)
{
	struct hfsc_sched *q = qdisc_priv(sch);
	unsigned char *b = skb_tail_pointer(skb);
	struct tc_hfsc_qopt qopt;

	qopt.defcls = q->defcls;
	if (nla_put(skb, TCA_OPTIONS, sizeof(qopt), &qopt))
		goto nla_put_failure;
	return skb->len;

 nla_put_failure:
	nlmsg_trim(skb, b);
	return -1;
}

static int
hfsc_enqueue(struct sk_buff *skb, struct Qdisc *sch, struct sk_buff **to_free)
{
	struct hfsc_class *cl;
	int uninitialized_var(err);

	cl = hfsc_classify(skb, sch, &err);
	if (cl == NULL) {
		if (err & __NET_XMIT_BYPASS)
			qdisc_qstats_drop(sch);
		__qdisc_drop(skb, to_free);
		return err;
	}

	err = qdisc_enqueue(skb, cl->qdisc, to_free);
	if (unlikely(err != NET_XMIT_SUCCESS)) {
		if (net_xmit_drop_count(err)) {
			cl->qstats.drops++;
			qdisc_qstats_drop(sch);
		}
		return err;
	}

	if (cl->qdisc->q.qlen == 1) {
		unsigned int len = qdisc_pkt_len(skb);

		if (cl->cl_flags & HFSC_RSC)
			init_ed(cl, len);
		if (cl->cl_flags & HFSC_FSC)
			init_vf(cl, len);
		/*
		 * If this is the first packet, isolate the head so an
		 * eventual head drop before the first dequeue operation
		 * has no chance to invalidate the deadline.
		 */
		if (cl->cl_flags & HFSC_RSC)
			cl->qdisc->ops->peek(cl->qdisc);
	}

	qdisc_qstats_backlog_inc(sch, skb);
	sch->q.qlen++;

	return NET_XMIT_SUCCESS;
}
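
/*
 * dequeue: prefer the real-time criterion (minimum deadline among
 * eligible classes); if no class is eligible, fall back to link-sharing
 * (minimum virtual time in the hierarchy), or arm the watchdog.
 */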
static struct sk_buff *
hfsc_dequeue(struct Qdisc *sch)
{
	struct hfsc_sched *q = qdisc_priv(sch);
	struct hfsc_class *cl;
	struct sk_buff *skb;
	u64 cur_time;
	unsigned int next_len;
	int realtime = 0;

	if (sch->q.qlen == 0)
		return NULL;

	cur_time = psched_get_time();

	/*
	 * if there are eligible classes, use real-time criteria.
	 * find the class with the minimum deadline among
	 * the eligible classes.
	 */
	cl = eltree_get_mindl(q, cur_time);
	if (cl) {
		realtime = 1;
	} else {
		/*
		 * use link-sharing criteria
		 * get the class with the minimum vt in the hierarchy
		 */
		cl = vttree_get_minvt(&q->root, cur_time);
		if (cl == NULL) {
			qdisc_qstats_overlimit(sch);
			hfsc_schedule_watchdog(sch);
			return NULL;
		}
	}

	skb = qdisc_dequeue_peeked(cl->qdisc);
	if (skb == NULL) {
		qdisc_warn_nonwc("HFSC", cl->qdisc);
		return NULL;
	}

	bstats_update(&cl->bstats, skb);
	update_vf(cl, qdisc_pkt_len(skb), cur_time);
	if (realtime)
		cl->cl_cumul += qdisc_pkt_len(skb);

	if (cl->cl_flags & HFSC_RSC) {
		if (cl->qdisc->q.qlen != 0) {
			/* update ed */
			next_len = qdisc_peek_len(cl->qdisc);
			if (realtime)
				update_ed(cl, next_len);
			else
				update_d(cl, next_len);
		} else {
			/* the class becomes passive */
			eltree_remove(cl);
		}
	}

	qdisc_bstats_update(sch, skb);
	qdisc_qstats_backlog_dec(sch, skb);
	sch->q.qlen--;

	return skb;
}

static const struct Qdisc_class_ops hfsc_class_ops = {
	.change		= hfsc_change_class,
	.delete		= hfsc_delete_class,
	.graft		= hfsc_graft_class,
	.leaf		= hfsc_class_leaf,
	.qlen_notify	= hfsc_qlen_notify,
	.find		= hfsc_search_class,
	.bind_tcf	= hfsc_bind_tcf,
	.unbind_tcf	= hfsc_unbind_tcf,
	.tcf_block	= hfsc_tcf_block,
	.dump		= hfsc_dump_class,
	.dump_stats	= hfsc_dump_class_stats,
	.walk		= hfsc_walk
};

static struct Qdisc_ops hfsc_qdisc_ops __read_mostly = {
	.id		= "hfsc",
	.init		= hfsc_init_qdisc,
	.change		= hfsc_change_qdisc,
	.reset		= hfsc_reset_qdisc,
	.destroy	= hfsc_destroy_qdisc,
	.dump		= hfsc_dump_qdisc,
	.enqueue	= hfsc_enqueue,
	.dequeue	= hfsc_dequeue,
	.peek		= qdisc_peek_dequeued,
	.cl_ops		= &hfsc_class_ops,
	.priv_size	= sizeof(struct hfsc_sched),
	.owner		= THIS_MODULE
};

static int __init
hfsc_init(void)
{
	return register_qdisc(&hfsc_qdisc_ops);
}

static void __exit
hfsc_cleanup(void)
{
	unregister_qdisc(&hfsc_qdisc_ops);
}

MODULE_LICENSE("GPL");
module_init(hfsc_init);
module_exit(hfsc_cleanup);