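/*
 * Fair Queue CoDel discipline
 *
 *	This program is free software; you can redistribute it and/or
 *	modify it under the terms of the GNU General Public License
 *	as published by the Free Software Foundation; either version
 *	2 of the License, or (at your option) any later version.
 *
 *  Copyright (C) 2012,2015 Eric Dumazet <edumazet@google.com>
 */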
#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/jiffies.h>
#include <linux/string.h>
#include <linux/in.h>
#include <linux/errno.h>
#include <linux/init.h>
#include <linux/skbuff.h>
#include <linux/jhash.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <net/netlink.h>
#include <net/pkt_sched.h>
#include <net/pkt_cls.h>
#include <net/codel.h>
#include <net/codel_impl.h>
#include <net/codel_qdisc.h>
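/*	Fair Queue CoDel.
 *
 * Principles :
 * Packets are classified (internal classifier or external) on flows.
 * This is a Stochastic model (as we use a hash, several flows
 *			       might be hashed on same slot)
 * Each flow has a CoDel managed queue.
 * Flows are linked onto two (Round Robin) lists,
 * so that new flows have priority on old ones.
 *
 * For a given flow, packets are not reordered (CoDel uses a FIFO)
 * head drops only.
 * ECN capability is on by default.
 * Low memory footprint (64 bytes per flow)
 */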
struct fq_codel_flow {
	struct sk_buff	  *head;
	struct sk_buff	  *tail;
	struct list_head  flowchain;
	int		  deficit;
	u32		  dropped; /* number of drops (or ECN marks) on this flow */
	struct codel_vars cvars;
}; /* please try to keep this structure <= 64 bytes */

struct fq_codel_sched_data {
	struct tcf_proto __rcu *filter_list; /* optional external classifier */
	struct tcf_block *block;
	struct fq_codel_flow *flows;	/* Flows table [flows_cnt] */
	u32		*backlogs;	/* backlog table [flows_cnt] */
	u32		flows_cnt;	/* number of flows */
	u32		quantum;	/* psched_mtu(qdisc_dev(sch)); */
	u32		drop_batch_size;
	u32		memory_limit;
	struct codel_params cparams;
	struct codel_stats cstats;
	u32		memory_usage;
	u32		drop_overmemory;
	u32		drop_overlimit;
	u32		new_flow_count;

	struct list_head new_flows;	/* list of new flows */
	struct list_head old_flows;	/* list of old flows */
};

static unsigned int fq_codel_hash(const struct fq_codel_sched_data *q,
				  struct sk_buff *skb)
{
	return reciprocal_scale(skb_get_hash(skb), q->flows_cnt);
}

static unsigned int fq_codel_classify(struct sk_buff *skb, struct Qdisc *sch,
				      int *qerr)
{
	struct fq_codel_sched_data *q = qdisc_priv(sch);
	struct tcf_proto *filter;
	struct tcf_result res;
	int result;

	if (TC_H_MAJ(skb->priority) == sch->handle &&
	    TC_H_MIN(skb->priority) > 0 &&
	    TC_H_MIN(skb->priority) <= q->flows_cnt)
		return TC_H_MIN(skb->priority);

	filter = rcu_dereference_bh(q->filter_list);
	if (!filter)
		return fq_codel_hash(q, skb) + 1;

	*qerr = NET_XMIT_SUCCESS | __NET_XMIT_BYPASS;
	result = tcf_classify(skb, filter, &res, false);
	if (result >= 0) {
#ifdef CONFIG_NET_CLS_ACT
		switch (result) {
		case TC_ACT_STOLEN:
		case TC_ACT_QUEUED:
		case TC_ACT_TRAP:
			*qerr = NET_XMIT_SUCCESS | __NET_XMIT_STOLEN;
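			/* fall through */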
		case TC_ACT_SHOT:
			return 0;
		}
#endif
		if (TC_H_MIN(res.classid) <= q->flows_cnt)
			return TC_H_MIN(res.classid);
	}
	return 0;
}

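/* helper functions : might be changed when/if skb use a standard list_head */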
static inline struct sk_buff *dequeue_head(struct fq_codel_flow *flow)
{
	struct sk_buff *skb = flow->head;

	flow->head = skb->next;
	skb->next = NULL;
	return skb;
}

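/* add skb to flow queue (tail add) */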
static inline void flow_queue_add(struct fq_codel_flow *flow,
				  struct sk_buff *skb)
{
	if (flow->head == NULL)
		flow->head = skb;
	else
		flow->tail->next = skb;
	flow->tail = skb;
	skb->next = NULL;
}

static unsigned int fq_codel_drop(struct Qdisc *sch, unsigned int max_packets,
				  struct sk_buff **to_free)
{
	struct fq_codel_sched_data *q = qdisc_priv(sch);
	struct sk_buff *skb;
	unsigned int maxbacklog = 0, idx = 0, i, len;
	struct fq_codel_flow *flow;
	unsigned int threshold;
	unsigned int mem = 0;

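	/* Queue is full! Find the fat flow and drop packet(s) from it.
	 * This might sound expensive, but with 1024 flows, we scan
	 * 4KB of memory, and we dont need to handle a complex tree
	 * in fast path (packet queue/enqueue) with many cache misses.
	 * In stress mode, we'll try to drop 64 packets from the flow,
	 * amortizing this linear lookup to one cache line per drop.
	 */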
	for (i = 0; i < q->flows_cnt; i++) {
		if (q->backlogs[i] > maxbacklog) {
			maxbacklog = q->backlogs[i];
			idx = i;
		}
	}

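	/* Our goal is to drop half of this fat flow backlog */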
	threshold = maxbacklog >> 1;

	flow = &q->flows[idx];
	len = 0;
	i = 0;
	do {
		skb = dequeue_head(flow);
		len += qdisc_pkt_len(skb);
		mem += get_codel_cb(skb)->mem_usage;
		__qdisc_drop(skb, to_free);
	} while (++i < max_packets && len < threshold);

	flow->dropped += i;
	q->backlogs[idx] -= len;
	q->memory_usage -= mem;
	sch->qstats.drops += i;
	sch->qstats.backlog -= len;
	sch->q.qlen -= i;
	return idx;
}

static int fq_codel_enqueue(struct sk_buff *skb, struct Qdisc *sch,
			    struct sk_buff **to_free)
{
	struct fq_codel_sched_data *q = qdisc_priv(sch);
	unsigned int idx, prev_backlog, prev_qlen;
	struct fq_codel_flow *flow;
	int uninitialized_var(ret);
	unsigned int pkt_len;
	bool memory_limited;

	idx = fq_codel_classify(skb, sch, &ret);
	if (idx == 0) {
		if (ret & __NET_XMIT_BYPASS)
			qdisc_qstats_drop(sch);
		__qdisc_drop(skb, to_free);
		return ret;
	}
	idx--;

	codel_set_enqueue_time(skb);
	flow = &q->flows[idx];
	flow_queue_add(flow, skb);
	q->backlogs[idx] += qdisc_pkt_len(skb);
	qdisc_qstats_backlog_inc(sch, skb);

	if (list_empty(&flow->flowchain)) {
		list_add_tail(&flow->flowchain, &q->new_flows);
		q->new_flow_count++;
		flow->deficit = q->quantum;
		flow->dropped = 0;
	}
	get_codel_cb(skb)->mem_usage = skb->truesize;
	q->memory_usage += get_codel_cb(skb)->mem_usage;
	memory_limited = q->memory_usage > q->memory_limit;
	if (++sch->q.qlen <= sch->limit && !memory_limited)
		return NET_XMIT_SUCCESS;

	prev_backlog = sch->qstats.backlog;
	prev_qlen = sch->q.qlen;

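	/* save this packet length as it might be dropped by fq_codel_drop() */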
	pkt_len = qdisc_pkt_len(skb);
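	/* fq_codel_drop() is quite expensive, as it performs a linear search
	 * in q->backlogs[] to find a fat flow.
	 * So instead of dropping a single packet, drop half of its backlog
	 * with a 64 packets limit to not add a too big cpu spike here.
	 */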
	ret = fq_codel_drop(sch, q->drop_batch_size, to_free);

	prev_qlen -= sch->q.qlen;
	prev_backlog -= sch->qstats.backlog;
	q->drop_overlimit += prev_qlen;
	if (memory_limited)
		q->drop_overmemory += prev_qlen;

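	/* As we dropped packet(s), better let upper stack know this.
	 * If we dropped a packet for this flow, return NET_XMIT_CN,
	 * but in this case, our parents wont increase their backlogs.
	 */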
	if (ret == idx) {
		qdisc_tree_reduce_backlog(sch, prev_qlen - 1,
					  prev_backlog - pkt_len);
		return NET_XMIT_CN;
	}
	qdisc_tree_reduce_backlog(sch, prev_qlen, prev_backlog);
	return NET_XMIT_SUCCESS;
}

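/* Dequeue callback passed to codel_dequeue() : pulls one packet from
 * the flow owning @vars and updates per-flow and qdisc accounting.
 */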
static struct sk_buff *dequeue_func(struct codel_vars *vars, void *ctx)
{
	struct Qdisc *sch = ctx;
	struct fq_codel_sched_data *q = qdisc_priv(sch);
	struct fq_codel_flow *flow;
	struct sk_buff *skb = NULL;

	flow = container_of(vars, struct fq_codel_flow, cvars);
	if (flow->head) {
		skb = dequeue_head(flow);
		q->backlogs[flow - q->flows] -= qdisc_pkt_len(skb);
		q->memory_usage -= get_codel_cb(skb)->mem_usage;
		sch->q.qlen--;
		sch->qstats.backlog -= qdisc_pkt_len(skb);
	}
	return skb;
}

static void drop_func(struct sk_buff *skb, void *ctx)
{
	struct Qdisc *sch = ctx;

	kfree_skb(skb);
	qdisc_qstats_drop(sch);
}

static struct sk_buff *fq_codel_dequeue(struct Qdisc *sch)
{
	struct fq_codel_sched_data *q = qdisc_priv(sch);
	struct sk_buff *skb;
	struct fq_codel_flow *flow;
	struct list_head *head;
	u32 prev_drop_count, prev_ecn_mark;

begin:
	head = &q->new_flows;
	if (list_empty(head)) {
		head = &q->old_flows;
		if (list_empty(head))
			return NULL;
	}
	flow = list_first_entry(head, struct fq_codel_flow, flowchain);

	if (flow->deficit <= 0) {
		flow->deficit += q->quantum;
		list_move_tail(&flow->flowchain, &q->old_flows);
		goto begin;
	}

	prev_drop_count = q->cstats.drop_count;
	prev_ecn_mark = q->cstats.ecn_mark;

	skb = codel_dequeue(sch, &sch->qstats.backlog, &q->cparams,
			    &flow->cvars, &q->cstats, qdisc_pkt_len,
			    codel_get_enqueue_time, drop_func, dequeue_func);

	flow->dropped += q->cstats.drop_count - prev_drop_count;
	flow->dropped += q->cstats.ecn_mark - prev_ecn_mark;

	if (!skb) {
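		/* force a pass through old_flows to prevent starvation */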
		if ((head == &q->new_flows) && !list_empty(&q->old_flows))
			list_move_tail(&flow->flowchain, &q->old_flows);
		else
			list_del_init(&flow->flowchain);
		goto begin;
	}
	qdisc_bstats_update(sch, skb);
	flow->deficit -= qdisc_pkt_len(skb);
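	/* We cannot call qdisc_tree_reduce_backlog() if our qlen is 0,
	 * or HTB crashes. Defer it for next round.
	 */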
	if (q->cstats.drop_count && sch->q.qlen) {
		qdisc_tree_reduce_backlog(sch, q->cstats.drop_count,
					  q->cstats.drop_len);
		q->cstats.drop_count = 0;
		q->cstats.drop_len = 0;
	}
	return skb;
}

static void fq_codel_flow_purge(struct fq_codel_flow *flow)
{
	rtnl_kfree_skbs(flow->head, flow->tail);
	flow->head = NULL;
}

static void fq_codel_reset(struct Qdisc *sch)
{
	struct fq_codel_sched_data *q = qdisc_priv(sch);
	int i;

	INIT_LIST_HEAD(&q->new_flows);
	INIT_LIST_HEAD(&q->old_flows);
	for (i = 0; i < q->flows_cnt; i++) {
		struct fq_codel_flow *flow = q->flows + i;

		fq_codel_flow_purge(flow);
		INIT_LIST_HEAD(&flow->flowchain);
		codel_vars_init(&flow->cvars);
	}
	memset(q->backlogs, 0, q->flows_cnt * sizeof(u32));
	sch->q.qlen = 0;
	sch->qstats.backlog = 0;
	q->memory_usage = 0;
}

static const struct nla_policy fq_codel_policy[TCA_FQ_CODEL_MAX + 1] = {
	[TCA_FQ_CODEL_TARGET]	= { .type = NLA_U32 },
	[TCA_FQ_CODEL_LIMIT]	= { .type = NLA_U32 },
	[TCA_FQ_CODEL_INTERVAL]	= { .type = NLA_U32 },
	[TCA_FQ_CODEL_ECN]	= { .type = NLA_U32 },
	[TCA_FQ_CODEL_FLOWS]	= { .type = NLA_U32 },
	[TCA_FQ_CODEL_QUANTUM]	= { .type = NLA_U32 },
	[TCA_FQ_CODEL_CE_THRESHOLD] = { .type = NLA_U32 },
	[TCA_FQ_CODEL_DROP_BATCH_SIZE] = { .type = NLA_U32 },
	[TCA_FQ_CODEL_MEMORY_LIMIT] = { .type = NLA_U32 },
};

static int fq_codel_change(struct Qdisc *sch, struct nlattr *opt)
{
	struct fq_codel_sched_data *q = qdisc_priv(sch);
	struct nlattr *tb[TCA_FQ_CODEL_MAX + 1];
	int err;

	if (!opt)
		return -EINVAL;

	err = nla_parse_nested(tb, TCA_FQ_CODEL_MAX, opt, fq_codel_policy,
			       NULL);
	if (err < 0)
		return err;
	if (tb[TCA_FQ_CODEL_FLOWS]) {
		if (q->flows)
			return -EINVAL;
		q->flows_cnt = nla_get_u32(tb[TCA_FQ_CODEL_FLOWS]);
		if (!q->flows_cnt || q->flows_cnt > 65536)
			return -EINVAL;
	}
	sch_tree_lock(sch);

	if (tb[TCA_FQ_CODEL_TARGET]) {
		u64 target = nla_get_u32(tb[TCA_FQ_CODEL_TARGET]);

		q->cparams.target = (target * NSEC_PER_USEC) >> CODEL_SHIFT;
	}

	if (tb[TCA_FQ_CODEL_CE_THRESHOLD]) {
		u64 val = nla_get_u32(tb[TCA_FQ_CODEL_CE_THRESHOLD]);

		q->cparams.ce_threshold = (val * NSEC_PER_USEC) >> CODEL_SHIFT;
	}

	if (tb[TCA_FQ_CODEL_INTERVAL]) {
		u64 interval = nla_get_u32(tb[TCA_FQ_CODEL_INTERVAL]);

		q->cparams.interval = (interval * NSEC_PER_USEC) >> CODEL_SHIFT;
	}

	if (tb[TCA_FQ_CODEL_LIMIT])
		sch->limit = nla_get_u32(tb[TCA_FQ_CODEL_LIMIT]);

	if (tb[TCA_FQ_CODEL_ECN])
		q->cparams.ecn = !!nla_get_u32(tb[TCA_FQ_CODEL_ECN]);

	if (tb[TCA_FQ_CODEL_QUANTUM])
		q->quantum = max(256U, nla_get_u32(tb[TCA_FQ_CODEL_QUANTUM]));
	if (tb[TCA_FQ_CODEL_DROP_BATCH_SIZE])
		q->drop_batch_size = max(1U, nla_get_u32(tb[TCA_FQ_CODEL_DROP_BATCH_SIZE]));

	if (tb[TCA_FQ_CODEL_MEMORY_LIMIT])
		q->memory_limit = min(1U << 31, nla_get_u32(tb[TCA_FQ_CODEL_MEMORY_LIMIT]));

	while (sch->q.qlen > sch->limit ||
	       q->memory_usage > q->memory_limit) {
		struct sk_buff *skb = fq_codel_dequeue(sch);

		q->cstats.drop_len += qdisc_pkt_len(skb);
		rtnl_kfree_skbs(skb, skb);
		q->cstats.drop_count++;
	}
	qdisc_tree_reduce_backlog(sch, q->cstats.drop_count, q->cstats.drop_len);
	q->cstats.drop_count = 0;
	q->cstats.drop_len = 0;

	sch_tree_unlock(sch);
	return 0;
}

static void fq_codel_destroy(struct Qdisc *sch)
{
	struct fq_codel_sched_data *q = qdisc_priv(sch);

	tcf_block_put(q->block);
	kvfree(q->backlogs);
	kvfree(q->flows);
}

static int fq_codel_init(struct Qdisc *sch, struct nlattr *opt)
{
	struct fq_codel_sched_data *q = qdisc_priv(sch);
	int i;
	int err;

	sch->limit = 10*1024;
	q->flows_cnt = 1024;
	q->memory_limit = 32 << 20; /* 32 MBytes */
	q->drop_batch_size = 64;
	q->quantum = psched_mtu(qdisc_dev(sch));
	INIT_LIST_HEAD(&q->new_flows);
	INIT_LIST_HEAD(&q->old_flows);
	codel_params_init(&q->cparams);
	codel_stats_init(&q->cstats);
	q->cparams.ecn = true;
	q->cparams.mtu = psched_mtu(qdisc_dev(sch));

	if (opt) {
		err = fq_codel_change(sch, opt);
		if (err)
			return err;
	}

	err = tcf_block_get(&q->block, &q->filter_list);
	if (err)
		return err;

	if (!q->flows) {
		q->flows = kvzalloc(q->flows_cnt *
				    sizeof(struct fq_codel_flow), GFP_KERNEL);
		if (!q->flows)
			return -ENOMEM;
		q->backlogs = kvzalloc(q->flows_cnt * sizeof(u32), GFP_KERNEL);
		if (!q->backlogs)
			return -ENOMEM;
		for (i = 0; i < q->flows_cnt; i++) {
			struct fq_codel_flow *flow = q->flows + i;

			INIT_LIST_HEAD(&flow->flowchain);
			codel_vars_init(&flow->cvars);
		}
	}
	if (sch->limit >= 1)
		sch->flags |= TCQ_F_CAN_BYPASS;
	else
		sch->flags &= ~TCQ_F_CAN_BYPASS;
	return 0;
}

static int fq_codel_dump(struct Qdisc *sch, struct sk_buff *skb)
{
	struct fq_codel_sched_data *q = qdisc_priv(sch);
	struct nlattr *opts;

	opts = nla_nest_start(skb, TCA_OPTIONS);
	if (opts == NULL)
		goto nla_put_failure;

	if (nla_put_u32(skb, TCA_FQ_CODEL_TARGET,
			codel_time_to_us(q->cparams.target)) ||
	    nla_put_u32(skb, TCA_FQ_CODEL_LIMIT,
			sch->limit) ||
	    nla_put_u32(skb, TCA_FQ_CODEL_INTERVAL,
			codel_time_to_us(q->cparams.interval)) ||
	    nla_put_u32(skb, TCA_FQ_CODEL_ECN,
			q->cparams.ecn) ||
	    nla_put_u32(skb, TCA_FQ_CODEL_QUANTUM,
			q->quantum) ||
	    nla_put_u32(skb, TCA_FQ_CODEL_DROP_BATCH_SIZE,
			q->drop_batch_size) ||
	    nla_put_u32(skb, TCA_FQ_CODEL_MEMORY_LIMIT,
			q->memory_limit) ||
	    nla_put_u32(skb, TCA_FQ_CODEL_FLOWS,
			q->flows_cnt))
		goto nla_put_failure;

	if (q->cparams.ce_threshold != CODEL_DISABLED_THRESHOLD &&
	    nla_put_u32(skb, TCA_FQ_CODEL_CE_THRESHOLD,
			codel_time_to_us(q->cparams.ce_threshold)))
		goto nla_put_failure;

	return nla_nest_end(skb, opts);

nla_put_failure:
	return -1;
}

static int fq_codel_dump_stats(struct Qdisc *sch, struct gnet_dump *d)
{
	struct fq_codel_sched_data *q = qdisc_priv(sch);
	struct tc_fq_codel_xstats st = {
		.type				= TCA_FQ_CODEL_XSTATS_QDISC,
	};
	struct list_head *pos;

	st.qdisc_stats.maxpacket = q->cstats.maxpacket;
	st.qdisc_stats.drop_overlimit = q->drop_overlimit;
	st.qdisc_stats.ecn_mark = q->cstats.ecn_mark;
	st.qdisc_stats.new_flow_count = q->new_flow_count;
	st.qdisc_stats.ce_mark = q->cstats.ce_mark;
	st.qdisc_stats.memory_usage = q->memory_usage;
	st.qdisc_stats.drop_overmemory = q->drop_overmemory;

	sch_tree_lock(sch);
	list_for_each(pos, &q->new_flows)
		st.qdisc_stats.new_flows_len++;

	list_for_each(pos, &q->old_flows)
		st.qdisc_stats.old_flows_len++;
	sch_tree_unlock(sch);

	return gnet_stats_copy_app(d, &st, sizeof(st));
}

static struct Qdisc *fq_codel_leaf(struct Qdisc *sch, unsigned long arg)
{
	return NULL;
}

static unsigned long fq_codel_find(struct Qdisc *sch, u32 classid)
{
	return 0;
}

static unsigned long fq_codel_bind(struct Qdisc *sch, unsigned long parent,
				   u32 classid)
{
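	/* we cannot bypass queue discipline anymore */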
	sch->flags &= ~TCQ_F_CAN_BYPASS;
	return 0;
}

static void fq_codel_unbind(struct Qdisc *q, unsigned long cl)
{
}

static struct tcf_block *fq_codel_tcf_block(struct Qdisc *sch, unsigned long cl)
{
	struct fq_codel_sched_data *q = qdisc_priv(sch);

	if (cl)
		return NULL;
	return q->block;
}

static int fq_codel_dump_class(struct Qdisc *sch, unsigned long cl,
			       struct sk_buff *skb, struct tcmsg *tcm)
{
	tcm->tcm_handle |= TC_H_MIN(cl);
	return 0;
}

static int fq_codel_dump_class_stats(struct Qdisc *sch, unsigned long cl,
				     struct gnet_dump *d)
{
	struct fq_codel_sched_data *q = qdisc_priv(sch);
	u32 idx = cl - 1;
	struct gnet_stats_queue qs = { 0 };
	struct tc_fq_codel_xstats xstats;

	if (idx < q->flows_cnt) {
		const struct fq_codel_flow *flow = &q->flows[idx];
		const struct sk_buff *skb;

		memset(&xstats, 0, sizeof(xstats));
		xstats.type = TCA_FQ_CODEL_XSTATS_CLASS;
		xstats.class_stats.deficit = flow->deficit;
		xstats.class_stats.ldelay =
			codel_time_to_us(flow->cvars.ldelay);
		xstats.class_stats.count = flow->cvars.count;
		xstats.class_stats.lastcount = flow->cvars.lastcount;
		xstats.class_stats.dropping = flow->cvars.dropping;
		if (flow->cvars.dropping) {
			codel_tdiff_t delta = flow->cvars.drop_next -
					      codel_get_time();

			xstats.class_stats.drop_next = (delta >= 0) ?
				codel_time_to_us(delta) :
				-codel_time_to_us(-delta);
		}
		if (flow->head) {
			sch_tree_lock(sch);
			skb = flow->head;
			while (skb) {
				qs.qlen++;
				skb = skb->next;
			}
			sch_tree_unlock(sch);
		}
		qs.backlog = q->backlogs[idx];
		qs.drops = flow->dropped;
	}
	if (gnet_stats_copy_queue(d, NULL, &qs, qs.qlen) < 0)
		return -1;
	if (idx < q->flows_cnt)
		return gnet_stats_copy_app(d, &xstats, sizeof(xstats));
	return 0;
}

static void fq_codel_walk(struct Qdisc *sch, struct qdisc_walker *arg)
{
	struct fq_codel_sched_data *q = qdisc_priv(sch);
	unsigned int i;

	if (arg->stop)
		return;

	for (i = 0; i < q->flows_cnt; i++) {
		if (list_empty(&q->flows[i].flowchain) ||
		    arg->count < arg->skip) {
			arg->count++;
			continue;
		}
		if (arg->fn(sch, i + 1, arg) < 0) {
			arg->stop = 1;
			break;
		}
		arg->count++;
	}
}

static const struct Qdisc_class_ops fq_codel_class_ops = {
	.leaf		=	fq_codel_leaf,
	.find		=	fq_codel_find,
	.tcf_block	=	fq_codel_tcf_block,
	.bind_tcf	=	fq_codel_bind,
	.unbind_tcf	=	fq_codel_unbind,
	.dump		=	fq_codel_dump_class,
	.dump_stats	=	fq_codel_dump_class_stats,
	.walk		=	fq_codel_walk,
};

static struct Qdisc_ops fq_codel_qdisc_ops __read_mostly = {
	.cl_ops		=	&fq_codel_class_ops,
	.id		=	"fq_codel",
	.priv_size	=	sizeof(struct fq_codel_sched_data),
	.enqueue	=	fq_codel_enqueue,
	.dequeue	=	fq_codel_dequeue,
	.peek		=	qdisc_peek_dequeued,
	.init		=	fq_codel_init,
	.reset		=	fq_codel_reset,
	.destroy	=	fq_codel_destroy,
	.change		=	fq_codel_change,
	.dump		=	fq_codel_dump,
	.dump_stats	=	fq_codel_dump_stats,
	.owner		=	THIS_MODULE,
};

static int __init fq_codel_module_init(void)
{
	return register_qdisc(&fq_codel_qdisc_ops);
}

static void __exit fq_codel_module_exit(void)
{
	unregister_qdisc(&fq_codel_qdisc_ops);
}

module_init(fq_codel_module_init)
module_exit(fq_codel_module_exit)
MODULE_AUTHOR("Eric Dumazet");
MODULE_LICENSE("GPL");