1
2
3
4
5
6
7
8#include <linux/module.h>
9#include <linux/types.h>
10#include <linux/kernel.h>
11#include <linux/jiffies.h>
12#include <linux/string.h>
13#include <linux/in.h>
14#include <linux/errno.h>
15#include <linux/init.h>
16#include <linux/skbuff.h>
17#include <linux/jhash.h>
18#include <linux/slab.h>
19#include <linux/vmalloc.h>
20#include <net/netlink.h>
21#include <net/pkt_sched.h>
22#include <net/pkt_cls.h>
23#include <net/red.h>
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
/* Limits and defaults for SFQ bookkeeping.
 * Slot/flow indices and the depth lists share one sfq_index space, so
 * SFQ_MAX_FLOWS + SFQ_MAX_DEPTH + 1 must fit in the index type below.
 */
#define SFQ_MAX_DEPTH 127		/* max packets queued per flow */
#define SFQ_DEFAULT_FLOWS 128
#define SFQ_MAX_FLOWS (0x10000 - SFQ_MAX_DEPTH - 1)	/* max number of flows */
#define SFQ_EMPTY_SLOT 0xffff		/* hash-table entry with no slot bound */
#define SFQ_DEFAULT_HASH_DIVISOR 1024

/* Byte credits ("allot") are stored scaled down by SFQ_ALLOT_SHIFT so
 * that packet-sized values fit in the short sfq_slot.allot field.
 */
#define SFQ_ALLOT_SHIFT 3
#define SFQ_ALLOT_SIZE(X) DIV_ROUND_UP(X, 1 << SFQ_ALLOT_SHIFT)

/* Index into slots[] (< SFQ_MAX_FLOWS) or dep[] (>= SFQ_MAX_FLOWS). */
typedef u16 sfq_index;
88
89
90
91
92
93
94
/* Link node for the per-depth doubly linked lists; indices are decoded
 * by sfq_dep_head() (slot index vs. dep[] anchor index).
 */
struct sfq_head {
	sfq_index	next;
	sfq_index	prev;
};

/* One active flow. The two skblist pointers form an intrusive list of
 * queued packets whose list head is the slot itself (cast to sk_buff),
 * so they must stay the first members — see slot_queue_init().
 */
struct sfq_slot {
	struct sk_buff	*skblist_next;	/* oldest queued packet */
	struct sk_buff	*skblist_prev;	/* newest queued packet */
	sfq_index	qlen;		/* number of packets in this flow */
	sfq_index	next;		/* next slot in round-robin ring */
	struct sfq_head dep;		/* anchor in depth-sorted list */
	unsigned short	hash;		/* hash bucket this slot is bound to */
	short		allot;		/* scaled byte credit (may go negative) */

	unsigned int	backlog;	/* bytes queued in this flow */
	struct red_vars vars;		/* per-flow RED state */
};
112
/* Per-qdisc private state for SFQ. */
struct sfq_sched_data {
/* frequently used fields */
	int		limit;		/* max packets in the whole qdisc */
	unsigned int	divisor;	/* hash table size (power of two) */
	u8		headdrop;	/* drop at head instead of tail */
	u8		maxdepth;	/* limit of packets per flow */

	u32		perturbation;	/* current hash perturbation key */
	u8		cur_depth;	/* depth of longest active slot */
	u8		flags;		/* TC_RED_* flags for ECN marking */
	unsigned short  scaled_quantum; /* SFQ_ALLOT_SIZE(quantum) */
	struct tcf_proto __rcu *filter_list;
	struct tcf_block *block;
	sfq_index	*ht;		/* hash table: divisor slots indices */
	struct sfq_slot	*slots;		/* flow slots: maxflows entries */

	struct red_parms *red_parms;	/* NULL unless SFQ-RED is configured */
	struct tc_sfqred_stats stats;
	struct sfq_slot *tail;		/* current tail of the round-robin ring */

	/* List anchors indexed by queue depth; dep[0] chains the free
	 * (empty) slots, dep[d] the slots with d queued packets.
	 */
	struct sfq_head	dep[SFQ_MAX_DEPTH + 1];

	unsigned int	maxflows;	/* number of usable slots */
	int		perturb_period;	/* jiffies between rehashes, 0 = off */
	unsigned int	quantum;	/* bytes served per round, per flow */
	struct timer_list perturb_timer;
	struct Qdisc	*sch;		/* back-pointer for the timer callback */
};
146
147
148
149
150static inline struct sfq_head *sfq_dep_head(struct sfq_sched_data *q, sfq_index val)
151{
152 if (val < SFQ_MAX_FLOWS)
153 return &q->slots[val].dep;
154 return &q->dep[val - SFQ_MAX_FLOWS];
155}
156
157static unsigned int sfq_hash(const struct sfq_sched_data *q,
158 const struct sk_buff *skb)
159{
160 return skb_get_hash_perturb(skb, q->perturbation) & (q->divisor - 1);
161}
162
/*
 * Classify @skb to a 1-based bucket number, or 0 to drop.
 * skb->priority may select the bucket directly; otherwise attached
 * filters are consulted, falling back to the perturbed flow hash.
 */
static unsigned int sfq_classify(struct sk_buff *skb, struct Qdisc *sch,
				 int *qerr)
{
	struct sfq_sched_data *q = qdisc_priv(sch);
	struct tcf_result res;
	struct tcf_proto *fl;
	int result;

	if (TC_H_MAJ(skb->priority) == sch->handle &&
	    TC_H_MIN(skb->priority) > 0 &&
	    TC_H_MIN(skb->priority) <= q->divisor)
		return TC_H_MIN(skb->priority);

	fl = rcu_dereference_bh(q->filter_list);
	if (!fl)
		return sfq_hash(q, skb) + 1;

	*qerr = NET_XMIT_SUCCESS | __NET_XMIT_BYPASS;
	result = tcf_classify(skb, fl, &res, false);
	if (result >= 0) {
#ifdef CONFIG_NET_CLS_ACT
		switch (result) {
		case TC_ACT_STOLEN:
		case TC_ACT_QUEUED:
		case TC_ACT_TRAP:
			*qerr = NET_XMIT_SUCCESS | __NET_XMIT_STOLEN;
			/* fall through */
		case TC_ACT_SHOT:
			return 0;
		}
#endif
		if (TC_H_MIN(res.classid) <= q->divisor)
			return TC_H_MIN(res.classid);
	}
	return 0;
}
199
200
201
202
/*
 * Insert slot x at the head of the list of slots that currently hold
 * qlen packets. Statement order matters: the new links are written
 * before the neighbours are updated to point at x.
 */
static inline void sfq_link(struct sfq_sched_data *q, sfq_index x)
{
	sfq_index p, n;
	struct sfq_slot *slot = &q->slots[x];
	int qlen = slot->qlen;

	p = qlen + SFQ_MAX_FLOWS;	/* encoded index of dep[qlen] anchor */
	n = q->dep[qlen].next;

	slot->dep.next = n;
	slot->dep.prev = p;

	q->dep[qlen].next = x;		/* add x to head of list */
	sfq_dep_head(q, n)->prev = x;
}
218
/* Remove slot x from its depth list; n and p receive its old neighbours
 * so the caller can detect an emptied list (n == p means only the
 * anchor remains).
 */
#define sfq_unlink(q, x, n, p)			\
	do {					\
		n = q->slots[x].dep.next;	\
		p = q->slots[x].dep.prev;	\
		sfq_dep_head(q, p)->next = n;	\
		sfq_dep_head(q, n)->prev = p;	\
	} while (0)
226
227
228static inline void sfq_dec(struct sfq_sched_data *q, sfq_index x)
229{
230 sfq_index p, n;
231 int d;
232
233 sfq_unlink(q, x, n, p);
234
235 d = q->slots[x].qlen--;
236 if (n == p && q->cur_depth == d)
237 q->cur_depth--;
238 sfq_link(q, x);
239}
240
241static inline void sfq_inc(struct sfq_sched_data *q, sfq_index x)
242{
243 sfq_index p, n;
244 int d;
245
246 sfq_unlink(q, x, n, p);
247
248 d = ++q->slots[x].qlen;
249 if (q->cur_depth < d)
250 q->cur_depth = d;
251 sfq_link(q, x);
252}
253
254
255
256
/* Remove the newest packet from the slot's intrusive skb list.
 * The slot itself, cast to sk_buff, acts as the list anchor (the
 * skblist pointers are the first members of struct sfq_slot), so the
 * caller must ensure the list is non-empty.
 */
static inline struct sk_buff *slot_dequeue_tail(struct sfq_slot *slot)
{
	struct sk_buff *skb = slot->skblist_prev;

	slot->skblist_prev = skb->prev;
	skb->prev->next = (struct sk_buff *)slot;
	skb->next = skb->prev = NULL;	/* detach fully before handing out */
	return skb;
}
266
267
/* Remove the oldest packet from the slot's intrusive skb list.
 * Counterpart of slot_dequeue_tail(); same anchor trick, same
 * non-empty precondition.
 */
static inline struct sk_buff *slot_dequeue_head(struct sfq_slot *slot)
{
	struct sk_buff *skb = slot->skblist_next;

	slot->skblist_next = skb->next;
	skb->next->prev = (struct sk_buff *)slot;
	skb->next = skb->prev = NULL;	/* detach fully before handing out */
	return skb;
}
277
278static inline void slot_queue_init(struct sfq_slot *slot)
279{
280 memset(slot, 0, sizeof(*slot));
281 slot->skblist_prev = slot->skblist_next = (struct sk_buff *)slot;
282}
283
284
/* Append @skb at the tail (newest end) of the slot's packet list. */
static inline void slot_queue_add(struct sfq_slot *slot, struct sk_buff *skb)
{
	skb->prev = slot->skblist_prev;
	skb->next = (struct sk_buff *)slot;	/* anchor terminates the list */
	slot->skblist_prev->next = skb;
	slot->skblist_prev = skb;
}
292
/*
 * Drop one packet to relieve pressure and return its length (0 if the
 * qdisc holds nothing droppable). Victim selection: prefer the fattest
 * flow (depth > 1); if every active flow holds a single packet, drop
 * from the flow at the round-robin tail and retire that slot.
 */
static unsigned int sfq_drop(struct Qdisc *sch, struct sk_buff **to_free)
{
	struct sfq_sched_data *q = qdisc_priv(sch);
	sfq_index x, d = q->cur_depth;
	struct sk_buff *skb;
	unsigned int len;
	struct sfq_slot *slot;

	/* Queue is full! Find the fattest slot and drop packet from it */
	if (d > 1) {
		x = q->dep[d].next;
		slot = &q->slots[x];
drop:
		skb = q->headdrop ? slot_dequeue_head(slot) : slot_dequeue_tail(slot);
		len = qdisc_pkt_len(skb);
		slot->backlog -= len;
		sfq_dec(q, x);
		sch->q.qlen--;
		qdisc_qstats_backlog_dec(sch, skb);
		qdisc_drop(skb, sch, to_free);
		return len;
	}

	if (d == 1) {
		/* All flows hold one packet: drop from the slot right
		 * after the tail and unlink it from the ring and hash.
		 */
		x = q->tail->next;
		slot = &q->slots[x];
		q->tail->next = slot->next;
		q->ht[slot->hash] = SFQ_EMPTY_SLOT;
		goto drop;
	}

	return 0;
}
327
328
/* True if ECN marking is allowed for probabilistic RED drops. */
static int sfq_prob_mark(const struct sfq_sched_data *q)
{
	return q->flags & TC_RED_ECN;
}
333
334
/* True if ECN marking is allowed for forced ("hard") RED drops:
 * ECN must be on and HARDDROP must be off.
 */
static int sfq_hard_mark(const struct sfq_sched_data *q)
{
	return (q->flags & (TC_RED_ECN | TC_RED_HARDDROP)) == TC_RED_ECN;
}
339
/* True if drops should remove the head (oldest) packet of a flow. */
static int sfq_headdrop(const struct sfq_sched_data *q)
{
	return q->headdrop;
}
344
/*
 * Enqueue entry point: classify @skb to a flow slot, run per-flow RED
 * when configured, enforce the per-flow depth and global limit, and
 * add new flows to the round-robin ring with a fresh quantum.
 */
static int
sfq_enqueue(struct sk_buff *skb, struct Qdisc *sch, struct sk_buff **to_free)
{
	struct sfq_sched_data *q = qdisc_priv(sch);
	unsigned int hash, dropped;
	sfq_index x, qlen;
	struct sfq_slot *slot;
	int uninitialized_var(ret);
	struct sk_buff *head;
	int delta;

	hash = sfq_classify(skb, sch, &ret);
	if (hash == 0) {
		if (ret & __NET_XMIT_BYPASS)
			qdisc_qstats_drop(sch);
		__qdisc_drop(skb, to_free);
		return ret;
	}
	hash--;	/* back to a 0-based bucket index */

	x = q->ht[hash];
	slot = &q->slots[x];
	if (x == SFQ_EMPTY_SLOT) {
		/* Bucket has no slot yet: claim a free one from the
		 * depth-0 list; none free means we must drop.
		 */
		x = q->dep[0].next;
		if (x >= SFQ_MAX_FLOWS)
			return qdisc_drop(skb, sch, to_free);
		q->ht[hash] = x;
		slot = &q->slots[x];
		slot->hash = hash;
		slot->backlog = 0; /* should already be 0 anyway... */
		red_set_vars(&slot->vars);
		goto enqueue;
	}
	if (q->red_parms) {
		/* Per-flow RED on the slot's byte backlog. */
		slot->vars.qavg = red_calc_qavg_no_idle_time(q->red_parms,
							&slot->vars,
							slot->backlog);
		switch (red_action(q->red_parms,
				   &slot->vars,
				   slot->vars.qavg)) {
		case RED_DONT_MARK:
			break;

		case RED_PROB_MARK:
			qdisc_qstats_overlimit(sch);
			if (sfq_prob_mark(q)) {
				/* Prefer marking the head packet when in
				 * headdrop mode; the flow is known non-empty.
				 */
				if (sfq_headdrop(q) &&
				    INET_ECN_set_ce(slot->skblist_next)) {
					q->stats.prob_mark_head++;
					break;
				}
				if (INET_ECN_set_ce(skb)) {
					q->stats.prob_mark++;
					break;
				}
			}
			q->stats.prob_drop++;
			goto congestion_drop;

		case RED_HARD_MARK:
			qdisc_qstats_overlimit(sch);
			if (sfq_hard_mark(q)) {
				/* Same head-first marking policy as above. */
				if (sfq_headdrop(q) &&
				    INET_ECN_set_ce(slot->skblist_next)) {
					q->stats.forced_mark_head++;
					break;
				}
				if (INET_ECN_set_ce(skb)) {
					q->stats.forced_mark++;
					break;
				}
			}
			q->stats.forced_drop++;
			goto congestion_drop;
		}
	}

	if (slot->qlen >= q->maxdepth) {
congestion_drop:
		if (!sfq_headdrop(q))
			return qdisc_drop(skb, sch, to_free);

		/* Headdrop: evict the oldest packet of this flow and
		 * queue the new one in its place; adjust backlogs by
		 * the size difference only.
		 */
		head = slot_dequeue_head(slot);
		delta = qdisc_pkt_len(head) - qdisc_pkt_len(skb);
		sch->qstats.backlog -= delta;
		slot->backlog -= delta;
		qdisc_drop(head, sch, to_free);

		slot_queue_add(slot, skb);
		qdisc_tree_reduce_backlog(sch, 0, delta);
		return NET_XMIT_CN;
	}

enqueue:
	qdisc_qstats_backlog_inc(sch, skb);
	slot->backlog += qdisc_pkt_len(skb);
	slot_queue_add(slot, skb);
	sfq_inc(q, x);
	if (slot->qlen == 1) {		/* The flow is new */
		if (q->tail == NULL) {	/* It is the first flow */
			slot->next = x;	/* ring of one */
		} else {
			slot->next = q->tail->next;
			q->tail->next = x;
		}
		/* New flows get a full quantum and become the ring tail. */
		q->tail = slot;

		slot->allot = q->scaled_quantum;
	}
	if (++sch->q.qlen <= q->limit)
		return NET_XMIT_SUCCESS;

	/* Over the global limit: shed load via sfq_drop(). */
	qlen = slot->qlen;
	dropped = sfq_drop(sch, to_free);
	/* If the drop came from this very flow, the net backlog change
	 * is dropped-minus-enqueued and qlen is unchanged for parents.
	 */
	if (qlen != slot->qlen) {
		qdisc_tree_reduce_backlog(sch, 0, dropped - qdisc_pkt_len(skb));
		return NET_XMIT_CN;
	}

	/* Another flow paid the price; report one packet gone upstream. */
	qdisc_tree_reduce_backlog(sch, 1, dropped);
	return NET_XMIT_SUCCESS;
}
478
/*
 * Dequeue entry point: deficit-round-robin over the ring of active
 * slots. A slot with exhausted allot is skipped (becoming the new
 * tail) after being recharged with one quantum.
 */
static struct sk_buff *
sfq_dequeue(struct Qdisc *sch)
{
	struct sfq_sched_data *q = qdisc_priv(sch);
	struct sk_buff *skb;
	sfq_index a, next_a;
	struct sfq_slot *slot;

	/* No active slots */
	if (q->tail == NULL)
		return NULL;

next_slot:
	a = q->tail->next;
	slot = &q->slots[a];
	if (slot->allot <= 0) {
		/* Out of credit: recharge and move on to the next slot.
		 * NOTE: terminates only if scaled_quantum > 0.
		 */
		q->tail = slot;
		slot->allot += q->scaled_quantum;
		goto next_slot;
	}
	skb = slot_dequeue_head(slot);
	sfq_dec(q, a);
	qdisc_bstats_update(sch, skb);
	sch->q.qlen--;
	qdisc_qstats_backlog_dec(sch, skb);
	slot->backlog -= qdisc_pkt_len(skb);
	/* Is the slot empty? Then unlink it from hash table and ring. */
	if (slot->qlen == 0) {
		q->ht[slot->hash] = SFQ_EMPTY_SLOT;
		next_a = slot->next;
		if (a == next_a) {
			q->tail = NULL; /* no more active slots */
			return skb;
		}
		q->tail->next = next_a;
	} else {
		/* Charge the scaled packet size against the slot's credit. */
		slot->allot -= SFQ_ALLOT_SIZE(qdisc_pkt_len(skb));
	}
	return skb;
}
519
/* Flush the qdisc by repeatedly dequeuing and freeing every packet. */
static void
sfq_reset(struct Qdisc *sch)
{
	struct sk_buff *skb;

	for (;;) {
		skb = sfq_dequeue(sch);
		if (!skb)
			break;
		rtnl_kfree_skbs(skb, skb);
	}
}
528
529
530
531
532
533
534
/*
 * Re-bucket every queued packet after a perturbation change: drain all
 * slots onto a temporary list, reset the hash table and ring, then
 * re-enqueue each packet under the new hash. Packets that cannot be
 * placed (no free slot, or flow already at maxdepth) are dropped.
 * Caller holds the qdisc root lock.
 */
static void sfq_rehash(struct Qdisc *sch)
{
	struct sfq_sched_data *q = qdisc_priv(sch);
	struct sk_buff *skb;
	int i;
	struct sfq_slot *slot;
	struct sk_buff_head list;
	int dropped = 0;
	unsigned int drop_len = 0;

	__skb_queue_head_init(&list);

	/* Phase 1: empty every active slot onto the temporary list. */
	for (i = 0; i < q->maxflows; i++) {
		slot = &q->slots[i];
		if (!slot->qlen)
			continue;
		while (slot->qlen) {
			skb = slot_dequeue_head(slot);
			sfq_dec(q, i);
			__skb_queue_tail(&list, skb);
		}
		slot->backlog = 0;
		red_set_vars(&slot->vars);
		q->ht[slot->hash] = SFQ_EMPTY_SLOT;
	}
	q->tail = NULL;

	/* Phase 2: re-enqueue under the new perturbation key. */
	while ((skb = __skb_dequeue(&list)) != NULL) {
		unsigned int hash = sfq_hash(q, skb);
		sfq_index x = q->ht[hash];

		slot = &q->slots[x];
		if (x == SFQ_EMPTY_SLOT) {
			x = q->dep[0].next; /* get a free slot */
			if (x >= SFQ_MAX_FLOWS) {
drop:
				/* No slot available: account and drop. */
				qdisc_qstats_backlog_dec(sch, skb);
				drop_len += qdisc_pkt_len(skb);
				kfree_skb(skb);
				dropped++;
				continue;
			}
			q->ht[hash] = x;
			slot = &q->slots[x];
			slot->hash = hash;
		}
		if (slot->qlen >= q->maxdepth)
			goto drop;
		slot_queue_add(slot, skb);
		if (q->red_parms)
			slot->vars.qavg = red_calc_qavg(q->red_parms,
							&slot->vars,
							slot->backlog);
		slot->backlog += qdisc_pkt_len(skb);
		sfq_inc(q, x);
		if (slot->qlen == 1) {		/* The flow is new */
			if (q->tail == NULL) {	/* It is the first flow */
				slot->next = x;
			} else {
				slot->next = q->tail->next;
				q->tail->next = x;
			}
			q->tail = slot;
			slot->allot = q->scaled_quantum;
		}
	}
	/* Propagate drops to parents in one batch. */
	sch->q.qlen -= dropped;
	qdisc_tree_reduce_backlog(sch, dropped, drop_len);
}
604
/*
 * Timer callback: pick a new perturbation key and rehash queued
 * packets (only when no classifier is attached and flows are active).
 * Takes the qdisc root lock to serialize against the data path, then
 * rearms itself while perturbation stays enabled.
 */
static void sfq_perturbation(struct timer_list *t)
{
	struct sfq_sched_data *q = from_timer(q, t, perturb_timer);
	struct Qdisc *sch = q->sch;
	spinlock_t *root_lock = qdisc_lock(qdisc_root_sleeping(sch));

	spin_lock(root_lock);
	q->perturbation = prandom_u32();
	if (!q->filter_list && q->tail)
		sfq_rehash(sch);
	spin_unlock(root_lock);

	if (q->perturb_period)
		mod_timer(&q->perturb_timer, jiffies + q->perturb_period);
}
620
621static int sfq_change(struct Qdisc *sch, struct nlattr *opt)
622{
623 struct sfq_sched_data *q = qdisc_priv(sch);
624 struct tc_sfq_qopt *ctl = nla_data(opt);
625 struct tc_sfq_qopt_v1 *ctl_v1 = NULL;
626 unsigned int qlen, dropped = 0;
627 struct red_parms *p = NULL;
628 struct sk_buff *to_free = NULL;
629 struct sk_buff *tail = NULL;
630
631 if (opt->nla_len < nla_attr_size(sizeof(*ctl)))
632 return -EINVAL;
633 if (opt->nla_len >= nla_attr_size(sizeof(*ctl_v1)))
634 ctl_v1 = nla_data(opt);
635 if (ctl->divisor &&
636 (!is_power_of_2(ctl->divisor) || ctl->divisor > 65536))
637 return -EINVAL;
638 if (ctl_v1 && !red_check_params(ctl_v1->qth_min, ctl_v1->qth_max,
639 ctl_v1->Wlog))
640 return -EINVAL;
641 if (ctl_v1 && ctl_v1->qth_min) {
642 p = kmalloc(sizeof(*p), GFP_KERNEL);
643 if (!p)
644 return -ENOMEM;
645 }
646 sch_tree_lock(sch);
647 if (ctl->quantum) {
648 q->quantum = ctl->quantum;
649 q->scaled_quantum = SFQ_ALLOT_SIZE(q->quantum);
650 }
651 q->perturb_period = ctl->perturb_period * HZ;
652 if (ctl->flows)
653 q->maxflows = min_t(u32, ctl->flows, SFQ_MAX_FLOWS);
654 if (ctl->divisor) {
655 q->divisor = ctl->divisor;
656 q->maxflows = min_t(u32, q->maxflows, q->divisor);
657 }
658 if (ctl_v1) {
659 if (ctl_v1->depth)
660 q->maxdepth = min_t(u32, ctl_v1->depth, SFQ_MAX_DEPTH);
661 if (p) {
662 swap(q->red_parms, p);
663 red_set_parms(q->red_parms,
664 ctl_v1->qth_min, ctl_v1->qth_max,
665 ctl_v1->Wlog,
666 ctl_v1->Plog, ctl_v1->Scell_log,
667 NULL,
668 ctl_v1->max_P);
669 }
670 q->flags = ctl_v1->flags;
671 q->headdrop = ctl_v1->headdrop;
672 }
673 if (ctl->limit) {
674 q->limit = min_t(u32, ctl->limit, q->maxdepth * q->maxflows);
675 q->maxflows = min_t(u32, q->maxflows, q->limit);
676 }
677
678 qlen = sch->q.qlen;
679 while (sch->q.qlen > q->limit) {
680 dropped += sfq_drop(sch, &to_free);
681 if (!tail)
682 tail = to_free;
683 }
684
685 rtnl_kfree_skbs(to_free, tail);
686 qdisc_tree_reduce_backlog(sch, qlen - sch->q.qlen, dropped);
687
688 del_timer(&q->perturb_timer);
689 if (q->perturb_period) {
690 mod_timer(&q->perturb_timer, jiffies + q->perturb_period);
691 q->perturbation = prandom_u32();
692 }
693 sch_tree_unlock(sch);
694 kfree(p);
695 return 0;
696}
697
/* Allocate the ht/slots tables; kvmalloc falls back to vmalloc for
 * large divisor/maxflows configurations.
 */
static void *sfq_alloc(size_t sz)
{
	return kvmalloc(sz, GFP_KERNEL);
}
702
/* Free memory obtained from sfq_alloc() (kvfree handles NULL). */
static void sfq_free(void *addr)
{
	kvfree(addr);
}
707
/*
 * Tear down the qdisc. perturb_period is cleared before
 * del_timer_sync() so a concurrently running sfq_perturbation() cannot
 * rearm the timer. Also safe after a failed sfq_init(): sfq_free()
 * accepts NULL tables.
 */
static void sfq_destroy(struct Qdisc *sch)
{
	struct sfq_sched_data *q = qdisc_priv(sch);

	tcf_block_put(q->block);
	q->perturb_period = 0;	/* prevent timer self-rearm */
	del_timer_sync(&q->perturb_timer);
	sfq_free(q->ht);
	sfq_free(q->slots);
	kfree(q->red_parms);
}
719
720static int sfq_init(struct Qdisc *sch, struct nlattr *opt,
721 struct netlink_ext_ack *extack)
722{
723 struct sfq_sched_data *q = qdisc_priv(sch);
724 int i;
725 int err;
726
727 q->sch = sch;
728 timer_setup(&q->perturb_timer, sfq_perturbation, TIMER_DEFERRABLE);
729
730 err = tcf_block_get(&q->block, &q->filter_list, sch, extack);
731 if (err)
732 return err;
733
734 for (i = 0; i < SFQ_MAX_DEPTH + 1; i++) {
735 q->dep[i].next = i + SFQ_MAX_FLOWS;
736 q->dep[i].prev = i + SFQ_MAX_FLOWS;
737 }
738
739 q->limit = SFQ_MAX_DEPTH;
740 q->maxdepth = SFQ_MAX_DEPTH;
741 q->cur_depth = 0;
742 q->tail = NULL;
743 q->divisor = SFQ_DEFAULT_HASH_DIVISOR;
744 q->maxflows = SFQ_DEFAULT_FLOWS;
745 q->quantum = psched_mtu(qdisc_dev(sch));
746 q->scaled_quantum = SFQ_ALLOT_SIZE(q->quantum);
747 q->perturb_period = 0;
748 q->perturbation = prandom_u32();
749
750 if (opt) {
751 int err = sfq_change(sch, opt);
752 if (err)
753 return err;
754 }
755
756 q->ht = sfq_alloc(sizeof(q->ht[0]) * q->divisor);
757 q->slots = sfq_alloc(sizeof(q->slots[0]) * q->maxflows);
758 if (!q->ht || !q->slots) {
759
760 return -ENOMEM;
761 }
762
763 for (i = 0; i < q->divisor; i++)
764 q->ht[i] = SFQ_EMPTY_SLOT;
765
766 for (i = 0; i < q->maxflows; i++) {
767 slot_queue_init(&q->slots[i]);
768 sfq_link(q, i);
769 }
770 if (q->limit >= 1)
771 sch->flags |= TCQ_F_CAN_BYPASS;
772 else
773 sch->flags &= ~TCQ_F_CAN_BYPASS;
774 return 0;
775}
776
/*
 * Dump the current configuration as a tc_sfq_qopt_v1 TCA_OPTIONS
 * attribute (the v1 struct embeds the v0 layout, so old userspace can
 * still parse the prefix). Returns skb->len or -1 if the attribute
 * does not fit, after trimming any partial write.
 */
static int sfq_dump(struct Qdisc *sch, struct sk_buff *skb)
{
	struct sfq_sched_data *q = qdisc_priv(sch);
	unsigned char *b = skb_tail_pointer(skb);
	struct tc_sfq_qopt_v1 opt;
	struct red_parms *p = q->red_parms;

	memset(&opt, 0, sizeof(opt));
	opt.v0.quantum	= q->quantum;
	opt.v0.perturb_period = q->perturb_period / HZ;
	opt.v0.limit	= q->limit;
	opt.v0.divisor	= q->divisor;
	opt.v0.flows	= q->maxflows;
	opt.depth	= q->maxdepth;
	opt.headdrop	= q->headdrop;

	if (p) {
		/* Thresholds are stored scaled by Wlog; unscale for dump. */
		opt.qth_min	= p->qth_min >> p->Wlog;
		opt.qth_max	= p->qth_max >> p->Wlog;
		opt.Wlog	= p->Wlog;
		opt.Plog	= p->Plog;
		opt.Scell_log	= p->Scell_log;
		opt.max_P	= p->max_P;
	}
	memcpy(&opt.stats, &q->stats, sizeof(opt.stats));
	opt.flags	= q->flags;

	if (nla_put(skb, TCA_OPTIONS, sizeof(opt), &opt))
		goto nla_put_failure;

	return skb->len;

nla_put_failure:
	nlmsg_trim(skb, b);
	return -1;
}
813
/* SFQ is classless: its "classes" have no inner qdisc. */
static struct Qdisc *sfq_leaf(struct Qdisc *sch, unsigned long arg)
{
	return NULL;
}
818
/* Class lookup stub: SFQ exposes buckets only through walk/dump. */
static unsigned long sfq_find(struct Qdisc *sch, u32 classid)
{
	return 0;
}
823
/* Filter-bind stub: nothing to reference-count per bucket. */
static unsigned long sfq_bind(struct Qdisc *sch, unsigned long parent,
			      u32 classid)
{
	return 0;
}
829
/* Counterpart of sfq_bind(): nothing to release. */
static void sfq_unbind(struct Qdisc *q, unsigned long cl)
{
}
833
834static struct tcf_block *sfq_tcf_block(struct Qdisc *sch, unsigned long cl,
835 struct netlink_ext_ack *extack)
836{
837 struct sfq_sched_data *q = qdisc_priv(sch);
838
839 if (cl)
840 return NULL;
841 return q->block;
842}
843
/* Dump a bucket pseudo-class: its handle minor is the 1-based bucket
 * number handed out by sfq_walk().
 */
static int sfq_dump_class(struct Qdisc *sch, unsigned long cl,
			  struct sk_buff *skb, struct tcmsg *tcm)
{
	tcm->tcm_handle |= TC_H_MIN(cl);
	return 0;
}
850
/*
 * Dump per-bucket statistics. @cl is a 1-based bucket number (see
 * sfq_walk()), hence the cl - 1 hash-table index. Buckets with no
 * bound slot report all-zero stats.
 */
static int sfq_dump_class_stats(struct Qdisc *sch, unsigned long cl,
				struct gnet_dump *d)
{
	struct sfq_sched_data *q = qdisc_priv(sch);
	sfq_index idx = q->ht[cl - 1];
	struct gnet_stats_queue qs = { 0 };
	struct tc_sfq_xstats xstats = { 0 };

	if (idx != SFQ_EMPTY_SLOT) {
		const struct sfq_slot *slot = &q->slots[idx];

		/* Unscale the credit back to bytes for userspace. */
		xstats.allot = slot->allot << SFQ_ALLOT_SHIFT;
		qs.qlen = slot->qlen;
		qs.backlog = slot->backlog;
	}
	if (gnet_stats_copy_queue(d, NULL, &qs, qs.qlen) < 0)
		return -1;
	return gnet_stats_copy_app(d, &xstats, sizeof(xstats));
}
870
/*
 * Walk the hash buckets, reporting each non-empty bucket as class
 * i + 1. Note arg->count advances for skipped and empty buckets too,
 * so resumption via arg->skip stays consistent across calls.
 */
static void sfq_walk(struct Qdisc *sch, struct qdisc_walker *arg)
{
	struct sfq_sched_data *q = qdisc_priv(sch);
	unsigned int i;

	if (arg->stop)
		return;

	for (i = 0; i < q->divisor; i++) {
		if (q->ht[i] == SFQ_EMPTY_SLOT ||
		    arg->count < arg->skip) {
			arg->count++;
			continue;
		}
		if (arg->fn(sch, i + 1, arg) < 0) {
			arg->stop = 1;
			break;
		}
		arg->count++;
	}
}
892
/* Classless-qdisc class ops: leaf/find/bind are stubs; walk and the
 * dump callbacks expose the hash buckets as read-only pseudo-classes.
 */
static const struct Qdisc_class_ops sfq_class_ops = {
	.leaf		=	sfq_leaf,
	.find		=	sfq_find,
	.tcf_block	=	sfq_tcf_block,
	.bind_tcf	=	sfq_bind,
	.unbind_tcf	=	sfq_unbind,
	.dump		=	sfq_dump_class,
	.dump_stats	=	sfq_dump_class_stats,
	.walk		=	sfq_walk,
};
903
/* Qdisc registration table for "sfq". .change is NULL: runtime
 * reconfiguration goes through destroy/init, not sfq_change() directly.
 */
static struct Qdisc_ops sfq_qdisc_ops __read_mostly = {
	.cl_ops		=	&sfq_class_ops,
	.id		=	"sfq",
	.priv_size	=	sizeof(struct sfq_sched_data),
	.enqueue	=	sfq_enqueue,
	.dequeue	=	sfq_dequeue,
	.peek		=	qdisc_peek_dequeued,
	.init		=	sfq_init,
	.reset		=	sfq_reset,
	.destroy	=	sfq_destroy,
	.change		=	NULL,
	.dump		=	sfq_dump,
	.owner		=	THIS_MODULE,
};
918
/* Module entry: register the sfq qdisc with the packet scheduler. */
static int __init sfq_module_init(void)
{
	return register_qdisc(&sfq_qdisc_ops);
}
/* Module exit: unregister the sfq qdisc. */
static void __exit sfq_module_exit(void)
{
	unregister_qdisc(&sfq_qdisc_ops);
}
927module_init(sfq_module_init)
928module_exit(sfq_module_exit)
929MODULE_LICENSE("GPL");
930