linux/net/sched/sch_fq_codel.c
/*
 * Fair Queue CoDel discipline
 *
 *      This program is free software; you can redistribute it and/or
 *      modify it under the terms of the GNU General Public License
 *      as published by the Free Software Foundation; either version
 *      2 of the License, or (at your option) any later version.
 *
 *  Copyright (C) 2012,2015 Eric Dumazet <edumazet@google.com>
 */

#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/jiffies.h>
#include <linux/string.h>
#include <linux/in.h>
#include <linux/errno.h>
#include <linux/init.h>
#include <linux/skbuff.h>
#include <linux/jhash.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <net/netlink.h>
#include <net/pkt_sched.h>
#include <net/pkt_cls.h>
#include <net/codel.h>
#include <net/codel_impl.h>
#include <net/codel_qdisc.h>

/*      Fair Queue CoDel.
 *
 * Principles :
 * Packets are classified (internal classifier or external) on flows.
 * This is a stochastic model (as we use a hash, several flows
 *                             might be hashed to the same slot)
 * Each flow has a CoDel-managed queue.
 * Flows are linked onto two (Round Robin) lists,
 * so that new flows have priority over old ones.
 *
 * For a given flow, packets are not reordered (CoDel uses a FIFO);
 * only head drops occur.
 * ECN capability is on by default.
 * Low memory footprint (64 bytes per flow)
 */
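
/* Illustrative configuration (a sketch: the option names below belong to the
 * iproute2 "tc" front end, not to this file; tc translates them into the
 * TCA_FQ_CODEL_* netlink attributes parsed in fq_codel_change() below):
 *
 *      tc qdisc add dev eth0 root fq_codel limit 10240 flows 1024 \
 *              target 5ms interval 100ms ecn
 */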

struct fq_codel_flow {
        struct sk_buff    *head;
        struct sk_buff    *tail;
        struct list_head  flowchain;
        int               deficit;
        u32               dropped; /* number of drops (or ECN marks) on this flow */
        struct codel_vars cvars;
}; /* please try to keep this structure <= 64 bytes */

struct fq_codel_sched_data {
        struct tcf_proto __rcu *filter_list; /* optional external classifier */
        struct tcf_block *block;
        struct fq_codel_flow *flows;    /* Flows table [flows_cnt] */
        u32             *backlogs;      /* backlog table [flows_cnt] */
        u32             flows_cnt;      /* number of flows */
        u32             quantum;        /* psched_mtu(qdisc_dev(sch)); */
        u32             drop_batch_size;
        u32             memory_limit;
        struct codel_params cparams;
        struct codel_stats cstats;
        u32             memory_usage;
        u32             drop_overmemory;
        u32             drop_overlimit;
        u32             new_flow_count;

        struct list_head new_flows;     /* list of new flows */
        struct list_head old_flows;     /* list of old flows */
};

static unsigned int fq_codel_hash(const struct fq_codel_sched_data *q,
                                  struct sk_buff *skb)
{
        return reciprocal_scale(skb_get_hash(skb), q->flows_cnt);
}
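
/* reciprocal_scale() maps the 32-bit flow hash onto [0, flows_cnt) without
 * a division: reciprocal_scale(x, n) = ((u64)x * n) >> 32.  Worked example
 * (illustrative numbers): with n = 1024, a hash of 0x80000000 lands in
 * bucket (2^31 * 2^10) >> 32 = 512.  Distinct flows can therefore share a
 * bucket, which is why the model is stochastic rather than exact.
 */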

static unsigned int fq_codel_classify(struct sk_buff *skb, struct Qdisc *sch,
                                      int *qerr)
{
        struct fq_codel_sched_data *q = qdisc_priv(sch);
        struct tcf_proto *filter;
        struct tcf_result res;
        int result;

        if (TC_H_MAJ(skb->priority) == sch->handle &&
            TC_H_MIN(skb->priority) > 0 &&
            TC_H_MIN(skb->priority) <= q->flows_cnt)
                return TC_H_MIN(skb->priority);

        filter = rcu_dereference_bh(q->filter_list);
        if (!filter)
                return fq_codel_hash(q, skb) + 1;

        *qerr = NET_XMIT_SUCCESS | __NET_XMIT_BYPASS;
        result = tcf_classify(skb, filter, &res, false);
        if (result >= 0) {
#ifdef CONFIG_NET_CLS_ACT
                switch (result) {
                case TC_ACT_STOLEN:
                case TC_ACT_QUEUED:
                case TC_ACT_TRAP:
                        *qerr = NET_XMIT_SUCCESS | __NET_XMIT_STOLEN;
                        /* fall through */
                case TC_ACT_SHOT:
                        return 0;
                }
#endif
                if (TC_H_MIN(res.classid) <= q->flows_cnt)
                        return TC_H_MIN(res.classid);
        }
        return 0;
}
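
/* A sketch of direct flow selection (assumed usage, not enforced here):
 * a packet whose skb->priority has a major part equal to the qdisc handle
 * and a minor part of 5 skips both the hash and any external classifier
 * above and is queued to flow index 4 (minor numbers are 1-based, flow
 * indexes 0-based, hence the "idx--" in fq_codel_enqueue()).
 */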

/* helper functions: might be changed when/if skbs use a standard list_head */

/* remove one skb from head of slot queue */
static inline struct sk_buff *dequeue_head(struct fq_codel_flow *flow)
{
        struct sk_buff *skb = flow->head;

        flow->head = skb->next;
        skb->next = NULL;
        return skb;
}

/* add skb to flow queue (tail add) */
static inline void flow_queue_add(struct fq_codel_flow *flow,
                                  struct sk_buff *skb)
{
        if (flow->head == NULL)
                flow->head = skb;
        else
                flow->tail->next = skb;
        flow->tail = skb;
        skb->next = NULL;
}

static unsigned int fq_codel_drop(struct Qdisc *sch, unsigned int max_packets,
                                  struct sk_buff **to_free)
{
        struct fq_codel_sched_data *q = qdisc_priv(sch);
        struct sk_buff *skb;
        unsigned int maxbacklog = 0, idx = 0, i, len;
        struct fq_codel_flow *flow;
        unsigned int threshold;
        unsigned int mem = 0;

        /* Queue is full! Find the fat flow and drop packet(s) from it.
         * This might sound expensive, but with 1024 flows, we scan
         * 4KB of memory, and we don't need to handle a complex tree
         * in the fast path (packet enqueue/dequeue) with many cache misses.
         * In stress mode, we'll try to drop 64 packets from the flow,
         * amortizing this linear lookup to one cache line per drop.
         */
        for (i = 0; i < q->flows_cnt; i++) {
                if (q->backlogs[i] > maxbacklog) {
                        maxbacklog = q->backlogs[i];
                        idx = i;
                }
        }

        /* Our goal is to drop half of this fat flow's backlog */
        threshold = maxbacklog >> 1;

        flow = &q->flows[idx];
        len = 0;
        i = 0;
        do {
                skb = dequeue_head(flow);
                len += qdisc_pkt_len(skb);
                mem += get_codel_cb(skb)->mem_usage;
                __qdisc_drop(skb, to_free);
        } while (++i < max_packets && len < threshold);

        flow->dropped += i;
        q->backlogs[idx] -= len;
        q->memory_usage -= mem;
        sch->qstats.drops += i;
        sch->qstats.backlog -= len;
        sch->q.qlen -= i;
        return idx;
}
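
/* Worked example of the batch drop above (illustrative numbers): if the
 * fattest flow holds 64KB, threshold is 32KB; with 1500-byte packets the
 * loop drops ceil(32768 / 1500) = 22 packets, well under the default
 * drop_batch_size of 64, so one enqueue overflow sheds half of that flow.
 */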

static int fq_codel_enqueue(struct sk_buff *skb, struct Qdisc *sch,
                            struct sk_buff **to_free)
{
        struct fq_codel_sched_data *q = qdisc_priv(sch);
        unsigned int idx, prev_backlog, prev_qlen;
        struct fq_codel_flow *flow;
        int uninitialized_var(ret);
        unsigned int pkt_len;
        bool memory_limited;

        idx = fq_codel_classify(skb, sch, &ret);
        if (idx == 0) {
                if (ret & __NET_XMIT_BYPASS)
                        qdisc_qstats_drop(sch);
                __qdisc_drop(skb, to_free);
                return ret;
        }
        idx--;

        codel_set_enqueue_time(skb);
        flow = &q->flows[idx];
        flow_queue_add(flow, skb);
        q->backlogs[idx] += qdisc_pkt_len(skb);
        qdisc_qstats_backlog_inc(sch, skb);

        if (list_empty(&flow->flowchain)) {
                list_add_tail(&flow->flowchain, &q->new_flows);
                q->new_flow_count++;
                flow->deficit = q->quantum;
                flow->dropped = 0;
        }
        get_codel_cb(skb)->mem_usage = skb->truesize;
        q->memory_usage += get_codel_cb(skb)->mem_usage;
        memory_limited = q->memory_usage > q->memory_limit;
        if (++sch->q.qlen <= sch->limit && !memory_limited)
                return NET_XMIT_SUCCESS;

        prev_backlog = sch->qstats.backlog;
        prev_qlen = sch->q.qlen;

        /* save this packet's length as it might be dropped by fq_codel_drop() */
        pkt_len = qdisc_pkt_len(skb);
        /* fq_codel_drop() is quite expensive, as it performs a linear search
         * in q->backlogs[] to find a fat flow.
         * So instead of dropping a single packet, drop half of its backlog,
         * with a limit of 64 packets, so we do not add too large a CPU
         * spike here.
         */
        ret = fq_codel_drop(sch, q->drop_batch_size, to_free);

        prev_qlen -= sch->q.qlen;
        prev_backlog -= sch->qstats.backlog;
        q->drop_overlimit += prev_qlen;
        if (memory_limited)
                q->drop_overmemory += prev_qlen;

        /* As we dropped packet(s), better let the upper stack know this.
         * If we dropped a packet for this flow, return NET_XMIT_CN,
         * but in this case, our parents won't increase their backlogs.
         */
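        /* When NET_XMIT_CN is returned, the parent never accounts the packet
         * we just enqueued, so the tree backlog reduction must exclude it:
         * hence the "- 1" packet and "- pkt_len" bytes below.
         */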
        if (ret == idx) {
                qdisc_tree_reduce_backlog(sch, prev_qlen - 1,
                                          prev_backlog - pkt_len);
                return NET_XMIT_CN;
        }
        qdisc_tree_reduce_backlog(sch, prev_qlen, prev_backlog);
        return NET_XMIT_SUCCESS;
}

/* This is the specific function called from codel_dequeue()
 * to dequeue a packet from queue. Note: backlog is handled in
 * codel, we don't need to reduce it here.
 */
static struct sk_buff *dequeue_func(struct codel_vars *vars, void *ctx)
{
        struct Qdisc *sch = ctx;
        struct fq_codel_sched_data *q = qdisc_priv(sch);
        struct fq_codel_flow *flow;
        struct sk_buff *skb = NULL;

        flow = container_of(vars, struct fq_codel_flow, cvars);
        if (flow->head) {
                skb = dequeue_head(flow);
                q->backlogs[flow - q->flows] -= qdisc_pkt_len(skb);
                q->memory_usage -= get_codel_cb(skb)->mem_usage;
                sch->q.qlen--;
                sch->qstats.backlog -= qdisc_pkt_len(skb);
        }
        return skb;
}

static void drop_func(struct sk_buff *skb, void *ctx)
{
        struct Qdisc *sch = ctx;

        kfree_skb(skb);
        qdisc_qstats_drop(sch);
}

static struct sk_buff *fq_codel_dequeue(struct Qdisc *sch)
{
        struct fq_codel_sched_data *q = qdisc_priv(sch);
        struct sk_buff *skb;
        struct fq_codel_flow *flow;
        struct list_head *head;
        u32 prev_drop_count, prev_ecn_mark;

begin:
        head = &q->new_flows;
        if (list_empty(head)) {
                head = &q->old_flows;
                if (list_empty(head))
                        return NULL;
        }
        flow = list_first_entry(head, struct fq_codel_flow, flowchain);

        if (flow->deficit <= 0) {
                flow->deficit += q->quantum;
                list_move_tail(&flow->flowchain, &q->old_flows);
                goto begin;
        }

        prev_drop_count = q->cstats.drop_count;
        prev_ecn_mark = q->cstats.ecn_mark;

        skb = codel_dequeue(sch, &sch->qstats.backlog, &q->cparams,
                            &flow->cvars, &q->cstats, qdisc_pkt_len,
                            codel_get_enqueue_time, drop_func, dequeue_func);

        flow->dropped += q->cstats.drop_count - prev_drop_count;
        flow->dropped += q->cstats.ecn_mark - prev_ecn_mark;

        if (!skb) {
                /* force a pass through old_flows to prevent starvation */
                if ((head == &q->new_flows) && !list_empty(&q->old_flows))
                        list_move_tail(&flow->flowchain, &q->old_flows);
                else
                        list_del_init(&flow->flowchain);
                goto begin;
        }
        qdisc_bstats_update(sch, skb);
        flow->deficit -= qdisc_pkt_len(skb);
        /* We can't call qdisc_tree_reduce_backlog() if our qlen is 0,
         * or HTB crashes. Defer it for next round.
         */
        if (q->cstats.drop_count && sch->q.qlen) {
                qdisc_tree_reduce_backlog(sch, q->cstats.drop_count,
                                          q->cstats.drop_len);
                q->cstats.drop_count = 0;
                q->cstats.drop_len = 0;
        }
        return skb;
}
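
/* Scheduling sketch: the dequeue above is Deficit Round Robin over two flow
 * lists; a flow on new_flows is served before anything on old_flows, and
 * once its deficit is spent it moves to the tail of old_flows.  Worked
 * example (illustrative numbers): with quantum = 1514 and two backlogged
 * flows sending 1514-byte and 100-byte packets, each round serves one large
 * packet from the first flow and about fifteen small ones from the second,
 * approximating fair byte rates rather than fair packet rates.
 */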

static void fq_codel_flow_purge(struct fq_codel_flow *flow)
{
        rtnl_kfree_skbs(flow->head, flow->tail);
        flow->head = NULL;
}

static void fq_codel_reset(struct Qdisc *sch)
{
        struct fq_codel_sched_data *q = qdisc_priv(sch);
        int i;

        INIT_LIST_HEAD(&q->new_flows);
        INIT_LIST_HEAD(&q->old_flows);
        for (i = 0; i < q->flows_cnt; i++) {
                struct fq_codel_flow *flow = q->flows + i;

                fq_codel_flow_purge(flow);
                INIT_LIST_HEAD(&flow->flowchain);
                codel_vars_init(&flow->cvars);
        }
        memset(q->backlogs, 0, q->flows_cnt * sizeof(u32));
        sch->q.qlen = 0;
        sch->qstats.backlog = 0;
        q->memory_usage = 0;
}

static const struct nla_policy fq_codel_policy[TCA_FQ_CODEL_MAX + 1] = {
        [TCA_FQ_CODEL_TARGET]   = { .type = NLA_U32 },
        [TCA_FQ_CODEL_LIMIT]    = { .type = NLA_U32 },
        [TCA_FQ_CODEL_INTERVAL] = { .type = NLA_U32 },
        [TCA_FQ_CODEL_ECN]      = { .type = NLA_U32 },
        [TCA_FQ_CODEL_FLOWS]    = { .type = NLA_U32 },
        [TCA_FQ_CODEL_QUANTUM]  = { .type = NLA_U32 },
        [TCA_FQ_CODEL_CE_THRESHOLD] = { .type = NLA_U32 },
        [TCA_FQ_CODEL_DROP_BATCH_SIZE] = { .type = NLA_U32 },
        [TCA_FQ_CODEL_MEMORY_LIMIT] = { .type = NLA_U32 },
};

static int fq_codel_change(struct Qdisc *sch, struct nlattr *opt)
{
        struct fq_codel_sched_data *q = qdisc_priv(sch);
        struct nlattr *tb[TCA_FQ_CODEL_MAX + 1];
        int err;

        if (!opt)
                return -EINVAL;

        err = nla_parse_nested(tb, TCA_FQ_CODEL_MAX, opt, fq_codel_policy,
                               NULL);
        if (err < 0)
                return err;
        if (tb[TCA_FQ_CODEL_FLOWS]) {
                if (q->flows)
                        return -EINVAL;
                q->flows_cnt = nla_get_u32(tb[TCA_FQ_CODEL_FLOWS]);
                if (!q->flows_cnt ||
                    q->flows_cnt > 65536)
                        return -EINVAL;
        }
        sch_tree_lock(sch);

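        /* The netlink attributes carry times in microseconds, while CoDel
         * internally keeps time in units of 2^CODEL_SHIFT ns (1024 ns with
         * CODEL_SHIFT == 10), hence the (us * NSEC_PER_USEC) >> CODEL_SHIFT
         * conversions below.  E.g. the common 5 ms target becomes
         * (5000 * 1000) >> 10 ~= 4882 internal units.
         */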
        if (tb[TCA_FQ_CODEL_TARGET]) {
                u64 target = nla_get_u32(tb[TCA_FQ_CODEL_TARGET]);

                q->cparams.target = (target * NSEC_PER_USEC) >> CODEL_SHIFT;
        }

        if (tb[TCA_FQ_CODEL_CE_THRESHOLD]) {
                u64 val = nla_get_u32(tb[TCA_FQ_CODEL_CE_THRESHOLD]);

                q->cparams.ce_threshold = (val * NSEC_PER_USEC) >> CODEL_SHIFT;
        }

        if (tb[TCA_FQ_CODEL_INTERVAL]) {
                u64 interval = nla_get_u32(tb[TCA_FQ_CODEL_INTERVAL]);

                q->cparams.interval = (interval * NSEC_PER_USEC) >> CODEL_SHIFT;
        }

        if (tb[TCA_FQ_CODEL_LIMIT])
                sch->limit = nla_get_u32(tb[TCA_FQ_CODEL_LIMIT]);

        if (tb[TCA_FQ_CODEL_ECN])
                q->cparams.ecn = !!nla_get_u32(tb[TCA_FQ_CODEL_ECN]);

        if (tb[TCA_FQ_CODEL_QUANTUM])
                q->quantum = max(256U, nla_get_u32(tb[TCA_FQ_CODEL_QUANTUM]));

        /* floor at 1, so a configured batch always drops something */
        if (tb[TCA_FQ_CODEL_DROP_BATCH_SIZE])
                q->drop_batch_size = max(1U, nla_get_u32(tb[TCA_FQ_CODEL_DROP_BATCH_SIZE]));

        if (tb[TCA_FQ_CODEL_MEMORY_LIMIT])
                q->memory_limit = min(1U << 31, nla_get_u32(tb[TCA_FQ_CODEL_MEMORY_LIMIT]));

        while (sch->q.qlen > sch->limit ||
               q->memory_usage > q->memory_limit) {
                struct sk_buff *skb = fq_codel_dequeue(sch);

                q->cstats.drop_len += qdisc_pkt_len(skb);
                rtnl_kfree_skbs(skb, skb);
                q->cstats.drop_count++;
        }
        qdisc_tree_reduce_backlog(sch, q->cstats.drop_count, q->cstats.drop_len);
        q->cstats.drop_count = 0;
        q->cstats.drop_len = 0;

        sch_tree_unlock(sch);
        return 0;
}

static void fq_codel_destroy(struct Qdisc *sch)
{
        struct fq_codel_sched_data *q = qdisc_priv(sch);

        tcf_block_put(q->block);
        kvfree(q->backlogs);
        kvfree(q->flows);
}

static int fq_codel_init(struct Qdisc *sch, struct nlattr *opt)
{
        struct fq_codel_sched_data *q = qdisc_priv(sch);
        int i;
        int err;

        sch->limit = 10*1024;
        q->flows_cnt = 1024;
        q->memory_limit = 32 << 20; /* 32 MBytes */
        q->drop_batch_size = 64;
        q->quantum = psched_mtu(qdisc_dev(sch));
        INIT_LIST_HEAD(&q->new_flows);
        INIT_LIST_HEAD(&q->old_flows);
        codel_params_init(&q->cparams);
        codel_stats_init(&q->cstats);
        q->cparams.ecn = true;
        q->cparams.mtu = psched_mtu(qdisc_dev(sch));

        if (opt) {
                err = fq_codel_change(sch, opt);
                if (err)
                        return err;
        }

        err = tcf_block_get(&q->block, &q->filter_list, sch);
        if (err)
                return err;

        if (!q->flows) {
                q->flows = kvzalloc(q->flows_cnt *
                                    sizeof(struct fq_codel_flow), GFP_KERNEL);
                if (!q->flows)
                        return -ENOMEM;
                q->backlogs = kvzalloc(q->flows_cnt * sizeof(u32), GFP_KERNEL);
                if (!q->backlogs)
                        return -ENOMEM;
                for (i = 0; i < q->flows_cnt; i++) {
                        struct fq_codel_flow *flow = q->flows + i;

                        INIT_LIST_HEAD(&flow->flowchain);
                        codel_vars_init(&flow->cvars);
                }
        }
        if (sch->limit >= 1)
                sch->flags |= TCQ_F_CAN_BYPASS;
        else
                sch->flags &= ~TCQ_F_CAN_BYPASS;
        return 0;
}

static int fq_codel_dump(struct Qdisc *sch, struct sk_buff *skb)
{
        struct fq_codel_sched_data *q = qdisc_priv(sch);
        struct nlattr *opts;

        opts = nla_nest_start(skb, TCA_OPTIONS);
        if (opts == NULL)
                goto nla_put_failure;

        if (nla_put_u32(skb, TCA_FQ_CODEL_TARGET,
                        codel_time_to_us(q->cparams.target)) ||
            nla_put_u32(skb, TCA_FQ_CODEL_LIMIT,
                        sch->limit) ||
            nla_put_u32(skb, TCA_FQ_CODEL_INTERVAL,
                        codel_time_to_us(q->cparams.interval)) ||
            nla_put_u32(skb, TCA_FQ_CODEL_ECN,
                        q->cparams.ecn) ||
            nla_put_u32(skb, TCA_FQ_CODEL_QUANTUM,
                        q->quantum) ||
            nla_put_u32(skb, TCA_FQ_CODEL_DROP_BATCH_SIZE,
                        q->drop_batch_size) ||
            nla_put_u32(skb, TCA_FQ_CODEL_MEMORY_LIMIT,
                        q->memory_limit) ||
            nla_put_u32(skb, TCA_FQ_CODEL_FLOWS,
                        q->flows_cnt))
                goto nla_put_failure;

        if (q->cparams.ce_threshold != CODEL_DISABLED_THRESHOLD &&
            nla_put_u32(skb, TCA_FQ_CODEL_CE_THRESHOLD,
                        codel_time_to_us(q->cparams.ce_threshold)))
                goto nla_put_failure;

        return nla_nest_end(skb, opts);

nla_put_failure:
        return -1;
}

static int fq_codel_dump_stats(struct Qdisc *sch, struct gnet_dump *d)
{
        struct fq_codel_sched_data *q = qdisc_priv(sch);
        struct tc_fq_codel_xstats st = {
                .type                           = TCA_FQ_CODEL_XSTATS_QDISC,
        };
        struct list_head *pos;

        st.qdisc_stats.maxpacket = q->cstats.maxpacket;
        st.qdisc_stats.drop_overlimit = q->drop_overlimit;
        st.qdisc_stats.ecn_mark = q->cstats.ecn_mark;
        st.qdisc_stats.new_flow_count = q->new_flow_count;
        st.qdisc_stats.ce_mark = q->cstats.ce_mark;
        st.qdisc_stats.memory_usage = q->memory_usage;
        st.qdisc_stats.drop_overmemory = q->drop_overmemory;

        sch_tree_lock(sch);
        list_for_each(pos, &q->new_flows)
                st.qdisc_stats.new_flows_len++;

        list_for_each(pos, &q->old_flows)
                st.qdisc_stats.old_flows_len++;
        sch_tree_unlock(sch);

        return gnet_stats_copy_app(d, &st, sizeof(st));
}

static struct Qdisc *fq_codel_leaf(struct Qdisc *sch, unsigned long arg)
{
        return NULL;
}

static unsigned long fq_codel_find(struct Qdisc *sch, u32 classid)
{
        return 0;
}

static unsigned long fq_codel_bind(struct Qdisc *sch, unsigned long parent,
                                   u32 classid)
{
        /* we cannot bypass queue discipline anymore */
        sch->flags &= ~TCQ_F_CAN_BYPASS;
        return 0;
}

static void fq_codel_unbind(struct Qdisc *q, unsigned long cl)
{
}

static struct tcf_block *fq_codel_tcf_block(struct Qdisc *sch, unsigned long cl)
{
        struct fq_codel_sched_data *q = qdisc_priv(sch);

        if (cl)
                return NULL;
        return q->block;
}

static int fq_codel_dump_class(struct Qdisc *sch, unsigned long cl,
                               struct sk_buff *skb, struct tcmsg *tcm)
{
        tcm->tcm_handle |= TC_H_MIN(cl);
        return 0;
}

static int fq_codel_dump_class_stats(struct Qdisc *sch, unsigned long cl,
                                     struct gnet_dump *d)
{
        struct fq_codel_sched_data *q = qdisc_priv(sch);
        u32 idx = cl - 1;
        struct gnet_stats_queue qs = { 0 };
        struct tc_fq_codel_xstats xstats;

        if (idx < q->flows_cnt) {
                const struct fq_codel_flow *flow = &q->flows[idx];
                const struct sk_buff *skb;

                memset(&xstats, 0, sizeof(xstats));
                xstats.type = TCA_FQ_CODEL_XSTATS_CLASS;
                xstats.class_stats.deficit = flow->deficit;
                xstats.class_stats.ldelay =
                        codel_time_to_us(flow->cvars.ldelay);
                xstats.class_stats.count = flow->cvars.count;
                xstats.class_stats.lastcount = flow->cvars.lastcount;
                xstats.class_stats.dropping = flow->cvars.dropping;
                if (flow->cvars.dropping) {
                        codel_tdiff_t delta = flow->cvars.drop_next -
                                              codel_get_time();

                        xstats.class_stats.drop_next = (delta >= 0) ?
                                codel_time_to_us(delta) :
                                -codel_time_to_us(-delta);
                }
                if (flow->head) {
                        sch_tree_lock(sch);
                        skb = flow->head;
                        while (skb) {
                                qs.qlen++;
                                skb = skb->next;
                        }
                        sch_tree_unlock(sch);
                }
                qs.backlog = q->backlogs[idx];
                qs.drops = flow->dropped;
        }
        if (gnet_stats_copy_queue(d, NULL, &qs, qs.qlen) < 0)
                return -1;
        if (idx < q->flows_cnt)
                return gnet_stats_copy_app(d, &xstats, sizeof(xstats));
        return 0;
}

static void fq_codel_walk(struct Qdisc *sch, struct qdisc_walker *arg)
{
        struct fq_codel_sched_data *q = qdisc_priv(sch);
        unsigned int i;

        if (arg->stop)
                return;

        for (i = 0; i < q->flows_cnt; i++) {
                if (list_empty(&q->flows[i].flowchain) ||
                    arg->count < arg->skip) {
                        arg->count++;
                        continue;
                }
                if (arg->fn(sch, i + 1, arg) < 0) {
                        arg->stop = 1;
                        break;
                }
                arg->count++;
        }
}

static const struct Qdisc_class_ops fq_codel_class_ops = {
        .leaf           =       fq_codel_leaf,
        .find           =       fq_codel_find,
        .tcf_block      =       fq_codel_tcf_block,
        .bind_tcf       =       fq_codel_bind,
        .unbind_tcf     =       fq_codel_unbind,
        .dump           =       fq_codel_dump_class,
        .dump_stats     =       fq_codel_dump_class_stats,
        .walk           =       fq_codel_walk,
};

static struct Qdisc_ops fq_codel_qdisc_ops __read_mostly = {
        .cl_ops         =       &fq_codel_class_ops,
        .id             =       "fq_codel",
        .priv_size      =       sizeof(struct fq_codel_sched_data),
        .enqueue        =       fq_codel_enqueue,
        .dequeue        =       fq_codel_dequeue,
        .peek           =       qdisc_peek_dequeued,
        .init           =       fq_codel_init,
        .reset          =       fq_codel_reset,
        .destroy        =       fq_codel_destroy,
        .change         =       fq_codel_change,
        .dump           =       fq_codel_dump,
        .dump_stats     =       fq_codel_dump_stats,
        .owner          =       THIS_MODULE,
};

static int __init fq_codel_module_init(void)
{
        return register_qdisc(&fq_codel_qdisc_ops);
}

static void __exit fq_codel_module_exit(void)
{
        unregister_qdisc(&fq_codel_qdisc_ops);
}

module_init(fq_codel_module_init)
module_exit(fq_codel_module_exit)
MODULE_AUTHOR("Eric Dumazet");
MODULE_LICENSE("GPL");