/*
 * net/sched/sch_fq.c Fair Queue Packet Scheduler (per flow pacing)
 *
 *  Copyright (C) 2013 Eric Dumazet <edumazet@google.com>
 *
 *      This program is free software; you can redistribute it and/or
 *      modify it under the terms of the GNU General Public License
 *      as published by the Free Software Foundation; either version
 *      2 of the License, or (at your option) any later version.
 *
 *  Meant to be used mostly for locally generated traffic :
 *  Fast classification depends on skb->sk being set before reaching us.
 *  If not (router workload), we use rxhash as a fallback, with a 32 bit wide hash.
 *  All packets belonging to a socket are considered as a 'flow'.
 *
 *  Flows are dynamically allocated and stored in a hash table of RB trees.
 *  They are also part of one Round Robin list ('new' or 'old' flows).
 *
 *  Burst avoidance (aka pacing) capability :
 *
 *  Transport (e.g. TCP) can set a rate in sk->sk_pacing_rate, enqueue a
 *  bunch of packets, and this packet scheduler adds delay between
 *  packets to respect the rate limitation.
 *
 *  enqueue() :
 *   - lookup one RB tree (out of 1024 or more) to find the flow.
 *     If the flow does not exist, create it and add it to the tree.
 *     Add the skb to the per flow list of skbs (FIFO).
 *   - Use a special FIFO for high prio packets.
 *
 *  dequeue() : serves flows in Round Robin.
 *  Note : When a flow becomes empty, we do not immediately remove it from
 *  RB trees, for performance reasons (it is expected to send additional packets,
 *  or the SLAB cache will reuse the socket for another flow).
 */
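/*
 * Example setup (illustrative only; the option names below follow the tc-fq
 * options that map to the TCA_FQ_* attributes handled in fq_change(), and
 * may differ depending on the iproute2 version) :
 *
 *   tc qdisc add dev eth0 root fq limit 10000 flow_limit 100 \
 *              quantum 3028 initial_quantum 15140 buckets 1024
 *   tc -s qdisc show dev eth0
 */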

#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/jiffies.h>
#include <linux/string.h>
#include <linux/in.h>
#include <linux/errno.h>
#include <linux/init.h>
#include <linux/skbuff.h>
#include <linux/slab.h>
#include <linux/rbtree.h>
#include <linux/hash.h>
#include <linux/prefetch.h>
#include <net/netlink.h>
#include <net/pkt_sched.h>
#include <net/sock.h>
#include <net/tcp_states.h>

/*
 * Per flow structure, dynamically allocated
 */
struct fq_flow {
        struct sk_buff  *head;          /* list of skbs for this flow : first skb */
        union {
                struct sk_buff *tail;   /* last skb in the list */
                unsigned long  age;     /* jiffies when flow was emptied, for gc */
        };
        struct rb_node  fq_node;        /* anchor in fq_root[] trees */
        struct sock     *sk;
        int             qlen;           /* number of packets in flow queue */
        int             credit;
        u32             socket_hash;    /* sk_hash */
        struct fq_flow *next;           /* next pointer in RR lists, or &detached */

        struct rb_node  rate_node;      /* anchor in q->delayed tree */
        u64             time_next_packet;
};

struct fq_flow_head {
        struct fq_flow *first;
        struct fq_flow *last;
};

struct fq_sched_data {
        struct fq_flow_head new_flows;

        struct fq_flow_head old_flows;

        struct rb_root  delayed;        /* for rate limited flows */
        u64             time_next_delayed_flow;

        struct fq_flow  internal;       /* for non classified or high prio packets */
        u32             quantum;
        u32             initial_quantum;
        u32             flow_refill_delay;
        u32             flow_max_rate;  /* optional max rate per flow */
        u32             flow_plimit;    /* max packets per flow */
        struct rb_root  *fq_root;
        u8              rate_enable;
        u8              fq_trees_log;

        u32             flows;
        u32             inactive_flows;
        u32             throttled_flows;

        u64             stat_gc_flows;
        u64             stat_internal_packets;
        u64             stat_tcp_retrans;
        u64             stat_throttled;
        u64             stat_flows_plimit;
        u64             stat_pkts_too_long;
        u64             stat_allocation_errors;
        struct qdisc_watchdog watchdog;
};

/* Special values used as sentinels in f->next : &detached marks a flow that
 * is on neither the 'new' nor the 'old' RR list, &throttled marks a flow
 * currently parked in the q->delayed rb-tree.
 */
static struct fq_flow detached, throttled;

static void fq_flow_set_detached(struct fq_flow *f)
{
        f->next = &detached;
        f->age = jiffies;
}

static bool fq_flow_is_detached(const struct fq_flow *f)
{
        return f->next == &detached;
}

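/* Park a flow whose next packet is not due yet : insert it in the q->delayed
 * rb-tree, keyed by f->time_next_packet, and track the earliest deadline in
 * q->time_next_delayed_flow so fq_dequeue() knows when to re-arm the watchdog.
 */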
static void fq_flow_set_throttled(struct fq_sched_data *q, struct fq_flow *f)
{
        struct rb_node **p = &q->delayed.rb_node, *parent = NULL;

        while (*p) {
                struct fq_flow *aux;

                parent = *p;
                aux = container_of(parent, struct fq_flow, rate_node);
                if (f->time_next_packet >= aux->time_next_packet)
                        p = &parent->rb_right;
                else
                        p = &parent->rb_left;
        }
        rb_link_node(&f->rate_node, parent, p);
        rb_insert_color(&f->rate_node, &q->delayed);
        q->throttled_flows++;
        q->stat_throttled++;

        f->next = &throttled;
        if (q->time_next_delayed_flow > f->time_next_packet)
                q->time_next_delayed_flow = f->time_next_packet;
}

static struct kmem_cache *fq_flow_cachep __read_mostly;

static void fq_flow_add_tail(struct fq_flow_head *head, struct fq_flow *flow)
{
        if (head->first)
                head->last->next = flow;
        else
                head->first = flow;
        head->last = flow;
        flow->next = NULL;
}

/* limit number of collected flows per round */
#define FQ_GC_MAX 8
#define FQ_GC_AGE (3*HZ)

static bool fq_gc_candidate(const struct fq_flow *f)
{
        return fq_flow_is_detached(f) &&
               time_after(jiffies, f->age + FQ_GC_AGE);
}

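/* Opportunistic garbage collection, called before inserting a new flow in a
 * bucket : while walking the rb-tree towards @sk, free up to FQ_GC_MAX flows
 * that have been detached (empty) for more than FQ_GC_AGE jiffies.
 */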
static void fq_gc(struct fq_sched_data *q,
                  struct rb_root *root,
                  struct sock *sk)
{
        struct fq_flow *f, *tofree[FQ_GC_MAX];
        struct rb_node **p, *parent;
        int fcnt = 0;

        p = &root->rb_node;
        parent = NULL;
        while (*p) {
                parent = *p;

                f = container_of(parent, struct fq_flow, fq_node);
                if (f->sk == sk)
                        break;

                if (fq_gc_candidate(f)) {
                        tofree[fcnt++] = f;
                        if (fcnt == FQ_GC_MAX)
                                break;
                }

                if (f->sk > sk)
                        p = &parent->rb_right;
                else
                        p = &parent->rb_left;
        }

        q->flows -= fcnt;
        q->inactive_flows -= fcnt;
        q->stat_gc_flows += fcnt;
        while (fcnt) {
                struct fq_flow *f = tofree[--fcnt];

                rb_erase(&f->fq_node, root);
                kmem_cache_free(fq_flow_cachep, f);
        }
}

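/* Map one skb to its flow : packets with TC_PRIO_CONTROL priority go to the
 * internal (high priority) flow. Otherwise the socket pointer (or rxhash|1
 * when skb->sk is NULL) is hashed into one of the 2^fq_trees_log rb-trees and
 * the matching flow is looked up, or created if missing. Falls back to the
 * internal flow if the allocation of a new flow fails.
 */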
static struct fq_flow *fq_classify(struct sk_buff *skb, struct fq_sched_data *q)
{
        struct rb_node **p, *parent;
        struct sock *sk = skb->sk;
        struct rb_root *root;
        struct fq_flow *f;

        /* warning: no starvation prevention... */
        if (unlikely((skb->priority & TC_PRIO_MAX) == TC_PRIO_CONTROL))
                return &q->internal;

        if (unlikely(!sk)) {
                /* By forcing the low order bit to 1, we make sure not to
                 * collide with a local flow (socket pointers are word aligned)
                 */
                sk = (struct sock *)(skb_get_rxhash(skb) | 1L);
        }

        root = &q->fq_root[hash_32((u32)(long)sk, q->fq_trees_log)];

        if (q->flows >= (2U << q->fq_trees_log) &&
            q->inactive_flows > q->flows/2)
                fq_gc(q, root, sk);

        p = &root->rb_node;
        parent = NULL;
        while (*p) {
                parent = *p;

                f = container_of(parent, struct fq_flow, fq_node);
                if (f->sk == sk) {
                        /* socket might have been reallocated, so check
                         * if its sk_hash is the same.
                         * If not, we need to refill the credit with the
                         * initial quantum
                         */
                        if (unlikely(skb->sk &&
                                     f->socket_hash != sk->sk_hash)) {
                                f->credit = q->initial_quantum;
                                f->socket_hash = sk->sk_hash;
                                f->time_next_packet = 0ULL;
                        }
                        return f;
                }
                if (f->sk > sk)
                        p = &parent->rb_right;
                else
                        p = &parent->rb_left;
        }

        f = kmem_cache_zalloc(fq_flow_cachep, GFP_ATOMIC | __GFP_NOWARN);
        if (unlikely(!f)) {
                q->stat_allocation_errors++;
                return &q->internal;
        }
        fq_flow_set_detached(f);
        f->sk = sk;
        if (skb->sk)
                f->socket_hash = sk->sk_hash;
        f->credit = q->initial_quantum;

        rb_link_node(&f->fq_node, parent, p);
        rb_insert_color(&f->fq_node, root);

        q->flows++;
        q->inactive_flows++;
        return f;
}

/* remove one skb from head of flow queue */
static struct sk_buff *fq_dequeue_head(struct Qdisc *sch, struct fq_flow *flow)
{
        struct sk_buff *skb = flow->head;

        if (skb) {
                flow->head = skb->next;
                skb->next = NULL;
                flow->qlen--;
                sch->qstats.backlog -= qdisc_pkt_len(skb);
                sch->q.qlen--;
        }
        return skb;
}

/* We might add detection of retransmits in the future.
 * For the time being, just return false.
 */
static bool skb_is_retransmit(struct sk_buff *skb)
{
        return false;
}

/* add skb to flow queue
 * The flow queue is a linked list, basically a FIFO, except for TCP retransmits.
 * We special case TCP retransmits so that they are transmitted before other packets.
 * We rely on the fact that TCP retransmits are unlikely, so we do not waste
 * a separate queue or a pointer.
 * head->  [retrans pkt 1]
 *         [retrans pkt 2]
 *         [ normal pkt 1]
 *         [ normal pkt 2]
 *         [ normal pkt 3]
 * tail->  [ normal pkt 4]
 */
static void flow_queue_add(struct fq_flow *flow, struct sk_buff *skb)
{
        struct sk_buff *prev, *head = flow->head;

        skb->next = NULL;
        if (!head) {
                flow->head = skb;
                flow->tail = skb;
                return;
        }
        if (likely(!skb_is_retransmit(skb))) {
                flow->tail->next = skb;
                flow->tail = skb;
                return;
        }

        /* This skb is a tcp retransmit,
         * find the last retrans packet in the queue
         */
        prev = NULL;
        while (skb_is_retransmit(head)) {
                prev = head;
                head = head->next;
                if (!head)
                        break;
        }
        if (!prev) { /* no rtx packet in queue, become the new head */
                skb->next = flow->head;
                flow->head = skb;
        } else {
                if (prev == flow->tail)
                        flow->tail = skb;
                else
                        skb->next = prev->next;
                prev->next = skb;
        }
}

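/* Qdisc enqueue : drop if the qdisc limit or the per-flow packet limit is hit,
 * otherwise classify the skb and append it to its flow. A flow that was
 * detached re-enters the 'new flows' RR list, and its credit is raised to at
 * least one quantum if it stayed idle longer than flow_refill_delay.
 */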
static int fq_enqueue(struct sk_buff *skb, struct Qdisc *sch)
{
        struct fq_sched_data *q = qdisc_priv(sch);
        struct fq_flow *f;

        if (unlikely(sch->q.qlen >= sch->limit))
                return qdisc_drop(skb, sch);

        f = fq_classify(skb, q);
        if (unlikely(f->qlen >= q->flow_plimit && f != &q->internal)) {
                q->stat_flows_plimit++;
                return qdisc_drop(skb, sch);
        }

        f->qlen++;
        if (skb_is_retransmit(skb))
                q->stat_tcp_retrans++;
        sch->qstats.backlog += qdisc_pkt_len(skb);
        if (fq_flow_is_detached(f)) {
                fq_flow_add_tail(&q->new_flows, f);
                if (time_after(jiffies, f->age + q->flow_refill_delay))
                        f->credit = max_t(u32, f->credit, q->quantum);
                q->inactive_flows--;
                qdisc_unthrottled(sch);
        }

        /* Note: this overwrites f->age */
        flow_queue_add(f, skb);

        if (unlikely(f == &q->internal)) {
                q->stat_internal_packets++;
                qdisc_unthrottled(sch);
        }
        sch->q.qlen++;

        return NET_XMIT_SUCCESS;
}

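/* Release throttled flows whose deadline has passed : walk q->delayed in
 * increasing time_next_packet order and move every flow that is now allowed
 * to send back to the 'old flows' RR list.
 */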
static void fq_check_throttled(struct fq_sched_data *q, u64 now)
{
        struct rb_node *p;

        if (q->time_next_delayed_flow > now)
                return;

        q->time_next_delayed_flow = ~0ULL;
        while ((p = rb_first(&q->delayed)) != NULL) {
                struct fq_flow *f = container_of(p, struct fq_flow, rate_node);

                if (f->time_next_packet > now) {
                        q->time_next_delayed_flow = f->time_next_packet;
                        break;
                }
                rb_erase(p, &q->delayed);
                q->throttled_flows--;
                fq_flow_add_tail(&q->old_flows, f);
        }
}

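/* Qdisc dequeue, DRR style : serve the internal flow first, then flows from
 * the 'new' list, then flows from the 'old' list. A flow with exhausted
 * credit is refilled with one quantum and rotated to the old list; a flow
 * whose next packet is not due yet is parked in the delayed rb-tree. Once a
 * packet is sent, the pacing delay for the next one is computed from the
 * socket pacing rate, capped by flow_max_rate.
 */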
static struct sk_buff *fq_dequeue(struct Qdisc *sch)
{
        struct fq_sched_data *q = qdisc_priv(sch);
        u64 now = ktime_to_ns(ktime_get());
        struct fq_flow_head *head;
        struct sk_buff *skb;
        struct fq_flow *f;
        u32 rate;

        skb = fq_dequeue_head(sch, &q->internal);
        if (skb)
                goto out;
        fq_check_throttled(q, now);
begin:
        head = &q->new_flows;
        if (!head->first) {
                head = &q->old_flows;
                if (!head->first) {
                        if (q->time_next_delayed_flow != ~0ULL)
                                qdisc_watchdog_schedule_ns(&q->watchdog,
                                                           q->time_next_delayed_flow);
                        return NULL;
                }
        }
        f = head->first;

        if (f->credit <= 0) {
                f->credit += q->quantum;
                head->first = f->next;
                fq_flow_add_tail(&q->old_flows, f);
                goto begin;
        }

        if (unlikely(f->head && now < f->time_next_packet)) {
                head->first = f->next;
                fq_flow_set_throttled(q, f);
                goto begin;
        }

        skb = fq_dequeue_head(sch, f);
        if (!skb) {
                head->first = f->next;
                /* force a pass through old_flows to prevent starvation */
                if ((head == &q->new_flows) && q->old_flows.first) {
                        fq_flow_add_tail(&q->old_flows, f);
                } else {
                        fq_flow_set_detached(f);
                        q->inactive_flows++;
                }
                goto begin;
        }
        prefetch(&skb->end);
        f->time_next_packet = now;
        f->credit -= qdisc_pkt_len(skb);

        if (f->credit > 0 || !q->rate_enable)
                goto out;

        rate = q->flow_max_rate;
        if (skb->sk && skb->sk->sk_state != TCP_TIME_WAIT)
                rate = min(skb->sk->sk_pacing_rate, rate);

        if (rate != ~0U) {
                u32 plen = max(qdisc_pkt_len(skb), q->quantum);
                u64 len = (u64)plen * NSEC_PER_SEC;

                if (likely(rate))
                        do_div(len, rate);
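                /* len is the pacing delay in nsec : plen * NSEC_PER_SEC / rate,
                 * with rate in bytes per second. For example (illustrative
                 * numbers), a 65536 byte GSO packet paced at 125000000 B/s
                 * (1 Gbit/s) gives 65536 * 10^9 / 125000000 = 524288 ns,
                 * i.e. about 524 usec between packets.
                 */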
                /* Since socket rate can change later,
                 * clamp the delay to 125 ms.
                 * TODO: maybe segment the too big skb, as in commit
                 * e43ac79a4bc ("sch_tbf: segment too big GSO packets")
                 */
                if (unlikely(len > 125 * NSEC_PER_MSEC)) {
                        len = 125 * NSEC_PER_MSEC;
                        q->stat_pkts_too_long++;
                }

                f->time_next_packet = now + len;
        }
out:
        qdisc_bstats_update(sch, skb);
        qdisc_unthrottled(sch);
        return skb;
}

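/* Qdisc reset : free every queued skb and every allocated flow, and return
 * the book-keeping counters to their initial state.
 */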
static void fq_reset(struct Qdisc *sch)
{
        struct fq_sched_data *q = qdisc_priv(sch);
        struct rb_root *root;
        struct sk_buff *skb;
        struct rb_node *p;
        struct fq_flow *f;
        unsigned int idx;

        while ((skb = fq_dequeue_head(sch, &q->internal)) != NULL)
                kfree_skb(skb);

        if (!q->fq_root)
                return;

        for (idx = 0; idx < (1U << q->fq_trees_log); idx++) {
                root = &q->fq_root[idx];
                while ((p = rb_first(root)) != NULL) {
                        f = container_of(p, struct fq_flow, fq_node);
                        rb_erase(p, root);

                        while ((skb = fq_dequeue_head(sch, f)) != NULL)
                                kfree_skb(skb);

                        kmem_cache_free(fq_flow_cachep, f);
                }
        }
        q->new_flows.first      = NULL;
        q->old_flows.first      = NULL;
        q->delayed              = RB_ROOT;
        q->flows                = 0;
        q->inactive_flows       = 0;
        q->throttled_flows      = 0;
}

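/* Move every flow from the old hash array to the new one; called by
 * fq_resize() when the number of buckets changes. Flows that qualify for
 * garbage collection are freed instead of being re-inserted.
 */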
static void fq_rehash(struct fq_sched_data *q,
                      struct rb_root *old_array, u32 old_log,
                      struct rb_root *new_array, u32 new_log)
{
        struct rb_node *op, **np, *parent;
        struct rb_root *oroot, *nroot;
        struct fq_flow *of, *nf;
        int fcnt = 0;
        u32 idx;

        for (idx = 0; idx < (1U << old_log); idx++) {
                oroot = &old_array[idx];
                while ((op = rb_first(oroot)) != NULL) {
                        rb_erase(op, oroot);
                        of = container_of(op, struct fq_flow, fq_node);
                        if (fq_gc_candidate(of)) {
                                fcnt++;
                                kmem_cache_free(fq_flow_cachep, of);
                                continue;
                        }
                        nroot = &new_array[hash_32((u32)(long)of->sk, new_log)];

                        np = &nroot->rb_node;
                        parent = NULL;
                        while (*np) {
                                parent = *np;

                                nf = container_of(parent, struct fq_flow, fq_node);
                                BUG_ON(nf->sk == of->sk);

                                if (nf->sk > of->sk)
                                        np = &parent->rb_right;
                                else
                                        np = &parent->rb_left;
                        }

                        rb_link_node(&of->fq_node, parent, np);
                        rb_insert_color(&of->fq_node, nroot);
                }
        }
        q->flows -= fcnt;
        q->inactive_flows -= fcnt;
        q->stat_gc_flows += fcnt;
}

static int fq_resize(struct fq_sched_data *q, u32 log)
{
        struct rb_root *array;
        u32 idx;

        if (q->fq_root && log == q->fq_trees_log)
                return 0;

        array = kmalloc(sizeof(struct rb_root) << log, GFP_KERNEL);
        if (!array)
                return -ENOMEM;

        for (idx = 0; idx < (1U << log); idx++)
                array[idx] = RB_ROOT;

        if (q->fq_root) {
                fq_rehash(q, q->fq_root, q->fq_trees_log, array, log);
                kfree(q->fq_root);
        }
        q->fq_root = array;
        q->fq_trees_log = log;

        return 0;
}

static const struct nla_policy fq_policy[TCA_FQ_MAX + 1] = {
        [TCA_FQ_PLIMIT]                 = { .type = NLA_U32 },
        [TCA_FQ_FLOW_PLIMIT]            = { .type = NLA_U32 },
        [TCA_FQ_QUANTUM]                = { .type = NLA_U32 },
        [TCA_FQ_INITIAL_QUANTUM]        = { .type = NLA_U32 },
        [TCA_FQ_RATE_ENABLE]            = { .type = NLA_U32 },
        [TCA_FQ_FLOW_DEFAULT_RATE]      = { .type = NLA_U32 },
        [TCA_FQ_FLOW_MAX_RATE]          = { .type = NLA_U32 },
        [TCA_FQ_BUCKETS_LOG]            = { .type = NLA_U32 },
        [TCA_FQ_FLOW_REFILL_DELAY]      = { .type = NLA_U32 },
};

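/* Qdisc change : parse the TCA_FQ_* netlink attributes, apply the new
 * parameters under the qdisc tree lock, resize the hash table if
 * TCA_FQ_BUCKETS_LOG changed, and drop packets exceeding the new limit.
 */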
static int fq_change(struct Qdisc *sch, struct nlattr *opt)
{
        struct fq_sched_data *q = qdisc_priv(sch);
        struct nlattr *tb[TCA_FQ_MAX + 1];
        int err, drop_count = 0;
        u32 fq_log;

        if (!opt)
                return -EINVAL;

        err = nla_parse_nested(tb, TCA_FQ_MAX, opt, fq_policy);
        if (err < 0)
                return err;

        sch_tree_lock(sch);

        fq_log = q->fq_trees_log;

        if (tb[TCA_FQ_BUCKETS_LOG]) {
                u32 nval = nla_get_u32(tb[TCA_FQ_BUCKETS_LOG]);

                if (nval >= 1 && nval <= ilog2(256*1024))
                        fq_log = nval;
                else
                        err = -EINVAL;
        }
        if (tb[TCA_FQ_PLIMIT])
                sch->limit = nla_get_u32(tb[TCA_FQ_PLIMIT]);

        if (tb[TCA_FQ_FLOW_PLIMIT])
                q->flow_plimit = nla_get_u32(tb[TCA_FQ_FLOW_PLIMIT]);

        if (tb[TCA_FQ_QUANTUM])
                q->quantum = nla_get_u32(tb[TCA_FQ_QUANTUM]);

        if (tb[TCA_FQ_INITIAL_QUANTUM])
                q->initial_quantum = nla_get_u32(tb[TCA_FQ_INITIAL_QUANTUM]);

        if (tb[TCA_FQ_FLOW_DEFAULT_RATE])
                pr_warn_ratelimited("sch_fq: defrate %u ignored.\n",
                                    nla_get_u32(tb[TCA_FQ_FLOW_DEFAULT_RATE]));

        if (tb[TCA_FQ_FLOW_MAX_RATE])
                q->flow_max_rate = nla_get_u32(tb[TCA_FQ_FLOW_MAX_RATE]);

        if (tb[TCA_FQ_RATE_ENABLE]) {
                u32 enable = nla_get_u32(tb[TCA_FQ_RATE_ENABLE]);

                if (enable <= 1)
                        q->rate_enable = enable;
                else
                        err = -EINVAL;
        }

        if (tb[TCA_FQ_FLOW_REFILL_DELAY]) {
                u32 usecs_delay = nla_get_u32(tb[TCA_FQ_FLOW_REFILL_DELAY]);

                q->flow_refill_delay = usecs_to_jiffies(usecs_delay);
        }

        if (!err)
                err = fq_resize(q, fq_log);

        while (sch->q.qlen > sch->limit) {
                struct sk_buff *skb = fq_dequeue(sch);

                if (!skb)
                        break;
                kfree_skb(skb);
                drop_count++;
        }
        qdisc_tree_decrease_qlen(sch, drop_count);

        sch_tree_unlock(sch);
        return err;
}

static void fq_destroy(struct Qdisc *sch)
{
        struct fq_sched_data *q = qdisc_priv(sch);

        fq_reset(sch);
        kfree(q->fq_root);
        qdisc_watchdog_cancel(&q->watchdog);
}

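/* Qdisc init : set the default parameters (10000 packet limit, 100 packets
 * per flow, quantum of 2 MTUs, initial quantum of 10 MTUs, 40 ms refill
 * delay, pacing enabled, 1024 hash buckets), then apply any netlink
 * attributes passed at creation time.
 */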
static int fq_init(struct Qdisc *sch, struct nlattr *opt)
{
        struct fq_sched_data *q = qdisc_priv(sch);
        int err;

        sch->limit              = 10000;
        q->flow_plimit          = 100;
        q->quantum              = 2 * psched_mtu(qdisc_dev(sch));
        q->initial_quantum      = 10 * psched_mtu(qdisc_dev(sch));
        q->flow_refill_delay    = msecs_to_jiffies(40);
        q->flow_max_rate        = ~0U;
        q->rate_enable          = 1;
        q->new_flows.first      = NULL;
        q->old_flows.first      = NULL;
        q->delayed              = RB_ROOT;
        q->fq_root              = NULL;
        q->fq_trees_log         = ilog2(1024);
        qdisc_watchdog_init(&q->watchdog, sch);

        if (opt)
                err = fq_change(sch, opt);
        else
                err = fq_resize(q, q->fq_trees_log);

        return err;
}

static int fq_dump(struct Qdisc *sch, struct sk_buff *skb)
{
        struct fq_sched_data *q = qdisc_priv(sch);
        struct nlattr *opts;

        opts = nla_nest_start(skb, TCA_OPTIONS);
        if (opts == NULL)
                goto nla_put_failure;

        /* TCA_FQ_FLOW_DEFAULT_RATE is not used anymore */

        if (nla_put_u32(skb, TCA_FQ_PLIMIT, sch->limit) ||
            nla_put_u32(skb, TCA_FQ_FLOW_PLIMIT, q->flow_plimit) ||
            nla_put_u32(skb, TCA_FQ_QUANTUM, q->quantum) ||
            nla_put_u32(skb, TCA_FQ_INITIAL_QUANTUM, q->initial_quantum) ||
            nla_put_u32(skb, TCA_FQ_RATE_ENABLE, q->rate_enable) ||
            nla_put_u32(skb, TCA_FQ_FLOW_MAX_RATE, q->flow_max_rate) ||
            nla_put_u32(skb, TCA_FQ_FLOW_REFILL_DELAY,
                        jiffies_to_usecs(q->flow_refill_delay)) ||
            nla_put_u32(skb, TCA_FQ_BUCKETS_LOG, q->fq_trees_log))
                goto nla_put_failure;

        nla_nest_end(skb, opts);
        return skb->len;

nla_put_failure:
        return -1;
}

static int fq_dump_stats(struct Qdisc *sch, struct gnet_dump *d)
{
        struct fq_sched_data *q = qdisc_priv(sch);
        u64 now = ktime_to_ns(ktime_get());
        struct tc_fq_qd_stats st = {
                .gc_flows               = q->stat_gc_flows,
                .highprio_packets       = q->stat_internal_packets,
                .tcp_retrans            = q->stat_tcp_retrans,
                .throttled              = q->stat_throttled,
                .flows_plimit           = q->stat_flows_plimit,
                .pkts_too_long          = q->stat_pkts_too_long,
                .allocation_errors      = q->stat_allocation_errors,
                .flows                  = q->flows,
                .inactive_flows         = q->inactive_flows,
                .throttled_flows        = q->throttled_flows,
                .time_next_delayed_flow = q->time_next_delayed_flow - now,
        };

        return gnet_stats_copy_app(d, &st, sizeof(st));
}

static struct Qdisc_ops fq_qdisc_ops __read_mostly = {
        .id             =       "fq",
        .priv_size      =       sizeof(struct fq_sched_data),

        .enqueue        =       fq_enqueue,
        .dequeue        =       fq_dequeue,
        .peek           =       qdisc_peek_dequeued,
        .init           =       fq_init,
        .reset          =       fq_reset,
        .destroy        =       fq_destroy,
        .change         =       fq_change,
        .dump           =       fq_dump,
        .dump_stats     =       fq_dump_stats,
        .owner          =       THIS_MODULE,
};

static int __init fq_module_init(void)
{
        int ret;

        fq_flow_cachep = kmem_cache_create("fq_flow_cache",
                                           sizeof(struct fq_flow),
                                           0, 0, NULL);
        if (!fq_flow_cachep)
                return -ENOMEM;

        ret = register_qdisc(&fq_qdisc_ops);
        if (ret)
                kmem_cache_destroy(fq_flow_cachep);
        return ret;
}

static void __exit fq_module_exit(void)
{
        unregister_qdisc(&fq_qdisc_ops);
        kmem_cache_destroy(fq_flow_cachep);
}

module_init(fq_module_init)
module_exit(fq_module_exit)
MODULE_AUTHOR("Eric Dumazet");
MODULE_LICENSE("GPL");