linux/net/sched/sch_fq.c
/*
 * net/sched/sch_fq.c Fair Queue Packet Scheduler (per flow pacing)
 *
 *  Copyright (C) 2013-2015 Eric Dumazet <edumazet@google.com>
 *
 *      This program is free software; you can redistribute it and/or
 *      modify it under the terms of the GNU General Public License
 *      as published by the Free Software Foundation; either version
 *      2 of the License, or (at your option) any later version.
 *
 *  Meant to be mostly used for locally generated traffic:
 *  Fast classification depends on skb->sk being set before reaching us.
 *  If not (router workload), we use rxhash as fallback, with a 32-bit wide hash.
 *  All packets belonging to a socket are considered as a 'flow'.
 *
 *  Flows are dynamically allocated and stored in a hash table of RB trees.
 *  They are also linked into one of two Round Robin lists ('new' or 'old' flows).
 *
 *  Burst avoidance (aka pacing) capability:
 *
 *  Transport (e.g. TCP) can set a rate in sk->sk_pacing_rate, enqueue a
 *  bunch of packets, and this packet scheduler adds delay between
 *  packets to respect the rate limitation.
 *
 *  enqueue():
 *   - lookup one RB tree (out of 1024 or more) to find the flow.
 *     If the flow does not exist, create it and add it to the tree.
 *     Add the skb to the per-flow list of skbs (fifo).
 *   - Use a special fifo for high prio packets
 *
 *  dequeue(): serves flows in Round Robin
 *  Note: When a flow becomes empty, we do not immediately remove it from
 *  rb trees, for performance reasons (it's expected to send additional packets,
 *  or the SLAB cache will reuse the socket for another flow)
 */
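
/* For illustration, a sketch of a typical user-space configuration of
 * this qdisc with the tc utility (option names as implemented by
 * iproute2's fq support; the values are examples, not defaults enforced
 * here). With a 1500-byte MTU, fq_init() below computes quantum 3028
 * and initial_quantum 15140, matching the values shown:
 *
 *   tc qdisc replace dev eth0 root fq limit 10000 flow_limit 100 \
 *      quantum 3028 initial_quantum 15140 maxrate 1gbit
 */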

#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/jiffies.h>
#include <linux/string.h>
#include <linux/in.h>
#include <linux/errno.h>
#include <linux/init.h>
#include <linux/skbuff.h>
#include <linux/slab.h>
#include <linux/rbtree.h>
#include <linux/hash.h>
#include <linux/prefetch.h>
#include <linux/vmalloc.h>
#include <net/netlink.h>
#include <net/pkt_sched.h>
#include <net/sock.h>
#include <net/tcp_states.h>
#include <net/tcp.h>

/*
 * Per flow structure, dynamically allocated
 */
struct fq_flow {
        struct sk_buff  *head;          /* list of skbs for this flow: first skb */
        union {
                struct sk_buff *tail;   /* last skb in the list */
                unsigned long  age;     /* jiffies when flow was emptied, for gc */
        };
        struct rb_node  fq_node;        /* anchor in fq_root[] trees */
        struct sock     *sk;
        int             qlen;           /* number of packets in flow queue */
        int             credit;
        u32             socket_hash;    /* sk_hash */
        struct fq_flow *next;           /* next pointer in RR lists, or a sentinel (&detached, &throttled) */

        struct rb_node  rate_node;      /* anchor in q->delayed tree */
        u64             time_next_packet;
};
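
/* Note on the union above: tail is only meaningful while the flow has
 * packets queued (head != NULL). Once the flow empties and is detached,
 * the same storage is reused to hold its age for garbage collection
 * (see fq_flow_set_detached() and the "overwrites f->age" note in
 * fq_enqueue()).
 */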

struct fq_flow_head {
        struct fq_flow *first;
        struct fq_flow *last;
};

struct fq_sched_data {
        struct fq_flow_head new_flows;

        struct fq_flow_head old_flows;

        struct rb_root  delayed;        /* for rate limited flows */
        u64             time_next_delayed_flow;
        unsigned long   unthrottle_latency_ns;

        struct fq_flow  internal;       /* for non-classified or high prio packets */
        u32             quantum;
        u32             initial_quantum;
        u32             flow_refill_delay;
        u32             flow_plimit;    /* max packets per flow */
        unsigned long   flow_max_rate;  /* optional max rate per flow */
        u32             orphan_mask;    /* mask for orphaned skb */
        u32             low_rate_threshold;
        struct rb_root  *fq_root;
        u8              rate_enable;
        u8              fq_trees_log;

        u32             flows;
        u32             inactive_flows;
        u32             throttled_flows;

        u64             stat_gc_flows;
        u64             stat_internal_packets;
        u64             stat_throttled;
        u64             stat_flows_plimit;
        u64             stat_pkts_too_long;
        u64             stat_allocation_errors;
        struct qdisc_watchdog watchdog;
};

/* Special values: a detached flow (not on the old/new RR lists) has
 * f->next == &detached; a rate-limited flow parked in q->delayed has
 * f->next == &throttled.
 */
static struct fq_flow detached, throttled;

static void fq_flow_set_detached(struct fq_flow *f)
{
        f->next = &detached;
        f->age = jiffies;
}

static bool fq_flow_is_detached(const struct fq_flow *f)
{
        return f->next == &detached;
}

static bool fq_flow_is_throttled(const struct fq_flow *f)
{
        return f->next == &throttled;
}

static void fq_flow_add_tail(struct fq_flow_head *head, struct fq_flow *flow)
{
        if (head->first)
                head->last->next = flow;
        else
                head->first = flow;
        head->last = flow;
        flow->next = NULL;
}

static void fq_flow_unset_throttled(struct fq_sched_data *q, struct fq_flow *f)
{
        rb_erase(&f->rate_node, &q->delayed);
        q->throttled_flows--;
        fq_flow_add_tail(&q->old_flows, f);
}

static void fq_flow_set_throttled(struct fq_sched_data *q, struct fq_flow *f)
{
        struct rb_node **p = &q->delayed.rb_node, *parent = NULL;

        while (*p) {
                struct fq_flow *aux;

                parent = *p;
                aux = rb_entry(parent, struct fq_flow, rate_node);
                if (f->time_next_packet >= aux->time_next_packet)
                        p = &parent->rb_right;
                else
                        p = &parent->rb_left;
        }
        rb_link_node(&f->rate_node, parent, p);
        rb_insert_color(&f->rate_node, &q->delayed);
        q->throttled_flows++;
        q->stat_throttled++;

        f->next = &throttled;
        if (q->time_next_delayed_flow > f->time_next_packet)
                q->time_next_delayed_flow = f->time_next_packet;
}
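
/* The q->delayed tree is keyed by time_next_packet, so rb_first() in
 * fq_check_throttled() always yields the throttled flow with the
 * earliest release time.
 */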

static struct kmem_cache *fq_flow_cachep __read_mostly;

/* limit number of collected flows per round */
#define FQ_GC_MAX 8
#define FQ_GC_AGE (3*HZ)

static bool fq_gc_candidate(const struct fq_flow *f)
{
        return fq_flow_is_detached(f) &&
               time_after(jiffies, f->age + FQ_GC_AGE);
}

static void fq_gc(struct fq_sched_data *q,
                  struct rb_root *root,
                  struct sock *sk)
{
        struct fq_flow *f, *tofree[FQ_GC_MAX];
        struct rb_node **p, *parent;
        int fcnt = 0;

        p = &root->rb_node;
        parent = NULL;
        while (*p) {
                parent = *p;

                f = rb_entry(parent, struct fq_flow, fq_node);
                if (f->sk == sk)
                        break;

                if (fq_gc_candidate(f)) {
                        tofree[fcnt++] = f;
                        if (fcnt == FQ_GC_MAX)
                                break;
                }

                if (f->sk > sk)
                        p = &parent->rb_right;
                else
                        p = &parent->rb_left;
        }

        q->flows -= fcnt;
        q->inactive_flows -= fcnt;
        q->stat_gc_flows += fcnt;
        while (fcnt) {
                struct fq_flow *f = tofree[--fcnt];

                rb_erase(&f->fq_node, root);
                kmem_cache_free(fq_flow_cachep, f);
        }
}

static struct fq_flow *fq_classify(struct sk_buff *skb, struct fq_sched_data *q)
{
        struct rb_node **p, *parent;
        struct sock *sk = skb->sk;
        struct rb_root *root;
        struct fq_flow *f;

        /* warning: no starvation prevention... */
        if (unlikely((skb->priority & TC_PRIO_MAX) == TC_PRIO_CONTROL))
                return &q->internal;

        /* SYNACK messages are attached to a TCP_NEW_SYN_RECV request socket
         * or a listener (SYNCOOKIE mode)
         * 1) request sockets are not full blown,
         *    they do not contain sk_pacing_rate
         * 2) They are not part of a 'flow' yet
         * 3) We do not want to rate limit them (e.g. SYNFLOOD attack),
         *    especially if the listener set SO_MAX_PACING_RATE
         * 4) We pretend they are orphaned
         */
        if (!sk || sk_listener(sk)) {
                unsigned long hash = skb_get_hash(skb) & q->orphan_mask;

                /* By forcing low order bit to 1, we make sure to not
                 * collide with a local flow (socket pointers are word aligned)
                 */
                sk = (struct sock *)((hash << 1) | 1UL);
                skb_orphan(skb);
        }
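
        /* Worked example: with the default orphan_mask of 1023, the fake
         * pointers built above are the odd values 1, 3, ..., 2047. Real
         * struct sock pointers have their low bit clear (word alignment),
         * so the two key spaces cannot collide in the RB trees.
         */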

        root = &q->fq_root[hash_ptr(sk, q->fq_trees_log)];

        if (q->flows >= (2U << q->fq_trees_log) &&
            q->inactive_flows > q->flows/2)
                fq_gc(q, root, sk);

        p = &root->rb_node;
        parent = NULL;
        while (*p) {
                parent = *p;

                f = rb_entry(parent, struct fq_flow, fq_node);
                if (f->sk == sk) {
                        /* socket might have been reallocated, so check
                         * if its sk_hash is the same.
                         * If not, we need to refill credit with
                         * initial quantum
                         */
                        if (unlikely(skb->sk &&
                                     f->socket_hash != sk->sk_hash)) {
                                f->credit = q->initial_quantum;
                                f->socket_hash = sk->sk_hash;
                                if (fq_flow_is_throttled(f))
                                        fq_flow_unset_throttled(q, f);
                                f->time_next_packet = 0ULL;
                        }
                        return f;
                }
                if (f->sk > sk)
                        p = &parent->rb_right;
                else
                        p = &parent->rb_left;
        }

        f = kmem_cache_zalloc(fq_flow_cachep, GFP_ATOMIC | __GFP_NOWARN);
        if (unlikely(!f)) {
                q->stat_allocation_errors++;
                return &q->internal;
        }
        fq_flow_set_detached(f);
        f->sk = sk;
        if (skb->sk)
                f->socket_hash = sk->sk_hash;
        f->credit = q->initial_quantum;

        rb_link_node(&f->fq_node, parent, p);
        rb_insert_color(&f->fq_node, root);

        q->flows++;
        q->inactive_flows++;
        return f;
}

/* remove one skb from head of flow queue */
static struct sk_buff *fq_dequeue_head(struct Qdisc *sch, struct fq_flow *flow)
{
        struct sk_buff *skb = flow->head;

        if (skb) {
                flow->head = skb->next;
                skb_mark_not_on_list(skb);
                flow->qlen--;
                qdisc_qstats_backlog_dec(sch, skb);
                sch->q.qlen--;
        }
        return skb;
}

static void flow_queue_add(struct fq_flow *flow, struct sk_buff *skb)
{
        struct sk_buff *head = flow->head;

        skb->next = NULL;
        if (!head)
                flow->head = skb;
        else
                flow->tail->next = skb;

        flow->tail = skb;
}

static int fq_enqueue(struct sk_buff *skb, struct Qdisc *sch,
                      struct sk_buff **to_free)
{
        struct fq_sched_data *q = qdisc_priv(sch);
        struct fq_flow *f;

        if (unlikely(sch->q.qlen >= sch->limit))
                return qdisc_drop(skb, sch, to_free);

        f = fq_classify(skb, q);
        if (unlikely(f->qlen >= q->flow_plimit && f != &q->internal)) {
                q->stat_flows_plimit++;
                return qdisc_drop(skb, sch, to_free);
        }

        f->qlen++;
        qdisc_qstats_backlog_inc(sch, skb);
        if (fq_flow_is_detached(f)) {
                struct sock *sk = skb->sk;

                fq_flow_add_tail(&q->new_flows, f);
                if (time_after(jiffies, f->age + q->flow_refill_delay))
                        f->credit = max_t(u32, f->credit, q->quantum);
                if (sk && q->rate_enable) {
                        if (unlikely(smp_load_acquire(&sk->sk_pacing_status) !=
                                     SK_PACING_FQ))
                                smp_store_release(&sk->sk_pacing_status,
                                                  SK_PACING_FQ);
                }
                q->inactive_flows--;
        }

        /* Note: this overwrites f->age */
        flow_queue_add(f, skb);

        if (unlikely(f == &q->internal))
                q->stat_internal_packets++;
        sch->q.qlen++;

        return NET_XMIT_SUCCESS;
}

static void fq_check_throttled(struct fq_sched_data *q, u64 now)
{
        unsigned long sample;
        struct rb_node *p;

        if (q->time_next_delayed_flow > now)
                return;

        /* Update unthrottle latency EWMA.
         * This is cheap and can help diagnose timer/latency problems.
         */
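        /* The two shifts below implement an EWMA with coefficient 1/8:
         * avg = 7/8 * avg + 1/8 * sample.
         */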
        sample = (unsigned long)(now - q->time_next_delayed_flow);
        q->unthrottle_latency_ns -= q->unthrottle_latency_ns >> 3;
        q->unthrottle_latency_ns += sample >> 3;

        q->time_next_delayed_flow = ~0ULL;
        while ((p = rb_first(&q->delayed)) != NULL) {
                struct fq_flow *f = rb_entry(p, struct fq_flow, rate_node);

                if (f->time_next_packet > now) {
                        q->time_next_delayed_flow = f->time_next_packet;
                        break;
                }
                fq_flow_unset_throttled(q, f);
        }
}

static struct sk_buff *fq_dequeue(struct Qdisc *sch)
{
        struct fq_sched_data *q = qdisc_priv(sch);
        u64 now = ktime_get_ns();
        struct fq_flow_head *head;
        struct sk_buff *skb;
        struct fq_flow *f;
        unsigned long rate;
        u32 plen;

        skb = fq_dequeue_head(sch, &q->internal);
        if (skb)
                goto out;
        fq_check_throttled(q, now);
begin:
        head = &q->new_flows;
        if (!head->first) {
                head = &q->old_flows;
                if (!head->first) {
                        if (q->time_next_delayed_flow != ~0ULL)
                                qdisc_watchdog_schedule_ns(&q->watchdog,
                                                           q->time_next_delayed_flow);
                        return NULL;
                }
        }
        f = head->first;

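        /* Deficit round-robin step: an exhausted flow receives a fresh
         * quantum of credit and is rotated to the tail of old_flows, so
         * each active flow sends roughly one quantum per round.
         */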
        if (f->credit <= 0) {
                f->credit += q->quantum;
                head->first = f->next;
                fq_flow_add_tail(&q->old_flows, f);
                goto begin;
        }

        skb = f->head;
        if (skb) {
                u64 time_next_packet = max_t(u64, ktime_to_ns(skb->tstamp),
                                             f->time_next_packet);

                if (now < time_next_packet) {
                        head->first = f->next;
                        f->time_next_packet = time_next_packet;
                        fq_flow_set_throttled(q, f);
                        goto begin;
                }
        }

        skb = fq_dequeue_head(sch, f);
        if (!skb) {
                head->first = f->next;
                /* force a pass through old_flows to prevent starvation */
                if ((head == &q->new_flows) && q->old_flows.first) {
                        fq_flow_add_tail(&q->old_flows, f);
                } else {
                        fq_flow_set_detached(f);
                        q->inactive_flows++;
                }
                goto begin;
        }
        prefetch(&skb->end);
        plen = qdisc_pkt_len(skb);
        f->credit -= plen;

        if (!q->rate_enable)
                goto out;

        rate = q->flow_max_rate;

        /* If EDT time was provided for this skb, we need to
         * update f->time_next_packet only if this qdisc enforces
         * a flow max rate.
         */
        if (!skb->tstamp) {
                if (skb->sk)
                        rate = min(skb->sk->sk_pacing_rate, rate);

                if (rate <= q->low_rate_threshold) {
                        f->credit = 0;
                } else {
                        plen = max(plen, q->quantum);
                        if (f->credit > 0)
                                goto out;
                }
        }
        if (rate != ~0UL) {
                u64 len = (u64)plen * NSEC_PER_SEC;

                if (likely(rate))
                        len = div64_ul(len, rate);
                /* Since socket rate can change later,
                 * clamp the delay to 1 second.
                 * Really, providers of too big packets should be fixed!
                 */
                if (unlikely(len > NSEC_PER_SEC)) {
                        len = NSEC_PER_SEC;
                        q->stat_pkts_too_long++;
                }
                /* Account for schedule/timers drifts.
                 * f->time_next_packet was set when prior packet was sent,
                 * and current time (@now) can be too late by tens of us.
                 */
                if (f->time_next_packet)
                        len -= min(len/2, now - f->time_next_packet);
                f->time_next_packet = now + len;
        }
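        /* Worked example (illustrative numbers): a 1514-byte packet paced
         * at sk_pacing_rate = 12500000 bytes/sec (100 Mbit/sec) gives
         * len = 1514 * NSEC_PER_SEC / 12500000 = 121120 ns, so this flow
         * is next serviced about 121 usec after this dequeue.
         */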
out:
        qdisc_bstats_update(sch, skb);
        return skb;
}

static void fq_flow_purge(struct fq_flow *flow)
{
        rtnl_kfree_skbs(flow->head, flow->tail);
        flow->head = NULL;
        flow->qlen = 0;
}

static void fq_reset(struct Qdisc *sch)
{
        struct fq_sched_data *q = qdisc_priv(sch);
        struct rb_root *root;
        struct rb_node *p;
        struct fq_flow *f;
        unsigned int idx;

        sch->q.qlen = 0;
        sch->qstats.backlog = 0;

        fq_flow_purge(&q->internal);

        if (!q->fq_root)
                return;

        for (idx = 0; idx < (1U << q->fq_trees_log); idx++) {
                root = &q->fq_root[idx];
                while ((p = rb_first(root)) != NULL) {
                        f = rb_entry(p, struct fq_flow, fq_node);
                        rb_erase(p, root);

                        fq_flow_purge(f);

                        kmem_cache_free(fq_flow_cachep, f);
                }
        }
        q->new_flows.first      = NULL;
        q->old_flows.first      = NULL;
        q->delayed              = RB_ROOT;
        q->flows                = 0;
        q->inactive_flows       = 0;
        q->throttled_flows      = 0;
}

static void fq_rehash(struct fq_sched_data *q,
                      struct rb_root *old_array, u32 old_log,
                      struct rb_root *new_array, u32 new_log)
{
        struct rb_node *op, **np, *parent;
        struct rb_root *oroot, *nroot;
        struct fq_flow *of, *nf;
        int fcnt = 0;
        u32 idx;

        for (idx = 0; idx < (1U << old_log); idx++) {
                oroot = &old_array[idx];
                while ((op = rb_first(oroot)) != NULL) {
                        rb_erase(op, oroot);
                        of = rb_entry(op, struct fq_flow, fq_node);
                        if (fq_gc_candidate(of)) {
                                fcnt++;
                                kmem_cache_free(fq_flow_cachep, of);
                                continue;
                        }
                        nroot = &new_array[hash_ptr(of->sk, new_log)];

                        np = &nroot->rb_node;
                        parent = NULL;
                        while (*np) {
                                parent = *np;

                                nf = rb_entry(parent, struct fq_flow, fq_node);
                                BUG_ON(nf->sk == of->sk);

                                if (nf->sk > of->sk)
                                        np = &parent->rb_right;
                                else
                                        np = &parent->rb_left;
                        }

                        rb_link_node(&of->fq_node, parent, np);
                        rb_insert_color(&of->fq_node, nroot);
                }
        }
        q->flows -= fcnt;
        q->inactive_flows -= fcnt;
        q->stat_gc_flows += fcnt;
}

static void fq_free(void *addr)
{
        kvfree(addr);
}

static int fq_resize(struct Qdisc *sch, u32 log)
{
        struct fq_sched_data *q = qdisc_priv(sch);
        struct rb_root *array;
        void *old_fq_root;
        u32 idx;

        if (q->fq_root && log == q->fq_trees_log)
                return 0;

        /* If XPS was setup, we can allocate memory on right NUMA node */
        array = kvmalloc_node(sizeof(struct rb_root) << log, GFP_KERNEL | __GFP_RETRY_MAYFAIL,
                              netdev_queue_numa_node_read(sch->dev_queue));
        if (!array)
                return -ENOMEM;

        for (idx = 0; idx < (1U << log); idx++)
                array[idx] = RB_ROOT;

        sch_tree_lock(sch);

        old_fq_root = q->fq_root;
        if (old_fq_root)
                fq_rehash(q, old_fq_root, q->fq_trees_log, array, log);

        q->fq_root = array;
        q->fq_trees_log = log;

        sch_tree_unlock(sch);

        fq_free(old_fq_root);

        return 0;
}
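
/* Sizing note: with the default fq_trees_log of ilog2(1024) and an
 * 8-byte rb_root on a 64-bit build, the hash array above is 8 KiB;
 * kvmalloc_node() transparently falls back to vmalloc for the larger
 * logs accepted by fq_change() (up to ilog2(256*1024)).
 */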

static const struct nla_policy fq_policy[TCA_FQ_MAX + 1] = {
        [TCA_FQ_PLIMIT]                 = { .type = NLA_U32 },
        [TCA_FQ_FLOW_PLIMIT]            = { .type = NLA_U32 },
        [TCA_FQ_QUANTUM]                = { .type = NLA_U32 },
        [TCA_FQ_INITIAL_QUANTUM]        = { .type = NLA_U32 },
        [TCA_FQ_RATE_ENABLE]            = { .type = NLA_U32 },
        [TCA_FQ_FLOW_DEFAULT_RATE]      = { .type = NLA_U32 },
        [TCA_FQ_FLOW_MAX_RATE]          = { .type = NLA_U32 },
        [TCA_FQ_BUCKETS_LOG]            = { .type = NLA_U32 },
        [TCA_FQ_FLOW_REFILL_DELAY]      = { .type = NLA_U32 },
        [TCA_FQ_ORPHAN_MASK]            = { .type = NLA_U32 },
        [TCA_FQ_LOW_RATE_THRESHOLD]     = { .type = NLA_U32 },
};

static int fq_change(struct Qdisc *sch, struct nlattr *opt,
                     struct netlink_ext_ack *extack)
{
        struct fq_sched_data *q = qdisc_priv(sch);
        struct nlattr *tb[TCA_FQ_MAX + 1];
        int err, drop_count = 0;
        unsigned int drop_len = 0;
        u32 fq_log;

        if (!opt)
                return -EINVAL;

        err = nla_parse_nested(tb, TCA_FQ_MAX, opt, fq_policy, NULL);
        if (err < 0)
                return err;

        sch_tree_lock(sch);

        fq_log = q->fq_trees_log;

        if (tb[TCA_FQ_BUCKETS_LOG]) {
                u32 nval = nla_get_u32(tb[TCA_FQ_BUCKETS_LOG]);

                if (nval >= 1 && nval <= ilog2(256*1024))
                        fq_log = nval;
                else
                        err = -EINVAL;
        }
        if (tb[TCA_FQ_PLIMIT])
                sch->limit = nla_get_u32(tb[TCA_FQ_PLIMIT]);

        if (tb[TCA_FQ_FLOW_PLIMIT])
                q->flow_plimit = nla_get_u32(tb[TCA_FQ_FLOW_PLIMIT]);

        if (tb[TCA_FQ_QUANTUM]) {
                u32 quantum = nla_get_u32(tb[TCA_FQ_QUANTUM]);

                if (quantum > 0)
                        q->quantum = quantum;
                else
                        err = -EINVAL;
        }

        if (tb[TCA_FQ_INITIAL_QUANTUM])
                q->initial_quantum = nla_get_u32(tb[TCA_FQ_INITIAL_QUANTUM]);

        if (tb[TCA_FQ_FLOW_DEFAULT_RATE])
                pr_warn_ratelimited("sch_fq: defrate %u ignored.\n",
                                    nla_get_u32(tb[TCA_FQ_FLOW_DEFAULT_RATE]));

        if (tb[TCA_FQ_FLOW_MAX_RATE]) {
                u32 rate = nla_get_u32(tb[TCA_FQ_FLOW_MAX_RATE]);

                q->flow_max_rate = (rate == ~0U) ? ~0UL : rate;
        }
        if (tb[TCA_FQ_LOW_RATE_THRESHOLD])
                q->low_rate_threshold =
                        nla_get_u32(tb[TCA_FQ_LOW_RATE_THRESHOLD]);

        if (tb[TCA_FQ_RATE_ENABLE]) {
                u32 enable = nla_get_u32(tb[TCA_FQ_RATE_ENABLE]);

                if (enable <= 1)
                        q->rate_enable = enable;
                else
                        err = -EINVAL;
        }

        if (tb[TCA_FQ_FLOW_REFILL_DELAY]) {
                u32 usecs_delay = nla_get_u32(tb[TCA_FQ_FLOW_REFILL_DELAY]);

                q->flow_refill_delay = usecs_to_jiffies(usecs_delay);
        }

        if (tb[TCA_FQ_ORPHAN_MASK])
                q->orphan_mask = nla_get_u32(tb[TCA_FQ_ORPHAN_MASK]);

        if (!err) {
                sch_tree_unlock(sch);
                err = fq_resize(sch, fq_log);
                sch_tree_lock(sch);
        }
        while (sch->q.qlen > sch->limit) {
                struct sk_buff *skb = fq_dequeue(sch);

                if (!skb)
                        break;
                drop_len += qdisc_pkt_len(skb);
                rtnl_kfree_skbs(skb, skb);
                drop_count++;
        }
        qdisc_tree_reduce_backlog(sch, drop_count, drop_len);

        sch_tree_unlock(sch);
        return err;
}

static void fq_destroy(struct Qdisc *sch)
{
        struct fq_sched_data *q = qdisc_priv(sch);

        fq_reset(sch);
        fq_free(q->fq_root);
        qdisc_watchdog_cancel(&q->watchdog);
}

static int fq_init(struct Qdisc *sch, struct nlattr *opt,
                   struct netlink_ext_ack *extack)
{
        struct fq_sched_data *q = qdisc_priv(sch);
        int err;

        sch->limit              = 10000;
        q->flow_plimit          = 100;
        q->quantum              = 2 * psched_mtu(qdisc_dev(sch));
        q->initial_quantum      = 10 * psched_mtu(qdisc_dev(sch));
        q->flow_refill_delay    = msecs_to_jiffies(40);
        q->flow_max_rate        = ~0UL;
        q->time_next_delayed_flow = ~0ULL;
        q->rate_enable          = 1;
        q->new_flows.first      = NULL;
        q->old_flows.first      = NULL;
        q->delayed              = RB_ROOT;
        q->fq_root              = NULL;
        q->fq_trees_log         = ilog2(1024);
        q->orphan_mask          = 1024 - 1;
        q->low_rate_threshold   = 550000 / 8;
        qdisc_watchdog_init_clockid(&q->watchdog, sch, CLOCK_MONOTONIC);

        if (opt)
                err = fq_change(sch, opt, extack);
        else
                err = fq_resize(sch, q->fq_trees_log);

        return err;
}

static int fq_dump(struct Qdisc *sch, struct sk_buff *skb)
{
        struct fq_sched_data *q = qdisc_priv(sch);
        struct nlattr *opts;

        opts = nla_nest_start(skb, TCA_OPTIONS);
        if (opts == NULL)
                goto nla_put_failure;

        /* TCA_FQ_FLOW_DEFAULT_RATE is not used anymore */

        if (nla_put_u32(skb, TCA_FQ_PLIMIT, sch->limit) ||
            nla_put_u32(skb, TCA_FQ_FLOW_PLIMIT, q->flow_plimit) ||
            nla_put_u32(skb, TCA_FQ_QUANTUM, q->quantum) ||
            nla_put_u32(skb, TCA_FQ_INITIAL_QUANTUM, q->initial_quantum) ||
            nla_put_u32(skb, TCA_FQ_RATE_ENABLE, q->rate_enable) ||
            nla_put_u32(skb, TCA_FQ_FLOW_MAX_RATE,
                        min_t(unsigned long, q->flow_max_rate, ~0U)) ||
            nla_put_u32(skb, TCA_FQ_FLOW_REFILL_DELAY,
                        jiffies_to_usecs(q->flow_refill_delay)) ||
            nla_put_u32(skb, TCA_FQ_ORPHAN_MASK, q->orphan_mask) ||
            nla_put_u32(skb, TCA_FQ_LOW_RATE_THRESHOLD,
                        q->low_rate_threshold) ||
            nla_put_u32(skb, TCA_FQ_BUCKETS_LOG, q->fq_trees_log))
                goto nla_put_failure;

        return nla_nest_end(skb, opts);

nla_put_failure:
        return -1;
}

static int fq_dump_stats(struct Qdisc *sch, struct gnet_dump *d)
{
        struct fq_sched_data *q = qdisc_priv(sch);
        struct tc_fq_qd_stats st;

        sch_tree_lock(sch);

        st.gc_flows               = q->stat_gc_flows;
        st.highprio_packets       = q->stat_internal_packets;
        st.tcp_retrans            = 0;
        st.throttled              = q->stat_throttled;
        st.flows_plimit           = q->stat_flows_plimit;
        st.pkts_too_long          = q->stat_pkts_too_long;
        st.allocation_errors      = q->stat_allocation_errors;
        st.time_next_delayed_flow = q->time_next_delayed_flow - ktime_get_ns();
        st.flows                  = q->flows;
        st.inactive_flows         = q->inactive_flows;
        st.throttled_flows        = q->throttled_flows;
        st.unthrottle_latency_ns  = min_t(unsigned long,
                                          q->unthrottle_latency_ns, ~0U);
        sch_tree_unlock(sch);

        return gnet_stats_copy_app(d, &st, sizeof(st));
}

static struct Qdisc_ops fq_qdisc_ops __read_mostly = {
        .id             =       "fq",
        .priv_size      =       sizeof(struct fq_sched_data),

        .enqueue        =       fq_enqueue,
        .dequeue        =       fq_dequeue,
        .peek           =       qdisc_peek_dequeued,
        .init           =       fq_init,
        .reset          =       fq_reset,
        .destroy        =       fq_destroy,
        .change         =       fq_change,
        .dump           =       fq_dump,
        .dump_stats     =       fq_dump_stats,
        .owner          =       THIS_MODULE,
};

static int __init fq_module_init(void)
{
        int ret;

        fq_flow_cachep = kmem_cache_create("fq_flow_cache",
                                           sizeof(struct fq_flow),
                                           0, 0, NULL);
        if (!fq_flow_cachep)
                return -ENOMEM;

        ret = register_qdisc(&fq_qdisc_ops);
        if (ret)
                kmem_cache_destroy(fq_flow_cachep);
        return ret;
}

static void __exit fq_module_exit(void)
{
        unregister_qdisc(&fq_qdisc_ops);
        kmem_cache_destroy(fq_flow_cachep);
}

module_init(fq_module_init)
module_exit(fq_module_exit)
MODULE_AUTHOR("Eric Dumazet");
MODULE_LICENSE("GPL");