linux/net/sched/sch_fq.c
/*
 * net/sched/sch_fq.c Fair Queue Packet Scheduler (per flow pacing)
 *
 *  Copyright (C) 2013-2015 Eric Dumazet <edumazet@google.com>
 *
 *      This program is free software; you can redistribute it and/or
 *      modify it under the terms of the GNU General Public License
 *      as published by the Free Software Foundation; either version
 *      2 of the License, or (at your option) any later version.
 *
 *  Meant to be used mostly for locally generated traffic:
 *  fast classification depends on skb->sk being set before reaching us.
 *  If not (router workload), we fall back to the 32-bit rxhash.
 *  All packets belonging to a socket are considered a 'flow'.
 *
 *  Flows are dynamically allocated and stored in a hash table of RB trees.
 *  Each flow is also part of one Round Robin list ('new' or 'old' flows).
 *
 *  Burst avoidance (aka pacing) capability:
 *
 *  Transport (e.g. TCP) can set a rate in sk->sk_pacing_rate, enqueue a
 *  bunch of packets, and this packet scheduler adds delay between
 *  packets to respect the rate limitation.
 *
 *  enqueue():
 *   - lookup one RB tree (out of 1024 or more) to find the flow.
 *     If the flow does not exist, create it and add it to the tree.
 *     Add the skb to the flow's FIFO list of skbs.
 *   - Use a special FIFO for high priority packets.
 *
 *  dequeue(): serves flows in Round Robin.
 *  Note: when a flow becomes empty, we do not immediately remove it from
 *  the RB trees, for performance reasons (it's expected to send additional
 *  packets, or the SLAB cache will reuse the socket for another flow).
 */
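/* Illustrative usage sketch (the device name and values below are
 * placeholders, not recommendations):
 *
 *	tc qdisc replace dev eth0 root fq
 *	tc qdisc replace dev eth0 root fq maxrate 1gbit buckets 2048
 *
 * Pacing relies on the transport (e.g. TCP) filling sk->sk_pacing_rate;
 * attaching fq as the root qdisc is enough to activate it.
 */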

#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/jiffies.h>
#include <linux/string.h>
#include <linux/in.h>
#include <linux/errno.h>
#include <linux/init.h>
#include <linux/skbuff.h>
#include <linux/slab.h>
#include <linux/rbtree.h>
#include <linux/hash.h>
#include <linux/prefetch.h>
#include <linux/vmalloc.h>
#include <net/netlink.h>
#include <net/pkt_sched.h>
#include <net/sock.h>
#include <net/tcp_states.h>
#include <net/tcp.h>

/*
 * Per flow structure, dynamically allocated
 */
struct fq_flow {
        struct sk_buff  *head;          /* list of skbs for this flow: first skb */
        union {
                struct sk_buff *tail;   /* last skb in the list */
                unsigned long  age;     /* jiffies when flow was emptied, for gc */
        };
        struct rb_node  fq_node;        /* anchor in fq_root[] trees */
        struct sock     *sk;
        int             qlen;           /* number of packets in flow queue */
        int             credit;
        u32             socket_hash;    /* sk_hash */
        struct fq_flow *next;           /* next pointer in RR lists, or &detached/&throttled */

        struct rb_node  rate_node;      /* anchor in q->delayed tree */
        u64             time_next_packet;
};

struct fq_flow_head {
        struct fq_flow *first;
        struct fq_flow *last;
};

struct fq_sched_data {
        struct fq_flow_head new_flows;

        struct fq_flow_head old_flows;

        struct rb_root  delayed;        /* for rate limited flows */
        u64             time_next_delayed_flow;
        unsigned long   unthrottle_latency_ns;

        struct fq_flow  internal;       /* for non classified or high prio packets */
        u32             quantum;
        u32             initial_quantum;
        u32             flow_refill_delay;
        u32             flow_max_rate;  /* optional max rate per flow */
        u32             flow_plimit;    /* max packets per flow */
        u32             orphan_mask;    /* mask for orphaned skb */
        u32             low_rate_threshold;
        struct rb_root  *fq_root;
        u8              rate_enable;
        u8              fq_trees_log;

        u32             flows;
        u32             inactive_flows;
        u32             throttled_flows;

        u64             stat_gc_flows;
        u64             stat_internal_packets;
        u64             stat_tcp_retrans;
        u64             stat_throttled;
        u64             stat_flows_plimit;
        u64             stat_pkts_too_long;
        u64             stat_allocation_errors;
        struct qdisc_watchdog watchdog;
};

/* special values to mark a detached flow (not on old/new list)
 * or a throttled one (parked in the q->delayed rbtree)
 */
static struct fq_flow detached, throttled;

static void fq_flow_set_detached(struct fq_flow *f)
{
        f->next = &detached;
        f->age = jiffies;
}

static bool fq_flow_is_detached(const struct fq_flow *f)
{
        return f->next == &detached;
}

static bool fq_flow_is_throttled(const struct fq_flow *f)
{
        return f->next == &throttled;
}

static void fq_flow_add_tail(struct fq_flow_head *head, struct fq_flow *flow)
{
        if (head->first)
                head->last->next = flow;
        else
                head->first = flow;
        head->last = flow;
        flow->next = NULL;
}

static void fq_flow_unset_throttled(struct fq_sched_data *q, struct fq_flow *f)
{
        rb_erase(&f->rate_node, &q->delayed);
        q->throttled_flows--;
        fq_flow_add_tail(&q->old_flows, f);
}

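/* Park a flow in the rbtree of throttled flows, ordered by the time its
 * next packet is allowed to be sent (f->time_next_packet).
 */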
static void fq_flow_set_throttled(struct fq_sched_data *q, struct fq_flow *f)
{
        struct rb_node **p = &q->delayed.rb_node, *parent = NULL;

        while (*p) {
                struct fq_flow *aux;

                parent = *p;
                aux = rb_entry(parent, struct fq_flow, rate_node);
                if (f->time_next_packet >= aux->time_next_packet)
                        p = &parent->rb_right;
                else
                        p = &parent->rb_left;
        }
        rb_link_node(&f->rate_node, parent, p);
        rb_insert_color(&f->rate_node, &q->delayed);
        q->throttled_flows++;
        q->stat_throttled++;

        f->next = &throttled;
        if (q->time_next_delayed_flow > f->time_next_packet)
                q->time_next_delayed_flow = f->time_next_packet;
}


static struct kmem_cache *fq_flow_cachep __read_mostly;


/* limit number of collected flows per round */
#define FQ_GC_MAX 8
#define FQ_GC_AGE (3*HZ)
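/* A detached flow becomes a garbage collection candidate once it has been
 * idle for FQ_GC_AGE (3 seconds); at most FQ_GC_MAX such flows are reclaimed
 * per hash bucket lookup.
 */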

static bool fq_gc_candidate(const struct fq_flow *f)
{
        return fq_flow_is_detached(f) &&
               time_after(jiffies, f->age + FQ_GC_AGE);
}

static void fq_gc(struct fq_sched_data *q,
                  struct rb_root *root,
                  struct sock *sk)
{
        struct fq_flow *f, *tofree[FQ_GC_MAX];
        struct rb_node **p, *parent;
        int fcnt = 0;

        p = &root->rb_node;
        parent = NULL;
        while (*p) {
                parent = *p;

                f = rb_entry(parent, struct fq_flow, fq_node);
                if (f->sk == sk)
                        break;

                if (fq_gc_candidate(f)) {
                        tofree[fcnt++] = f;
                        if (fcnt == FQ_GC_MAX)
                                break;
                }

                if (f->sk > sk)
                        p = &parent->rb_right;
                else
                        p = &parent->rb_left;
        }

        q->flows -= fcnt;
        q->inactive_flows -= fcnt;
        q->stat_gc_flows += fcnt;
        while (fcnt) {
                struct fq_flow *f = tofree[--fcnt];

                rb_erase(&f->fq_node, root);
                kmem_cache_free(fq_flow_cachep, f);
        }
}

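/* Classification: packets are mapped to a flow keyed by their socket pointer
 * (or a fake, hash-derived pointer for orphaned packets), and flows live in
 * one of 2^fq_trees_log RB trees selected by hashing that pointer.
 */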
static struct fq_flow *fq_classify(struct sk_buff *skb, struct fq_sched_data *q)
{
        struct rb_node **p, *parent;
        struct sock *sk = skb->sk;
        struct rb_root *root;
        struct fq_flow *f;

        /* warning: no starvation prevention... */
        if (unlikely((skb->priority & TC_PRIO_MAX) == TC_PRIO_CONTROL))
                return &q->internal;

        /* SYNACK messages are attached to a TCP_NEW_SYN_RECV request socket
         * or a listener (SYNCOOKIE mode)
         * 1) request sockets are not full blown,
         *    they do not contain sk_pacing_rate
         * 2) They are not part of a 'flow' yet
         * 3) We do not want to rate limit them (eg SYNFLOOD attack),
         *    especially if the listener set SO_MAX_PACING_RATE
         * 4) We pretend they are orphaned
         */
        if (!sk || sk_listener(sk)) {
                unsigned long hash = skb_get_hash(skb) & q->orphan_mask;

                /* By forcing low order bit to 1, we make sure to not
                 * collide with a local flow (socket pointers are word aligned)
                 */
                sk = (struct sock *)((hash << 1) | 1UL);
                skb_orphan(skb);
        }

        root = &q->fq_root[hash_ptr(sk, q->fq_trees_log)];

        if (q->flows >= (2U << q->fq_trees_log) &&
            q->inactive_flows > q->flows/2)
                fq_gc(q, root, sk);

        p = &root->rb_node;
        parent = NULL;
        while (*p) {
                parent = *p;

                f = rb_entry(parent, struct fq_flow, fq_node);
                if (f->sk == sk) {
                        /* socket might have been reallocated, so check
                         * if its sk_hash is the same.
                         * If not, we need to refill credit with
                         * initial quantum
                         */
                        if (unlikely(skb->sk &&
                                     f->socket_hash != sk->sk_hash)) {
                                f->credit = q->initial_quantum;
                                f->socket_hash = sk->sk_hash;
                                if (q->rate_enable)
                                        smp_store_release(&sk->sk_pacing_status,
                                                          SK_PACING_FQ);
                                if (fq_flow_is_throttled(f))
                                        fq_flow_unset_throttled(q, f);
                                f->time_next_packet = 0ULL;
                        }
                        return f;
                }
                if (f->sk > sk)
                        p = &parent->rb_right;
                else
                        p = &parent->rb_left;
        }

        f = kmem_cache_zalloc(fq_flow_cachep, GFP_ATOMIC | __GFP_NOWARN);
        if (unlikely(!f)) {
                q->stat_allocation_errors++;
                return &q->internal;
        }
        fq_flow_set_detached(f);
        f->sk = sk;
        if (skb->sk) {
                f->socket_hash = sk->sk_hash;
                if (q->rate_enable)
                        smp_store_release(&sk->sk_pacing_status,
                                          SK_PACING_FQ);
        }
        f->credit = q->initial_quantum;

        rb_link_node(&f->fq_node, parent, p);
        rb_insert_color(&f->fq_node, root);

        q->flows++;
        q->inactive_flows++;
        return f;
}


/* remove one skb from head of flow queue */
static struct sk_buff *fq_dequeue_head(struct Qdisc *sch, struct fq_flow *flow)
{
        struct sk_buff *skb = flow->head;

        if (skb) {
                flow->head = skb->next;
                skb_mark_not_on_list(skb);
                flow->qlen--;
                qdisc_qstats_backlog_dec(sch, skb);
                sch->q.qlen--;
        }
        return skb;
}

/* We might add detection of retransmits in the future.
 * For the time being, just return false.
 */
static bool skb_is_retransmit(struct sk_buff *skb)
{
        return false;
}

/* add skb to flow queue
 * flow queue is a linked list, kind of FIFO, except for TCP retransmits
 * We special case tcp retransmits to be transmitted before other packets.
 * We rely on the fact that TCP retransmits are unlikely, so we do not waste
 * a separate queue or a pointer.
 * head->  [retrans pkt 1]
 *         [retrans pkt 2]
 *         [ normal pkt 1]
 *         [ normal pkt 2]
 *         [ normal pkt 3]
 * tail->  [ normal pkt 4]
 */
static void flow_queue_add(struct fq_flow *flow, struct sk_buff *skb)
{
        struct sk_buff *prev, *head = flow->head;

        skb->next = NULL;
        if (!head) {
                flow->head = skb;
                flow->tail = skb;
                return;
        }
        if (likely(!skb_is_retransmit(skb))) {
                flow->tail->next = skb;
                flow->tail = skb;
                return;
        }

        /* This skb is a tcp retransmit,
         * find the last retrans packet in the queue
         */
        prev = NULL;
        while (skb_is_retransmit(head)) {
                prev = head;
                head = head->next;
                if (!head)
                        break;
        }
        if (!prev) { /* no rtx packet in queue, become the new head */
                skb->next = flow->head;
                flow->head = skb;
        } else {
                if (prev == flow->tail)
                        flow->tail = skb;
                else
                        skb->next = prev->next;
                prev->next = skb;
        }
}

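/* Qdisc enqueue: find or create the flow, enforce the global and per flow
 * packet limits, and put a detached flow back on the new_flows list.
 */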
static int fq_enqueue(struct sk_buff *skb, struct Qdisc *sch,
                      struct sk_buff **to_free)
{
        struct fq_sched_data *q = qdisc_priv(sch);
        struct fq_flow *f;

        if (unlikely(sch->q.qlen >= sch->limit))
                return qdisc_drop(skb, sch, to_free);

        f = fq_classify(skb, q);
        if (unlikely(f->qlen >= q->flow_plimit && f != &q->internal)) {
                q->stat_flows_plimit++;
                return qdisc_drop(skb, sch, to_free);
        }

        f->qlen++;
        if (skb_is_retransmit(skb))
                q->stat_tcp_retrans++;
        qdisc_qstats_backlog_inc(sch, skb);
        if (fq_flow_is_detached(f)) {
                fq_flow_add_tail(&q->new_flows, f);
                if (time_after(jiffies, f->age + q->flow_refill_delay))
                        f->credit = max_t(u32, f->credit, q->quantum);
                q->inactive_flows--;
        }

        /* Note: this overwrites f->age */
        flow_queue_add(f, skb);

        if (unlikely(f == &q->internal))
                q->stat_internal_packets++;
        sch->q.qlen++;

        return NET_XMIT_SUCCESS;
}

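/* Move back to the round robin lists every throttled flow whose pacing
 * deadline has passed, and update an EWMA of how late we noticed it.
 */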
static void fq_check_throttled(struct fq_sched_data *q, u64 now)
{
        unsigned long sample;
        struct rb_node *p;

        if (q->time_next_delayed_flow > now)
                return;

        /* Update unthrottle latency EWMA.
         * This is cheap and can help diagnosing timer/latency problems.
         */
        sample = (unsigned long)(now - q->time_next_delayed_flow);
        q->unthrottle_latency_ns -= q->unthrottle_latency_ns >> 3;
        q->unthrottle_latency_ns += sample >> 3;

        q->time_next_delayed_flow = ~0ULL;
        while ((p = rb_first(&q->delayed)) != NULL) {
                struct fq_flow *f = rb_entry(p, struct fq_flow, rate_node);

                if (f->time_next_packet > now) {
                        q->time_next_delayed_flow = f->time_next_packet;
                        break;
                }
                fq_flow_unset_throttled(q, f);
        }
}

static struct sk_buff *fq_dequeue(struct Qdisc *sch)
{
        struct fq_sched_data *q = qdisc_priv(sch);
        u64 now = ktime_get_ns();
        struct fq_flow_head *head;
        struct sk_buff *skb;
        struct fq_flow *f;
        u32 rate, plen;

        skb = fq_dequeue_head(sch, &q->internal);
        if (skb)
                goto out;
        fq_check_throttled(q, now);
begin:
        head = &q->new_flows;
        if (!head->first) {
                head = &q->old_flows;
                if (!head->first) {
                        if (q->time_next_delayed_flow != ~0ULL)
                                qdisc_watchdog_schedule_ns(&q->watchdog,
                                                           q->time_next_delayed_flow);
                        return NULL;
                }
        }
        f = head->first;

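        /* Deficit Round Robin: a flow that exhausted its credit gets a fresh
         * quantum and is moved to the tail of the old flows list.
         */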
        if (f->credit <= 0) {
                f->credit += q->quantum;
                head->first = f->next;
                fq_flow_add_tail(&q->old_flows, f);
                goto begin;
        }

        skb = f->head;
        if (unlikely(skb && now < f->time_next_packet &&
                     !skb_is_tcp_pure_ack(skb))) {
                head->first = f->next;
                fq_flow_set_throttled(q, f);
                goto begin;
        }

        skb = fq_dequeue_head(sch, f);
        if (!skb) {
                head->first = f->next;
                /* force a pass through old_flows to prevent starvation */
                if ((head == &q->new_flows) && q->old_flows.first) {
                        fq_flow_add_tail(&q->old_flows, f);
                } else {
                        fq_flow_set_detached(f);
                        q->inactive_flows++;
                }
                goto begin;
        }
        prefetch(&skb->end);
        f->credit -= qdisc_pkt_len(skb);

        if (!q->rate_enable)
                goto out;

        /* Do not pace locally generated ack packets */
        if (skb_is_tcp_pure_ack(skb))
                goto out;

        rate = q->flow_max_rate;
        if (skb->sk)
                rate = min(skb->sk->sk_pacing_rate, rate);

        if (rate <= q->low_rate_threshold) {
                f->credit = 0;
                plen = qdisc_pkt_len(skb);
        } else {
                plen = max(qdisc_pkt_len(skb), q->quantum);
                if (f->credit > 0)
                        goto out;
        }
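        /* Pacing: compute the gap to leave after this packet so the flow
         * does not exceed "rate" (in bytes per second):
         *   gap_ns = plen * NSEC_PER_SEC / rate
         * For example, a 1500 byte packet at 125000 bytes/s (1 Mbit/s)
         * yields a 12 ms gap before the next packet of this flow.
         */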
        if (rate != ~0U) {
                u64 len = (u64)plen * NSEC_PER_SEC;

                if (likely(rate))
                        do_div(len, rate);
                /* Since socket rate can change later,
                 * clamp the delay to 1 second.
                 * Really, providers of too big packets should be fixed !
                 */
                if (unlikely(len > NSEC_PER_SEC)) {
                        len = NSEC_PER_SEC;
                        q->stat_pkts_too_long++;
                }
                /* Account for schedule/timers drifts.
                 * f->time_next_packet was set when prior packet was sent,
                 * and current time (@now) can be too late by tens of us.
                 */
                if (f->time_next_packet)
                        len -= min(len/2, now - f->time_next_packet);
                f->time_next_packet = now + len;
        }
out:
        qdisc_bstats_update(sch, skb);
        return skb;
}

static void fq_flow_purge(struct fq_flow *flow)
{
        rtnl_kfree_skbs(flow->head, flow->tail);
        flow->head = NULL;
        flow->qlen = 0;
}

static void fq_reset(struct Qdisc *sch)
{
        struct fq_sched_data *q = qdisc_priv(sch);
        struct rb_root *root;
        struct rb_node *p;
        struct fq_flow *f;
        unsigned int idx;

        sch->q.qlen = 0;
        sch->qstats.backlog = 0;

        fq_flow_purge(&q->internal);

        if (!q->fq_root)
                return;

        for (idx = 0; idx < (1U << q->fq_trees_log); idx++) {
                root = &q->fq_root[idx];
                while ((p = rb_first(root)) != NULL) {
                        f = rb_entry(p, struct fq_flow, fq_node);
                        rb_erase(p, root);

                        fq_flow_purge(f);

                        kmem_cache_free(fq_flow_cachep, f);
                }
        }
        q->new_flows.first      = NULL;
        q->old_flows.first      = NULL;
        q->delayed              = RB_ROOT;
        q->flows                = 0;
        q->inactive_flows       = 0;
        q->throttled_flows      = 0;
}

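/* Move every flow from the old hash array to the new one; detached flows
 * that are old enough for garbage collection are freed instead of rehashed.
 */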
static void fq_rehash(struct fq_sched_data *q,
                      struct rb_root *old_array, u32 old_log,
                      struct rb_root *new_array, u32 new_log)
{
        struct rb_node *op, **np, *parent;
        struct rb_root *oroot, *nroot;
        struct fq_flow *of, *nf;
        int fcnt = 0;
        u32 idx;

        for (idx = 0; idx < (1U << old_log); idx++) {
                oroot = &old_array[idx];
                while ((op = rb_first(oroot)) != NULL) {
                        rb_erase(op, oroot);
                        of = rb_entry(op, struct fq_flow, fq_node);
                        if (fq_gc_candidate(of)) {
                                fcnt++;
                                kmem_cache_free(fq_flow_cachep, of);
                                continue;
                        }
                        nroot = &new_array[hash_ptr(of->sk, new_log)];

                        np = &nroot->rb_node;
                        parent = NULL;
                        while (*np) {
                                parent = *np;

                                nf = rb_entry(parent, struct fq_flow, fq_node);
                                BUG_ON(nf->sk == of->sk);

                                if (nf->sk > of->sk)
                                        np = &parent->rb_right;
                                else
                                        np = &parent->rb_left;
                        }

                        rb_link_node(&of->fq_node, parent, np);
                        rb_insert_color(&of->fq_node, nroot);
                }
        }
        q->flows -= fcnt;
        q->inactive_flows -= fcnt;
        q->stat_gc_flows += fcnt;
}

static void fq_free(void *addr)
{
        kvfree(addr);
}

static int fq_resize(struct Qdisc *sch, u32 log)
{
        struct fq_sched_data *q = qdisc_priv(sch);
        struct rb_root *array;
        void *old_fq_root;
        u32 idx;

        if (q->fq_root && log == q->fq_trees_log)
                return 0;

        /* If XPS was setup, we can allocate memory on right NUMA node */
        array = kvmalloc_node(sizeof(struct rb_root) << log, GFP_KERNEL | __GFP_RETRY_MAYFAIL,
                              netdev_queue_numa_node_read(sch->dev_queue));
        if (!array)
                return -ENOMEM;

        for (idx = 0; idx < (1U << log); idx++)
                array[idx] = RB_ROOT;

        sch_tree_lock(sch);

        old_fq_root = q->fq_root;
        if (old_fq_root)
                fq_rehash(q, old_fq_root, q->fq_trees_log, array, log);

        q->fq_root = array;
        q->fq_trees_log = log;

        sch_tree_unlock(sch);

        fq_free(old_fq_root);

        return 0;
}

static const struct nla_policy fq_policy[TCA_FQ_MAX + 1] = {
        [TCA_FQ_PLIMIT]                 = { .type = NLA_U32 },
        [TCA_FQ_FLOW_PLIMIT]            = { .type = NLA_U32 },
        [TCA_FQ_QUANTUM]                = { .type = NLA_U32 },
        [TCA_FQ_INITIAL_QUANTUM]        = { .type = NLA_U32 },
        [TCA_FQ_RATE_ENABLE]            = { .type = NLA_U32 },
        [TCA_FQ_FLOW_DEFAULT_RATE]      = { .type = NLA_U32 },
        [TCA_FQ_FLOW_MAX_RATE]          = { .type = NLA_U32 },
        [TCA_FQ_BUCKETS_LOG]            = { .type = NLA_U32 },
        [TCA_FQ_FLOW_REFILL_DELAY]      = { .type = NLA_U32 },
        [TCA_FQ_ORPHAN_MASK]            = { .type = NLA_U32 },
        [TCA_FQ_LOW_RATE_THRESHOLD]     = { .type = NLA_U32 },
};

static int fq_change(struct Qdisc *sch, struct nlattr *opt,
                     struct netlink_ext_ack *extack)
{
        struct fq_sched_data *q = qdisc_priv(sch);
        struct nlattr *tb[TCA_FQ_MAX + 1];
        int err, drop_count = 0;
        unsigned int drop_len = 0;
        u32 fq_log;

        if (!opt)
                return -EINVAL;

        err = nla_parse_nested_deprecated(tb, TCA_FQ_MAX, opt, fq_policy,
                                          NULL);
        if (err < 0)
                return err;

        sch_tree_lock(sch);

        fq_log = q->fq_trees_log;

        if (tb[TCA_FQ_BUCKETS_LOG]) {
                u32 nval = nla_get_u32(tb[TCA_FQ_BUCKETS_LOG]);

                if (nval >= 1 && nval <= ilog2(256*1024))
                        fq_log = nval;
                else
                        err = -EINVAL;
        }
        if (tb[TCA_FQ_PLIMIT])
                sch->limit = nla_get_u32(tb[TCA_FQ_PLIMIT]);

        if (tb[TCA_FQ_FLOW_PLIMIT])
                q->flow_plimit = nla_get_u32(tb[TCA_FQ_FLOW_PLIMIT]);

        if (tb[TCA_FQ_QUANTUM]) {
                u32 quantum = nla_get_u32(tb[TCA_FQ_QUANTUM]);

                if (quantum > 0 && quantum <= (1 << 20)) {
                        q->quantum = quantum;
                } else {
                        NL_SET_ERR_MSG_MOD(extack, "invalid quantum");
                        err = -EINVAL;
                }
        }

        if (tb[TCA_FQ_INITIAL_QUANTUM])
                q->initial_quantum = nla_get_u32(tb[TCA_FQ_INITIAL_QUANTUM]);

        if (tb[TCA_FQ_FLOW_DEFAULT_RATE])
                pr_warn_ratelimited("sch_fq: defrate %u ignored.\n",
                                    nla_get_u32(tb[TCA_FQ_FLOW_DEFAULT_RATE]));

        if (tb[TCA_FQ_FLOW_MAX_RATE])
                q->flow_max_rate = nla_get_u32(tb[TCA_FQ_FLOW_MAX_RATE]);

        if (tb[TCA_FQ_LOW_RATE_THRESHOLD])
                q->low_rate_threshold =
                        nla_get_u32(tb[TCA_FQ_LOW_RATE_THRESHOLD]);

        if (tb[TCA_FQ_RATE_ENABLE]) {
                u32 enable = nla_get_u32(tb[TCA_FQ_RATE_ENABLE]);

                if (enable <= 1)
                        q->rate_enable = enable;
                else
                        err = -EINVAL;
        }

        if (tb[TCA_FQ_FLOW_REFILL_DELAY]) {
                u32 usecs_delay = nla_get_u32(tb[TCA_FQ_FLOW_REFILL_DELAY]);

                q->flow_refill_delay = usecs_to_jiffies(usecs_delay);
        }

        if (tb[TCA_FQ_ORPHAN_MASK])
                q->orphan_mask = nla_get_u32(tb[TCA_FQ_ORPHAN_MASK]);

        if (!err) {
                sch_tree_unlock(sch);
                err = fq_resize(sch, fq_log);
                sch_tree_lock(sch);
        }
        while (sch->q.qlen > sch->limit) {
                struct sk_buff *skb = fq_dequeue(sch);

                if (!skb)
                        break;
                drop_len += qdisc_pkt_len(skb);
                rtnl_kfree_skbs(skb, skb);
                drop_count++;
        }
        qdisc_tree_reduce_backlog(sch, drop_count, drop_len);

        sch_tree_unlock(sch);
        return err;
}

static void fq_destroy(struct Qdisc *sch)
{
        struct fq_sched_data *q = qdisc_priv(sch);

        fq_reset(sch);
        fq_free(q->fq_root);
        qdisc_watchdog_cancel(&q->watchdog);
}

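/* Defaults: a 10000 packet limit, a quantum of 2 MTUs, an initial quantum of
 * 10 MTUs (so a new flow can typically send a full TCP initial window before
 * pacing kicks in), pacing enabled, and 1024 hash buckets.
 */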
static int fq_init(struct Qdisc *sch, struct nlattr *opt,
                   struct netlink_ext_ack *extack)
{
        struct fq_sched_data *q = qdisc_priv(sch);
        int err;

        sch->limit              = 10000;
        q->flow_plimit          = 100;
        q->quantum              = 2 * psched_mtu(qdisc_dev(sch));
        q->initial_quantum      = 10 * psched_mtu(qdisc_dev(sch));
        q->flow_refill_delay    = msecs_to_jiffies(40);
        q->flow_max_rate        = ~0U;
        q->time_next_delayed_flow = ~0ULL;
        q->rate_enable          = 1;
        q->new_flows.first      = NULL;
        q->old_flows.first      = NULL;
        q->delayed              = RB_ROOT;
        q->fq_root              = NULL;
        q->fq_trees_log         = ilog2(1024);
        q->orphan_mask          = 1024 - 1;
        q->low_rate_threshold   = 550000 / 8;
        qdisc_watchdog_init(&q->watchdog, sch);

        if (opt)
                err = fq_change(sch, opt, extack);
        else
                err = fq_resize(sch, q->fq_trees_log);

        return err;
}

static int fq_dump(struct Qdisc *sch, struct sk_buff *skb)
{
        struct fq_sched_data *q = qdisc_priv(sch);
        struct nlattr *opts;

        opts = nla_nest_start_noflag(skb, TCA_OPTIONS);
        if (opts == NULL)
                goto nla_put_failure;

        /* TCA_FQ_FLOW_DEFAULT_RATE is not used anymore */

        if (nla_put_u32(skb, TCA_FQ_PLIMIT, sch->limit) ||
            nla_put_u32(skb, TCA_FQ_FLOW_PLIMIT, q->flow_plimit) ||
            nla_put_u32(skb, TCA_FQ_QUANTUM, q->quantum) ||
            nla_put_u32(skb, TCA_FQ_INITIAL_QUANTUM, q->initial_quantum) ||
            nla_put_u32(skb, TCA_FQ_RATE_ENABLE, q->rate_enable) ||
            nla_put_u32(skb, TCA_FQ_FLOW_MAX_RATE, q->flow_max_rate) ||
            nla_put_u32(skb, TCA_FQ_FLOW_REFILL_DELAY,
                        jiffies_to_usecs(q->flow_refill_delay)) ||
            nla_put_u32(skb, TCA_FQ_ORPHAN_MASK, q->orphan_mask) ||
            nla_put_u32(skb, TCA_FQ_LOW_RATE_THRESHOLD,
                        q->low_rate_threshold) ||
            nla_put_u32(skb, TCA_FQ_BUCKETS_LOG, q->fq_trees_log))
                goto nla_put_failure;

        return nla_nest_end(skb, opts);

nla_put_failure:
        return -1;
}

static int fq_dump_stats(struct Qdisc *sch, struct gnet_dump *d)
{
        struct fq_sched_data *q = qdisc_priv(sch);
        struct tc_fq_qd_stats st;

        sch_tree_lock(sch);

        st.gc_flows               = q->stat_gc_flows;
        st.highprio_packets       = q->stat_internal_packets;
        st.tcp_retrans            = q->stat_tcp_retrans;
        st.throttled              = q->stat_throttled;
        st.flows_plimit           = q->stat_flows_plimit;
        st.pkts_too_long          = q->stat_pkts_too_long;
        st.allocation_errors      = q->stat_allocation_errors;
        st.time_next_delayed_flow = q->time_next_delayed_flow - ktime_get_ns();
        st.flows                  = q->flows;
        st.inactive_flows         = q->inactive_flows;
        st.throttled_flows        = q->throttled_flows;
        st.unthrottle_latency_ns  = min_t(unsigned long,
                                          q->unthrottle_latency_ns, ~0U);
        sch_tree_unlock(sch);

        return gnet_stats_copy_app(d, &st, sizeof(st));
}
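/* These counters show up in the qdisc statistics, e.g. via
 * "tc -s qdisc show dev eth0" (illustrative device name).
 */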

static struct Qdisc_ops fq_qdisc_ops __read_mostly = {
        .id             =       "fq",
        .priv_size      =       sizeof(struct fq_sched_data),

        .enqueue        =       fq_enqueue,
        .dequeue        =       fq_dequeue,
        .peek           =       qdisc_peek_dequeued,
        .init           =       fq_init,
        .reset          =       fq_reset,
        .destroy        =       fq_destroy,
        .change         =       fq_change,
        .dump           =       fq_dump,
        .dump_stats     =       fq_dump_stats,
        .owner          =       THIS_MODULE,
};

static int __init fq_module_init(void)
{
        int ret;

        fq_flow_cachep = kmem_cache_create("fq_flow_cache",
                                           sizeof(struct fq_flow),
                                           0, 0, NULL);
        if (!fq_flow_cachep)
                return -ENOMEM;

        ret = register_qdisc(&fq_qdisc_ops);
        if (ret)
                kmem_cache_destroy(fq_flow_cachep);
        return ret;
}

static void __exit fq_module_exit(void)
{
        unregister_qdisc(&fq_qdisc_ops);
        kmem_cache_destroy(fq_flow_cachep);
}

module_init(fq_module_init)
module_exit(fq_module_exit)
MODULE_AUTHOR("Eric Dumazet");
MODULE_LICENSE("GPL");