linux/net/sched/sch_fq.c
// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * net/sched/sch_fq.c Fair Queue Packet Scheduler (per flow pacing)
 *
 *  Copyright (C) 2013-2015 Eric Dumazet <edumazet@google.com>
 *
 *  Meant to be mostly used for locally generated traffic :
 *  Fast classification depends on skb->sk being set before reaching us.
 *  If not (router workload), we use rxhash as fallback, with a 32 bit wide hash.
 *  All packets belonging to a socket are considered as a 'flow'.
 *
 *  Flows are dynamically allocated and stored in a hash table of RB trees.
 *  They are also part of one of the Round Robin 'queues' (new or old flows).
 *
 *  Burst avoidance (aka pacing) capability :
 *
 *  Transport (eg TCP) can set in sk->sk_pacing_rate a rate, enqueue a
 *  bunch of packets, and this packet scheduler adds delay between
 *  packets to respect rate limitation.
 *
 *  enqueue() :
 *   - lookup one RB tree (out of 1024 or more) to find the flow.
 *     If the flow does not exist, create it and add it to the tree.
 *     Add the skb to the per flow list of skbs (fifo).
 *   - Use a special fifo for high prio packets
 *
 *  dequeue() : serves flows in Round Robin
 *  Note : When a flow becomes empty, we do not immediately remove it from
 *  rb trees, for performance reasons (it's expected to send additional packets,
 *  or the SLAB cache will reuse the socket for another flow)
 */

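/* Example setup (illustrative only ; the device name is arbitrary) :
 *
 *   tc qdisc add dev eth0 root fq
 *   tc -s qdisc show dev eth0
 *
 * FQ can also be made the default qdisc via the net.core.default_qdisc
 * sysctl.
 */
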
#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/jiffies.h>
#include <linux/string.h>
#include <linux/in.h>
#include <linux/errno.h>
#include <linux/init.h>
#include <linux/skbuff.h>
#include <linux/slab.h>
#include <linux/rbtree.h>
#include <linux/hash.h>
#include <linux/prefetch.h>
#include <linux/vmalloc.h>
#include <net/netlink.h>
#include <net/pkt_sched.h>
#include <net/sock.h>
#include <net/tcp_states.h>
#include <net/tcp.h>

struct fq_skb_cb {
        u64             time_to_send;
};

static inline struct fq_skb_cb *fq_skb_cb(struct sk_buff *skb)
{
        qdisc_cb_private_validate(skb, sizeof(struct fq_skb_cb));
        return (struct fq_skb_cb *)qdisc_skb_cb(skb)->data;
}

/*
 * Per flow structure, dynamically allocated.
 * If packets have monotonically increasing time_to_send, they are placed in O(1)
 * in a linear list (head,tail), otherwise they are placed in a rbtree (t_root).
 */
struct fq_flow {
        struct rb_root  t_root;
        struct sk_buff  *head;          /* list of skbs for this flow : first skb */
        union {
                struct sk_buff *tail;   /* last skb in the list */
                unsigned long  age;     /* jiffies when flow was emptied, for gc */
        };
        struct rb_node  fq_node;        /* anchor in fq_root[] trees */
        struct sock     *sk;
        int             qlen;           /* number of packets in flow queue */
        int             credit;
        u32             socket_hash;    /* sk_hash */
        struct fq_flow *next;           /* next pointer in RR lists, or &detached */

        struct rb_node  rate_node;      /* anchor in q->delayed tree */
        u64             time_next_packet;
};

struct fq_flow_head {
        struct fq_flow *first;
        struct fq_flow *last;
};

struct fq_sched_data {
        struct fq_flow_head new_flows;

        struct fq_flow_head old_flows;

        struct rb_root  delayed;        /* for rate limited flows */
        u64             time_next_delayed_flow;
        unsigned long   unthrottle_latency_ns;

        struct fq_flow  internal;       /* for non classified or high prio packets */
        u32             quantum;
        u32             initial_quantum;
        u32             flow_refill_delay;
        u32             flow_plimit;    /* max packets per flow */
        unsigned long   flow_max_rate;  /* optional max rate per flow */
        u64             ce_threshold;
        u32             orphan_mask;    /* mask for orphaned skb */
        u32             low_rate_threshold;
        struct rb_root  *fq_root;
        u8              rate_enable;
        u8              fq_trees_log;

        u32             flows;
        u32             inactive_flows;
        u32             throttled_flows;

        u64             stat_gc_flows;
        u64             stat_internal_packets;
        u64             stat_throttled;
        u64             stat_ce_mark;
        u64             stat_flows_plimit;
        u64             stat_pkts_too_long;
        u64             stat_allocation_errors;
        struct qdisc_watchdog watchdog;
};

/* special values to mark a detached flow (not on old/new list)
 * or a throttled flow (parked in the q->delayed rbtree)
 */
static struct fq_flow detached, throttled;

static void fq_flow_set_detached(struct fq_flow *f)
{
        f->next = &detached;
        f->age = jiffies;
}

static bool fq_flow_is_detached(const struct fq_flow *f)
{
        return f->next == &detached;
}

static bool fq_flow_is_throttled(const struct fq_flow *f)
{
        return f->next == &throttled;
}

static void fq_flow_add_tail(struct fq_flow_head *head, struct fq_flow *flow)
{
        if (head->first)
                head->last->next = flow;
        else
                head->first = flow;
        head->last = flow;
        flow->next = NULL;
}

static void fq_flow_unset_throttled(struct fq_sched_data *q, struct fq_flow *f)
{
        rb_erase(&f->rate_node, &q->delayed);
        q->throttled_flows--;
        fq_flow_add_tail(&q->old_flows, f);
}

static void fq_flow_set_throttled(struct fq_sched_data *q, struct fq_flow *f)
{
        struct rb_node **p = &q->delayed.rb_node, *parent = NULL;

        while (*p) {
                struct fq_flow *aux;

                parent = *p;
                aux = rb_entry(parent, struct fq_flow, rate_node);
                if (f->time_next_packet >= aux->time_next_packet)
                        p = &parent->rb_right;
                else
                        p = &parent->rb_left;
        }
        rb_link_node(&f->rate_node, parent, p);
        rb_insert_color(&f->rate_node, &q->delayed);
        q->throttled_flows++;
        q->stat_throttled++;

        f->next = &throttled;
        if (q->time_next_delayed_flow > f->time_next_packet)
                q->time_next_delayed_flow = f->time_next_packet;
}


static struct kmem_cache *fq_flow_cachep __read_mostly;


/* limit number of collected flows per round */
#define FQ_GC_MAX 8
#define FQ_GC_AGE (3*HZ)

static bool fq_gc_candidate(const struct fq_flow *f)
{
        return fq_flow_is_detached(f) &&
               time_after(jiffies, f->age + FQ_GC_AGE);
}

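/* Opportunistic garbage collection, called from fq_classify() while walking
 * @root looking for @sk : free up to FQ_GC_MAX flows that have been detached
 * for more than FQ_GC_AGE.
 */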
static void fq_gc(struct fq_sched_data *q,
                  struct rb_root *root,
                  struct sock *sk)
{
        struct fq_flow *f, *tofree[FQ_GC_MAX];
        struct rb_node **p, *parent;
        int fcnt = 0;

        p = &root->rb_node;
        parent = NULL;
        while (*p) {
                parent = *p;

                f = rb_entry(parent, struct fq_flow, fq_node);
                if (f->sk == sk)
                        break;

                if (fq_gc_candidate(f)) {
                        tofree[fcnt++] = f;
                        if (fcnt == FQ_GC_MAX)
                                break;
                }

                if (f->sk > sk)
                        p = &parent->rb_right;
                else
                        p = &parent->rb_left;
        }

        q->flows -= fcnt;
        q->inactive_flows -= fcnt;
        q->stat_gc_flows += fcnt;
        while (fcnt) {
                struct fq_flow *f = tofree[--fcnt];

                rb_erase(&f->fq_node, root);
                kmem_cache_free(fq_flow_cachep, f);
        }
}

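/* Find or create the flow used for this skb.
 * Flows are keyed by socket pointer (or a hash derived pseudo pointer for
 * orphaned, listener or unconnected packets) and stored in one of the
 * (1 << fq_trees_log) RB trees.
 */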
static struct fq_flow *fq_classify(struct sk_buff *skb, struct fq_sched_data *q)
{
        struct rb_node **p, *parent;
        struct sock *sk = skb->sk;
        struct rb_root *root;
        struct fq_flow *f;

        /* warning: no starvation prevention... */
        if (unlikely((skb->priority & TC_PRIO_MAX) == TC_PRIO_CONTROL))
                return &q->internal;

        /* SYNACK messages are attached to a TCP_NEW_SYN_RECV request socket
         * or a listener (SYNCOOKIE mode)
         * 1) request sockets are not full blown,
         *    they do not contain sk_pacing_rate
         * 2) They are not part of a 'flow' yet
         * 3) We do not want to rate limit them (eg SYNFLOOD attack),
         *    especially if the listener set SO_MAX_PACING_RATE
         * 4) We pretend they are orphaned
         */
        if (!sk || sk_listener(sk)) {
                unsigned long hash = skb_get_hash(skb) & q->orphan_mask;

                /* By forcing low order bit to 1, we make sure to not
                 * collide with a local flow (socket pointers are word aligned)
                 */
                sk = (struct sock *)((hash << 1) | 1UL);
                skb_orphan(skb);
        } else if (sk->sk_state == TCP_CLOSE) {
                unsigned long hash = skb_get_hash(skb) & q->orphan_mask;
                /*
                 * Sockets in TCP_CLOSE are non connected.
                 * Typical use case is UDP sockets, they can send packets
                 * with sendto() to many different destinations.
                 * We probably could use a generic bit advertising
                 * non connected sockets, instead of sk_state == TCP_CLOSE,
                 * if we care enough.
                 */
                sk = (struct sock *)((hash << 1) | 1UL);
        }

        root = &q->fq_root[hash_ptr(sk, q->fq_trees_log)];

        if (q->flows >= (2U << q->fq_trees_log) &&
            q->inactive_flows > q->flows/2)
                fq_gc(q, root, sk);

        p = &root->rb_node;
        parent = NULL;
        while (*p) {
                parent = *p;

                f = rb_entry(parent, struct fq_flow, fq_node);
                if (f->sk == sk) {
                        /* socket might have been reallocated, so check
                         * if its sk_hash is the same.
                         * If not, we need to refill credit with
                         * initial quantum
                         */
                        if (unlikely(skb->sk == sk &&
                                     f->socket_hash != sk->sk_hash)) {
                                f->credit = q->initial_quantum;
                                f->socket_hash = sk->sk_hash;
                                if (fq_flow_is_throttled(f))
                                        fq_flow_unset_throttled(q, f);
                                f->time_next_packet = 0ULL;
                        }
                        return f;
                }
                if (f->sk > sk)
                        p = &parent->rb_right;
                else
                        p = &parent->rb_left;
        }

        f = kmem_cache_zalloc(fq_flow_cachep, GFP_ATOMIC | __GFP_NOWARN);
        if (unlikely(!f)) {
                q->stat_allocation_errors++;
                return &q->internal;
        }
        /* f->t_root is already zeroed after kmem_cache_zalloc() */

        fq_flow_set_detached(f);
        f->sk = sk;
        if (skb->sk == sk)
                f->socket_hash = sk->sk_hash;
        f->credit = q->initial_quantum;

        rb_link_node(&f->fq_node, parent, p);
        rb_insert_color(&f->fq_node, root);

        q->flows++;
        q->inactive_flows++;
        return f;
}

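/* Return (without removing) the skb with the smallest time_to_send,
 * considering both the linear list head and the t_root rb tree.
 */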
static struct sk_buff *fq_peek(struct fq_flow *flow)
{
        struct sk_buff *skb = skb_rb_first(&flow->t_root);
        struct sk_buff *head = flow->head;

        if (!skb)
                return head;

        if (!head)
                return skb;

        if (fq_skb_cb(skb)->time_to_send < fq_skb_cb(head)->time_to_send)
                return skb;
        return head;
}

static void fq_erase_head(struct Qdisc *sch, struct fq_flow *flow,
                          struct sk_buff *skb)
{
        if (skb == flow->head) {
                flow->head = skb->next;
        } else {
                rb_erase(&skb->rbnode, &flow->t_root);
                skb->dev = qdisc_dev(sch);
        }
}

/* remove one skb from head of flow queue */
static struct sk_buff *fq_dequeue_head(struct Qdisc *sch, struct fq_flow *flow)
{
        struct sk_buff *skb = fq_peek(flow);

        if (skb) {
                fq_erase_head(sch, flow, skb);
                skb_mark_not_on_list(skb);
                flow->qlen--;
                qdisc_qstats_backlog_dec(sch, skb);
                sch->q.qlen--;
        }
        return skb;
}

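/* Add one skb to the flow queue : skbs with monotonically increasing
 * time_to_send are appended to the (head,tail) linear list in O(1),
 * out of order ones go into the t_root rb tree.
 */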
static void flow_queue_add(struct fq_flow *flow, struct sk_buff *skb)
{
        struct rb_node **p, *parent;
        struct sk_buff *head, *aux;

        fq_skb_cb(skb)->time_to_send = skb->tstamp ?: ktime_get_ns();

        head = flow->head;
        if (!head ||
            fq_skb_cb(skb)->time_to_send >= fq_skb_cb(flow->tail)->time_to_send) {
                if (!head)
                        flow->head = skb;
                else
                        flow->tail->next = skb;
                flow->tail = skb;
                skb->next = NULL;
                return;
        }

        p = &flow->t_root.rb_node;
        parent = NULL;

        while (*p) {
                parent = *p;
                aux = rb_to_skb(parent);
                if (fq_skb_cb(skb)->time_to_send >= fq_skb_cb(aux)->time_to_send)
                        p = &parent->rb_right;
                else
                        p = &parent->rb_left;
        }
        rb_link_node(&skb->rbnode, parent, p);
        rb_insert_color(&skb->rbnode, &flow->t_root);
}

static int fq_enqueue(struct sk_buff *skb, struct Qdisc *sch,
                      struct sk_buff **to_free)
{
        struct fq_sched_data *q = qdisc_priv(sch);
        struct fq_flow *f;

        if (unlikely(sch->q.qlen >= sch->limit))
                return qdisc_drop(skb, sch, to_free);

        f = fq_classify(skb, q);
        if (unlikely(f->qlen >= q->flow_plimit && f != &q->internal)) {
                q->stat_flows_plimit++;
                return qdisc_drop(skb, sch, to_free);
        }

        f->qlen++;
        qdisc_qstats_backlog_inc(sch, skb);
        if (fq_flow_is_detached(f)) {
                struct sock *sk = skb->sk;

                fq_flow_add_tail(&q->new_flows, f);
                if (time_after(jiffies, f->age + q->flow_refill_delay))
                        f->credit = max_t(u32, f->credit, q->quantum);
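                /* Let the socket know fq is pacing its packets,
                 * so that eg TCP does not also perform internal pacing.
                 */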
                if (sk && q->rate_enable) {
                        if (unlikely(smp_load_acquire(&sk->sk_pacing_status) !=
                                     SK_PACING_FQ))
                                smp_store_release(&sk->sk_pacing_status,
                                                  SK_PACING_FQ);
                }
                q->inactive_flows--;
        }

        /* Note: this overwrites f->age */
        flow_queue_add(f, skb);

        if (unlikely(f == &q->internal)) {
                q->stat_internal_packets++;
        }
        sch->q.qlen++;

        return NET_XMIT_SUCCESS;
}

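/* Move back to the old_flows list every throttled flow whose pacing deadline
 * (time_next_packet) is now in the past, and update time_next_delayed_flow
 * for the watchdog timer.
 */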
static void fq_check_throttled(struct fq_sched_data *q, u64 now)
{
        unsigned long sample;
        struct rb_node *p;

        if (q->time_next_delayed_flow > now)
                return;

        /* Update unthrottle latency EWMA.
         * This is cheap and can help diagnosing timer/latency problems.
         */
        sample = (unsigned long)(now - q->time_next_delayed_flow);
        q->unthrottle_latency_ns -= q->unthrottle_latency_ns >> 3;
        q->unthrottle_latency_ns += sample >> 3;

        q->time_next_delayed_flow = ~0ULL;
        while ((p = rb_first(&q->delayed)) != NULL) {
                struct fq_flow *f = rb_entry(p, struct fq_flow, rate_node);

                if (f->time_next_packet > now) {
                        q->time_next_delayed_flow = f->time_next_packet;
                        break;
                }
                fq_flow_unset_throttled(q, f);
        }
}

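/* Dequeue one packet : serve the internal (high prio) queue first, then
 * new/old flows in Round Robin. A flow consumes credit for each byte sent
 * and gets a quantum refill once its credit is exhausted ; a flow whose
 * next packet is in the future is parked in the q->delayed tree until its
 * pacing deadline.
 */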
static struct sk_buff *fq_dequeue(struct Qdisc *sch)
{
        struct fq_sched_data *q = qdisc_priv(sch);
        struct fq_flow_head *head;
        struct sk_buff *skb;
        struct fq_flow *f;
        unsigned long rate;
        u32 plen;
        u64 now;

        if (!sch->q.qlen)
                return NULL;

        skb = fq_dequeue_head(sch, &q->internal);
        if (skb)
                goto out;

        now = ktime_get_ns();
        fq_check_throttled(q, now);
begin:
        head = &q->new_flows;
        if (!head->first) {
                head = &q->old_flows;
                if (!head->first) {
                        if (q->time_next_delayed_flow != ~0ULL)
                                qdisc_watchdog_schedule_ns(&q->watchdog,
                                                           q->time_next_delayed_flow);
                        return NULL;
                }
        }
        f = head->first;

        if (f->credit <= 0) {
                f->credit += q->quantum;
                head->first = f->next;
                fq_flow_add_tail(&q->old_flows, f);
                goto begin;
        }

        skb = fq_peek(f);
        if (skb) {
                u64 time_next_packet = max_t(u64, fq_skb_cb(skb)->time_to_send,
                                             f->time_next_packet);

                if (now < time_next_packet) {
                        head->first = f->next;
                        f->time_next_packet = time_next_packet;
                        fq_flow_set_throttled(q, f);
                        goto begin;
                }
                if (time_next_packet &&
                    (s64)(now - time_next_packet - q->ce_threshold) > 0) {
                        INET_ECN_set_ce(skb);
                        q->stat_ce_mark++;
                }
        }

        skb = fq_dequeue_head(sch, f);
        if (!skb) {
                head->first = f->next;
                /* force a pass through old_flows to prevent starvation */
                if ((head == &q->new_flows) && q->old_flows.first) {
                        fq_flow_add_tail(&q->old_flows, f);
                } else {
                        fq_flow_set_detached(f);
                        q->inactive_flows++;
                }
                goto begin;
        }
        prefetch(&skb->end);
        plen = qdisc_pkt_len(skb);
        f->credit -= plen;

        if (!q->rate_enable)
                goto out;

        rate = q->flow_max_rate;

        /* If EDT time was provided for this skb, we need to
         * update f->time_next_packet only if this qdisc enforces
         * a flow max rate.
         */
        if (!skb->tstamp) {
                if (skb->sk)
                        rate = min(skb->sk->sk_pacing_rate, rate);

                if (rate <= q->low_rate_threshold) {
                        f->credit = 0;
                } else {
                        plen = max(plen, q->quantum);
                        if (f->credit > 0)
                                goto out;
                }
        }
        if (rate != ~0UL) {
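                /* Pacing delay is plen / rate, converted to nsec.
                 * For example, a 1500 byte packet at a pacing rate of
                 * 1.5 MBytes/sec is scheduled about 1 ms after the
                 * previous packet.
                 */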
                u64 len = (u64)plen * NSEC_PER_SEC;

                if (likely(rate))
                        len = div64_ul(len, rate);
                /* Since socket rate can change later,
                 * clamp the delay to 1 second.
                 * Really, providers of too big packets should be fixed !
                 */
                if (unlikely(len > NSEC_PER_SEC)) {
                        len = NSEC_PER_SEC;
                        q->stat_pkts_too_long++;
                }
                /* Account for schedule/timers drifts.
                 * f->time_next_packet was set when prior packet was sent,
                 * and current time (@now) can be too late by tens of us.
                 */
                if (f->time_next_packet)
                        len -= min(len/2, now - f->time_next_packet);
                f->time_next_packet = now + len;
        }
out:
        qdisc_bstats_update(sch, skb);
        return skb;
}

static void fq_flow_purge(struct fq_flow *flow)
{
        struct rb_node *p = rb_first(&flow->t_root);

        while (p) {
                struct sk_buff *skb = rb_to_skb(p);

                p = rb_next(p);
                rb_erase(&skb->rbnode, &flow->t_root);
                rtnl_kfree_skbs(skb, skb);
        }
        rtnl_kfree_skbs(flow->head, flow->tail);
        flow->head = NULL;
        flow->qlen = 0;
}

static void fq_reset(struct Qdisc *sch)
{
        struct fq_sched_data *q = qdisc_priv(sch);
        struct rb_root *root;
        struct rb_node *p;
        struct fq_flow *f;
        unsigned int idx;

        sch->q.qlen = 0;
        sch->qstats.backlog = 0;

        fq_flow_purge(&q->internal);

        if (!q->fq_root)
                return;

        for (idx = 0; idx < (1U << q->fq_trees_log); idx++) {
                root = &q->fq_root[idx];
                while ((p = rb_first(root)) != NULL) {
                        f = rb_entry(p, struct fq_flow, fq_node);
                        rb_erase(p, root);

                        fq_flow_purge(f);

                        kmem_cache_free(fq_flow_cachep, f);
                }
        }
        q->new_flows.first      = NULL;
        q->old_flows.first      = NULL;
        q->delayed              = RB_ROOT;
        q->flows                = 0;
        q->inactive_flows       = 0;
        q->throttled_flows      = 0;
}

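/* Move all flows from the old hash array to the new one ; flows that became
 * gc candidates are freed instead of being reinserted.
 * Called under sch_tree_lock() from fq_resize().
 */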
static void fq_rehash(struct fq_sched_data *q,
                      struct rb_root *old_array, u32 old_log,
                      struct rb_root *new_array, u32 new_log)
{
        struct rb_node *op, **np, *parent;
        struct rb_root *oroot, *nroot;
        struct fq_flow *of, *nf;
        int fcnt = 0;
        u32 idx;

        for (idx = 0; idx < (1U << old_log); idx++) {
                oroot = &old_array[idx];
                while ((op = rb_first(oroot)) != NULL) {
                        rb_erase(op, oroot);
                        of = rb_entry(op, struct fq_flow, fq_node);
                        if (fq_gc_candidate(of)) {
                                fcnt++;
                                kmem_cache_free(fq_flow_cachep, of);
                                continue;
                        }
                        nroot = &new_array[hash_ptr(of->sk, new_log)];

                        np = &nroot->rb_node;
                        parent = NULL;
                        while (*np) {
                                parent = *np;

                                nf = rb_entry(parent, struct fq_flow, fq_node);
                                BUG_ON(nf->sk == of->sk);

                                if (nf->sk > of->sk)
                                        np = &parent->rb_right;
                                else
                                        np = &parent->rb_left;
                        }

                        rb_link_node(&of->fq_node, parent, np);
                        rb_insert_color(&of->fq_node, nroot);
                }
        }
        q->flows -= fcnt;
        q->inactive_flows -= fcnt;
        q->stat_gc_flows += fcnt;
}

static void fq_free(void *addr)
{
        kvfree(addr);
}

static int fq_resize(struct Qdisc *sch, u32 log)
{
        struct fq_sched_data *q = qdisc_priv(sch);
        struct rb_root *array;
        void *old_fq_root;
        u32 idx;

        if (q->fq_root && log == q->fq_trees_log)
                return 0;

        /* If XPS was setup, we can allocate memory on right NUMA node */
        array = kvmalloc_node(sizeof(struct rb_root) << log, GFP_KERNEL | __GFP_RETRY_MAYFAIL,
                              netdev_queue_numa_node_read(sch->dev_queue));
        if (!array)
                return -ENOMEM;

        for (idx = 0; idx < (1U << log); idx++)
                array[idx] = RB_ROOT;

        sch_tree_lock(sch);

        old_fq_root = q->fq_root;
        if (old_fq_root)
                fq_rehash(q, old_fq_root, q->fq_trees_log, array, log);

        q->fq_root = array;
        q->fq_trees_log = log;

        sch_tree_unlock(sch);

        fq_free(old_fq_root);

        return 0;
}

static const struct nla_policy fq_policy[TCA_FQ_MAX + 1] = {
        [TCA_FQ_PLIMIT]                 = { .type = NLA_U32 },
        [TCA_FQ_FLOW_PLIMIT]            = { .type = NLA_U32 },
        [TCA_FQ_QUANTUM]                = { .type = NLA_U32 },
        [TCA_FQ_INITIAL_QUANTUM]        = { .type = NLA_U32 },
        [TCA_FQ_RATE_ENABLE]            = { .type = NLA_U32 },
        [TCA_FQ_FLOW_DEFAULT_RATE]      = { .type = NLA_U32 },
        [TCA_FQ_FLOW_MAX_RATE]          = { .type = NLA_U32 },
        [TCA_FQ_BUCKETS_LOG]            = { .type = NLA_U32 },
        [TCA_FQ_FLOW_REFILL_DELAY]      = { .type = NLA_U32 },
        [TCA_FQ_ORPHAN_MASK]            = { .type = NLA_U32 },
        [TCA_FQ_LOW_RATE_THRESHOLD]     = { .type = NLA_U32 },
        [TCA_FQ_CE_THRESHOLD]           = { .type = NLA_U32 },
};

static int fq_change(struct Qdisc *sch, struct nlattr *opt,
                     struct netlink_ext_ack *extack)
{
        struct fq_sched_data *q = qdisc_priv(sch);
        struct nlattr *tb[TCA_FQ_MAX + 1];
        int err, drop_count = 0;
        unsigned drop_len = 0;
        u32 fq_log;

        if (!opt)
                return -EINVAL;

        err = nla_parse_nested_deprecated(tb, TCA_FQ_MAX, opt, fq_policy,
                                          NULL);
        if (err < 0)
                return err;

        sch_tree_lock(sch);

        fq_log = q->fq_trees_log;

        if (tb[TCA_FQ_BUCKETS_LOG]) {
                u32 nval = nla_get_u32(tb[TCA_FQ_BUCKETS_LOG]);

                if (nval >= 1 && nval <= ilog2(256*1024))
                        fq_log = nval;
                else
                        err = -EINVAL;
        }
        if (tb[TCA_FQ_PLIMIT])
                sch->limit = nla_get_u32(tb[TCA_FQ_PLIMIT]);

        if (tb[TCA_FQ_FLOW_PLIMIT])
                q->flow_plimit = nla_get_u32(tb[TCA_FQ_FLOW_PLIMIT]);

        if (tb[TCA_FQ_QUANTUM]) {
                u32 quantum = nla_get_u32(tb[TCA_FQ_QUANTUM]);

                if (quantum > 0)
                        q->quantum = quantum;
                else
                        err = -EINVAL;
        }

        if (tb[TCA_FQ_INITIAL_QUANTUM])
                q->initial_quantum = nla_get_u32(tb[TCA_FQ_INITIAL_QUANTUM]);

        if (tb[TCA_FQ_FLOW_DEFAULT_RATE])
                pr_warn_ratelimited("sch_fq: defrate %u ignored.\n",
                                    nla_get_u32(tb[TCA_FQ_FLOW_DEFAULT_RATE]));

        if (tb[TCA_FQ_FLOW_MAX_RATE]) {
                u32 rate = nla_get_u32(tb[TCA_FQ_FLOW_MAX_RATE]);

                q->flow_max_rate = (rate == ~0U) ? ~0UL : rate;
        }
        if (tb[TCA_FQ_LOW_RATE_THRESHOLD])
                q->low_rate_threshold =
                        nla_get_u32(tb[TCA_FQ_LOW_RATE_THRESHOLD]);

        if (tb[TCA_FQ_RATE_ENABLE]) {
                u32 enable = nla_get_u32(tb[TCA_FQ_RATE_ENABLE]);

                if (enable <= 1)
                        q->rate_enable = enable;
                else
                        err = -EINVAL;
        }

        if (tb[TCA_FQ_FLOW_REFILL_DELAY]) {
                u32 usecs_delay = nla_get_u32(tb[TCA_FQ_FLOW_REFILL_DELAY]);

                q->flow_refill_delay = usecs_to_jiffies(usecs_delay);
        }

        if (tb[TCA_FQ_ORPHAN_MASK])
                q->orphan_mask = nla_get_u32(tb[TCA_FQ_ORPHAN_MASK]);

        if (tb[TCA_FQ_CE_THRESHOLD])
                q->ce_threshold = (u64)NSEC_PER_USEC *
                                  nla_get_u32(tb[TCA_FQ_CE_THRESHOLD]);

        if (!err) {
                sch_tree_unlock(sch);
                err = fq_resize(sch, fq_log);
                sch_tree_lock(sch);
        }
        while (sch->q.qlen > sch->limit) {
                struct sk_buff *skb = fq_dequeue(sch);

                if (!skb)
                        break;
                drop_len += qdisc_pkt_len(skb);
                rtnl_kfree_skbs(skb, skb);
                drop_count++;
        }
        qdisc_tree_reduce_backlog(sch, drop_count, drop_len);

        sch_tree_unlock(sch);
        return err;
}

static void fq_destroy(struct Qdisc *sch)
{
        struct fq_sched_data *q = qdisc_priv(sch);

        fq_reset(sch);
        fq_free(q->fq_root);
        qdisc_watchdog_cancel(&q->watchdog);
}

static int fq_init(struct Qdisc *sch, struct nlattr *opt,
                   struct netlink_ext_ack *extack)
{
        struct fq_sched_data *q = qdisc_priv(sch);
        int err;

        sch->limit              = 10000;
        q->flow_plimit          = 100;
        q->quantum              = 2 * psched_mtu(qdisc_dev(sch));
        q->initial_quantum      = 10 * psched_mtu(qdisc_dev(sch));
        q->flow_refill_delay    = msecs_to_jiffies(40);
        q->flow_max_rate        = ~0UL;
        q->time_next_delayed_flow = ~0ULL;
        q->rate_enable          = 1;
        q->new_flows.first      = NULL;
        q->old_flows.first      = NULL;
        q->delayed              = RB_ROOT;
        q->fq_root              = NULL;
        q->fq_trees_log         = ilog2(1024);
        q->orphan_mask          = 1024 - 1;
        q->low_rate_threshold   = 550000 / 8;

        /* Default ce_threshold of 4294 seconds */
        q->ce_threshold         = (u64)NSEC_PER_USEC * ~0U;

        qdisc_watchdog_init_clockid(&q->watchdog, sch, CLOCK_MONOTONIC);

        if (opt)
                err = fq_change(sch, opt, extack);
        else
                err = fq_resize(sch, q->fq_trees_log);

        return err;
}

static int fq_dump(struct Qdisc *sch, struct sk_buff *skb)
{
        struct fq_sched_data *q = qdisc_priv(sch);
        u64 ce_threshold = q->ce_threshold;
        struct nlattr *opts;

        opts = nla_nest_start_noflag(skb, TCA_OPTIONS);
        if (opts == NULL)
                goto nla_put_failure;

        /* TCA_FQ_FLOW_DEFAULT_RATE is not used anymore */

        do_div(ce_threshold, NSEC_PER_USEC);

        if (nla_put_u32(skb, TCA_FQ_PLIMIT, sch->limit) ||
            nla_put_u32(skb, TCA_FQ_FLOW_PLIMIT, q->flow_plimit) ||
            nla_put_u32(skb, TCA_FQ_QUANTUM, q->quantum) ||
            nla_put_u32(skb, TCA_FQ_INITIAL_QUANTUM, q->initial_quantum) ||
            nla_put_u32(skb, TCA_FQ_RATE_ENABLE, q->rate_enable) ||
            nla_put_u32(skb, TCA_FQ_FLOW_MAX_RATE,
                        min_t(unsigned long, q->flow_max_rate, ~0U)) ||
            nla_put_u32(skb, TCA_FQ_FLOW_REFILL_DELAY,
                        jiffies_to_usecs(q->flow_refill_delay)) ||
            nla_put_u32(skb, TCA_FQ_ORPHAN_MASK, q->orphan_mask) ||
            nla_put_u32(skb, TCA_FQ_LOW_RATE_THRESHOLD,
                        q->low_rate_threshold) ||
            nla_put_u32(skb, TCA_FQ_CE_THRESHOLD, (u32)ce_threshold) ||
            nla_put_u32(skb, TCA_FQ_BUCKETS_LOG, q->fq_trees_log))
                goto nla_put_failure;

        return nla_nest_end(skb, opts);

nla_put_failure:
        return -1;
}

static int fq_dump_stats(struct Qdisc *sch, struct gnet_dump *d)
{
        struct fq_sched_data *q = qdisc_priv(sch);
        struct tc_fq_qd_stats st;

        sch_tree_lock(sch);

        st.gc_flows               = q->stat_gc_flows;
        st.highprio_packets       = q->stat_internal_packets;
        st.tcp_retrans            = 0;
        st.throttled              = q->stat_throttled;
        st.flows_plimit           = q->stat_flows_plimit;
        st.pkts_too_long          = q->stat_pkts_too_long;
        st.allocation_errors      = q->stat_allocation_errors;
        st.time_next_delayed_flow = q->time_next_delayed_flow - ktime_get_ns();
        st.flows                  = q->flows;
        st.inactive_flows         = q->inactive_flows;
        st.throttled_flows        = q->throttled_flows;
        st.unthrottle_latency_ns  = min_t(unsigned long,
                                          q->unthrottle_latency_ns, ~0U);
        st.ce_mark                = q->stat_ce_mark;
        sch_tree_unlock(sch);

        return gnet_stats_copy_app(d, &st, sizeof(st));
}

static struct Qdisc_ops fq_qdisc_ops __read_mostly = {
        .id             =       "fq",
        .priv_size      =       sizeof(struct fq_sched_data),

        .enqueue        =       fq_enqueue,
        .dequeue        =       fq_dequeue,
        .peek           =       qdisc_peek_dequeued,
        .init           =       fq_init,
        .reset          =       fq_reset,
        .destroy        =       fq_destroy,
        .change         =       fq_change,
        .dump           =       fq_dump,
        .dump_stats     =       fq_dump_stats,
        .owner          =       THIS_MODULE,
};

static int __init fq_module_init(void)
{
        int ret;

        fq_flow_cachep = kmem_cache_create("fq_flow_cache",
                                           sizeof(struct fq_flow),
                                           0, 0, NULL);
        if (!fq_flow_cachep)
                return -ENOMEM;

        ret = register_qdisc(&fq_qdisc_ops);
        if (ret)
                kmem_cache_destroy(fq_flow_cachep);
        return ret;
}

static void __exit fq_module_exit(void)
{
        unregister_qdisc(&fq_qdisc_ops);
        kmem_cache_destroy(fq_flow_cachep);
}

module_init(fq_module_init)
module_exit(fq_module_exit)
MODULE_AUTHOR("Eric Dumazet");
MODULE_LICENSE("GPL");