// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * net/sched/sch_gred.c Generic Random Early Detection queue.
 *
 * Authors:    J Hadi Salim (hadi@cyberus.ca) 1998-2002
 *
 *             991129: -  Bug fix with grio mode
 *                     - a better single AvgQ mode with Grio (WRED)
 *                     - A finer grained VQ dequeue based on suggestion
 *                       from Ren Liu
 *                     - More error checks
 *
 *  For all the glorious comments look at include/net/red.h
 */
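
/*
 * Example configuration (iproute2 syntax; illustrative only, see
 * tc-gred(8) for the authoritative options):
 *
 *      # declare the table: 4 virtual queues (DPs), DP 0 as default,
 *      # priority-based (grio) mode
 *      tc qdisc add dev eth0 root gred setup DPs 4 default 0 grio
 *
 *      # then parameterize each virtual queue individually
 *      tc qdisc change dev eth0 root gred DP 0 prio 2 limit 60KB \
 *              min 15KB max 45KB burst 20 avpkt 1000 \
 *              bandwidth 10Mbit probability 0.02
 *
 * Packets are steered to a DP through the low bits of skb->tc_index,
 * e.g. as set by the dsmark qdisc.
 */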

#include <linux/slab.h>
#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/skbuff.h>
#include <net/pkt_cls.h>
#include <net/pkt_sched.h>
#include <net/red.h>

#define GRED_DEF_PRIO (MAX_DPs / 2)
#define GRED_VQ_MASK (MAX_DPs - 1)

#define GRED_VQ_RED_FLAGS       (TC_RED_ECN | TC_RED_HARDDROP)

struct gred_sched_data;
struct gred_sched;

struct gred_sched_data {
        u32             limit;          /* HARD maximal queue length    */
        u32             DP;             /* drop precedence (index of this VQ) */
        u32             red_flags;      /* virtualQ version of red_flags */
        u64             bytesin;        /* bytes seen on virtualQ so far */
        u32             packetsin;      /* packets seen on virtualQ so far */
        u32             backlog;        /* bytes on the virtualQ */
        u8              prio;           /* the prio of this vq */

        struct red_parms parms;
        struct red_vars  vars;
        struct red_stats stats;
};

enum {
        GRED_WRED_MODE = 1,
        GRED_RIO_MODE,
};

struct gred_sched {
        struct gred_sched_data *tab[MAX_DPs];
        unsigned long   flags;
        u32             red_flags;
        u32             DPs;
        u32             def;
        struct red_vars wred_set;
};

static inline int gred_wred_mode(struct gred_sched *table)
{
        return test_bit(GRED_WRED_MODE, &table->flags);
}

static inline void gred_enable_wred_mode(struct gred_sched *table)
{
        __set_bit(GRED_WRED_MODE, &table->flags);
}

static inline void gred_disable_wred_mode(struct gred_sched *table)
{
        __clear_bit(GRED_WRED_MODE, &table->flags);
}

static inline int gred_rio_mode(struct gred_sched *table)
{
        return test_bit(GRED_RIO_MODE, &table->flags);
}

static inline void gred_enable_rio_mode(struct gred_sched *table)
{
        __set_bit(GRED_RIO_MODE, &table->flags);
}

static inline void gred_disable_rio_mode(struct gred_sched *table)
{
        __clear_bit(GRED_RIO_MODE, &table->flags);
}

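/* Returns 1 when at least two configured VQs share a priority, in which
 * case the callers switch the qdisc into WRED mode: all VQs then use a
 * single shared average queue, kept in table->wred_set.
 */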
static inline int gred_wred_mode_check(struct Qdisc *sch)
{
        struct gred_sched *table = qdisc_priv(sch);
        int i;

        /* Really ugly O(n^2), but it shouldn't run frequently. */
        for (i = 0; i < table->DPs; i++) {
                struct gred_sched_data *q = table->tab[i];
                int n;

                if (q == NULL)
                        continue;

                for (n = i + 1; n < table->DPs; n++)
                        if (table->tab[n] && table->tab[n]->prio == q->prio)
                                return 1;
        }

        return 0;
}

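/* In WRED mode the RED math runs on the backlog of the whole qdisc;
 * otherwise every virtual queue is accounted separately.
 */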
static inline unsigned int gred_backlog(struct gred_sched *table,
                                        struct gred_sched_data *q,
                                        struct Qdisc *sch)
{
        if (gred_wred_mode(table))
                return sch->qstats.backlog;
        else
                return q->backlog;
}

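/* The low-order bits of skb->tc_index select the virtual queue (DP). */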
static inline u16 tc_index_to_dp(struct sk_buff *skb)
{
        return skb->tc_index & GRED_VQ_MASK;
}

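/* In WRED mode the shared average and idle-time state live in
 * table->wred_set; they are copied into the VQ before the RED
 * computation and written back afterwards.
 */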
static inline void gred_load_wred_set(const struct gred_sched *table,
                                      struct gred_sched_data *q)
{
        q->vars.qavg = table->wred_set.qavg;
        q->vars.qidlestart = table->wred_set.qidlestart;
}

static inline void gred_store_wred_set(struct gred_sched *table,
                                       struct gred_sched_data *q)
{
        table->wred_set.qavg = q->vars.qavg;
        table->wred_set.qidlestart = q->vars.qidlestart;
}

static int gred_use_ecn(struct gred_sched_data *q)
{
        return q->red_flags & TC_RED_ECN;
}

static int gred_use_harddrop(struct gred_sched_data *q)
{
        return q->red_flags & TC_RED_HARDDROP;
}

static bool gred_per_vq_red_flags_used(struct gred_sched *table)
{
        unsigned int i;

        /* Per-VQ flags can only have been set while the global flags were 0 */
        if (table->red_flags)
                return false;
        for (i = 0; i < MAX_DPs; i++)
                if (table->tab[i] && table->tab[i]->red_flags)
                        return true;
        return false;
}

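/* Enqueue: map the skb to its VQ via tc_index, falling back to the
 * default DP (or a plain tail-enqueue when no default is configured),
 * update the average queue length and apply the RED verdict.  In RIO
 * mode (without WRED) the averages of all VQs with a numerically lower
 * prio are added in, so a VQ also sees the congestion of the
 * higher-priority classes.  The EWMA itself is red_calc_qavg() from
 * include/net/red.h.
 */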
static int gred_enqueue(struct sk_buff *skb, struct Qdisc *sch,
                        struct sk_buff **to_free)
{
        struct gred_sched_data *q = NULL;
        struct gred_sched *t = qdisc_priv(sch);
        unsigned long qavg = 0;
        u16 dp = tc_index_to_dp(skb);

        if (dp >= t->DPs || (q = t->tab[dp]) == NULL) {
                dp = t->def;

                q = t->tab[dp];
                if (!q) {
                        /* Pass through packets not assigned to a DP
                         * if no default DP has been configured. This
                         * allows for DP flows to be left untouched.
                         */
                        if (likely(sch->qstats.backlog + qdisc_pkt_len(skb) <=
                                        sch->limit))
                                return qdisc_enqueue_tail(skb, sch);
                        else
                                goto drop;
                }

                /* fix tc_index? --could be controversial but needed for
                 * requeueing
                 */
                skb->tc_index = (skb->tc_index & ~GRED_VQ_MASK) | dp;
        }

        /* sum up the qavgs of all VQs with a lower prio to get the new qavg */
        if (!gred_wred_mode(t) && gred_rio_mode(t)) {
                int i;

                for (i = 0; i < t->DPs; i++) {
                        if (t->tab[i] && t->tab[i]->prio < q->prio &&
                            !red_is_idling(&t->tab[i]->vars))
                                qavg += t->tab[i]->vars.qavg;
                }
        }

        q->packetsin++;
        q->bytesin += qdisc_pkt_len(skb);

        if (gred_wred_mode(t))
                gred_load_wred_set(t, q);

        q->vars.qavg = red_calc_qavg(&q->parms,
                                     &q->vars,
                                     gred_backlog(t, q, sch));

        if (red_is_idling(&q->vars))
                red_end_of_idle_period(&q->vars);

        if (gred_wred_mode(t))
                gred_store_wred_set(t, q);

        switch (red_action(&q->parms, &q->vars, q->vars.qavg + qavg)) {
        case RED_DONT_MARK:
                break;

        case RED_PROB_MARK:
                qdisc_qstats_overlimit(sch);
                if (!gred_use_ecn(q) || !INET_ECN_set_ce(skb)) {
                        q->stats.prob_drop++;
                        goto congestion_drop;
                }

                q->stats.prob_mark++;
                break;

        case RED_HARD_MARK:
                qdisc_qstats_overlimit(sch);
                if (gred_use_harddrop(q) || !gred_use_ecn(q) ||
                    !INET_ECN_set_ce(skb)) {
                        q->stats.forced_drop++;
                        goto congestion_drop;
                }
                q->stats.forced_mark++;
                break;
        }

        if (gred_backlog(t, q, sch) + qdisc_pkt_len(skb) <= q->limit) {
                q->backlog += qdisc_pkt_len(skb);
                return qdisc_enqueue_tail(skb, sch);
        }

        q->stats.pdrop++;
drop:
        return qdisc_drop(skb, sch, to_free);

congestion_drop:
        qdisc_drop(skb, sch, to_free);
        return NET_XMIT_CN;
}

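/* Dequeue from the single shared FIFO and charge the packet back to
 * its VQ.  Once the relevant backlog drains to zero, start the RED
 * idle period so the average keeps decaying while the queue is empty.
 */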
static struct sk_buff *gred_dequeue(struct Qdisc *sch)
{
        struct sk_buff *skb;
        struct gred_sched *t = qdisc_priv(sch);

        skb = qdisc_dequeue_head(sch);

        if (skb) {
                struct gred_sched_data *q;
                u16 dp = tc_index_to_dp(skb);

                if (dp >= t->DPs || (q = t->tab[dp]) == NULL) {
                        net_warn_ratelimited("GRED: Unable to relocate VQ 0x%x after dequeue, screwing up backlog\n",
                                             tc_index_to_dp(skb));
                } else {
                        q->backlog -= qdisc_pkt_len(skb);

                        if (gred_wred_mode(t)) {
                                if (!sch->qstats.backlog)
                                        red_start_of_idle_period(&t->wred_set);
                        } else {
                                if (!q->backlog)
                                        red_start_of_idle_period(&q->vars);
                        }
                }

                return skb;
        }

        return NULL;
}

static void gred_reset(struct Qdisc *sch)
{
        int i;
        struct gred_sched *t = qdisc_priv(sch);

        qdisc_reset_queue(sch);

        for (i = 0; i < t->DPs; i++) {
                struct gred_sched_data *q = t->tab[i];

                if (!q)
                        continue;

                red_restart(&q->vars);
                q->backlog = 0;
        }
}

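/* Mirror the qdisc state to the NIC, if it implements
 * ndo_setup_tc(TC_SETUP_QDISC_GRED).  For TC_GRED_REPLACE the whole
 * per-DP parameter table is passed down.
 */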
static void gred_offload(struct Qdisc *sch, enum tc_gred_command command)
{
        struct gred_sched *table = qdisc_priv(sch);
        struct net_device *dev = qdisc_dev(sch);
        struct tc_gred_qopt_offload opt = {
                .command        = command,
                .handle         = sch->handle,
                .parent         = sch->parent,
        };

        if (!tc_can_offload(dev) || !dev->netdev_ops->ndo_setup_tc)
                return;

        if (command == TC_GRED_REPLACE) {
                unsigned int i;

                opt.set.grio_on = gred_rio_mode(table);
                opt.set.wred_on = gred_wred_mode(table);
                opt.set.dp_cnt = table->DPs;
                opt.set.dp_def = table->def;

                for (i = 0; i < table->DPs; i++) {
                        struct gred_sched_data *q = table->tab[i];

                        if (!q)
                                continue;
                        opt.set.tab[i].present = true;
                        opt.set.tab[i].limit = q->limit;
                        opt.set.tab[i].prio = q->prio;
                        opt.set.tab[i].min = q->parms.qth_min >> q->parms.Wlog;
                        opt.set.tab[i].max = q->parms.qth_max >> q->parms.Wlog;
                        opt.set.tab[i].is_ecn = gred_use_ecn(q);
                        opt.set.tab[i].is_harddrop = gred_use_harddrop(q);
                        opt.set.tab[i].probability = q->parms.max_P;
                        opt.set.tab[i].backlog = &q->backlog;
                }
                opt.set.qstats = &sch->qstats;
        }

        dev->netdev_ops->ndo_setup_tc(dev, TC_SETUP_QDISC_GRED, &opt);
}

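/* Merge hardware counters into the software stats: the driver fills
 * hw_stats, and the reported values are added to both the per-VQ and
 * the per-qdisc counters below.
 */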
static int gred_offload_dump_stats(struct Qdisc *sch)
{
        struct gred_sched *table = qdisc_priv(sch);
        struct tc_gred_qopt_offload *hw_stats;
        unsigned int i;
        int ret;

        hw_stats = kzalloc(sizeof(*hw_stats), GFP_KERNEL);
        if (!hw_stats)
                return -ENOMEM;

        hw_stats->command = TC_GRED_STATS;
        hw_stats->handle = sch->handle;
        hw_stats->parent = sch->parent;

        for (i = 0; i < MAX_DPs; i++)
                if (table->tab[i])
                        hw_stats->stats.xstats[i] = &table->tab[i]->stats;

        ret = qdisc_offload_dump_helper(sch, TC_SETUP_QDISC_GRED, hw_stats);
        /* Adjust the stats even if the driver returned a failure: the
         * offload may have ended, with the driver still wanting its last
         * values folded in.
         */
        for (i = 0; i < MAX_DPs; i++) {
                if (!table->tab[i])
                        continue;
                table->tab[i]->packetsin += hw_stats->stats.bstats[i].packets;
                table->tab[i]->bytesin += hw_stats->stats.bstats[i].bytes;
                table->tab[i]->backlog += hw_stats->stats.qstats[i].backlog;

                _bstats_update(&sch->bstats,
                               hw_stats->stats.bstats[i].bytes,
                               hw_stats->stats.bstats[i].packets);
                sch->qstats.qlen += hw_stats->stats.qstats[i].qlen;
                sch->qstats.backlog += hw_stats->stats.qstats[i].backlog;
                sch->qstats.drops += hw_stats->stats.qstats[i].drops;
                sch->qstats.requeues += hw_stats->stats.qstats[i].requeues;
                sch->qstats.overlimits += hw_stats->stats.qstats[i].overlimits;
        }

        kfree(hw_stats);
        return ret;
}

static inline void gred_destroy_vq(struct gred_sched_data *q)
{
        kfree(q);
}

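/* Handle the table-level part of a change request (TCA_GRED_DPS):
 * resize the DP table, pick the default DP, set the global RED flags
 * and the grio/wred operating mode, and destroy any VQs shadowed by a
 * smaller table.
 */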
static int gred_change_table_def(struct Qdisc *sch, struct nlattr *dps,
                                 struct netlink_ext_ack *extack)
{
        struct gred_sched *table = qdisc_priv(sch);
        struct tc_gred_sopt *sopt;
        bool red_flags_changed;
        int i;

        if (!dps)
                return -EINVAL;

        sopt = nla_data(dps);

        if (sopt->DPs > MAX_DPs) {
                NL_SET_ERR_MSG_MOD(extack, "number of virtual queues too high");
                return -EINVAL;
        }
        if (sopt->DPs == 0) {
                NL_SET_ERR_MSG_MOD(extack,
                                   "number of virtual queues can't be 0");
                return -EINVAL;
        }
        if (sopt->def_DP >= sopt->DPs) {
                NL_SET_ERR_MSG_MOD(extack, "default virtual queue above virtual queue count");
                return -EINVAL;
        }
        if (sopt->flags && gred_per_vq_red_flags_used(table)) {
                NL_SET_ERR_MSG_MOD(extack, "can't set per-Qdisc RED flags when per-virtual queue flags are used");
                return -EINVAL;
        }

        sch_tree_lock(sch);
        table->DPs = sopt->DPs;
        table->def = sopt->def_DP;
        red_flags_changed = table->red_flags != sopt->flags;
        table->red_flags = sopt->flags;

        /*
         * Every entry point to GRED is synchronized with the above code
         * and the DP is checked against DPs, i.e. shadowed VQs can no
         * longer be found so we can unlock right here.
         */
        sch_tree_unlock(sch);

        if (sopt->grio) {
                gred_enable_rio_mode(table);
                gred_disable_wred_mode(table);
                if (gred_wred_mode_check(sch))
                        gred_enable_wred_mode(table);
        } else {
                gred_disable_rio_mode(table);
                gred_disable_wred_mode(table);
        }

        if (red_flags_changed)
                for (i = 0; i < table->DPs; i++)
                        if (table->tab[i])
                                table->tab[i]->red_flags =
                                        table->red_flags & GRED_VQ_RED_FLAGS;

        for (i = table->DPs; i < MAX_DPs; i++) {
                if (table->tab[i]) {
                        pr_warn("GRED: Warning: Destroying shadowed VQ 0x%x\n",
                                i);
                        gred_destroy_vq(table->tab[i]);
                        table->tab[i] = NULL;
                }
        }

        gred_offload(sch, TC_GRED_REPLACE);
        return 0;
}

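/* (Re)configure one virtual queue with the RED parameters supplied by
 * userspace.  A new VQ is taken from *prealloc, which the caller
 * allocated outside the qdisc tree lock.
 */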
static inline int gred_change_vq(struct Qdisc *sch, int dp,
                                 struct tc_gred_qopt *ctl, int prio,
                                 u8 *stab, u32 max_P,
                                 struct gred_sched_data **prealloc,
                                 struct netlink_ext_ack *extack)
{
        struct gred_sched *table = qdisc_priv(sch);
        struct gred_sched_data *q = table->tab[dp];

        if (!red_check_params(ctl->qth_min, ctl->qth_max, ctl->Wlog,
                              ctl->Scell_log, stab)) {
                NL_SET_ERR_MSG_MOD(extack, "invalid RED parameters");
                return -EINVAL;
        }

        if (!q) {
                table->tab[dp] = q = *prealloc;
                *prealloc = NULL;
                if (!q)
                        return -ENOMEM;
                q->red_flags = table->red_flags & GRED_VQ_RED_FLAGS;
        }

        q->DP = dp;
        q->prio = prio;
        if (ctl->limit > sch->limit)
                q->limit = sch->limit;
        else
                q->limit = ctl->limit;

        if (q->backlog == 0)
                red_end_of_idle_period(&q->vars);

        red_set_parms(&q->parms,
                      ctl->qth_min, ctl->qth_max, ctl->Wlog, ctl->Plog,
                      ctl->Scell_log, stab, max_P);
        red_set_vars(&q->vars);
        return 0;
}

static const struct nla_policy gred_vq_policy[TCA_GRED_VQ_MAX + 1] = {
        [TCA_GRED_VQ_DP]        = { .type = NLA_U32 },
        [TCA_GRED_VQ_FLAGS]     = { .type = NLA_U32 },
};

static const struct nla_policy gred_vqe_policy[TCA_GRED_VQ_ENTRY_MAX + 1] = {
        [TCA_GRED_VQ_ENTRY]     = { .type = NLA_NESTED },
};

static const struct nla_policy gred_policy[TCA_GRED_MAX + 1] = {
        [TCA_GRED_PARMS]        = { .len = sizeof(struct tc_gred_qopt) },
        [TCA_GRED_STAB]         = { .len = 256 },
        [TCA_GRED_DPS]          = { .len = sizeof(struct tc_gred_sopt) },
        [TCA_GRED_MAX_P]        = { .type = NLA_U32 },
        [TCA_GRED_LIMIT]        = { .type = NLA_U32 },
        [TCA_GRED_VQ_LIST]      = { .type = NLA_NESTED },
};

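/* Apply TCA_GRED_VQ_* attributes to their VQ.  The attributes were
 * already checked by gred_vqs_validate(), so parse errors are not
 * expected here.
 */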
static void gred_vq_apply(struct gred_sched *table, const struct nlattr *entry)
{
        struct nlattr *tb[TCA_GRED_VQ_MAX + 1];
        u32 dp;

        nla_parse_nested_deprecated(tb, TCA_GRED_VQ_MAX, entry,
                                    gred_vq_policy, NULL);

        dp = nla_get_u32(tb[TCA_GRED_VQ_DP]);

        if (tb[TCA_GRED_VQ_FLAGS])
                table->tab[dp]->red_flags = nla_get_u32(tb[TCA_GRED_VQ_FLAGS]);
}

static void gred_vqs_apply(struct gred_sched *table, struct nlattr *vqs)
{
        const struct nlattr *attr;
        int rem;

        nla_for_each_nested(attr, vqs, rem) {
                switch (nla_type(attr)) {
                case TCA_GRED_VQ_ENTRY:
                        gred_vq_apply(table, attr);
                        break;
                }
        }
}

static int gred_vq_validate(struct gred_sched *table, u32 cdp,
                            const struct nlattr *entry,
                            struct netlink_ext_ack *extack)
{
        struct nlattr *tb[TCA_GRED_VQ_MAX + 1];
        int err;
        u32 dp;

        err = nla_parse_nested_deprecated(tb, TCA_GRED_VQ_MAX, entry,
                                          gred_vq_policy, extack);
        if (err < 0)
                return err;

        if (!tb[TCA_GRED_VQ_DP]) {
                NL_SET_ERR_MSG_MOD(extack, "Virtual queue with no index specified");
                return -EINVAL;
        }
        dp = nla_get_u32(tb[TCA_GRED_VQ_DP]);
        if (dp >= table->DPs) {
                NL_SET_ERR_MSG_MOD(extack, "Virtual queue with index out of bounds");
                return -EINVAL;
        }
        if (dp != cdp && !table->tab[dp]) {
                NL_SET_ERR_MSG_MOD(extack, "Virtual queue not yet instantiated");
                return -EINVAL;
        }

        if (tb[TCA_GRED_VQ_FLAGS]) {
                u32 red_flags = nla_get_u32(tb[TCA_GRED_VQ_FLAGS]);

                if (table->red_flags && table->red_flags != red_flags) {
                        NL_SET_ERR_MSG_MOD(extack, "can't change per-virtual queue RED flags when per-Qdisc flags are used");
                        return -EINVAL;
                }
                if (red_flags & ~GRED_VQ_RED_FLAGS) {
                        NL_SET_ERR_MSG_MOD(extack,
                                           "invalid RED flags specified");
                        return -EINVAL;
                }
        }

        return 0;
}

static int gred_vqs_validate(struct gred_sched *table, u32 cdp,
                             struct nlattr *vqs, struct netlink_ext_ack *extack)
{
        const struct nlattr *attr;
        int rem, err;

        err = nla_validate_nested_deprecated(vqs, TCA_GRED_VQ_ENTRY_MAX,
                                             gred_vqe_policy, extack);
        if (err < 0)
                return err;

        nla_for_each_nested(attr, vqs, rem) {
                switch (nla_type(attr)) {
                case TCA_GRED_VQ_ENTRY:
                        err = gred_vq_validate(table, cdp, attr, extack);
                        if (err)
                                return err;
                        break;
                default:
                        NL_SET_ERR_MSG_MOD(extack, "GRED_VQ_LIST can contain only entry attributes");
                        return -EINVAL;
                }
        }

        if (rem > 0) {
                NL_SET_ERR_MSG_MOD(extack, "Trailing data after parsing virtual queue list");
                return -EINVAL;
        }

        return 0;
}

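/* Top-level change handler.  A request either reconfigures the table
 * itself (TCA_GRED_DPS, optionally with TCA_GRED_LIMIT) or exactly one
 * virtual queue (TCA_GRED_PARMS + TCA_GRED_STAB, optionally with the
 * structured TCA_GRED_VQ_LIST); mixing the two forms is rejected.
 */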
static int gred_change(struct Qdisc *sch, struct nlattr *opt,
                       struct netlink_ext_ack *extack)
{
        struct gred_sched *table = qdisc_priv(sch);
        struct tc_gred_qopt *ctl;
        struct nlattr *tb[TCA_GRED_MAX + 1];
        int err, prio = GRED_DEF_PRIO;
        u8 *stab;
        u32 max_P;
        struct gred_sched_data *prealloc;

        if (opt == NULL)
                return -EINVAL;

        err = nla_parse_nested_deprecated(tb, TCA_GRED_MAX, opt, gred_policy,
                                          extack);
        if (err < 0)
                return err;

        if (tb[TCA_GRED_PARMS] == NULL && tb[TCA_GRED_STAB] == NULL) {
                if (tb[TCA_GRED_LIMIT] != NULL)
                        sch->limit = nla_get_u32(tb[TCA_GRED_LIMIT]);
                return gred_change_table_def(sch, tb[TCA_GRED_DPS], extack);
        }

        if (tb[TCA_GRED_PARMS] == NULL ||
            tb[TCA_GRED_STAB] == NULL ||
            tb[TCA_GRED_LIMIT] != NULL) {
                NL_SET_ERR_MSG_MOD(extack, "can't configure Qdisc and virtual queue at the same time");
                return -EINVAL;
        }

        max_P = tb[TCA_GRED_MAX_P] ? nla_get_u32(tb[TCA_GRED_MAX_P]) : 0;

        ctl = nla_data(tb[TCA_GRED_PARMS]);
        stab = nla_data(tb[TCA_GRED_STAB]);

        if (ctl->DP >= table->DPs) {
                NL_SET_ERR_MSG_MOD(extack, "virtual queue index above virtual queue count");
                return -EINVAL;
        }

        if (tb[TCA_GRED_VQ_LIST]) {
                err = gred_vqs_validate(table, ctl->DP, tb[TCA_GRED_VQ_LIST],
                                        extack);
                if (err)
                        return err;
        }

        if (gred_rio_mode(table)) {
                if (ctl->prio == 0) {
                        int def_prio = GRED_DEF_PRIO;

                        if (table->tab[table->def])
                                def_prio = table->tab[table->def]->prio;

                        printk(KERN_DEBUG "GRED: DP %u does not have a prio, setting default to %d\n",
                               ctl->DP, def_prio);

                        prio = def_prio;
                } else {
                        prio = ctl->prio;
                }
        }

        prealloc = kzalloc(sizeof(*prealloc), GFP_KERNEL);
        sch_tree_lock(sch);

        err = gred_change_vq(sch, ctl->DP, ctl, prio, stab, max_P, &prealloc,
                             extack);
        if (err < 0)
                goto err_unlock_free;

        if (tb[TCA_GRED_VQ_LIST])
                gred_vqs_apply(table, tb[TCA_GRED_VQ_LIST]);

        if (gred_rio_mode(table)) {
                gred_disable_wred_mode(table);
                if (gred_wred_mode_check(sch))
                        gred_enable_wred_mode(table);
        }

        sch_tree_unlock(sch);
        kfree(prealloc);

        gred_offload(sch, TC_GRED_REPLACE);
        return 0;

err_unlock_free:
        sch_tree_unlock(sch);
        kfree(prealloc);
        return err;
}

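/* Initialization only accepts the table definition; per-VQ parameters
 * arrive through later change requests.  Without an explicit
 * TCA_GRED_LIMIT the byte limit defaults to tx_queue_len MTU-sized
 * packets.
 */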
static int gred_init(struct Qdisc *sch, struct nlattr *opt,
                     struct netlink_ext_ack *extack)
{
        struct nlattr *tb[TCA_GRED_MAX + 1];
        int err;

        if (!opt)
                return -EINVAL;

        err = nla_parse_nested_deprecated(tb, TCA_GRED_MAX, opt, gred_policy,
                                          extack);
        if (err < 0)
                return err;

        if (tb[TCA_GRED_PARMS] || tb[TCA_GRED_STAB]) {
                NL_SET_ERR_MSG_MOD(extack,
                                   "virtual queue configuration can't be specified at initialization time");
                return -EINVAL;
        }

        if (tb[TCA_GRED_LIMIT])
                sch->limit = nla_get_u32(tb[TCA_GRED_LIMIT]);
        else
                sch->limit = qdisc_dev(sch)->tx_queue_len
                             * psched_mtu(qdisc_dev(sch));

        return gred_change_table_def(sch, tb[TCA_GRED_DPS], extack);
}

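/* Dump the configuration twice for compatibility: first the legacy
 * TCA_GRED_PARMS array with one tc_gred_qopt per possible DP (DP >=
 * MAX_DPs marks a hole), then the structured per-VQ TCA_GRED_VQ_LIST
 * with flags and 64-bit byte counters.
 */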
static int gred_dump(struct Qdisc *sch, struct sk_buff *skb)
{
        struct gred_sched *table = qdisc_priv(sch);
        struct nlattr *parms, *vqs, *opts = NULL;
        int i;
        u32 max_p[MAX_DPs];
        struct tc_gred_sopt sopt = {
                .DPs    = table->DPs,
                .def_DP = table->def,
                .grio   = gred_rio_mode(table),
                .flags  = table->red_flags,
        };

        if (gred_offload_dump_stats(sch))
                goto nla_put_failure;

        opts = nla_nest_start_noflag(skb, TCA_OPTIONS);
        if (opts == NULL)
                goto nla_put_failure;
        if (nla_put(skb, TCA_GRED_DPS, sizeof(sopt), &sopt))
                goto nla_put_failure;

        for (i = 0; i < MAX_DPs; i++) {
                struct gred_sched_data *q = table->tab[i];

                max_p[i] = q ? q->parms.max_P : 0;
        }
        if (nla_put(skb, TCA_GRED_MAX_P, sizeof(max_p), max_p))
                goto nla_put_failure;

        if (nla_put_u32(skb, TCA_GRED_LIMIT, sch->limit))
                goto nla_put_failure;

        /* Old style all-in-one dump of VQs */
        parms = nla_nest_start_noflag(skb, TCA_GRED_PARMS);
        if (parms == NULL)
                goto nla_put_failure;

        for (i = 0; i < MAX_DPs; i++) {
                struct gred_sched_data *q = table->tab[i];
                struct tc_gred_qopt opt;
                unsigned long qavg;

                memset(&opt, 0, sizeof(opt));

                if (!q) {
                        /* hack -- fix at some point with proper message.
                         * This is how we indicate to tc that there is no VQ
                         * at this DP.
                         */
                        opt.DP = MAX_DPs + i;
                        goto append_opt;
                }

                opt.limit       = q->limit;
                opt.DP          = q->DP;
                opt.backlog     = gred_backlog(table, q, sch);
                opt.prio        = q->prio;
                opt.qth_min     = q->parms.qth_min >> q->parms.Wlog;
                opt.qth_max     = q->parms.qth_max >> q->parms.Wlog;
                opt.Wlog        = q->parms.Wlog;
                opt.Plog        = q->parms.Plog;
                opt.Scell_log   = q->parms.Scell_log;
                opt.other       = q->stats.other;
                opt.early       = q->stats.prob_drop;
                opt.forced      = q->stats.forced_drop;
                opt.pdrop       = q->stats.pdrop;
                opt.packets     = q->packetsin;
                opt.bytesin     = q->bytesin;

                if (gred_wred_mode(table))
                        gred_load_wred_set(table, q);

                qavg = red_calc_qavg(&q->parms, &q->vars,
                                     q->vars.qavg >> q->parms.Wlog);
                opt.qave = qavg >> q->parms.Wlog;

append_opt:
                if (nla_append(skb, sizeof(opt), &opt) < 0)
                        goto nla_put_failure;
        }

        nla_nest_end(skb, parms);

        /* Dump the VQs again, in a more structured way */
        vqs = nla_nest_start_noflag(skb, TCA_GRED_VQ_LIST);
        if (!vqs)
                goto nla_put_failure;

        for (i = 0; i < MAX_DPs; i++) {
                struct gred_sched_data *q = table->tab[i];
                struct nlattr *vq;

                if (!q)
                        continue;

                vq = nla_nest_start_noflag(skb, TCA_GRED_VQ_ENTRY);
                if (!vq)
                        goto nla_put_failure;

                if (nla_put_u32(skb, TCA_GRED_VQ_DP, q->DP))
                        goto nla_put_failure;

                if (nla_put_u32(skb, TCA_GRED_VQ_FLAGS, q->red_flags))
                        goto nla_put_failure;

                /* Stats */
                if (nla_put_u64_64bit(skb, TCA_GRED_VQ_STAT_BYTES, q->bytesin,
                                      TCA_GRED_VQ_PAD))
                        goto nla_put_failure;
                if (nla_put_u32(skb, TCA_GRED_VQ_STAT_PACKETS, q->packetsin))
                        goto nla_put_failure;
                if (nla_put_u32(skb, TCA_GRED_VQ_STAT_BACKLOG,
                                gred_backlog(table, q, sch)))
                        goto nla_put_failure;
                if (nla_put_u32(skb, TCA_GRED_VQ_STAT_PROB_DROP,
                                q->stats.prob_drop))
                        goto nla_put_failure;
                if (nla_put_u32(skb, TCA_GRED_VQ_STAT_PROB_MARK,
                                q->stats.prob_mark))
                        goto nla_put_failure;
                if (nla_put_u32(skb, TCA_GRED_VQ_STAT_FORCED_DROP,
                                q->stats.forced_drop))
                        goto nla_put_failure;
                if (nla_put_u32(skb, TCA_GRED_VQ_STAT_FORCED_MARK,
                                q->stats.forced_mark))
                        goto nla_put_failure;
                if (nla_put_u32(skb, TCA_GRED_VQ_STAT_PDROP, q->stats.pdrop))
                        goto nla_put_failure;
                if (nla_put_u32(skb, TCA_GRED_VQ_STAT_OTHER, q->stats.other))
                        goto nla_put_failure;

                nla_nest_end(skb, vq);
        }
        nla_nest_end(skb, vqs);

        return nla_nest_end(skb, opts);

nla_put_failure:
        nla_nest_cancel(skb, opts);
        return -EMSGSIZE;
}

static void gred_destroy(struct Qdisc *sch)
{
        struct gred_sched *table = qdisc_priv(sch);
        int i;

        for (i = 0; i < table->DPs; i++) {
                if (table->tab[i])
                        gred_destroy_vq(table->tab[i]);
        }
        gred_offload(sch, TC_GRED_DESTROY);
}

static struct Qdisc_ops gred_qdisc_ops __read_mostly = {
        .id             =       "gred",
        .priv_size      =       sizeof(struct gred_sched),
        .enqueue        =       gred_enqueue,
        .dequeue        =       gred_dequeue,
        .peek           =       qdisc_peek_head,
        .init           =       gred_init,
        .reset          =       gred_reset,
        .destroy        =       gred_destroy,
        .change         =       gred_change,
        .dump           =       gred_dump,
        .owner          =       THIS_MODULE,
};

static int __init gred_module_init(void)
{
        return register_qdisc(&gred_qdisc_ops);
}

static void __exit gred_module_exit(void)
{
        unregister_qdisc(&gred_qdisc_ops);
}

module_init(gred_module_init)
module_exit(gred_module_exit)

MODULE_LICENSE("GPL");
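MODULE_DESCRIPTION("Generic Random Early Detection qdisc");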