linux/drivers/net/ethernet/netronome/nfp/abm/qdisc.c
<<
>>
Prefs
   1// SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
   2/* Copyright (C) 2018 Netronome Systems, Inc. */
   3
   4#include <linux/rtnetlink.h>
   5#include <net/pkt_cls.h>
   6#include <net/pkt_sched.h>
   7#include <net/red.h>
   8
   9#include "../nfpcore/nfp_cpp.h"
  10#include "../nfp_app.h"
  11#include "../nfp_main.h"
  12#include "../nfp_net.h"
  13#include "../nfp_port.h"
  14#include "main.h"
  15
  16static bool nfp_abm_qdisc_is_red(struct nfp_qdisc *qdisc)
  17{
  18        return qdisc->type == NFP_QDISC_RED || qdisc->type == NFP_QDISC_GRED;
  19}
  20
  21static bool nfp_abm_qdisc_child_valid(struct nfp_qdisc *qdisc, unsigned int id)
  22{
  23        return qdisc->children[id] &&
  24               qdisc->children[id] != NFP_QDISC_UNTRACKED;
  25}
  26
/* Dereference a qdisc radix tree slot.  Tree updates are serialized
 * under RTNL, hence rtnl_dereference() rather than an RCU accessor.
 */
static void *nfp_abm_qdisc_tree_deref_slot(void __rcu **slot)
{
        return rtnl_dereference(*slot);
}
  31
/* Fold a child's counters into the parent's aggregate stats. */
static void
nfp_abm_stats_propagate(struct nfp_alink_stats *parent,
                        struct nfp_alink_stats *child)
{
        parent->tx_pkts         += child->tx_pkts;
        parent->tx_bytes        += child->tx_bytes;
        parent->backlog_pkts    += child->backlog_pkts;
        parent->backlog_bytes   += child->backlog_bytes;
        parent->overlimits      += child->overlimits;
        parent->drops           += child->drops;
}
  43
  44static void
  45nfp_abm_stats_update_red(struct nfp_abm_link *alink, struct nfp_qdisc *qdisc,
  46                         unsigned int queue)
  47{
  48        struct nfp_cpp *cpp = alink->abm->app->cpp;
  49        unsigned int i;
  50        int err;
  51
  52        if (!qdisc->offloaded)
  53                return;
  54
  55        for (i = 0; i < qdisc->red.num_bands; i++) {
  56                err = nfp_abm_ctrl_read_q_stats(alink, i, queue,
  57                                                &qdisc->red.band[i].stats);
  58                if (err)
  59                        nfp_err(cpp, "RED stats (%d, %d) read failed with error %d\n",
  60                                i, queue, err);
  61
  62                err = nfp_abm_ctrl_read_q_xstats(alink, i, queue,
  63                                                 &qdisc->red.band[i].xstats);
  64                if (err)
  65                        nfp_err(cpp, "RED xstats (%d, %d) read failed with error %d\n",
  66                                i, queue, err);
  67        }
  68}
  69
  70static void
  71nfp_abm_stats_update_mq(struct nfp_abm_link *alink, struct nfp_qdisc *qdisc)
  72{
  73        unsigned int i;
  74
  75        if (qdisc->type != NFP_QDISC_MQ)
  76                return;
  77
  78        for (i = 0; i < alink->total_queues; i++)
  79                if (nfp_abm_qdisc_child_valid(qdisc, i))
  80                        nfp_abm_stats_update_red(alink, qdisc->children[i], i);
  81}
  82
  83static void __nfp_abm_stats_update(struct nfp_abm_link *alink, u64 time_now)
  84{
  85        alink->last_stats_update = time_now;
  86        if (alink->root_qdisc)
  87                nfp_abm_stats_update_mq(alink, alink->root_qdisc);
  88}
  89
  90static void nfp_abm_stats_update(struct nfp_abm_link *alink)
  91{
  92        u64 now;
  93
  94        /* Limit the frequency of updates - stats of non-leaf qdiscs are a sum
  95         * of all their leafs, so we would read the same stat multiple times
  96         * for every dump.
  97         */
  98        now = ktime_get();
  99        if (now - alink->last_stats_update < NFP_ABM_STATS_REFRESH_IVAL)
 100                return;
 101
 102        __nfp_abm_stats_update(alink, now);
 103}
 104
 105static void
 106nfp_abm_qdisc_unlink_children(struct nfp_qdisc *qdisc,
 107                              unsigned int start, unsigned int end)
 108{
 109        unsigned int i;
 110
 111        for (i = start; i < end; i++)
 112                if (nfp_abm_qdisc_child_valid(qdisc, i)) {
 113                        qdisc->children[i]->use_cnt--;
 114                        qdisc->children[i] = NULL;
 115                }
 116}
 117
 118static void
 119nfp_abm_qdisc_offload_stop(struct nfp_abm_link *alink, struct nfp_qdisc *qdisc)
 120{
 121        unsigned int i;
 122
 123        /* Don't complain when qdisc is getting unlinked */
 124        if (qdisc->use_cnt)
 125                nfp_warn(alink->abm->app->cpp, "Offload of '%08x' stopped\n",
 126                         qdisc->handle);
 127
 128        if (!nfp_abm_qdisc_is_red(qdisc))
 129                return;
 130
 131        for (i = 0; i < qdisc->red.num_bands; i++) {
 132                qdisc->red.band[i].stats.backlog_pkts = 0;
 133                qdisc->red.band[i].stats.backlog_bytes = 0;
 134        }
 135}
 136
 137static int
 138__nfp_abm_stats_init(struct nfp_abm_link *alink, unsigned int band,
 139                     unsigned int queue, struct nfp_alink_stats *prev_stats,
 140                     struct nfp_alink_xstats *prev_xstats)
 141{
 142        u64 backlog_pkts, backlog_bytes;
 143        int err;
 144
 145        /* Don't touch the backlog, backlog can only be reset after it has
 146         * been reported back to the tc qdisc stats.
 147         */
 148        backlog_pkts = prev_stats->backlog_pkts;
 149        backlog_bytes = prev_stats->backlog_bytes;
 150
 151        err = nfp_abm_ctrl_read_q_stats(alink, band, queue, prev_stats);
 152        if (err) {
 153                nfp_err(alink->abm->app->cpp,
 154                        "RED stats init (%d, %d) failed with error %d\n",
 155                        band, queue, err);
 156                return err;
 157        }
 158
 159        err = nfp_abm_ctrl_read_q_xstats(alink, band, queue, prev_xstats);
 160        if (err) {
 161                nfp_err(alink->abm->app->cpp,
 162                        "RED xstats init (%d, %d) failed with error %d\n",
 163                        band, queue, err);
 164                return err;
 165        }
 166
 167        prev_stats->backlog_pkts = backlog_pkts;
 168        prev_stats->backlog_bytes = backlog_bytes;
 169        return 0;
 170}
 171
 172static int
 173nfp_abm_stats_init(struct nfp_abm_link *alink, struct nfp_qdisc *qdisc,
 174                   unsigned int queue)
 175{
 176        unsigned int i;
 177        int err;
 178
 179        for (i = 0; i < qdisc->red.num_bands; i++) {
 180                err = __nfp_abm_stats_init(alink, i, queue,
 181                                           &qdisc->red.band[i].prev_stats,
 182                                           &qdisc->red.band[i].prev_xstats);
 183                if (err)
 184                        return err;
 185        }
 186
 187        return 0;
 188}
 189
/* Decide whether a RED/GRED qdisc on @queue is offloadable and, if so,
 * program its per-band thresholds and drop/mark actions into the device.
 */
static void
nfp_abm_offload_compile_red(struct nfp_abm_link *alink, struct nfp_qdisc *qdisc,
                            unsigned int queue)
{
        bool good_red, good_gred;
        unsigned int i;

        /* Plain RED qualifies only with valid parameters, a single user,
         * no PRIO above it, and no child in slot 0.
         */
        good_red = qdisc->type == NFP_QDISC_RED &&
                   qdisc->params_ok &&
                   qdisc->use_cnt == 1 &&
                   !alink->has_prio &&
                   !qdisc->children[0];
        /* GRED qualifies with valid parameters and a single user. */
        good_gred = qdisc->type == NFP_QDISC_GRED &&
                    qdisc->params_ok &&
                    qdisc->use_cnt == 1;
        qdisc->offload_mark = good_red || good_gred;

        /* If we are starting offload init prev_stats */
        if (qdisc->offload_mark && !qdisc->offloaded)
                if (nfp_abm_stats_init(alink, qdisc, queue))
                        qdisc->offload_mark = false;

        if (!qdisc->offload_mark)
                return;

        /* Program threshold and ECN-mark vs. drop action per band. */
        for (i = 0; i < alink->abm->num_bands; i++) {
                enum nfp_abm_q_action act;

                nfp_abm_ctrl_set_q_lvl(alink, i, queue,
                                       qdisc->red.band[i].threshold);
                act = qdisc->red.band[i].ecn ?
                        NFP_ABM_ACT_MARK_DROP : NFP_ABM_ACT_DROP;
                nfp_abm_ctrl_set_q_act(alink, i, queue, act);
        }
}
 225
 226static void
 227nfp_abm_offload_compile_mq(struct nfp_abm_link *alink, struct nfp_qdisc *qdisc)
 228{
 229        unsigned int i;
 230
 231        qdisc->offload_mark = qdisc->type == NFP_QDISC_MQ;
 232        if (!qdisc->offload_mark)
 233                return;
 234
 235        for (i = 0; i < alink->total_queues; i++) {
 236                struct nfp_qdisc *child = qdisc->children[i];
 237
 238                if (!nfp_abm_qdisc_child_valid(qdisc, i))
 239                        continue;
 240
 241                nfp_abm_offload_compile_red(alink, child, i);
 242        }
 243}
 244
/* Recompute the offload state of all qdiscs on @alink and sync it to
 * the device: mark all thresholds undefined, clear offload marks,
 * re-mark offloadable qdiscs starting from the root, stop offload of
 * anything which lost its mark, and reset thresholds left unconfigured.
 */
void nfp_abm_qdisc_offload_update(struct nfp_abm_link *alink)
{
        struct nfp_abm *abm = alink->abm;
        struct radix_tree_iter iter;
        struct nfp_qdisc *qdisc;
        void __rcu **slot;
        size_t i;

        /* Mark all thresholds as unconfigured */
        for (i = 0; i < abm->num_bands; i++)
                __bitmap_set(abm->threshold_undef,
                             i * NFP_NET_MAX_RX_RINGS + alink->queue_base,
                             alink->total_queues);

        /* Clear offload marks */
        radix_tree_for_each_slot(slot, &alink->qdiscs, &iter, 0) {
                qdisc = nfp_abm_qdisc_tree_deref_slot(slot);
                qdisc->offload_mark = false;
        }

        /* Walk the tree top-down and re-mark what can be offloaded. */
        if (alink->root_qdisc)
                nfp_abm_offload_compile_mq(alink, alink->root_qdisc);

        /* Refresh offload status */
        radix_tree_for_each_slot(slot, &alink->qdiscs, &iter, 0) {
                qdisc = nfp_abm_qdisc_tree_deref_slot(slot);
                if (!qdisc->offload_mark && qdisc->offloaded)
                        nfp_abm_qdisc_offload_stop(alink, qdisc);
                qdisc->offloaded = qdisc->offload_mark;
        }

        /* Reset the unconfigured thresholds */
        for (i = 0; i < abm->num_thresholds; i++)
                if (test_bit(i, abm->threshold_undef))
                        __nfp_abm_ctrl_set_q_lvl(abm, i, NFP_ABM_LVL_INFINITY);

        __nfp_abm_stats_update(alink, ktime_get());
}
 283
/* Clear all MQ child slots which point at @qdisc and check that the
 * references held by MQ instances account for its entire use count.
 */
static void
nfp_abm_qdisc_clear_mq(struct net_device *netdev, struct nfp_abm_link *alink,
                       struct nfp_qdisc *qdisc)
{
        struct radix_tree_iter iter;
        unsigned int mq_refs = 0;
        void __rcu **slot;

        /* Nothing references this qdisc - nothing to clear. */
        if (!qdisc->use_cnt)
                return;
        /* MQ doesn't notify well on destruction, we need special handling of
         * MQ's children.
         */
        if (qdisc->type == NFP_QDISC_MQ &&
            qdisc == alink->root_qdisc &&
            netdev->reg_state == NETREG_UNREGISTERING)
                return;

        /* Count refs held by MQ instances and clear pointers */
        radix_tree_for_each_slot(slot, &alink->qdiscs, &iter, 0) {
                struct nfp_qdisc *mq = nfp_abm_qdisc_tree_deref_slot(slot);
                unsigned int i;

                /* Only MQs on the same netdev can reference this qdisc. */
                if (mq->type != NFP_QDISC_MQ || mq->netdev != netdev)
                        continue;
                for (i = 0; i < mq->num_children; i++)
                        if (mq->children[i] == qdisc) {
                                mq->children[i] = NULL;
                                mq_refs++;
                        }
        }

        WARN(qdisc->use_cnt != mq_refs, "non-zero qdisc use count: %d (- %d)\n",
             qdisc->use_cnt, mq_refs);
}
 319
/* Tear down tracking state of a qdisc: break MQ parent links, remove
 * it from the radix tree, free its memory and drop the port's tc
 * offload count.  NULL @qdisc is a no-op.
 */
static void
nfp_abm_qdisc_free(struct net_device *netdev, struct nfp_abm_link *alink,
                   struct nfp_qdisc *qdisc)
{
        struct nfp_port *port = nfp_port_from_netdev(netdev);

        if (!qdisc)
                return;
        nfp_abm_qdisc_clear_mq(netdev, alink, qdisc);
        /* The tree is keyed by the major part of the handle. */
        WARN_ON(radix_tree_delete(&alink->qdiscs,
                                  TC_H_MAJ(qdisc->handle)) != qdisc);

        kfree(qdisc->children);
        kfree(qdisc);

        port->tc_offload_cnt--;
}
 337
/* Allocate tracking state for a new qdisc with @children child slots
 * and insert it into the per-vNIC radix tree keyed by the major part
 * of @handle.  Returns the new qdisc or NULL on failure.
 */
static struct nfp_qdisc *
nfp_abm_qdisc_alloc(struct net_device *netdev, struct nfp_abm_link *alink,
                    enum nfp_qdisc_type type, u32 parent_handle, u32 handle,
                    unsigned int children)
{
        struct nfp_port *port = nfp_port_from_netdev(netdev);
        struct nfp_qdisc *qdisc;
        int err;

        qdisc = kzalloc(sizeof(*qdisc), GFP_KERNEL);
        if (!qdisc)
                return NULL;

        /* Leaf qdiscs are created with no child table at all. */
        if (children) {
                qdisc->children = kcalloc(children, sizeof(void *), GFP_KERNEL);
                if (!qdisc->children)
                        goto err_free_qdisc;
        }

        qdisc->netdev = netdev;
        qdisc->type = type;
        qdisc->parent_handle = parent_handle;
        qdisc->handle = handle;
        qdisc->num_children = children;

        err = radix_tree_insert(&alink->qdiscs, TC_H_MAJ(qdisc->handle), qdisc);
        if (err) {
                nfp_err(alink->abm->app->cpp,
                        "Qdisc insertion into radix tree failed: %d\n", err);
                goto err_free_child_tbl;
        }

        port->tc_offload_cnt++;
        return qdisc;

err_free_child_tbl:
        kfree(qdisc->children);
err_free_qdisc:
        kfree(qdisc);
        return NULL;
}
 379
/* Look up a tracked qdisc; only the major part of the handle keys the
 * radix tree.  Returns NULL when the qdisc is not tracked.
 */
static struct nfp_qdisc *
nfp_abm_qdisc_find(struct nfp_abm_link *alink, u32 handle)
{
        return radix_tree_lookup(&alink->qdiscs, TC_H_MAJ(handle));
}
 385
 386static int
 387nfp_abm_qdisc_replace(struct net_device *netdev, struct nfp_abm_link *alink,
 388                      enum nfp_qdisc_type type, u32 parent_handle, u32 handle,
 389                      unsigned int children, struct nfp_qdisc **qdisc)
 390{
 391        *qdisc = nfp_abm_qdisc_find(alink, handle);
 392        if (*qdisc) {
 393                if (WARN_ON((*qdisc)->type != type))
 394                        return -EINVAL;
 395                return 1;
 396        }
 397
 398        *qdisc = nfp_abm_qdisc_alloc(netdev, alink, type, parent_handle, handle,
 399                                     children);
 400        return *qdisc ? 0 : -ENOMEM;
 401}
 402
/* Handle destruction of a qdisc identified by @handle: drop its refs,
 * free its tracking state, and re-sync offload state if it was the
 * root.
 */
static void
nfp_abm_qdisc_destroy(struct net_device *netdev, struct nfp_abm_link *alink,
                      u32 handle)
{
        struct nfp_qdisc *qdisc;

        qdisc = nfp_abm_qdisc_find(alink, handle);
        if (!qdisc)
                return;

        /* We don't get TC_SETUP_ROOT_QDISC w/ MQ when netdev is unregistered */
        if (alink->root_qdisc == qdisc)
                qdisc->use_cnt--;

        nfp_abm_qdisc_unlink_children(qdisc, 0, qdisc->num_children);
        nfp_abm_qdisc_free(netdev, alink, qdisc);

        /* qdisc is freed at this point; the pointer is only compared,
         * never dereferenced.
         */
        if (alink->root_qdisc == qdisc) {
                alink->root_qdisc = NULL;
                /* Only root change matters, other changes are acted upon on
                 * the graft notification.
                 */
                nfp_abm_qdisc_offload_update(alink);
        }
}
 428
 429static int
 430nfp_abm_qdisc_graft(struct nfp_abm_link *alink, u32 handle, u32 child_handle,
 431                    unsigned int id)
 432{
 433        struct nfp_qdisc *parent, *child;
 434
 435        parent = nfp_abm_qdisc_find(alink, handle);
 436        if (!parent)
 437                return 0;
 438
 439        if (WARN(id >= parent->num_children,
 440                 "graft child out of bound %d >= %d\n",
 441                 id, parent->num_children))
 442                return -EINVAL;
 443
 444        nfp_abm_qdisc_unlink_children(parent, id, id + 1);
 445
 446        child = nfp_abm_qdisc_find(alink, child_handle);
 447        if (child)
 448                child->use_cnt++;
 449        else
 450                child = NFP_QDISC_UNTRACKED;
 451        parent->children[id] = child;
 452
 453        nfp_abm_qdisc_offload_update(alink);
 454
 455        return 0;
 456}
 457
/* Report to the stack the delta between the @new and @old stats
 * snapshots, accumulating into @bstats and @qstats.
 */
static void
nfp_abm_stats_calculate(struct nfp_alink_stats *new,
                        struct nfp_alink_stats *old,
                        struct gnet_stats_basic_packed *bstats,
                        struct gnet_stats_queue *qstats)
{
        _bstats_update(bstats, new->tx_bytes - old->tx_bytes,
                       new->tx_pkts - old->tx_pkts);
        qstats->qlen += new->backlog_pkts - old->backlog_pkts;
        qstats->backlog += new->backlog_bytes - old->backlog_bytes;
        qstats->overlimits += new->overlimits - old->overlimits;
        qstats->drops += new->drops - old->drops;
}
 471
/* Accumulate the delta between @new and @old RED xstats snapshots into
 * @stats.
 */
static void
nfp_abm_stats_red_calculate(struct nfp_alink_xstats *new,
                            struct nfp_alink_xstats *old,
                            struct red_stats *stats)
{
        stats->forced_mark += new->ecn_marked - old->ecn_marked;
        stats->pdrop += new->pdrop - old->pdrop;
}
 480
 481static int
 482nfp_abm_gred_stats(struct nfp_abm_link *alink, u32 handle,
 483                   struct tc_gred_qopt_offload_stats *stats)
 484{
 485        struct nfp_qdisc *qdisc;
 486        unsigned int i;
 487
 488        nfp_abm_stats_update(alink);
 489
 490        qdisc = nfp_abm_qdisc_find(alink, handle);
 491        if (!qdisc)
 492                return -EOPNOTSUPP;
 493        /* If the qdisc offload has stopped we may need to adjust the backlog
 494         * counters back so carry on even if qdisc is not currently offloaded.
 495         */
 496
 497        for (i = 0; i < qdisc->red.num_bands; i++) {
 498                if (!stats->xstats[i])
 499                        continue;
 500
 501                nfp_abm_stats_calculate(&qdisc->red.band[i].stats,
 502                                        &qdisc->red.band[i].prev_stats,
 503                                        &stats->bstats[i], &stats->qstats[i]);
 504                qdisc->red.band[i].prev_stats = qdisc->red.band[i].stats;
 505
 506                nfp_abm_stats_red_calculate(&qdisc->red.band[i].xstats,
 507                                            &qdisc->red.band[i].prev_xstats,
 508                                            stats->xstats[i]);
 509                qdisc->red.band[i].prev_xstats = qdisc->red.band[i].xstats;
 510        }
 511
 512        return qdisc->offloaded ? 0 : -EOPNOTSUPP;
 513}
 514
/* Validate that a GRED configuration can be expressed by the device;
 * log the specific rejection reason on failure.
 */
static bool
nfp_abm_gred_check_params(struct nfp_abm_link *alink,
                          struct tc_gred_qopt_offload *opt)
{
        struct nfp_cpp *cpp = alink->abm->app->cpp;
        struct nfp_abm *abm = alink->abm;
        unsigned int i;

        if (opt->set.grio_on || opt->set.wred_on) {
                nfp_warn(cpp, "GRED offload failed - GRIO and WRED not supported (p:%08x h:%08x)\n",
                         opt->parent, opt->handle);
                return false;
        }
        if (opt->set.dp_def != alink->def_band) {
                nfp_warn(cpp, "GRED offload failed - default band must be %d (p:%08x h:%08x)\n",
                         alink->def_band, opt->parent, opt->handle);
                return false;
        }
        if (opt->set.dp_cnt != abm->num_bands) {
                nfp_warn(cpp, "GRED offload failed - band count must be %d (p:%08x h:%08x)\n",
                         abm->num_bands, opt->parent, opt->handle);
                return false;
        }

        /* Check each band against device drop/mark capabilities. */
        for (i = 0; i < abm->num_bands; i++) {
                struct tc_gred_vq_qopt_offload_params *band = &opt->set.tab[i];

                if (!band->present)
                        return false;
                if (!band->is_ecn && !nfp_abm_has_drop(abm)) {
                        nfp_warn(cpp, "GRED offload failed - drop is not supported (ECN option required) (p:%08x h:%08x vq:%d)\n",
                                 opt->parent, opt->handle, i);
                        return false;
                }
                if (band->is_ecn && !nfp_abm_has_mark(abm)) {
                        nfp_warn(cpp, "GRED offload failed - ECN marking not supported (p:%08x h:%08x vq:%d)\n",
                                 opt->parent, opt->handle, i);
                        return false;
                }
                if (band->is_harddrop) {
                        nfp_warn(cpp, "GRED offload failed - harddrop is not supported (p:%08x h:%08x vq:%d)\n",
                                 opt->parent, opt->handle, i);
                        return false;
                }
                if (band->min != band->max) {
                        nfp_warn(cpp, "GRED offload failed - threshold mismatch (p:%08x h:%08x vq:%d)\n",
                                 opt->parent, opt->handle, i);
                        return false;
                }
                if (band->min > S32_MAX) {
                        nfp_warn(cpp, "GRED offload failed - threshold too large %d > %d (p:%08x h:%08x vq:%d)\n",
                                 band->min, S32_MAX, opt->parent, opt->handle,
                                 i);
                        return false;
                }
        }

        return true;
}
 574
 575static int
 576nfp_abm_gred_replace(struct net_device *netdev, struct nfp_abm_link *alink,
 577                     struct tc_gred_qopt_offload *opt)
 578{
 579        struct nfp_qdisc *qdisc;
 580        unsigned int i;
 581        int ret;
 582
 583        ret = nfp_abm_qdisc_replace(netdev, alink, NFP_QDISC_GRED, opt->parent,
 584                                    opt->handle, 0, &qdisc);
 585        if (ret < 0)
 586                return ret;
 587
 588        qdisc->params_ok = nfp_abm_gred_check_params(alink, opt);
 589        if (qdisc->params_ok) {
 590                qdisc->red.num_bands = opt->set.dp_cnt;
 591                for (i = 0; i < qdisc->red.num_bands; i++) {
 592                        qdisc->red.band[i].ecn = opt->set.tab[i].is_ecn;
 593                        qdisc->red.band[i].threshold = opt->set.tab[i].min;
 594                }
 595        }
 596
 597        if (qdisc->use_cnt)
 598                nfp_abm_qdisc_offload_update(alink);
 599
 600        return 0;
 601}
 602
 603int nfp_abm_setup_tc_gred(struct net_device *netdev, struct nfp_abm_link *alink,
 604                          struct tc_gred_qopt_offload *opt)
 605{
 606        switch (opt->command) {
 607        case TC_GRED_REPLACE:
 608                return nfp_abm_gred_replace(netdev, alink, opt);
 609        case TC_GRED_DESTROY:
 610                nfp_abm_qdisc_destroy(netdev, alink, opt->handle);
 611                return 0;
 612        case TC_GRED_STATS:
 613                return nfp_abm_gred_stats(alink, opt->handle, &opt->stats);
 614        default:
 615                return -EOPNOTSUPP;
 616        }
 617}
 618
 619static int
 620nfp_abm_red_xstats(struct nfp_abm_link *alink, struct tc_red_qopt_offload *opt)
 621{
 622        struct nfp_qdisc *qdisc;
 623
 624        nfp_abm_stats_update(alink);
 625
 626        qdisc = nfp_abm_qdisc_find(alink, opt->handle);
 627        if (!qdisc || !qdisc->offloaded)
 628                return -EOPNOTSUPP;
 629
 630        nfp_abm_stats_red_calculate(&qdisc->red.band[0].xstats,
 631                                    &qdisc->red.band[0].prev_xstats,
 632                                    opt->xstats);
 633        qdisc->red.band[0].prev_xstats = qdisc->red.band[0].xstats;
 634        return 0;
 635}
 636
 637static int
 638nfp_abm_red_stats(struct nfp_abm_link *alink, u32 handle,
 639                  struct tc_qopt_offload_stats *stats)
 640{
 641        struct nfp_qdisc *qdisc;
 642
 643        nfp_abm_stats_update(alink);
 644
 645        qdisc = nfp_abm_qdisc_find(alink, handle);
 646        if (!qdisc)
 647                return -EOPNOTSUPP;
 648        /* If the qdisc offload has stopped we may need to adjust the backlog
 649         * counters back so carry on even if qdisc is not currently offloaded.
 650         */
 651
 652        nfp_abm_stats_calculate(&qdisc->red.band[0].stats,
 653                                &qdisc->red.band[0].prev_stats,
 654                                stats->bstats, stats->qstats);
 655        qdisc->red.band[0].prev_stats = qdisc->red.band[0].stats;
 656
 657        return qdisc->offloaded ? 0 : -EOPNOTSUPP;
 658}
 659
/* Validate that a RED configuration can be expressed by the device;
 * log the specific rejection reason on failure.
 */
static bool
nfp_abm_red_check_params(struct nfp_abm_link *alink,
                         struct tc_red_qopt_offload *opt)
{
        struct nfp_cpp *cpp = alink->abm->app->cpp;
        struct nfp_abm *abm = alink->abm;

        if (!opt->set.is_ecn && !nfp_abm_has_drop(abm)) {
                nfp_warn(cpp, "RED offload failed - drop is not supported (ECN option required) (p:%08x h:%08x)\n",
                         opt->parent, opt->handle);
                return false;
        }
        if (opt->set.is_ecn && !nfp_abm_has_mark(abm)) {
                nfp_warn(cpp, "RED offload failed - ECN marking not supported (p:%08x h:%08x)\n",
                         opt->parent, opt->handle);
                return false;
        }
        if (opt->set.is_harddrop) {
                nfp_warn(cpp, "RED offload failed - harddrop is not supported (p:%08x h:%08x)\n",
                         opt->parent, opt->handle);
                return false;
        }
        if (opt->set.min != opt->set.max) {
                nfp_warn(cpp, "RED offload failed - unsupported min/max parameters (p:%08x h:%08x)\n",
                         opt->parent, opt->handle);
                return false;
        }
        if (opt->set.min > NFP_ABM_LVL_INFINITY) {
                nfp_warn(cpp, "RED offload failed - threshold too large %d > %d (p:%08x h:%08x)\n",
                         opt->set.min, NFP_ABM_LVL_INFINITY, opt->parent,
                         opt->handle);
                return false;
        }

        return true;
}
 696
/* Install or update tracking state for a RED qdisc (one child slot)
 * and re-evaluate offload state if the qdisc is in use.
 */
static int
nfp_abm_red_replace(struct net_device *netdev, struct nfp_abm_link *alink,
                    struct tc_red_qopt_offload *opt)
{
        struct nfp_qdisc *qdisc;
        int ret;

        /* ret == 1 means an existing tracked instance was reused. */
        ret = nfp_abm_qdisc_replace(netdev, alink, NFP_QDISC_RED, opt->parent,
                                    opt->handle, 1, &qdisc);
        if (ret < 0)
                return ret;

        /* If limit != 0 child gets reset */
        if (opt->set.limit) {
                if (nfp_abm_qdisc_child_valid(qdisc, 0))
                        qdisc->children[0]->use_cnt--;
                qdisc->children[0] = NULL;
        } else {
                /* Qdisc that was just allocated without a limit will use
                 * noop_qdisc, i.e. a black hole.
                 */
                if (!ret)
                        qdisc->children[0] = NFP_QDISC_UNTRACKED;
        }

        /* Cache the single-band configuration if it is offloadable. */
        qdisc->params_ok = nfp_abm_red_check_params(alink, opt);
        if (qdisc->params_ok) {
                qdisc->red.num_bands = 1;
                qdisc->red.band[0].ecn = opt->set.is_ecn;
                qdisc->red.band[0].threshold = opt->set.min;
        }

        if (qdisc->use_cnt == 1)
                nfp_abm_qdisc_offload_update(alink);

        return 0;
}
 734
 735int nfp_abm_setup_tc_red(struct net_device *netdev, struct nfp_abm_link *alink,
 736                         struct tc_red_qopt_offload *opt)
 737{
 738        switch (opt->command) {
 739        case TC_RED_REPLACE:
 740                return nfp_abm_red_replace(netdev, alink, opt);
 741        case TC_RED_DESTROY:
 742                nfp_abm_qdisc_destroy(netdev, alink, opt->handle);
 743                return 0;
 744        case TC_RED_STATS:
 745                return nfp_abm_red_stats(alink, opt->handle, &opt->stats);
 746        case TC_RED_XSTATS:
 747                return nfp_abm_red_xstats(alink, opt);
 748        case TC_RED_GRAFT:
 749                return nfp_abm_qdisc_graft(alink, opt->handle,
 750                                           opt->child_handle, 0);
 751        default:
 752                return -EOPNOTSUPP;
 753        }
 754}
 755
/* Track creation of an MQ root qdisc and re-evaluate offload state. */
static int
nfp_abm_mq_create(struct net_device *netdev, struct nfp_abm_link *alink,
                  struct tc_mq_qopt_offload *opt)
{
        struct nfp_qdisc *qdisc;
        int ret;

        /* One child slot per HW queue; ret == 1 means it already existed. */
        ret = nfp_abm_qdisc_replace(netdev, alink, NFP_QDISC_MQ,
                                    TC_H_ROOT, opt->handle, alink->total_queues,
                                    &qdisc);
        if (ret < 0)
                return ret;

        /* MQ is unconditionally considered valid and offloaded here. */
        qdisc->params_ok = true;
        qdisc->offloaded = true;
        nfp_abm_qdisc_offload_update(alink);
        return 0;
}
 774
/* Report MQ stats by aggregating the per-band stats of all tracked
 * RED/GRED children into the MQ's own snapshot.
 */
static int
nfp_abm_mq_stats(struct nfp_abm_link *alink, u32 handle,
                 struct tc_qopt_offload_stats *stats)
{
        struct nfp_qdisc *qdisc, *red;
        unsigned int i, j;

        qdisc = nfp_abm_qdisc_find(alink, handle);
        if (!qdisc)
                return -EOPNOTSUPP;

        nfp_abm_stats_update(alink);

        /* MQ stats are summed over the children in the core, so we need
         * to add up the unreported child values.
         */
        memset(&qdisc->mq.stats, 0, sizeof(qdisc->mq.stats));
        memset(&qdisc->mq.prev_stats, 0, sizeof(qdisc->mq.prev_stats));

        for (i = 0; i < qdisc->num_children; i++) {
                if (!nfp_abm_qdisc_child_valid(qdisc, i))
                        continue;

                /* Only RED-family children carry stats we track. */
                if (!nfp_abm_qdisc_is_red(qdisc->children[i]))
                        continue;
                red = qdisc->children[i];

                for (j = 0; j < red->red.num_bands; j++) {
                        nfp_abm_stats_propagate(&qdisc->mq.stats,
                                                &red->red.band[j].stats);
                        nfp_abm_stats_propagate(&qdisc->mq.prev_stats,
                                                &red->red.band[j].prev_stats);
                }
        }

        nfp_abm_stats_calculate(&qdisc->mq.stats, &qdisc->mq.prev_stats,
                                stats->bstats, stats->qstats);

        return qdisc->offloaded ? 0 : -EOPNOTSUPP;
}
 815
 816int nfp_abm_setup_tc_mq(struct net_device *netdev, struct nfp_abm_link *alink,
 817                        struct tc_mq_qopt_offload *opt)
 818{
 819        switch (opt->command) {
 820        case TC_MQ_CREATE:
 821                return nfp_abm_mq_create(netdev, alink, opt);
 822        case TC_MQ_DESTROY:
 823                nfp_abm_qdisc_destroy(netdev, alink, opt->handle);
 824                return 0;
 825        case TC_MQ_STATS:
 826                return nfp_abm_mq_stats(alink, opt->handle, &opt->stats);
 827        case TC_MQ_GRAFT:
 828                return nfp_abm_qdisc_graft(alink, opt->handle,
 829                                           opt->graft_params.child_handle,
 830                                           opt->graft_params.queue);
 831        default:
 832                return -EOPNOTSUPP;
 833        }
 834}
 835
 836int nfp_abm_setup_root(struct net_device *netdev, struct nfp_abm_link *alink,
 837                       struct tc_root_qopt_offload *opt)
 838{
 839        if (opt->ingress)
 840                return -EOPNOTSUPP;
 841        if (alink->root_qdisc)
 842                alink->root_qdisc->use_cnt--;
 843        alink->root_qdisc = nfp_abm_qdisc_find(alink, opt->handle);
 844        if (alink->root_qdisc)
 845                alink->root_qdisc->use_cnt++;
 846
 847        nfp_abm_qdisc_offload_update(alink);
 848
 849        return 0;
 850}
 851