linux/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_mqprio.c
// SPDX-License-Identifier: GPL-2.0-only
/* Copyright (C) 2019 Chelsio Communications.  All rights reserved. */

#include "cxgb4.h"
#include "cxgb4_tc_mqprio.h"
#include "sched.h"

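/* Validate a TC-MQPRIO offload request against what the hardware can
 * do: full TC offload, channel mode, and a bandwidth rate shaper, with
 * at most nsched_cls traffic classes. Also reject overlapping per-tc
 * queue ranges, requests exceeding the available EOTIDs, and summed
 * min/max rates above the link speed.
 *
 * An illustrative iproute2 invocation that requests this offload mode
 * (interface name and rates are example values only):
 *
 *   tc qdisc add dev eth0 root mqprio num_tc 2 \
 *      map 0 0 0 0 1 1 1 1 queues 4@0 4@4 hw 1 \
 *      mode channel shaper bw_rate \
 *      min_rate 1Gbit 2Gbit max_rate 2Gbit 4Gbit
 */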
static int cxgb4_mqprio_validate(struct net_device *dev,
                                 struct tc_mqprio_qopt_offload *mqprio)
{
        u64 min_rate = 0, max_rate = 0, max_link_rate;
        struct port_info *pi = netdev2pinfo(dev);
        struct adapter *adap = netdev2adap(dev);
        u32 speed, qcount = 0, qoffset = 0;
        u32 start_a, start_b, end_a, end_b;
        int ret;
        u8 i, j;

        if (!mqprio->qopt.num_tc)
                return 0;

        if (mqprio->qopt.hw != TC_MQPRIO_HW_OFFLOAD_TCS) {
                netdev_err(dev, "Only full TC hardware offload is supported\n");
                return -EINVAL;
        } else if (mqprio->mode != TC_MQPRIO_MODE_CHANNEL) {
                netdev_err(dev, "Only channel mode offload is supported\n");
                return -EINVAL;
        } else if (mqprio->shaper != TC_MQPRIO_SHAPER_BW_RATE) {
                netdev_err(dev, "Only bandwidth rate shaper supported\n");
                return -EINVAL;
        } else if (mqprio->qopt.num_tc > adap->params.nsched_cls) {
                netdev_err(dev,
                           "Only %u traffic classes supported by hardware\n",
                           adap->params.nsched_cls);
                return -ERANGE;
        }

        ret = t4_get_link_params(pi, NULL, &speed, NULL);
        if (ret) {
                netdev_err(dev, "Failed to get link speed, ret: %d\n", ret);
                return -EINVAL;
        }

        /* Convert from Mbps to bps */
        max_link_rate = (u64)speed * 1000 * 1000;

        for (i = 0; i < mqprio->qopt.num_tc; i++) {
                qoffset = max_t(u16, mqprio->qopt.offset[i], qoffset);
                qcount += mqprio->qopt.count[i];

                start_a = mqprio->qopt.offset[i];
                end_a = start_a + mqprio->qopt.count[i] - 1;
                for (j = i + 1; j < mqprio->qopt.num_tc; j++) {
                        start_b = mqprio->qopt.offset[j];
                        end_b = start_b + mqprio->qopt.count[j] - 1;

                        /* If queue count is 0, then the traffic
                         * belonging to this class will not use
                         * ETHOFLD queues. So, no need to validate
                         * further.
                         */
                        if (!mqprio->qopt.count[i])
                                break;

                        if (!mqprio->qopt.count[j])
                                continue;

                        if (max_t(u32, start_a, start_b) <=
                            min_t(u32, end_a, end_b)) {
                                netdev_err(dev,
                                           "Queues can't overlap across tc\n");
                                return -EINVAL;
                        }
                }

                /* Convert from bytes per second to bits per second */
                min_rate += (mqprio->min_rate[i] * 8);
                max_rate += (mqprio->max_rate[i] * 8);
        }

        if (qoffset >= adap->tids.neotids || qcount > adap->tids.neotids)
                return -ENOMEM;

        if (min_rate > max_link_rate || max_rate > max_link_rate) {
                netdev_err(dev,
                           "Total Min/Max (%llu/%llu) Rate > supported (%llu)\n",
                           min_rate, max_rate, max_link_rate);
                return -EINVAL;
        }

        return 0;
}

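/* Initialize a software ETHOFLD Tx queue: allocate its descriptor
 * ring, map it to the given EOTID and hardware Tx queue, and arm the
 * restart tasklet used to resume transmission once credits return.
 */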
static int cxgb4_init_eosw_txq(struct net_device *dev,
                               struct sge_eosw_txq *eosw_txq,
                               u32 eotid, u32 hwqid)
{
        struct adapter *adap = netdev2adap(dev);
        struct tx_sw_desc *ring;

        memset(eosw_txq, 0, sizeof(*eosw_txq));

        ring = kcalloc(CXGB4_EOSW_TXQ_DEFAULT_DESC_NUM,
                       sizeof(*ring), GFP_KERNEL);
        if (!ring)
                return -ENOMEM;

        eosw_txq->desc = ring;
        eosw_txq->ndesc = CXGB4_EOSW_TXQ_DEFAULT_DESC_NUM;
        spin_lock_init(&eosw_txq->lock);
        eosw_txq->state = CXGB4_EO_STATE_CLOSED;
        eosw_txq->eotid = eotid;
        eosw_txq->hwtid = adap->tids.eotid_base + eosw_txq->eotid;
        eosw_txq->cred = adap->params.ofldq_wr_cred;
        eosw_txq->hwqid = hwqid;
        eosw_txq->netdev = dev;
        tasklet_init(&eosw_txq->qresume_tsk, cxgb4_ethofld_restart,
                     (unsigned long)eosw_txq);
        return 0;
}

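/* Drop any descriptors still on the ring and reset the queue back to
 * its initial CLOSED state with a full work request credit budget.
 */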
static void cxgb4_clean_eosw_txq(struct net_device *dev,
                                 struct sge_eosw_txq *eosw_txq)
{
        struct adapter *adap = netdev2adap(dev);

        cxgb4_eosw_txq_free_desc(adap, eosw_txq, eosw_txq->ndesc);
        eosw_txq->pidx = 0;
        eosw_txq->last_pidx = 0;
        eosw_txq->cidx = 0;
        eosw_txq->last_cidx = 0;
        eosw_txq->flowc_idx = 0;
        eosw_txq->inuse = 0;
        eosw_txq->cred = adap->params.ofldq_wr_cred;
        eosw_txq->ncompl = 0;
        eosw_txq->last_compl = 0;
        eosw_txq->state = CXGB4_EO_STATE_CLOSED;
}

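/* Tear down a software ETHOFLD Tx queue: clean it under the queue
 * lock, free the descriptor ring, and kill the restart tasklet.
 */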
static void cxgb4_free_eosw_txq(struct net_device *dev,
                                struct sge_eosw_txq *eosw_txq)
{
        spin_lock_bh(&eosw_txq->lock);
        cxgb4_clean_eosw_txq(dev, eosw_txq);
        kfree(eosw_txq->desc);
        spin_unlock_bh(&eosw_txq->lock);
        tasklet_kill(&eosw_txq->qresume_tsk);
}

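/* Allocate the ETHOFLD hardware queues for a port: one Rx queue (to
 * receive Tx completions) and one Tx queue per queue set. The backing
 * arrays are shared by all ports and refcounted, so only the first
 * caller allocates them. Also requests MSI-X vectors and affinity
 * where applicable, and enables Rx on a fully initialized adapter.
 */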
static int cxgb4_mqprio_alloc_hw_resources(struct net_device *dev)
{
        struct port_info *pi = netdev2pinfo(dev);
        struct adapter *adap = netdev2adap(dev);
        struct sge_ofld_rxq *eorxq;
        struct sge_eohw_txq *eotxq;
        int ret, msix = 0;
        u32 i;

        /* Allocate ETHOFLD hardware queue structures if not done already */
        if (!refcount_read(&adap->tc_mqprio->refcnt)) {
                adap->sge.eohw_rxq = kcalloc(adap->sge.eoqsets,
                                             sizeof(struct sge_ofld_rxq),
                                             GFP_KERNEL);
                if (!adap->sge.eohw_rxq)
                        return -ENOMEM;

                adap->sge.eohw_txq = kcalloc(adap->sge.eoqsets,
                                             sizeof(struct sge_eohw_txq),
                                             GFP_KERNEL);
                if (!adap->sge.eohw_txq) {
                        kfree(adap->sge.eohw_rxq);
                        return -ENOMEM;
                }

                refcount_set(&adap->tc_mqprio->refcnt, 1);
        } else {
                refcount_inc(&adap->tc_mqprio->refcnt);
        }

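        /* Without MSI-X, there is no dedicated vector to claim below.
         * Encoding the non-data interrupt queue's absolute id as a
         * negative value signals the Rx queue allocation to forward
         * this queue's interrupts there instead.
         */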
        if (!(adap->flags & CXGB4_USING_MSIX))
                msix = -((int)adap->sge.intrq.abs_id + 1);

        for (i = 0; i < pi->nqsets; i++) {
                eorxq = &adap->sge.eohw_rxq[pi->first_qset + i];
                eotxq = &adap->sge.eohw_txq[pi->first_qset + i];

                /* Allocate Rxqs for receiving ETHOFLD Tx completions */
                if (msix >= 0) {
                        msix = cxgb4_get_msix_idx_from_bmap(adap);
                        if (msix < 0) {
                                ret = msix;
                                goto out_free_queues;
                        }

                        eorxq->msix = &adap->msix_info[msix];
                        snprintf(eorxq->msix->desc,
                                 sizeof(eorxq->msix->desc),
                                 "%s-eorxq%d", dev->name, i);
                }

                init_rspq(adap, &eorxq->rspq,
                          CXGB4_EOHW_RXQ_DEFAULT_INTR_USEC,
                          CXGB4_EOHW_RXQ_DEFAULT_PKT_CNT,
                          CXGB4_EOHW_RXQ_DEFAULT_DESC_NUM,
                          CXGB4_EOHW_RXQ_DEFAULT_DESC_SIZE);

                eorxq->fl.size = CXGB4_EOHW_FLQ_DEFAULT_DESC_NUM;

                ret = t4_sge_alloc_rxq(adap, &eorxq->rspq, false,
                                       dev, msix, &eorxq->fl,
                                       cxgb4_ethofld_rx_handler,
                                       NULL, 0);
                if (ret)
                        goto out_free_queues;

                /* Allocate ETHOFLD hardware Txqs */
                eotxq->q.size = CXGB4_EOHW_TXQ_DEFAULT_DESC_NUM;
                ret = t4_sge_alloc_ethofld_txq(adap, eotxq, dev,
                                               eorxq->rspq.cntxt_id);
                if (ret)
                        goto out_free_queues;

                /* Allocate IRQs, set IRQ affinity, and start Rx */
                if (adap->flags & CXGB4_USING_MSIX) {
                        ret = request_irq(eorxq->msix->vec, t4_sge_intr_msix, 0,
                                          eorxq->msix->desc, &eorxq->rspq);
                        if (ret)
                                goto out_free_msix;

                        cxgb4_set_msix_aff(adap, eorxq->msix->vec,
                                           &eorxq->msix->aff_mask, i);
                }

                if (adap->flags & CXGB4_FULL_INIT_DONE)
                        cxgb4_enable_rx(adap, &eorxq->rspq);
        }

        return 0;

out_free_msix:
        while (i-- > 0) {
                eorxq = &adap->sge.eohw_rxq[pi->first_qset + i];

                if (adap->flags & CXGB4_FULL_INIT_DONE)
                        cxgb4_quiesce_rx(&eorxq->rspq);

                if (adap->flags & CXGB4_USING_MSIX) {
                        cxgb4_clear_msix_aff(eorxq->msix->vec,
                                             eorxq->msix->aff_mask);
                        free_irq(eorxq->msix->vec, &eorxq->rspq);
                }
        }

out_free_queues:
        for (i = 0; i < pi->nqsets; i++) {
                eorxq = &adap->sge.eohw_rxq[pi->first_qset + i];
                eotxq = &adap->sge.eohw_txq[pi->first_qset + i];

                if (eorxq->rspq.desc)
                        free_rspq_fl(adap, &eorxq->rspq, &eorxq->fl);
                if (eorxq->msix)
                        cxgb4_free_msix_idx_in_bmap(adap, eorxq->msix->idx);
                t4_sge_free_ethofld_txq(adap, eotxq);
        }

        if (refcount_dec_and_test(&adap->tc_mqprio->refcnt)) {
                kfree(adap->sge.eohw_txq);
                kfree(adap->sge.eohw_rxq);
        }
        return ret;
}

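/* Release a port's ETHOFLD hardware queues, their IRQs, and MSI-X
 * bitmap entries, then free the shared backing arrays once the last
 * user drops its reference.
 */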
static void cxgb4_mqprio_free_hw_resources(struct net_device *dev)
{
        struct port_info *pi = netdev2pinfo(dev);
        struct adapter *adap = netdev2adap(dev);
        struct sge_ofld_rxq *eorxq;
        struct sge_eohw_txq *eotxq;
        u32 i;

        /* Return if no ETHOFLD structures have been allocated yet */
        if (!refcount_read(&adap->tc_mqprio->refcnt))
                return;

        /* Return if no hardware queues have been allocated */
        if (!adap->sge.eohw_rxq[pi->first_qset].rspq.desc)
                return;

        for (i = 0; i < pi->nqsets; i++) {
                eorxq = &adap->sge.eohw_rxq[pi->first_qset + i];
                eotxq = &adap->sge.eohw_txq[pi->first_qset + i];

                /* The device removal path will have already disabled
                 * NAPI before unregistering the netdevice. So, only
                 * disable NAPI if we're not in the device removal path.
                 */
                if (!(adap->flags & CXGB4_SHUTTING_DOWN))
                        cxgb4_quiesce_rx(&eorxq->rspq);

                if (adap->flags & CXGB4_USING_MSIX) {
                        cxgb4_clear_msix_aff(eorxq->msix->vec,
                                             eorxq->msix->aff_mask);
                        free_irq(eorxq->msix->vec, &eorxq->rspq);
                        cxgb4_free_msix_idx_in_bmap(adap, eorxq->msix->idx);
                }

                free_rspq_fl(adap, &eorxq->rspq, &eorxq->fl);
                t4_sge_free_ethofld_txq(adap, eotxq);
        }

        /* Free up ETHOFLD structures if there are no users */
        if (refcount_dec_and_test(&adap->tc_mqprio->refcnt)) {
                kfree(adap->sge.eohw_txq);
                kfree(adap->sge.eohw_rxq);
        }
}

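/* Allocate one hardware scheduling class per traffic class, programmed
 * as a flow-mode, absolute-rate class with the requested min/max rates.
 * The class indices are recorded in tc_hwtc_map for binding queues
 * later.
 */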
static int cxgb4_mqprio_alloc_tc(struct net_device *dev,
                                 struct tc_mqprio_qopt_offload *mqprio)
{
        struct ch_sched_params p = {
                .type = SCHED_CLASS_TYPE_PACKET,
                .u.params.level = SCHED_CLASS_LEVEL_CL_RL,
                .u.params.mode = SCHED_CLASS_MODE_FLOW,
                .u.params.rateunit = SCHED_CLASS_RATEUNIT_BITS,
                .u.params.ratemode = SCHED_CLASS_RATEMODE_ABS,
                .u.params.class = SCHED_CLS_NONE,
                .u.params.weight = 0,
                .u.params.pktsize = dev->mtu,
        };
        struct cxgb4_tc_port_mqprio *tc_port_mqprio;
        struct port_info *pi = netdev2pinfo(dev);
        struct adapter *adap = netdev2adap(dev);
        struct sched_class *e;
        int ret;
        u8 i;

        tc_port_mqprio = &adap->tc_mqprio->port_mqprio[pi->port_id];
        p.u.params.channel = pi->tx_chan;
        for (i = 0; i < mqprio->qopt.num_tc; i++) {
                /* Convert from bytes per second to Kbps */
                p.u.params.minrate = div_u64(mqprio->min_rate[i] * 8, 1000);
                p.u.params.maxrate = div_u64(mqprio->max_rate[i] * 8, 1000);

                /* Request a larger burst buffer for smaller MTUs, so
                 * that hardware can work on more data per burst
                 * cycle.
                 */
                if (dev->mtu <= ETH_DATA_LEN)
                        p.u.params.burstsize = 8 * dev->mtu;

                e = cxgb4_sched_class_alloc(dev, &p);
                if (!e) {
                        ret = -ENOMEM;
                        goto out_err;
                }

                tc_port_mqprio->tc_hwtc_map[i] = e->idx;
        }

        return 0;

out_err:
        while (i--)
                cxgb4_sched_class_free(dev, tc_port_mqprio->tc_hwtc_map[i]);

        return ret;
}

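/* Free the hardware scheduling classes allocated for the current
 * mqprio configuration.
 */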
static void cxgb4_mqprio_free_tc(struct net_device *dev)
{
        struct cxgb4_tc_port_mqprio *tc_port_mqprio;
        struct port_info *pi = netdev2pinfo(dev);
        struct adapter *adap = netdev2adap(dev);
        u8 i;

        tc_port_mqprio = &adap->tc_mqprio->port_mqprio[pi->port_id];
        for (i = 0; i < tc_port_mqprio->mqprio.qopt.num_tc; i++)
                cxgb4_sched_class_free(dev, tc_port_mqprio->tc_hwtc_map[i]);
}

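/* Bind an EOTID to a hardware traffic class via a FLOWC work request
 * and wait for the firmware to acknowledge it.
 */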
static int cxgb4_mqprio_class_bind(struct net_device *dev,
                                   struct sge_eosw_txq *eosw_txq,
                                   u8 tc)
{
        struct ch_sched_flowc fe;
        int ret;

        init_completion(&eosw_txq->completion);

        fe.tid = eosw_txq->eotid;
        fe.class = tc;

        ret = cxgb4_sched_class_bind(dev, &fe, SCHED_FLOWC);
        if (ret)
                return ret;

        ret = wait_for_completion_timeout(&eosw_txq->completion,
                                          CXGB4_FLOWC_WAIT_TIMEOUT);
        if (!ret)
                return -ETIMEDOUT;

        return 0;
}

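/* Unbind an EOTID from its hardware traffic class. The completion is
 * only waited on when the adapter isn't shutting down, because
 * interrupts are already disabled in that path.
 */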
static void cxgb4_mqprio_class_unbind(struct net_device *dev,
                                      struct sge_eosw_txq *eosw_txq,
                                      u8 tc)
{
        struct adapter *adap = netdev2adap(dev);
        struct ch_sched_flowc fe;

        /* If we're shutting down, interrupts are disabled and no completions
         * come back. So, skip waiting for completions in this scenario.
         */
        if (!(adap->flags & CXGB4_SHUTTING_DOWN))
                init_completion(&eosw_txq->completion);

        fe.tid = eosw_txq->eotid;
        fe.class = tc;
        cxgb4_sched_class_unbind(dev, &fe, SCHED_FLOWC);

        if (!(adap->flags & CXGB4_SHUTTING_DOWN))
                wait_for_completion_timeout(&eosw_txq->completion,
                                            CXGB4_FLOWC_WAIT_TIMEOUT);
}

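/* Bring the offload live: allocate hardware queues, attach an
 * EOTID-backed software Tx queue to every requested queue, bind each
 * one to its traffic class, and publish the tc-to-queue mapping to the
 * network stack. Any failure unwinds all of the above.
 */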
static int cxgb4_mqprio_enable_offload(struct net_device *dev,
                                       struct tc_mqprio_qopt_offload *mqprio)
{
        struct cxgb4_tc_port_mqprio *tc_port_mqprio;
        u32 qoffset, qcount, tot_qcount, qid, hwqid;
        struct port_info *pi = netdev2pinfo(dev);
        struct adapter *adap = netdev2adap(dev);
        struct sge_eosw_txq *eosw_txq;
        int eotid, ret;
        u16 i, j;
        u8 hwtc;

        ret = cxgb4_mqprio_alloc_hw_resources(dev);
        if (ret)
                return -ENOMEM;

        tc_port_mqprio = &adap->tc_mqprio->port_mqprio[pi->port_id];
        for (i = 0; i < mqprio->qopt.num_tc; i++) {
                qoffset = mqprio->qopt.offset[i];
                qcount = mqprio->qopt.count[i];
                for (j = 0; j < qcount; j++) {
                        eotid = cxgb4_get_free_eotid(&adap->tids);
                        if (eotid < 0) {
                                ret = -ENOMEM;
                                goto out_free_eotids;
                        }

                        qid = qoffset + j;
                        hwqid = pi->first_qset + (eotid % pi->nqsets);
                        eosw_txq = &tc_port_mqprio->eosw_txq[qid];
                        ret = cxgb4_init_eosw_txq(dev, eosw_txq,
                                                  eotid, hwqid);
                        if (ret)
                                goto out_free_eotids;

                        cxgb4_alloc_eotid(&adap->tids, eotid, eosw_txq);

                        hwtc = tc_port_mqprio->tc_hwtc_map[i];
                        ret = cxgb4_mqprio_class_bind(dev, eosw_txq, hwtc);
                        if (ret)
                                goto out_free_eotids;
                }
        }

        memcpy(&tc_port_mqprio->mqprio, mqprio,
               sizeof(struct tc_mqprio_qopt_offload));

        /* Inform the stack about the configured tc params.
         *
         * Set the correct queue map. If no queue count has been
         * specified, then send the traffic through the default NIC
         * queues instead of the ETHOFLD queues.
         */
        ret = netdev_set_num_tc(dev, mqprio->qopt.num_tc);
        if (ret)
                goto out_free_eotids;

        tot_qcount = pi->nqsets;
        for (i = 0; i < mqprio->qopt.num_tc; i++) {
                qcount = mqprio->qopt.count[i];
                if (qcount) {
                        qoffset = mqprio->qopt.offset[i] + pi->nqsets;
                } else {
                        qcount = pi->nqsets;
                        qoffset = 0;
                }

                ret = netdev_set_tc_queue(dev, i, qcount, qoffset);
                if (ret)
                        goto out_reset_tc;

                tot_qcount += mqprio->qopt.count[i];
        }

        ret = netif_set_real_num_tx_queues(dev, tot_qcount);
        if (ret)
                goto out_reset_tc;

        tc_port_mqprio->state = CXGB4_MQPRIO_STATE_ACTIVE;
        return 0;

out_reset_tc:
        netdev_reset_tc(dev);
        i = mqprio->qopt.num_tc;

out_free_eotids:
        while (i-- > 0) {
                qoffset = mqprio->qopt.offset[i];
                qcount = mqprio->qopt.count[i];
                for (j = 0; j < qcount; j++) {
                        eosw_txq = &tc_port_mqprio->eosw_txq[qoffset + j];

                        hwtc = tc_port_mqprio->tc_hwtc_map[i];
                        cxgb4_mqprio_class_unbind(dev, eosw_txq, hwtc);

                        cxgb4_free_eotid(&adap->tids, eosw_txq->eotid);
                        cxgb4_free_eosw_txq(dev, eosw_txq);
                }
        }

        cxgb4_mqprio_free_hw_resources(dev);
        return ret;
}

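/* Undo cxgb4_mqprio_enable_offload(): reset the stack's tc mapping,
 * unbind and free every EOTID and software Tx queue, and release the
 * hardware queues and scheduling classes.
 */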
static void cxgb4_mqprio_disable_offload(struct net_device *dev)
{
        struct cxgb4_tc_port_mqprio *tc_port_mqprio;
        struct port_info *pi = netdev2pinfo(dev);
        struct adapter *adap = netdev2adap(dev);
        struct sge_eosw_txq *eosw_txq;
        u32 qoffset, qcount;
        u16 i, j;
        u8 hwtc;

        tc_port_mqprio = &adap->tc_mqprio->port_mqprio[pi->port_id];
        if (tc_port_mqprio->state != CXGB4_MQPRIO_STATE_ACTIVE)
                return;

        netdev_reset_tc(dev);
        netif_set_real_num_tx_queues(dev, pi->nqsets);

        for (i = 0; i < tc_port_mqprio->mqprio.qopt.num_tc; i++) {
                qoffset = tc_port_mqprio->mqprio.qopt.offset[i];
                qcount = tc_port_mqprio->mqprio.qopt.count[i];
                for (j = 0; j < qcount; j++) {
                        eosw_txq = &tc_port_mqprio->eosw_txq[qoffset + j];

                        hwtc = tc_port_mqprio->tc_hwtc_map[i];
                        cxgb4_mqprio_class_unbind(dev, eosw_txq, hwtc);

                        cxgb4_free_eotid(&adap->tids, eosw_txq->eotid);
                        cxgb4_free_eosw_txq(dev, eosw_txq);
                }
        }

        cxgb4_mqprio_free_hw_resources(dev);

        /* Free up the traffic classes */
        cxgb4_mqprio_free_tc(dev);

        memset(&tc_port_mqprio->mqprio, 0,
               sizeof(struct tc_mqprio_qopt_offload));

        tc_port_mqprio->state = CXGB4_MQPRIO_STATE_DISABLED;
}

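/* Entry point for TC-MQPRIO offload requests. The interface is
 * temporarily brought down if it's running, since the currently
 * allocated EOTIDs can't be freed while traffic may be in flight.
 * A request with num_tc == 0 simply clears the existing configuration.
 */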
int cxgb4_setup_tc_mqprio(struct net_device *dev,
                          struct tc_mqprio_qopt_offload *mqprio)
{
        struct adapter *adap = netdev2adap(dev);
        bool needs_bring_up = false;
        int ret;

        ret = cxgb4_mqprio_validate(dev, mqprio);
        if (ret)
                return ret;

        mutex_lock(&adap->tc_mqprio->mqprio_mutex);

        /* To configure tc params, the currently allocated EOTIDs must
         * be freed up. However, they can't be freed up if there's
         * traffic running on the interface. So, ensure the interface
         * is down before configuring tc params.
         */
        if (netif_running(dev)) {
                cxgb_close(dev);
                needs_bring_up = true;
        }

        cxgb4_mqprio_disable_offload(dev);

        /* If this is a request to clear the configuration, just
         * return, since the resources have already been freed by now.
         */
        if (!mqprio->qopt.num_tc)
                goto out;

        /* Allocate free available traffic classes and configure
         * their rate parameters.
         */
        ret = cxgb4_mqprio_alloc_tc(dev, mqprio);
        if (ret)
                goto out;

        ret = cxgb4_mqprio_enable_offload(dev, mqprio);
        if (ret) {
                cxgb4_mqprio_free_tc(dev);
                goto out;
        }

out:
        if (needs_bring_up)
                cxgb_open(dev);

        mutex_unlock(&adap->tc_mqprio->mqprio_mutex);
        return ret;
}

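/* Disable any active mqprio offload on every port of the adapter. */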
void cxgb4_mqprio_stop_offload(struct adapter *adap)
{
        struct cxgb4_tc_port_mqprio *tc_port_mqprio;
        struct net_device *dev;
        u8 i;

        if (!adap->tc_mqprio || !adap->tc_mqprio->port_mqprio)
                return;

        mutex_lock(&adap->tc_mqprio->mqprio_mutex);
        for_each_port(adap, i) {
                dev = adap->port[i];
                if (!dev)
                        continue;

                tc_port_mqprio = &adap->tc_mqprio->port_mqprio[i];
                if (!tc_port_mqprio->mqprio.qopt.num_tc)
                        continue;

                cxgb4_mqprio_disable_offload(dev);
        }
        mutex_unlock(&adap->tc_mqprio->mqprio_mutex);
}

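/* Allocate the adapter-wide TC-MQPRIO state: per-port bookkeeping and,
 * for each port, an array of software Tx queues sized by the number of
 * available EOTIDs.
 */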
int cxgb4_init_tc_mqprio(struct adapter *adap)
{
        struct cxgb4_tc_port_mqprio *tc_port_mqprio, *port_mqprio;
        struct cxgb4_tc_mqprio *tc_mqprio;
        struct sge_eosw_txq *eosw_txq;
        int ret = 0;
        u8 i;

        tc_mqprio = kzalloc(sizeof(*tc_mqprio), GFP_KERNEL);
        if (!tc_mqprio)
                return -ENOMEM;

        tc_port_mqprio = kcalloc(adap->params.nports, sizeof(*tc_port_mqprio),
                                 GFP_KERNEL);
        if (!tc_port_mqprio) {
                ret = -ENOMEM;
                goto out_free_mqprio;
        }

        mutex_init(&tc_mqprio->mqprio_mutex);

        tc_mqprio->port_mqprio = tc_port_mqprio;
        for (i = 0; i < adap->params.nports; i++) {
                port_mqprio = &tc_mqprio->port_mqprio[i];
                eosw_txq = kcalloc(adap->tids.neotids, sizeof(*eosw_txq),
                                   GFP_KERNEL);
                if (!eosw_txq) {
                        ret = -ENOMEM;
                        goto out_free_ports;
                }
                port_mqprio->eosw_txq = eosw_txq;
        }

        adap->tc_mqprio = tc_mqprio;
        refcount_set(&adap->tc_mqprio->refcnt, 0);
        return 0;

out_free_ports:
        for (i = 0; i < adap->params.nports; i++) {
                port_mqprio = &tc_mqprio->port_mqprio[i];
                kfree(port_mqprio->eosw_txq);
        }
        kfree(tc_port_mqprio);

out_free_mqprio:
        kfree(tc_mqprio);
        return ret;
}

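/* Disable any remaining offloads and free all TC-MQPRIO state. */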
void cxgb4_cleanup_tc_mqprio(struct adapter *adap)
{
        struct cxgb4_tc_port_mqprio *port_mqprio;
        u8 i;

        if (adap->tc_mqprio) {
                mutex_lock(&adap->tc_mqprio->mqprio_mutex);
                if (adap->tc_mqprio->port_mqprio) {
                        for (i = 0; i < adap->params.nports; i++) {
                                struct net_device *dev = adap->port[i];

                                if (dev)
                                        cxgb4_mqprio_disable_offload(dev);
                                port_mqprio = &adap->tc_mqprio->port_mqprio[i];
                                kfree(port_mqprio->eosw_txq);
                        }
                        kfree(adap->tc_mqprio->port_mqprio);
                }
                mutex_unlock(&adap->tc_mqprio->mqprio_mutex);
                kfree(adap->tc_mqprio);
        }
}