linux/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_mqprio.c
// SPDX-License-Identifier: GPL-2.0-only
/* Copyright (C) 2019 Chelsio Communications.  All rights reserved. */

#include "cxgb4.h"
#include "cxgb4_tc_mqprio.h"
#include "sched.h"

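/* Validate the requested mqprio offload: only full TC hardware offload
 * in channel mode with a bandwidth rate shaper is supported, the number
 * of traffic classes must fit the hardware scheduler classes, queue
 * ranges must not overlap across classes and must fit within the
 * available EOTIDs, and the summed min/max rates must not exceed the
 * link speed.
 */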
static int cxgb4_mqprio_validate(struct net_device *dev,
                                 struct tc_mqprio_qopt_offload *mqprio)
{
        u64 min_rate = 0, max_rate = 0, max_link_rate;
        struct port_info *pi = netdev2pinfo(dev);
        struct adapter *adap = netdev2adap(dev);
        u32 speed, qcount = 0, qoffset = 0;
        u32 start_a, start_b, end_a, end_b;
        int ret;
        u8 i, j;

        if (!mqprio->qopt.num_tc)
                return 0;

        if (mqprio->qopt.hw != TC_MQPRIO_HW_OFFLOAD_TCS) {
                netdev_err(dev, "Only full TC hardware offload is supported\n");
                return -EINVAL;
        } else if (mqprio->mode != TC_MQPRIO_MODE_CHANNEL) {
                netdev_err(dev, "Only channel mode offload is supported\n");
                return -EINVAL;
        } else if (mqprio->shaper != TC_MQPRIO_SHAPER_BW_RATE) {
                netdev_err(dev, "Only bandwidth rate shaper supported\n");
                return -EINVAL;
        } else if (mqprio->qopt.num_tc > adap->params.nsched_cls) {
                netdev_err(dev,
                           "Only %u traffic classes supported by hardware\n",
                           adap->params.nsched_cls);
                return -ERANGE;
        }

        ret = t4_get_link_params(pi, NULL, &speed, NULL);
        if (ret) {
                netdev_err(dev, "Failed to get link speed, ret: %d\n", ret);
                return -EINVAL;
        }

        /* Convert from Mbps to bps */
        max_link_rate = (u64)speed * 1000 * 1000;

        for (i = 0; i < mqprio->qopt.num_tc; i++) {
                qoffset = max_t(u16, mqprio->qopt.offset[i], qoffset);
                qcount += mqprio->qopt.count[i];

                start_a = mqprio->qopt.offset[i];
                end_a = start_a + mqprio->qopt.count[i] - 1;
                for (j = i + 1; j < mqprio->qopt.num_tc; j++) {
                        start_b = mqprio->qopt.offset[j];
                        end_b = start_b + mqprio->qopt.count[j] - 1;

                        /* If queue count is 0, then the traffic
                         * belonging to this class will not use
                         * ETHOFLD queues. So, no need to validate
                         * further.
                         */
                        if (!mqprio->qopt.count[i])
                                break;

                        if (!mqprio->qopt.count[j])
                                continue;

                        if (max_t(u32, start_a, start_b) <=
                            min_t(u32, end_a, end_b)) {
                                netdev_err(dev,
                                           "Queues can't overlap across tc\n");
                                return -EINVAL;
                        }
                }

                /* Convert from bytes per second to bits per second */
                min_rate += (mqprio->min_rate[i] * 8);
                max_rate += (mqprio->max_rate[i] * 8);
        }

        if (qoffset >= adap->tids.neotids || qcount > adap->tids.neotids)
                return -ENOMEM;

        if (min_rate > max_link_rate || max_rate > max_link_rate) {
                netdev_err(dev,
                           "Total Min/Max (%llu/%llu) Rate > supported (%llu)\n",
                           min_rate, max_rate, max_link_rate);
                return -EINVAL;
        }

        return 0;
}

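/* Initialize the software state for one ETHOFLD Tx queue: allocate its
 * descriptor ring and set up the lock, TID mapping, work request
 * credits and the queue-restart tasklet.
 */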
static int cxgb4_init_eosw_txq(struct net_device *dev,
                               struct sge_eosw_txq *eosw_txq,
                               u32 eotid, u32 hwqid)
{
        struct adapter *adap = netdev2adap(dev);
        struct tx_sw_desc *ring;

        memset(eosw_txq, 0, sizeof(*eosw_txq));

        ring = kcalloc(CXGB4_EOSW_TXQ_DEFAULT_DESC_NUM,
                       sizeof(*ring), GFP_KERNEL);
        if (!ring)
                return -ENOMEM;

        eosw_txq->desc = ring;
        eosw_txq->ndesc = CXGB4_EOSW_TXQ_DEFAULT_DESC_NUM;
        spin_lock_init(&eosw_txq->lock);
        eosw_txq->state = CXGB4_EO_STATE_CLOSED;
        eosw_txq->eotid = eotid;
        eosw_txq->hwtid = adap->tids.eotid_base + eosw_txq->eotid;
        eosw_txq->cred = adap->params.ofldq_wr_cred;
        eosw_txq->hwqid = hwqid;
        eosw_txq->netdev = dev;
        tasklet_setup(&eosw_txq->qresume_tsk, cxgb4_ethofld_restart);
        return 0;
}

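/* Free any outstanding descriptors and reset the queue's software
 * state back to CXGB4_EO_STATE_CLOSED.
 */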
static void cxgb4_clean_eosw_txq(struct net_device *dev,
                                 struct sge_eosw_txq *eosw_txq)
{
        struct adapter *adap = netdev2adap(dev);

        cxgb4_eosw_txq_free_desc(adap, eosw_txq, eosw_txq->ndesc);
        eosw_txq->pidx = 0;
        eosw_txq->last_pidx = 0;
        eosw_txq->cidx = 0;
        eosw_txq->last_cidx = 0;
        eosw_txq->flowc_idx = 0;
        eosw_txq->inuse = 0;
        eosw_txq->cred = adap->params.ofldq_wr_cred;
        eosw_txq->ncompl = 0;
        eosw_txq->last_compl = 0;
        eosw_txq->state = CXGB4_EO_STATE_CLOSED;
}

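/* Clean the queue, free its descriptor ring and kill the queue-restart
 * tasklet.
 */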
static void cxgb4_free_eosw_txq(struct net_device *dev,
                                struct sge_eosw_txq *eosw_txq)
{
        spin_lock_bh(&eosw_txq->lock);
        cxgb4_clean_eosw_txq(dev, eosw_txq);
        kfree(eosw_txq->desc);
        spin_unlock_bh(&eosw_txq->lock);
        tasklet_kill(&eosw_txq->qresume_tsk);
}

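/* Allocate the ETHOFLD hardware queues for this port: one Rxq (to
 * receive Tx completions) and one Txq per queue set, along with MSI-X
 * vectors and IRQ affinity when MSI-X is in use. The queue arrays
 * themselves are shared across ports and refcounted.
 */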
static int cxgb4_mqprio_alloc_hw_resources(struct net_device *dev)
{
        struct port_info *pi = netdev2pinfo(dev);
        struct adapter *adap = netdev2adap(dev);
        struct sge_ofld_rxq *eorxq;
        struct sge_eohw_txq *eotxq;
        int ret, msix = 0;
        u32 i;

        /* Allocate ETHOFLD hardware queue structures if not done already */
        if (!refcount_read(&adap->tc_mqprio->refcnt)) {
                adap->sge.eohw_rxq = kcalloc(adap->sge.eoqsets,
                                             sizeof(struct sge_ofld_rxq),
                                             GFP_KERNEL);
                if (!adap->sge.eohw_rxq)
                        return -ENOMEM;

                adap->sge.eohw_txq = kcalloc(adap->sge.eoqsets,
                                             sizeof(struct sge_eohw_txq),
                                             GFP_KERNEL);
                if (!adap->sge.eohw_txq) {
                        kfree(adap->sge.eohw_rxq);
                        return -ENOMEM;
                }

                refcount_set(&adap->tc_mqprio->refcnt, 1);
        } else {
                refcount_inc(&adap->tc_mqprio->refcnt);
        }

        if (!(adap->flags & CXGB4_USING_MSIX))
                msix = -((int)adap->sge.intrq.abs_id + 1);

        for (i = 0; i < pi->nqsets; i++) {
                eorxq = &adap->sge.eohw_rxq[pi->first_qset + i];
                eotxq = &adap->sge.eohw_txq[pi->first_qset + i];

                /* Allocate Rxqs for receiving ETHOFLD Tx completions */
                if (msix >= 0) {
                        msix = cxgb4_get_msix_idx_from_bmap(adap);
                        if (msix < 0) {
                                ret = msix;
                                goto out_free_queues;
                        }

                        eorxq->msix = &adap->msix_info[msix];
                        snprintf(eorxq->msix->desc,
                                 sizeof(eorxq->msix->desc),
                                 "%s-eorxq%d", dev->name, i);
                }

                init_rspq(adap, &eorxq->rspq,
                          CXGB4_EOHW_RXQ_DEFAULT_INTR_USEC,
                          CXGB4_EOHW_RXQ_DEFAULT_PKT_CNT,
                          CXGB4_EOHW_RXQ_DEFAULT_DESC_NUM,
                          CXGB4_EOHW_RXQ_DEFAULT_DESC_SIZE);

                eorxq->fl.size = CXGB4_EOHW_FLQ_DEFAULT_DESC_NUM;

                ret = t4_sge_alloc_rxq(adap, &eorxq->rspq, false,
                                       dev, msix, &eorxq->fl,
                                       cxgb4_ethofld_rx_handler,
                                       NULL, 0);
                if (ret)
                        goto out_free_queues;

                /* Allocate ETHOFLD hardware Txqs */
                eotxq->q.size = CXGB4_EOHW_TXQ_DEFAULT_DESC_NUM;
                ret = t4_sge_alloc_ethofld_txq(adap, eotxq, dev,
                                               eorxq->rspq.cntxt_id);
                if (ret)
                        goto out_free_queues;

                /* Allocate IRQs, set IRQ affinity, and start Rx */
                if (adap->flags & CXGB4_USING_MSIX) {
                        ret = request_irq(eorxq->msix->vec, t4_sge_intr_msix, 0,
                                          eorxq->msix->desc, &eorxq->rspq);
                        if (ret)
                                goto out_free_msix;

                        cxgb4_set_msix_aff(adap, eorxq->msix->vec,
                                           &eorxq->msix->aff_mask, i);
                }

                if (adap->flags & CXGB4_FULL_INIT_DONE)
                        cxgb4_enable_rx(adap, &eorxq->rspq);
        }

        return 0;

out_free_msix:
        while (i-- > 0) {
                eorxq = &adap->sge.eohw_rxq[pi->first_qset + i];

                if (adap->flags & CXGB4_FULL_INIT_DONE)
                        cxgb4_quiesce_rx(&eorxq->rspq);

                if (adap->flags & CXGB4_USING_MSIX) {
                        cxgb4_clear_msix_aff(eorxq->msix->vec,
                                             eorxq->msix->aff_mask);
                        free_irq(eorxq->msix->vec, &eorxq->rspq);
                }
        }

out_free_queues:
        for (i = 0; i < pi->nqsets; i++) {
                eorxq = &adap->sge.eohw_rxq[pi->first_qset + i];
                eotxq = &adap->sge.eohw_txq[pi->first_qset + i];

                if (eorxq->rspq.desc)
                        free_rspq_fl(adap, &eorxq->rspq, &eorxq->fl);
                if (eorxq->msix)
                        cxgb4_free_msix_idx_in_bmap(adap, eorxq->msix->idx);
                t4_sge_free_ethofld_txq(adap, eotxq);
        }

        if (refcount_dec_and_test(&adap->tc_mqprio->refcnt)) {
                kfree(adap->sge.eohw_txq);
                kfree(adap->sge.eohw_rxq);
        }
        return ret;
}

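/* Release this port's ETHOFLD hardware queues, IRQs and MSI-X vectors,
 * and free the shared queue arrays once the last user is gone.
 */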
static void cxgb4_mqprio_free_hw_resources(struct net_device *dev)
{
        struct port_info *pi = netdev2pinfo(dev);
        struct adapter *adap = netdev2adap(dev);
        struct sge_ofld_rxq *eorxq;
        struct sge_eohw_txq *eotxq;
        u32 i;

        /* Return if no ETHOFLD structures have been allocated yet */
        if (!refcount_read(&adap->tc_mqprio->refcnt))
                return;

        /* Return if no hardware queues have been allocated */
        if (!adap->sge.eohw_rxq[pi->first_qset].rspq.desc)
                return;

        for (i = 0; i < pi->nqsets; i++) {
                eorxq = &adap->sge.eohw_rxq[pi->first_qset + i];
                eotxq = &adap->sge.eohw_txq[pi->first_qset + i];

                /* The device removal path will already disable NAPI
                 * before unregistering the netdevice. So, only disable
                 * NAPI if we're not in the device removal path.
                 */
                if (!(adap->flags & CXGB4_SHUTTING_DOWN))
                        cxgb4_quiesce_rx(&eorxq->rspq);

                if (adap->flags & CXGB4_USING_MSIX) {
                        cxgb4_clear_msix_aff(eorxq->msix->vec,
                                             eorxq->msix->aff_mask);
                        free_irq(eorxq->msix->vec, &eorxq->rspq);
                        cxgb4_free_msix_idx_in_bmap(adap, eorxq->msix->idx);
                }

                free_rspq_fl(adap, &eorxq->rspq, &eorxq->fl);
                t4_sge_free_ethofld_txq(adap, eotxq);
        }

        /* Free up ETHOFLD structures if there are no users */
        if (refcount_dec_and_test(&adap->tc_mqprio->refcnt)) {
                kfree(adap->sge.eohw_txq);
                kfree(adap->sge.eohw_rxq);
        }
}

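/* Allocate one hardware scheduler class per traffic class and program
 * its min/max rates (converted to Kbps). Frees any classes already
 * allocated if a later allocation fails.
 */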
static int cxgb4_mqprio_alloc_tc(struct net_device *dev,
                                 struct tc_mqprio_qopt_offload *mqprio)
{
        struct ch_sched_params p = {
                .type = SCHED_CLASS_TYPE_PACKET,
                .u.params.level = SCHED_CLASS_LEVEL_CL_RL,
                .u.params.mode = SCHED_CLASS_MODE_FLOW,
                .u.params.rateunit = SCHED_CLASS_RATEUNIT_BITS,
                .u.params.ratemode = SCHED_CLASS_RATEMODE_ABS,
                .u.params.class = SCHED_CLS_NONE,
                .u.params.weight = 0,
                .u.params.pktsize = dev->mtu,
        };
        struct cxgb4_tc_port_mqprio *tc_port_mqprio;
        struct port_info *pi = netdev2pinfo(dev);
        struct adapter *adap = netdev2adap(dev);
        struct sched_class *e;
        int ret;
        u8 i;

        tc_port_mqprio = &adap->tc_mqprio->port_mqprio[pi->port_id];
        p.u.params.channel = pi->tx_chan;
        for (i = 0; i < mqprio->qopt.num_tc; i++) {
                /* Convert from bytes per second to Kbps */
                p.u.params.minrate = div_u64(mqprio->min_rate[i] * 8, 1000);
                p.u.params.maxrate = div_u64(mqprio->max_rate[i] * 8, 1000);

                /* Request larger burst buffer for smaller MTU, so
                 * that hardware can work on more data per burst
                 * cycle.
                 */
                if (dev->mtu <= ETH_DATA_LEN)
                        p.u.params.burstsize = 8 * dev->mtu;

                e = cxgb4_sched_class_alloc(dev, &p);
                if (!e) {
                        ret = -ENOMEM;
                        goto out_err;
                }

                tc_port_mqprio->tc_hwtc_map[i] = e->idx;
        }

        return 0;

out_err:
        while (i--)
                cxgb4_sched_class_free(dev, tc_port_mqprio->tc_hwtc_map[i]);

        return ret;
}

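/* Free the hardware scheduler classes backing this port's current
 * mqprio configuration.
 */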
static void cxgb4_mqprio_free_tc(struct net_device *dev)
{
        struct cxgb4_tc_port_mqprio *tc_port_mqprio;
        struct port_info *pi = netdev2pinfo(dev);
        struct adapter *adap = netdev2adap(dev);
        u8 i;

        tc_port_mqprio = &adap->tc_mqprio->port_mqprio[pi->port_id];
        for (i = 0; i < tc_port_mqprio->mqprio.qopt.num_tc; i++)
                cxgb4_sched_class_free(dev, tc_port_mqprio->tc_hwtc_map[i]);
}

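/* Bind an ETHOFLD Tx queue's TID to a hardware traffic class via a
 * FLOWC work request and wait for the firmware reply.
 */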
static int cxgb4_mqprio_class_bind(struct net_device *dev,
                                   struct sge_eosw_txq *eosw_txq,
                                   u8 tc)
{
        struct ch_sched_flowc fe;
        int ret;

        init_completion(&eosw_txq->completion);

        fe.tid = eosw_txq->eotid;
        fe.class = tc;

        ret = cxgb4_sched_class_bind(dev, &fe, SCHED_FLOWC);
        if (ret)
                return ret;

        ret = wait_for_completion_timeout(&eosw_txq->completion,
                                          CXGB4_FLOWC_WAIT_TIMEOUT);
        if (!ret)
                return -ETIMEDOUT;

        return 0;
}

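/* Unbind an ETHOFLD Tx queue's TID from its hardware traffic class.
 * When the adapter is shutting down, interrupts are disabled and no
 * completion will arrive, so don't wait for one.
 */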
static void cxgb4_mqprio_class_unbind(struct net_device *dev,
                                      struct sge_eosw_txq *eosw_txq,
                                      u8 tc)
{
        struct adapter *adap = netdev2adap(dev);
        struct ch_sched_flowc fe;

        /* If we're shutting down, interrupts are disabled and no completions
         * come back. So, skip waiting for completions in this scenario.
         */
        if (!(adap->flags & CXGB4_SHUTTING_DOWN))
                init_completion(&eosw_txq->completion);

        fe.tid = eosw_txq->eotid;
        fe.class = tc;
        cxgb4_sched_class_unbind(dev, &fe, SCHED_FLOWC);

        if (!(adap->flags & CXGB4_SHUTTING_DOWN))
                wait_for_completion_timeout(&eosw_txq->completion,
                                            CXGB4_FLOWC_WAIT_TIMEOUT);
}

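/* Enable the offload: allocate hardware resources, reserve and bind
 * one EOTID per requested queue, then report the resulting
 * tc-to-queue mapping to the network stack.
 */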
static int cxgb4_mqprio_enable_offload(struct net_device *dev,
                                       struct tc_mqprio_qopt_offload *mqprio)
{
        struct cxgb4_tc_port_mqprio *tc_port_mqprio;
        u32 qoffset, qcount, tot_qcount, qid, hwqid;
        struct port_info *pi = netdev2pinfo(dev);
        struct adapter *adap = netdev2adap(dev);
        struct sge_eosw_txq *eosw_txq;
        int eotid, ret;
        u16 i, j;
        u8 hwtc;

        ret = cxgb4_mqprio_alloc_hw_resources(dev);
        if (ret)
                return -ENOMEM;

        tc_port_mqprio = &adap->tc_mqprio->port_mqprio[pi->port_id];
        for (i = 0; i < mqprio->qopt.num_tc; i++) {
                qoffset = mqprio->qopt.offset[i];
                qcount = mqprio->qopt.count[i];
                for (j = 0; j < qcount; j++) {
                        eotid = cxgb4_get_free_eotid(&adap->tids);
                        if (eotid < 0) {
                                ret = -ENOMEM;
                                goto out_free_eotids;
                        }

                        qid = qoffset + j;
                        hwqid = pi->first_qset + (eotid % pi->nqsets);
                        eosw_txq = &tc_port_mqprio->eosw_txq[qid];
                        ret = cxgb4_init_eosw_txq(dev, eosw_txq,
                                                  eotid, hwqid);
                        if (ret)
                                goto out_free_eotids;

                        cxgb4_alloc_eotid(&adap->tids, eotid, eosw_txq);

                        hwtc = tc_port_mqprio->tc_hwtc_map[i];
                        ret = cxgb4_mqprio_class_bind(dev, eosw_txq, hwtc);
                        if (ret)
                                goto out_free_eotids;
                }
        }

        memcpy(&tc_port_mqprio->mqprio, mqprio,
               sizeof(struct tc_mqprio_qopt_offload));

        /* Inform the stack about the configured tc params.
         *
         * Set the correct queue map. If no queue count has been
         * specified, then send the traffic through default NIC
         * queues instead of ETHOFLD queues.
         */
        ret = netdev_set_num_tc(dev, mqprio->qopt.num_tc);
        if (ret)
                goto out_free_eotids;

        tot_qcount = pi->nqsets;
        for (i = 0; i < mqprio->qopt.num_tc; i++) {
                qcount = mqprio->qopt.count[i];
                if (qcount) {
                        qoffset = mqprio->qopt.offset[i] + pi->nqsets;
                } else {
                        qcount = pi->nqsets;
                        qoffset = 0;
                }

                ret = netdev_set_tc_queue(dev, i, qcount, qoffset);
                if (ret)
                        goto out_reset_tc;

                tot_qcount += mqprio->qopt.count[i];
        }

        ret = netif_set_real_num_tx_queues(dev, tot_qcount);
        if (ret)
                goto out_reset_tc;

        tc_port_mqprio->state = CXGB4_MQPRIO_STATE_ACTIVE;
        return 0;

out_reset_tc:
        netdev_reset_tc(dev);
        i = mqprio->qopt.num_tc;

out_free_eotids:
        while (i-- > 0) {
                qoffset = mqprio->qopt.offset[i];
                qcount = mqprio->qopt.count[i];
                for (j = 0; j < qcount; j++) {
                        eosw_txq = &tc_port_mqprio->eosw_txq[qoffset + j];

                        hwtc = tc_port_mqprio->tc_hwtc_map[i];
                        cxgb4_mqprio_class_unbind(dev, eosw_txq, hwtc);

                        cxgb4_free_eotid(&adap->tids, eosw_txq->eotid);
                        cxgb4_free_eosw_txq(dev, eosw_txq);
                }
        }

        cxgb4_mqprio_free_hw_resources(dev);
        return ret;
}

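/* Disable an active offload: reset the stack's tc mapping, unbind and
 * free all EOTIDs and their software Tx queues, then release the
 * hardware resources and scheduler classes.
 */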
static void cxgb4_mqprio_disable_offload(struct net_device *dev)
{
        struct cxgb4_tc_port_mqprio *tc_port_mqprio;
        struct port_info *pi = netdev2pinfo(dev);
        struct adapter *adap = netdev2adap(dev);
        struct sge_eosw_txq *eosw_txq;
        u32 qoffset, qcount;
        u16 i, j;
        u8 hwtc;

        tc_port_mqprio = &adap->tc_mqprio->port_mqprio[pi->port_id];
        if (tc_port_mqprio->state != CXGB4_MQPRIO_STATE_ACTIVE)
                return;

        netdev_reset_tc(dev);
        netif_set_real_num_tx_queues(dev, pi->nqsets);

        for (i = 0; i < tc_port_mqprio->mqprio.qopt.num_tc; i++) {
                qoffset = tc_port_mqprio->mqprio.qopt.offset[i];
                qcount = tc_port_mqprio->mqprio.qopt.count[i];
                for (j = 0; j < qcount; j++) {
                        eosw_txq = &tc_port_mqprio->eosw_txq[qoffset + j];

                        hwtc = tc_port_mqprio->tc_hwtc_map[i];
                        cxgb4_mqprio_class_unbind(dev, eosw_txq, hwtc);

                        cxgb4_free_eotid(&adap->tids, eosw_txq->eotid);
                        cxgb4_free_eosw_txq(dev, eosw_txq);
                }
        }

        cxgb4_mqprio_free_hw_resources(dev);

        /* Free up the traffic classes */
        cxgb4_mqprio_free_tc(dev);

        memset(&tc_port_mqprio->mqprio, 0,
               sizeof(struct tc_mqprio_qopt_offload));

        tc_port_mqprio->state = CXGB4_MQPRIO_STATE_DISABLED;
}

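/* Entry point for TC-MQPRIO offload configuration. Any existing
 * configuration is always torn down first; the interface is
 * temporarily quiesced because in-use EOTIDs can't be freed while
 * traffic is flowing.
 */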
int cxgb4_setup_tc_mqprio(struct net_device *dev,
                          struct tc_mqprio_qopt_offload *mqprio)
{
        struct adapter *adap = netdev2adap(dev);
        bool needs_bring_up = false;
        int ret;

        ret = cxgb4_mqprio_validate(dev, mqprio);
        if (ret)
                return ret;

        mutex_lock(&adap->tc_mqprio->mqprio_mutex);

        /* To configure tc params, the currently allocated EOTIDs must
         * be freed up. However, they can't be freed up if there's
         * traffic running on the interface. So, ensure the interface is
         * down before configuring tc params.
         */
        if (netif_running(dev)) {
                netif_tx_stop_all_queues(dev);
                netif_carrier_off(dev);
                needs_bring_up = true;
        }

        cxgb4_mqprio_disable_offload(dev);

        /* If requested to clear, then just return since resources are
         * already freed up by now.
         */
        if (!mqprio->qopt.num_tc)
                goto out;

        /* Allocate free available traffic classes and configure
         * their rate parameters.
         */
        ret = cxgb4_mqprio_alloc_tc(dev, mqprio);
        if (ret)
                goto out;

        ret = cxgb4_mqprio_enable_offload(dev, mqprio);
        if (ret) {
                cxgb4_mqprio_free_tc(dev);
                goto out;
        }

out:
        if (needs_bring_up) {
                netif_tx_start_all_queues(dev);
                netif_carrier_on(dev);
        }

        mutex_unlock(&adap->tc_mqprio->mqprio_mutex);
        return ret;
}

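/* Disable any active mqprio offload on all ports of the adapter. */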
void cxgb4_mqprio_stop_offload(struct adapter *adap)
{
        struct cxgb4_tc_port_mqprio *tc_port_mqprio;
        struct net_device *dev;
        u8 i;

        if (!adap->tc_mqprio || !adap->tc_mqprio->port_mqprio)
                return;

        mutex_lock(&adap->tc_mqprio->mqprio_mutex);
        for_each_port(adap, i) {
                dev = adap->port[i];
                if (!dev)
                        continue;

                tc_port_mqprio = &adap->tc_mqprio->port_mqprio[i];
                if (!tc_port_mqprio->mqprio.qopt.num_tc)
                        continue;

                cxgb4_mqprio_disable_offload(dev);
        }
        mutex_unlock(&adap->tc_mqprio->mqprio_mutex);
}

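/* Allocate the adapter-wide TC-MQPRIO state: one context per port,
 * each with a software Tx queue slot for every available EOTID.
 */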
int cxgb4_init_tc_mqprio(struct adapter *adap)
{
        struct cxgb4_tc_port_mqprio *tc_port_mqprio, *port_mqprio;
        struct cxgb4_tc_mqprio *tc_mqprio;
        struct sge_eosw_txq *eosw_txq;
        int ret = 0;
        u8 i;

        tc_mqprio = kzalloc(sizeof(*tc_mqprio), GFP_KERNEL);
        if (!tc_mqprio)
                return -ENOMEM;

        tc_port_mqprio = kcalloc(adap->params.nports, sizeof(*tc_port_mqprio),
                                 GFP_KERNEL);
        if (!tc_port_mqprio) {
                ret = -ENOMEM;
                goto out_free_mqprio;
        }

        mutex_init(&tc_mqprio->mqprio_mutex);

        tc_mqprio->port_mqprio = tc_port_mqprio;
        for (i = 0; i < adap->params.nports; i++) {
                port_mqprio = &tc_mqprio->port_mqprio[i];
                eosw_txq = kcalloc(adap->tids.neotids, sizeof(*eosw_txq),
                                   GFP_KERNEL);
                if (!eosw_txq) {
                        ret = -ENOMEM;
                        goto out_free_ports;
                }
                port_mqprio->eosw_txq = eosw_txq;
        }

        adap->tc_mqprio = tc_mqprio;
        refcount_set(&adap->tc_mqprio->refcnt, 0);
        return 0;

out_free_ports:
        for (i = 0; i < adap->params.nports; i++) {
                port_mqprio = &tc_mqprio->port_mqprio[i];
                kfree(port_mqprio->eosw_txq);
        }
        kfree(tc_port_mqprio);

out_free_mqprio:
        kfree(tc_mqprio);
        return ret;
}

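/* Disable any remaining offloads and free all TC-MQPRIO state. */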
void cxgb4_cleanup_tc_mqprio(struct adapter *adap)
{
        struct cxgb4_tc_port_mqprio *port_mqprio;
        u8 i;

        if (adap->tc_mqprio) {
                mutex_lock(&adap->tc_mqprio->mqprio_mutex);
                if (adap->tc_mqprio->port_mqprio) {
                        for (i = 0; i < adap->params.nports; i++) {
                                struct net_device *dev = adap->port[i];

                                if (dev)
                                        cxgb4_mqprio_disable_offload(dev);
                                port_mqprio = &adap->tc_mqprio->port_mqprio[i];
                                kfree(port_mqprio->eosw_txq);
                        }
                        kfree(adap->tc_mqprio->port_mqprio);
                }
                mutex_unlock(&adap->tc_mqprio->mqprio_mutex);
                kfree(adap->tc_mqprio);
        }
}