linux/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
/*
 * Copyright (c) 2015, Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/mlx5/fs.h>
#include "en.h"
#include "eswitch.h"

struct mlx5e_rq_param {
        u32                        rqc[MLX5_ST_SZ_DW(rqc)];
        struct mlx5_wq_param       wq;
};

struct mlx5e_sq_param {
        u32                        sqc[MLX5_ST_SZ_DW(sqc)];
        struct mlx5_wq_param       wq;
        u16                        max_inline;
};

struct mlx5e_cq_param {
        u32                        cqc[MLX5_ST_SZ_DW(cqc)];
        struct mlx5_wq_param       wq;
        u16                        eq_ix;
};

struct mlx5e_channel_param {
        struct mlx5e_rq_param      rq;
        struct mlx5e_sq_param      sq;
        struct mlx5e_cq_param      rx_cq;
        struct mlx5e_cq_param      tx_cq;
};

static void mlx5e_update_carrier(struct mlx5e_priv *priv)
{
        struct mlx5_core_dev *mdev = priv->mdev;
        u8 port_state;

        port_state = mlx5_query_vport_state(mdev,
                MLX5_QUERY_VPORT_STATE_IN_OP_MOD_VNIC_VPORT, 0);

        if (port_state == VPORT_STATE_UP)
                netif_carrier_on(priv->netdev);
        else
                netif_carrier_off(priv->netdev);
}

static void mlx5e_update_carrier_work(struct work_struct *work)
{
        struct mlx5e_priv *priv = container_of(work, struct mlx5e_priv,
                                               update_carrier_work);

        mutex_lock(&priv->state_lock);
        if (test_bit(MLX5E_STATE_OPENED, &priv->state))
                mlx5e_update_carrier(priv);
        mutex_unlock(&priv->state_lock);
}

static void mlx5e_update_pport_counters(struct mlx5e_priv *priv)
{
        struct mlx5_core_dev *mdev = priv->mdev;
        struct mlx5e_pport_stats *s = &priv->stats.pport;
        u32 *in;
        u32 *out;
        int sz = MLX5_ST_SZ_BYTES(ppcnt_reg);

        in  = mlx5_vzalloc(sz);
        out = mlx5_vzalloc(sz);
        if (!in || !out)
                goto free_out;

        MLX5_SET(ppcnt_reg, in, local_port, 1);

        MLX5_SET(ppcnt_reg, in, grp, MLX5_IEEE_802_3_COUNTERS_GROUP);
        mlx5_core_access_reg(mdev, in, sz, out,
                             sz, MLX5_REG_PPCNT, 0, 0);
        memcpy(s->IEEE_802_3_counters,
               MLX5_ADDR_OF(ppcnt_reg, out, counter_set),
               sizeof(s->IEEE_802_3_counters));

        MLX5_SET(ppcnt_reg, in, grp, MLX5_RFC_2863_COUNTERS_GROUP);
        mlx5_core_access_reg(mdev, in, sz, out,
                             sz, MLX5_REG_PPCNT, 0, 0);
        memcpy(s->RFC_2863_counters,
               MLX5_ADDR_OF(ppcnt_reg, out, counter_set),
               sizeof(s->RFC_2863_counters));

        MLX5_SET(ppcnt_reg, in, grp, MLX5_RFC_2819_COUNTERS_GROUP);
        mlx5_core_access_reg(mdev, in, sz, out,
                             sz, MLX5_REG_PPCNT, 0, 0);
        memcpy(s->RFC_2819_counters,
               MLX5_ADDR_OF(ppcnt_reg, out, counter_set),
               sizeof(s->RFC_2819_counters));

free_out:
        kvfree(in);
        kvfree(out);
}

void mlx5e_update_stats(struct mlx5e_priv *priv)
{
        struct mlx5_core_dev *mdev = priv->mdev;
        struct mlx5e_vport_stats *s = &priv->stats.vport;
        struct mlx5e_rq_stats *rq_stats;
        struct mlx5e_sq_stats *sq_stats;
        u32 in[MLX5_ST_SZ_DW(query_vport_counter_in)];
        u32 *out;
        int outlen = MLX5_ST_SZ_BYTES(query_vport_counter_out);
        u64 tx_offload_none;
        int i, j;

        out = mlx5_vzalloc(outlen);
        if (!out)
                return;

        /* Collect the SW counters first, then the HW counters, for consistency */
        s->rx_packets           = 0;
        s->rx_bytes             = 0;
        s->tx_packets           = 0;
        s->tx_bytes             = 0;
        s->tso_packets          = 0;
        s->tso_bytes            = 0;
        s->tx_queue_stopped     = 0;
        s->tx_queue_wake        = 0;
        s->tx_queue_dropped     = 0;
        tx_offload_none         = 0;
        s->lro_packets          = 0;
        s->lro_bytes            = 0;
        s->rx_csum_none         = 0;
        s->rx_csum_sw           = 0;
        s->rx_wqe_err           = 0;
        for (i = 0; i < priv->params.num_channels; i++) {
                rq_stats = &priv->channel[i]->rq.stats;

                s->rx_packets   += rq_stats->packets;
                s->rx_bytes     += rq_stats->bytes;
                s->lro_packets  += rq_stats->lro_packets;
                s->lro_bytes    += rq_stats->lro_bytes;
                s->rx_csum_none += rq_stats->csum_none;
                s->rx_csum_sw   += rq_stats->csum_sw;
                s->rx_wqe_err   += rq_stats->wqe_err;

                for (j = 0; j < priv->params.num_tc; j++) {
                        sq_stats = &priv->channel[i]->sq[j].stats;

                        s->tx_packets           += sq_stats->packets;
                        s->tx_bytes             += sq_stats->bytes;
                        s->tso_packets          += sq_stats->tso_packets;
                        s->tso_bytes            += sq_stats->tso_bytes;
                        s->tx_queue_stopped     += sq_stats->stopped;
                        s->tx_queue_wake        += sq_stats->wake;
                        s->tx_queue_dropped     += sq_stats->dropped;
                        tx_offload_none         += sq_stats->csum_offload_none;
                }
        }

        /* HW counters */
        memset(in, 0, sizeof(in));

        MLX5_SET(query_vport_counter_in, in, opcode,
                 MLX5_CMD_OP_QUERY_VPORT_COUNTER);
        MLX5_SET(query_vport_counter_in, in, op_mod, 0);
        MLX5_SET(query_vport_counter_in, in, other_vport, 0);

        memset(out, 0, outlen);

        if (mlx5_cmd_exec(mdev, in, sizeof(in), out, outlen))
                goto free_out;

#define MLX5_GET_CTR(p, x) \
        MLX5_GET64(query_vport_counter_out, p, x)

        s->rx_error_packets     =
                MLX5_GET_CTR(out, received_errors.packets);
        s->rx_error_bytes       =
                MLX5_GET_CTR(out, received_errors.octets);
        s->tx_error_packets     =
                MLX5_GET_CTR(out, transmit_errors.packets);
        s->tx_error_bytes       =
                MLX5_GET_CTR(out, transmit_errors.octets);

        s->rx_unicast_packets   =
                MLX5_GET_CTR(out, received_eth_unicast.packets);
        s->rx_unicast_bytes     =
                MLX5_GET_CTR(out, received_eth_unicast.octets);
        s->tx_unicast_packets   =
                MLX5_GET_CTR(out, transmitted_eth_unicast.packets);
        s->tx_unicast_bytes     =
                MLX5_GET_CTR(out, transmitted_eth_unicast.octets);

        s->rx_multicast_packets =
                MLX5_GET_CTR(out, received_eth_multicast.packets);
        s->rx_multicast_bytes   =
                MLX5_GET_CTR(out, received_eth_multicast.octets);
        s->tx_multicast_packets =
                MLX5_GET_CTR(out, transmitted_eth_multicast.packets);
        s->tx_multicast_bytes   =
                MLX5_GET_CTR(out, transmitted_eth_multicast.octets);

        s->rx_broadcast_packets =
                MLX5_GET_CTR(out, received_eth_broadcast.packets);
        s->rx_broadcast_bytes   =
                MLX5_GET_CTR(out, received_eth_broadcast.octets);
        s->tx_broadcast_packets =
                MLX5_GET_CTR(out, transmitted_eth_broadcast.packets);
        s->tx_broadcast_bytes   =
                MLX5_GET_CTR(out, transmitted_eth_broadcast.octets);

        /* Update calculated offload counters */
        s->tx_csum_offload = s->tx_packets - tx_offload_none;
        s->rx_csum_good    = s->rx_packets - s->rx_csum_none -
                               s->rx_csum_sw;

        mlx5e_update_pport_counters(priv);
free_out:
        kvfree(out);
}

static void mlx5e_update_stats_work(struct work_struct *work)
{
        struct delayed_work *dwork = to_delayed_work(work);
        struct mlx5e_priv *priv = container_of(dwork, struct mlx5e_priv,
                                               update_stats_work);
        mutex_lock(&priv->state_lock);
        if (test_bit(MLX5E_STATE_OPENED, &priv->state)) {
                mlx5e_update_stats(priv);
                schedule_delayed_work(dwork,
                                      msecs_to_jiffies(
                                              MLX5E_UPDATE_STATS_INTERVAL));
        }
        mutex_unlock(&priv->state_lock);
}

static void __mlx5e_async_event(struct mlx5e_priv *priv,
                                enum mlx5_dev_event event)
{
        switch (event) {
        case MLX5_DEV_EVENT_PORT_UP:
        case MLX5_DEV_EVENT_PORT_DOWN:
                schedule_work(&priv->update_carrier_work);
                break;

        default:
                break;
        }
}

static void mlx5e_async_event(struct mlx5_core_dev *mdev, void *vpriv,
                              enum mlx5_dev_event event, unsigned long param)
{
        struct mlx5e_priv *priv = vpriv;

        spin_lock(&priv->async_events_spinlock);
        if (test_bit(MLX5E_STATE_ASYNC_EVENTS_ENABLE, &priv->state))
                __mlx5e_async_event(priv, event);
        spin_unlock(&priv->async_events_spinlock);
}

static void mlx5e_enable_async_events(struct mlx5e_priv *priv)
{
        set_bit(MLX5E_STATE_ASYNC_EVENTS_ENABLE, &priv->state);
}

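/* Note: enabling is lockless, but disabling takes the event lock so that a
 * mlx5e_async_event() running concurrently has either seen the bit cleared
 * or finished dispatching before this returns.
 */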
static void mlx5e_disable_async_events(struct mlx5e_priv *priv)
{
        spin_lock_irq(&priv->async_events_spinlock);
        clear_bit(MLX5E_STATE_ASYNC_EVENTS_ENABLE, &priv->state);
        spin_unlock_irq(&priv->async_events_spinlock);
}

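/* The HW port MTU also covers the Ethernet header, one VLAN tag and the
 * frame checksum, hence the fixed offset between it and the MTU seen by
 * the stack.
 */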
#define MLX5E_HW2SW_MTU(hwmtu) (hwmtu - (ETH_HLEN + VLAN_HLEN + ETH_FCS_LEN))
#define MLX5E_SW2HW_MTU(swmtu) (swmtu + (ETH_HLEN + VLAN_HLEN + ETH_FCS_LEN))

static int mlx5e_create_rq(struct mlx5e_channel *c,
                           struct mlx5e_rq_param *param,
                           struct mlx5e_rq *rq)
{
        struct mlx5e_priv *priv = c->priv;
        struct mlx5_core_dev *mdev = priv->mdev;
        void *rqc = param->rqc;
        void *rqc_wq = MLX5_ADDR_OF(rqc, rqc, wq);
        int wq_sz;
        int err;
        int i;

        param->wq.db_numa_node = cpu_to_node(c->cpu);

        err = mlx5_wq_ll_create(mdev, &param->wq, rqc_wq, &rq->wq,
                                &rq->wq_ctrl);
        if (err)
                return err;

        rq->wq.db = &rq->wq.db[MLX5_RCV_DBR];

        wq_sz = mlx5_wq_ll_get_size(&rq->wq);
        rq->skb = kzalloc_node(wq_sz * sizeof(*rq->skb), GFP_KERNEL,
                               cpu_to_node(c->cpu));
        if (!rq->skb) {
                err = -ENOMEM;
                goto err_rq_wq_destroy;
        }

        rq->wqe_sz = (priv->params.lro_en) ? priv->params.lro_wqe_sz :
                                             MLX5E_SW2HW_MTU(priv->netdev->mtu);
        rq->wqe_sz = SKB_DATA_ALIGN(rq->wqe_sz + MLX5E_NET_IP_ALIGN);

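        /* byte_count excludes the headroom reserved in wqe_sz above; the
         * start-padding flag has the HW write packets at a small offset,
         * which is presumably what keeps the IP header aligned
         * (MLX5E_NET_IP_ALIGN).
         */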
        for (i = 0; i < wq_sz; i++) {
                struct mlx5e_rx_wqe *wqe = mlx5_wq_ll_get_wqe(&rq->wq, i);
                u32 byte_count = rq->wqe_sz - MLX5E_NET_IP_ALIGN;

                wqe->data.lkey       = c->mkey_be;
                wqe->data.byte_count =
                        cpu_to_be32(byte_count | MLX5_HW_START_PADDING);
        }

        rq->pdev    = c->pdev;
        rq->netdev  = c->netdev;
        rq->tstamp  = &priv->tstamp;
        rq->channel = c;
        rq->ix      = c->ix;
        rq->priv    = c->priv;

        return 0;

err_rq_wq_destroy:
        mlx5_wq_destroy(&rq->wq_ctrl);

        return err;
}

static void mlx5e_destroy_rq(struct mlx5e_rq *rq)
{
        kfree(rq->skb);
        mlx5_wq_destroy(&rq->wq_ctrl);
}

static int mlx5e_enable_rq(struct mlx5e_rq *rq, struct mlx5e_rq_param *param)
{
        struct mlx5e_priv *priv = rq->priv;
        struct mlx5_core_dev *mdev = priv->mdev;

        void *in;
        void *rqc;
        void *wq;
        int inlen;
        int err;

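        /* The create command carries the page address list (PAS) inline,
         * one u64 per WQ page, so size the input buffer accordingly.
         */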
        inlen = MLX5_ST_SZ_BYTES(create_rq_in) +
                sizeof(u64) * rq->wq_ctrl.buf.npages;
        in = mlx5_vzalloc(inlen);
        if (!in)
                return -ENOMEM;

        rqc = MLX5_ADDR_OF(create_rq_in, in, ctx);
        wq  = MLX5_ADDR_OF(rqc, rqc, wq);

        memcpy(rqc, param->rqc, sizeof(param->rqc));

        MLX5_SET(rqc,  rqc, cqn,                rq->cq.mcq.cqn);
        MLX5_SET(rqc,  rqc, state,              MLX5_RQC_STATE_RST);
        MLX5_SET(rqc,  rqc, flush_in_error_en,  1);
        MLX5_SET(wq,   wq,  log_wq_pg_sz,       rq->wq_ctrl.buf.page_shift -
                                                MLX5_ADAPTER_PAGE_SHIFT);
        MLX5_SET64(wq, wq,  dbr_addr,           rq->wq_ctrl.db.dma);

        mlx5_fill_page_array(&rq->wq_ctrl.buf,
                             (__be64 *)MLX5_ADDR_OF(wq, wq, pas));

        err = mlx5_core_create_rq(mdev, in, inlen, &rq->rqn);

        kvfree(in);

        return err;
}

static int mlx5e_modify_rq(struct mlx5e_rq *rq, int curr_state, int next_state)
{
        struct mlx5e_channel *c = rq->channel;
        struct mlx5e_priv *priv = c->priv;
        struct mlx5_core_dev *mdev = priv->mdev;

        void *in;
        void *rqc;
        int inlen;
        int err;

        inlen = MLX5_ST_SZ_BYTES(modify_rq_in);
        in = mlx5_vzalloc(inlen);
        if (!in)
                return -ENOMEM;

        rqc = MLX5_ADDR_OF(modify_rq_in, in, ctx);

        MLX5_SET(modify_rq_in, in, rq_state, curr_state);
        MLX5_SET(rqc, rqc, state, next_state);

        err = mlx5_core_modify_rq(mdev, rq->rqn, in, inlen);

        kvfree(in);

        return err;
}

static void mlx5e_disable_rq(struct mlx5e_rq *rq)
{
        mlx5_core_destroy_rq(rq->priv->mdev, rq->rqn);
}

static int mlx5e_wait_for_min_rx_wqes(struct mlx5e_rq *rq)
{
        unsigned long exp_time = jiffies + msecs_to_jiffies(20000);
        struct mlx5e_channel *c = rq->channel;
        struct mlx5e_priv *priv = c->priv;
        struct mlx5_wq_ll *wq = &rq->wq;

        while (time_before(jiffies, exp_time)) {
                if (wq->cur_sz >= priv->params.min_rx_wqes)
                        return 0;

                msleep(20);
        }

        return -ETIMEDOUT;
}

static int mlx5e_open_rq(struct mlx5e_channel *c,
                         struct mlx5e_rq_param *param,
                         struct mlx5e_rq *rq)
{
        int err;

        err = mlx5e_create_rq(c, param, rq);
        if (err)
                return err;

        err = mlx5e_enable_rq(rq, param);
        if (err)
                goto err_destroy_rq;

        err = mlx5e_modify_rq(rq, MLX5_RQC_STATE_RST, MLX5_RQC_STATE_RDY);
        if (err)
                goto err_disable_rq;

        set_bit(MLX5E_RQ_STATE_POST_WQES_ENABLE, &rq->state);
        mlx5e_send_nop(&c->sq[0], true); /* trigger mlx5e_post_rx_wqes() */

        return 0;

err_disable_rq:
        mlx5e_disable_rq(rq);
err_destroy_rq:
        mlx5e_destroy_rq(rq);

        return err;
}

static void mlx5e_close_rq(struct mlx5e_rq *rq)
{
        clear_bit(MLX5E_RQ_STATE_POST_WQES_ENABLE, &rq->state);
        napi_synchronize(&rq->channel->napi); /* prevent mlx5e_post_rx_wqes */

        mlx5e_modify_rq(rq, MLX5_RQC_STATE_RDY, MLX5_RQC_STATE_ERR);
        while (!mlx5_wq_ll_is_empty(&rq->wq))
                msleep(20);

        /* avoid destroying rq before mlx5e_poll_rx_cq() is done with it */
        napi_synchronize(&rq->channel->napi);

        mlx5e_disable_rq(rq);
        mlx5e_destroy_rq(rq);
}

static void mlx5e_free_sq_db(struct mlx5e_sq *sq)
{
        kfree(sq->wqe_info);
        kfree(sq->dma_fifo);
        kfree(sq->skb);
}

static int mlx5e_alloc_sq_db(struct mlx5e_sq *sq, int numa)
{
        int wq_sz = mlx5_wq_cyc_get_size(&sq->wq);
        int df_sz = wq_sz * MLX5_SEND_WQEBB_NUM_DS;

        sq->skb = kzalloc_node(wq_sz * sizeof(*sq->skb), GFP_KERNEL, numa);
        sq->dma_fifo = kzalloc_node(df_sz * sizeof(*sq->dma_fifo), GFP_KERNEL,
                                    numa);
        sq->wqe_info = kzalloc_node(wq_sz * sizeof(*sq->wqe_info), GFP_KERNEL,
                                    numa);

        if (!sq->skb || !sq->dma_fifo || !sq->wqe_info) {
                mlx5e_free_sq_db(sq);
                return -ENOMEM;
        }

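        /* df_sz is a power of two (wq_sz is, and so is the per-WQEBB DS
         * count), so the DMA fifo can be indexed with a simple mask.
         */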
        sq->dma_fifo_mask = df_sz - 1;

        return 0;
}

static int mlx5e_create_sq(struct mlx5e_channel *c,
                           int tc,
                           struct mlx5e_sq_param *param,
                           struct mlx5e_sq *sq)
{
        struct mlx5e_priv *priv = c->priv;
        struct mlx5_core_dev *mdev = priv->mdev;

        void *sqc = param->sqc;
        void *sqc_wq = MLX5_ADDR_OF(sqc, sqc, wq);
        int txq_ix;
        int err;

        err = mlx5_alloc_map_uar(mdev, &sq->uar);
        if (err)
                return err;

        param->wq.db_numa_node = cpu_to_node(c->cpu);

        err = mlx5_wq_cyc_create(mdev, &param->wq, sqc_wq, &sq->wq,
                                 &sq->wq_ctrl);
        if (err)
                goto err_unmap_free_uar;

        sq->wq.db       = &sq->wq.db[MLX5_SND_DBR];
        sq->uar_map     = sq->uar.map;
        sq->uar_bf_map  = sq->uar.bf_map;
        sq->bf_buf_size = (1 << MLX5_CAP_GEN(mdev, log_bf_reg_size)) / 2;
        sq->max_inline  = param->max_inline;

        err = mlx5e_alloc_sq_db(sq, cpu_to_node(c->cpu));
        if (err)
                goto err_sq_wq_destroy;

        txq_ix = c->ix + tc * priv->params.num_channels;
        sq->txq = netdev_get_tx_queue(priv->netdev, txq_ix);

        sq->pdev      = c->pdev;
        sq->tstamp    = &priv->tstamp;
        sq->mkey_be   = c->mkey_be;
        sq->channel   = c;
        sq->tc        = tc;
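        /* Stop posting new WQEs once fewer than MLX5_SEND_WQE_MAX_WQEBBS
         * slots remain, so a maximum-size WQE never wraps around the ring.
         */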
        sq->edge      = (sq->wq.sz_m1 + 1) - MLX5_SEND_WQE_MAX_WQEBBS;
        sq->bf_budget = MLX5E_SQ_BF_BUDGET;
        priv->txq_to_sq_map[txq_ix] = sq;

        return 0;

err_sq_wq_destroy:
        mlx5_wq_destroy(&sq->wq_ctrl);

err_unmap_free_uar:
        mlx5_unmap_free_uar(mdev, &sq->uar);

        return err;
}

static void mlx5e_destroy_sq(struct mlx5e_sq *sq)
{
        struct mlx5e_channel *c = sq->channel;
        struct mlx5e_priv *priv = c->priv;

        mlx5e_free_sq_db(sq);
        mlx5_wq_destroy(&sq->wq_ctrl);
        mlx5_unmap_free_uar(priv->mdev, &sq->uar);
}

static int mlx5e_enable_sq(struct mlx5e_sq *sq, struct mlx5e_sq_param *param)
{
        struct mlx5e_channel *c = sq->channel;
        struct mlx5e_priv *priv = c->priv;
        struct mlx5_core_dev *mdev = priv->mdev;

        void *in;
        void *sqc;
        void *wq;
        int inlen;
        int err;

        inlen = MLX5_ST_SZ_BYTES(create_sq_in) +
                sizeof(u64) * sq->wq_ctrl.buf.npages;
        in = mlx5_vzalloc(inlen);
        if (!in)
                return -ENOMEM;

        sqc = MLX5_ADDR_OF(create_sq_in, in, ctx);
        wq = MLX5_ADDR_OF(sqc, sqc, wq);

        memcpy(sqc, param->sqc, sizeof(param->sqc));

        MLX5_SET(sqc,  sqc, tis_num_0,          priv->tisn[sq->tc]);
        MLX5_SET(sqc,  sqc, cqn,                c->sq[sq->tc].cq.mcq.cqn);
        MLX5_SET(sqc,  sqc, state,              MLX5_SQC_STATE_RST);
        MLX5_SET(sqc,  sqc, tis_lst_sz,         1);
        MLX5_SET(sqc,  sqc, flush_in_error_en,  1);

        MLX5_SET(wq,   wq, wq_type,       MLX5_WQ_TYPE_CYCLIC);
        MLX5_SET(wq,   wq, uar_page,      sq->uar.index);
        MLX5_SET(wq,   wq, log_wq_pg_sz,  sq->wq_ctrl.buf.page_shift -
                                          MLX5_ADAPTER_PAGE_SHIFT);
        MLX5_SET64(wq, wq, dbr_addr,      sq->wq_ctrl.db.dma);

        mlx5_fill_page_array(&sq->wq_ctrl.buf,
                             (__be64 *)MLX5_ADDR_OF(wq, wq, pas));

        err = mlx5_core_create_sq(mdev, in, inlen, &sq->sqn);

        kvfree(in);

        return err;
}

static int mlx5e_modify_sq(struct mlx5e_sq *sq, int curr_state, int next_state)
{
        struct mlx5e_channel *c = sq->channel;
        struct mlx5e_priv *priv = c->priv;
        struct mlx5_core_dev *mdev = priv->mdev;

        void *in;
        void *sqc;
        int inlen;
        int err;

        inlen = MLX5_ST_SZ_BYTES(modify_sq_in);
        in = mlx5_vzalloc(inlen);
        if (!in)
                return -ENOMEM;

        sqc = MLX5_ADDR_OF(modify_sq_in, in, ctx);

        MLX5_SET(modify_sq_in, in, sq_state, curr_state);
        MLX5_SET(sqc, sqc, state, next_state);

        err = mlx5_core_modify_sq(mdev, sq->sqn, in, inlen);

        kvfree(in);

        return err;
}

static void mlx5e_disable_sq(struct mlx5e_sq *sq)
{
        struct mlx5e_channel *c = sq->channel;
        struct mlx5e_priv *priv = c->priv;
        struct mlx5_core_dev *mdev = priv->mdev;

        mlx5_core_destroy_sq(mdev, sq->sqn);
}

static int mlx5e_open_sq(struct mlx5e_channel *c,
                         int tc,
                         struct mlx5e_sq_param *param,
                         struct mlx5e_sq *sq)
{
        int err;

        err = mlx5e_create_sq(c, tc, param, sq);
        if (err)
                return err;

        err = mlx5e_enable_sq(sq, param);
        if (err)
                goto err_destroy_sq;

        err = mlx5e_modify_sq(sq, MLX5_SQC_STATE_RST, MLX5_SQC_STATE_RDY);
        if (err)
                goto err_disable_sq;

        set_bit(MLX5E_SQ_STATE_WAKE_TXQ_ENABLE, &sq->state);
        netdev_tx_reset_queue(sq->txq);
        netif_tx_start_queue(sq->txq);

        return 0;

err_disable_sq:
        mlx5e_disable_sq(sq);
err_destroy_sq:
        mlx5e_destroy_sq(sq);

        return err;
}

static inline void netif_tx_disable_queue(struct netdev_queue *txq)
{
        __netif_tx_lock_bh(txq);
        netif_tx_stop_queue(txq);
        __netif_tx_unlock_bh(txq);
}

static void mlx5e_close_sq(struct mlx5e_sq *sq)
{
        clear_bit(MLX5E_SQ_STATE_WAKE_TXQ_ENABLE, &sq->state);
        napi_synchronize(&sq->channel->napi); /* prevent netif_tx_wake_queue */
        netif_tx_disable_queue(sq->txq);

        /* ensure hw is notified of all pending wqes */
        if (mlx5e_sq_has_room_for(sq, 1))
                mlx5e_send_nop(sq, true);

        mlx5e_modify_sq(sq, MLX5_SQC_STATE_RDY, MLX5_SQC_STATE_ERR);
        while (sq->cc != sq->pc) /* wait till sq is empty */
                msleep(20);

        /* avoid destroying sq before mlx5e_poll_tx_cq() is done with it */
        napi_synchronize(&sq->channel->napi);

        mlx5e_disable_sq(sq);
        mlx5e_destroy_sq(sq);
}

static int mlx5e_create_cq(struct mlx5e_channel *c,
                           struct mlx5e_cq_param *param,
                           struct mlx5e_cq *cq)
{
        struct mlx5e_priv *priv = c->priv;
        struct mlx5_core_dev *mdev = priv->mdev;
        struct mlx5_core_cq *mcq = &cq->mcq;
        int eqn_not_used;
        unsigned int irqn;
        int err;
        u32 i;

        param->wq.buf_numa_node = cpu_to_node(c->cpu);
        param->wq.db_numa_node  = cpu_to_node(c->cpu);
        param->eq_ix   = c->ix;

        err = mlx5_cqwq_create(mdev, &param->wq, param->cqc, &cq->wq,
                               &cq->wq_ctrl);
        if (err)
                return err;

        mlx5_vector2eqn(mdev, param->eq_ix, &eqn_not_used, &irqn);

        cq->napi        = &c->napi;

        mcq->cqe_sz     = 64;
        mcq->set_ci_db  = cq->wq_ctrl.db.db;
        mcq->arm_db     = cq->wq_ctrl.db.db + 1;
        *mcq->set_ci_db = 0;
        *mcq->arm_db    = 0;
        mcq->vector     = param->eq_ix;
        mcq->comp       = mlx5e_completion_event;
        mcq->event      = mlx5e_cq_error_event;
        mcq->irqn       = irqn;
        mcq->uar        = &priv->cq_uar;

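        /* Set an op_own value the driver will treat as invalid, so stale
         * entries are never mistaken for completions HW has written.
         */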
        for (i = 0; i < mlx5_cqwq_get_size(&cq->wq); i++) {
                struct mlx5_cqe64 *cqe = mlx5_cqwq_get_wqe(&cq->wq, i);

                cqe->op_own = 0xf1;
        }

        cq->channel = c;
        cq->priv = priv;

        return 0;
}

static void mlx5e_destroy_cq(struct mlx5e_cq *cq)
{
        mlx5_wq_destroy(&cq->wq_ctrl);
}

static int mlx5e_enable_cq(struct mlx5e_cq *cq, struct mlx5e_cq_param *param)
{
        struct mlx5e_priv *priv = cq->priv;
        struct mlx5_core_dev *mdev = priv->mdev;
        struct mlx5_core_cq *mcq = &cq->mcq;

        void *in;
        void *cqc;
        int inlen;
        unsigned int irqn_not_used;
        int eqn;
        int err;

        inlen = MLX5_ST_SZ_BYTES(create_cq_in) +
                sizeof(u64) * cq->wq_ctrl.buf.npages;
        in = mlx5_vzalloc(inlen);
        if (!in)
                return -ENOMEM;

        cqc = MLX5_ADDR_OF(create_cq_in, in, cq_context);

        memcpy(cqc, param->cqc, sizeof(param->cqc));

        mlx5_fill_page_array(&cq->wq_ctrl.buf,
                             (__be64 *)MLX5_ADDR_OF(create_cq_in, in, pas));

        mlx5_vector2eqn(mdev, param->eq_ix, &eqn, &irqn_not_used);

        MLX5_SET(cqc,   cqc, c_eqn,         eqn);
        MLX5_SET(cqc,   cqc, uar_page,      mcq->uar->index);
        MLX5_SET(cqc,   cqc, log_page_size, cq->wq_ctrl.buf.page_shift -
                                            MLX5_ADAPTER_PAGE_SHIFT);
        MLX5_SET64(cqc, cqc, dbr_addr,      cq->wq_ctrl.db.dma);

        err = mlx5_core_create_cq(mdev, mcq, in, inlen);

        kvfree(in);

        if (err)
                return err;

        mlx5e_cq_arm(cq);

        return 0;
}

static void mlx5e_disable_cq(struct mlx5e_cq *cq)
{
        struct mlx5e_priv *priv = cq->priv;
        struct mlx5_core_dev *mdev = priv->mdev;

        mlx5_core_destroy_cq(mdev, &cq->mcq);
}

static int mlx5e_open_cq(struct mlx5e_channel *c,
                         struct mlx5e_cq_param *param,
                         struct mlx5e_cq *cq,
                         u16 moderation_usecs,
                         u16 moderation_frames)
{
        int err;
        struct mlx5e_priv *priv = c->priv;
        struct mlx5_core_dev *mdev = priv->mdev;

        err = mlx5e_create_cq(c, param, cq);
        if (err)
                return err;

        err = mlx5e_enable_cq(cq, param);
        if (err)
                goto err_destroy_cq;

        err = mlx5_core_modify_cq_moderation(mdev, &cq->mcq,
                                             moderation_usecs,
                                             moderation_frames);
        if (err)
                goto err_destroy_cq;

        return 0;

err_destroy_cq:
        mlx5e_destroy_cq(cq);

        return err;
}

static void mlx5e_close_cq(struct mlx5e_cq *cq)
{
        mlx5e_disable_cq(cq);
        mlx5e_destroy_cq(cq);
}

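/* First CPU in the channel's IRQ affinity mask; used to make allocations
 * NUMA-local and to set up XPS.
 */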
static int mlx5e_get_cpu(struct mlx5e_priv *priv, int ix)
{
        return cpumask_first(priv->mdev->priv.irq_info[ix].mask);
}

static int mlx5e_open_tx_cqs(struct mlx5e_channel *c,
                             struct mlx5e_channel_param *cparam)
{
        struct mlx5e_priv *priv = c->priv;
        int err;
        int tc;

        for (tc = 0; tc < c->num_tc; tc++) {
                err = mlx5e_open_cq(c, &cparam->tx_cq, &c->sq[tc].cq,
                                    priv->params.tx_cq_moderation_usec,
                                    priv->params.tx_cq_moderation_pkts);
                if (err)
                        goto err_close_tx_cqs;
        }

        return 0;

err_close_tx_cqs:
        for (tc--; tc >= 0; tc--)
                mlx5e_close_cq(&c->sq[tc].cq);

        return err;
}

static void mlx5e_close_tx_cqs(struct mlx5e_channel *c)
{
        int tc;

        for (tc = 0; tc < c->num_tc; tc++)
                mlx5e_close_cq(&c->sq[tc].cq);
}

static int mlx5e_open_sqs(struct mlx5e_channel *c,
                          struct mlx5e_channel_param *cparam)
{
        int err;
        int tc;

        for (tc = 0; tc < c->num_tc; tc++) {
                err = mlx5e_open_sq(c, tc, &cparam->sq, &c->sq[tc]);
                if (err)
                        goto err_close_sqs;
        }

        return 0;

err_close_sqs:
        for (tc--; tc >= 0; tc--)
                mlx5e_close_sq(&c->sq[tc]);

        return err;
}

static void mlx5e_close_sqs(struct mlx5e_channel *c)
{
        int tc;

        for (tc = 0; tc < c->num_tc; tc++)
                mlx5e_close_sq(&c->sq[tc]);
}

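/* TX queues are laid out TC-major (txq = ix + tc * num_channels): all
 * channels of TC 0 first, then all channels of TC 1, and so on.
 */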
static void mlx5e_build_channeltc_to_txq_map(struct mlx5e_priv *priv, int ix)
{
        int i;

        for (i = 0; i < MLX5E_MAX_NUM_TC; i++)
                priv->channeltc_to_txq_map[ix][i] =
                        ix + i * priv->params.num_channels;
}

static int mlx5e_open_channel(struct mlx5e_priv *priv, int ix,
                              struct mlx5e_channel_param *cparam,
                              struct mlx5e_channel **cp)
{
        struct net_device *netdev = priv->netdev;
        int cpu = mlx5e_get_cpu(priv, ix);
        struct mlx5e_channel *c;
        int err;

        c = kzalloc_node(sizeof(*c), GFP_KERNEL, cpu_to_node(cpu));
        if (!c)
                return -ENOMEM;

        c->priv     = priv;
        c->ix       = ix;
        c->cpu      = cpu;
        c->pdev     = &priv->mdev->pdev->dev;
        c->netdev   = priv->netdev;
        c->mkey_be  = cpu_to_be32(priv->mr.key);
        c->num_tc   = priv->params.num_tc;

        mlx5e_build_channeltc_to_txq_map(priv, ix);

        netif_napi_add(netdev, &c->napi, mlx5e_napi_poll, 64);

        err = mlx5e_open_tx_cqs(c, cparam);
        if (err)
                goto err_napi_del;

        err = mlx5e_open_cq(c, &cparam->rx_cq, &c->rq.cq,
                            priv->params.rx_cq_moderation_usec,
                            priv->params.rx_cq_moderation_pkts);
        if (err)
                goto err_close_tx_cqs;

        napi_enable(&c->napi);

        err = mlx5e_open_sqs(c, cparam);
        if (err)
                goto err_disable_napi;

        err = mlx5e_open_rq(c, &cparam->rq, &c->rq);
        if (err)
                goto err_close_sqs;

        netif_set_xps_queue(netdev, get_cpu_mask(c->cpu), ix);
        *cp = c;

        return 0;

err_close_sqs:
        mlx5e_close_sqs(c);

err_disable_napi:
        napi_disable(&c->napi);
        mlx5e_close_cq(&c->rq.cq);

err_close_tx_cqs:
        mlx5e_close_tx_cqs(c);

err_napi_del:
        netif_napi_del(&c->napi);
        napi_hash_del(&c->napi);
        kfree(c);

        return err;
}

static void mlx5e_close_channel(struct mlx5e_channel *c)
{
        mlx5e_close_rq(&c->rq);
        mlx5e_close_sqs(c);
        napi_disable(&c->napi);
        mlx5e_close_cq(&c->rq.cq);
        mlx5e_close_tx_cqs(c);
        netif_napi_del(&c->napi);

        napi_hash_del(&c->napi);
        synchronize_rcu();

        kfree(c);
}

static void mlx5e_build_rq_param(struct mlx5e_priv *priv,
                                 struct mlx5e_rq_param *param)
{
        void *rqc = param->rqc;
        void *wq = MLX5_ADDR_OF(rqc, rqc, wq);

        MLX5_SET(wq, wq, wq_type,          MLX5_WQ_TYPE_LINKED_LIST);
        MLX5_SET(wq, wq, end_padding_mode, MLX5_WQ_END_PAD_MODE_ALIGN);
        MLX5_SET(wq, wq, log_wq_stride,    ilog2(sizeof(struct mlx5e_rx_wqe)));
        MLX5_SET(wq, wq, log_wq_sz,        priv->params.log_rq_size);
        MLX5_SET(wq, wq, pd,               priv->pdn);

        param->wq.buf_numa_node = dev_to_node(&priv->mdev->pdev->dev);
        param->wq.linear = 1;
}

static void mlx5e_build_sq_param(struct mlx5e_priv *priv,
                                 struct mlx5e_sq_param *param)
{
        void *sqc = param->sqc;
        void *wq = MLX5_ADDR_OF(sqc, sqc, wq);

        MLX5_SET(wq, wq, log_wq_sz,     priv->params.log_sq_size);
        MLX5_SET(wq, wq, log_wq_stride, ilog2(MLX5_SEND_WQE_BB));
        MLX5_SET(wq, wq, pd,            priv->pdn);

        param->wq.buf_numa_node = dev_to_node(&priv->mdev->pdev->dev);
        param->max_inline = priv->params.tx_max_inline;
}

static void mlx5e_build_common_cq_param(struct mlx5e_priv *priv,
                                        struct mlx5e_cq_param *param)
{
        void *cqc = param->cqc;

        MLX5_SET(cqc, cqc, uar_page, priv->cq_uar.index);
}

static void mlx5e_build_rx_cq_param(struct mlx5e_priv *priv,
                                    struct mlx5e_cq_param *param)
{
        void *cqc = param->cqc;

        MLX5_SET(cqc, cqc, log_cq_size,  priv->params.log_rq_size);

        mlx5e_build_common_cq_param(priv, param);
}

static void mlx5e_build_tx_cq_param(struct mlx5e_priv *priv,
                                    struct mlx5e_cq_param *param)
{
        void *cqc = param->cqc;

        MLX5_SET(cqc, cqc, log_cq_size,  priv->params.log_sq_size);

        mlx5e_build_common_cq_param(priv, param);
}

static void mlx5e_build_channel_param(struct mlx5e_priv *priv,
                                      struct mlx5e_channel_param *cparam)
{
        memset(cparam, 0, sizeof(*cparam));

        mlx5e_build_rq_param(priv, &cparam->rq);
        mlx5e_build_sq_param(priv, &cparam->sq);
        mlx5e_build_rx_cq_param(priv, &cparam->rx_cq);
        mlx5e_build_tx_cq_param(priv, &cparam->tx_cq);
}

static int mlx5e_open_channels(struct mlx5e_priv *priv)
{
        struct mlx5e_channel_param cparam;
        int nch = priv->params.num_channels;
        int err = -ENOMEM;
        int i;
        int j;

        priv->channel = kcalloc(nch, sizeof(struct mlx5e_channel *),
                                GFP_KERNEL);

        priv->txq_to_sq_map = kcalloc(nch * priv->params.num_tc,
                                      sizeof(struct mlx5e_sq *), GFP_KERNEL);

        if (!priv->channel || !priv->txq_to_sq_map)
                goto err_free_txq_to_sq_map;

        mlx5e_build_channel_param(priv, &cparam);
        for (i = 0; i < nch; i++) {
                err = mlx5e_open_channel(priv, i, &cparam, &priv->channel[i]);
                if (err)
                        goto err_close_channels;
        }

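        /* Let each RQ fill with a minimal number of WQEs before declaring
         * the device open, so early ingress traffic is not dropped.
         */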
        for (j = 0; j < nch; j++) {
                err = mlx5e_wait_for_min_rx_wqes(&priv->channel[j]->rq);
                if (err)
                        goto err_close_channels;
        }

        return 0;

err_close_channels:
        for (i--; i >= 0; i--)
                mlx5e_close_channel(priv->channel[i]);

err_free_txq_to_sq_map:
        kfree(priv->txq_to_sq_map);
        kfree(priv->channel);

        return err;
}

static void mlx5e_close_channels(struct mlx5e_priv *priv)
{
        int i;

        for (i = 0; i < priv->params.num_channels; i++)
                mlx5e_close_channel(priv->channel[i]);

        kfree(priv->txq_to_sq_map);
        kfree(priv->channel);
}

static int mlx5e_rx_hash_fn(int hfunc)
{
        return (hfunc == ETH_RSS_HASH_TOP) ?
               MLX5_RX_HASH_FN_TOEPLITZ :
               MLX5_RX_HASH_FN_INVERTED_XOR8;
}

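/* Reverse the low 'size' bits of 'a'. Used when the XOR8 hash function is
 * selected, apparently to spread flows more evenly across the indirection
 * table.
 */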
static int mlx5e_bits_invert(unsigned long a, int size)
{
        int inv = 0;
        int i;

        for (i = 0; i < size; i++)
                inv |= (test_bit(size - i - 1, &a) ? 1 : 0) << i;

        return inv;
}

static void mlx5e_fill_indir_rqt_rqns(struct mlx5e_priv *priv, void *rqtc)
{
        int i;

        for (i = 0; i < MLX5E_INDIR_RQT_SIZE; i++) {
                int ix = i;

                if (priv->params.rss_hfunc == ETH_RSS_HASH_XOR)
                        ix = mlx5e_bits_invert(i, MLX5E_LOG_INDIR_RQT_SIZE);

                ix = priv->params.indirection_rqt[ix];
                MLX5_SET(rqtc, rqtc, rq_num[i],
                         test_bit(MLX5E_STATE_OPENED, &priv->state) ?
                         priv->channel[ix]->rq.rqn :
                         priv->drop_rq.rqn);
        }
}

static void mlx5e_fill_rqt_rqns(struct mlx5e_priv *priv, void *rqtc,
                                enum mlx5e_rqt_ix rqt_ix)
{
        switch (rqt_ix) {
        case MLX5E_INDIRECTION_RQT:
                mlx5e_fill_indir_rqt_rqns(priv, rqtc);

                break;

        default: /* MLX5E_SINGLE_RQ_RQT */
                MLX5_SET(rqtc, rqtc, rq_num[0],
                         test_bit(MLX5E_STATE_OPENED, &priv->state) ?
                         priv->channel[0]->rq.rqn :
                         priv->drop_rq.rqn);

                break;
        }
}

static int mlx5e_create_rqt(struct mlx5e_priv *priv, enum mlx5e_rqt_ix rqt_ix)
{
        struct mlx5_core_dev *mdev = priv->mdev;
        u32 *in;
        void *rqtc;
        int inlen;
        int sz;
        int err;

        sz = (rqt_ix == MLX5E_SINGLE_RQ_RQT) ? 1 : MLX5E_INDIR_RQT_SIZE;

        inlen = MLX5_ST_SZ_BYTES(create_rqt_in) + sizeof(u32) * sz;
        in = mlx5_vzalloc(inlen);
        if (!in)
                return -ENOMEM;

        rqtc = MLX5_ADDR_OF(create_rqt_in, in, rqt_context);

        MLX5_SET(rqtc, rqtc, rqt_actual_size, sz);
        MLX5_SET(rqtc, rqtc, rqt_max_size, sz);

        mlx5e_fill_rqt_rqns(priv, rqtc, rqt_ix);

        err = mlx5_core_create_rqt(mdev, in, inlen, &priv->rqtn[rqt_ix]);

        kvfree(in);

        return err;
}

int mlx5e_redirect_rqt(struct mlx5e_priv *priv, enum mlx5e_rqt_ix rqt_ix)
{
        struct mlx5_core_dev *mdev = priv->mdev;
        u32 *in;
        void *rqtc;
        int inlen;
        int sz;
        int err;

        sz = (rqt_ix == MLX5E_SINGLE_RQ_RQT) ? 1 : MLX5E_INDIR_RQT_SIZE;

        inlen = MLX5_ST_SZ_BYTES(modify_rqt_in) + sizeof(u32) * sz;
        in = mlx5_vzalloc(inlen);
        if (!in)
                return -ENOMEM;

        rqtc = MLX5_ADDR_OF(modify_rqt_in, in, ctx);

        MLX5_SET(rqtc, rqtc, rqt_actual_size, sz);

        mlx5e_fill_rqt_rqns(priv, rqtc, rqt_ix);

        MLX5_SET(modify_rqt_in, in, bitmask.rqn_list, 1);

        err = mlx5_core_modify_rqt(mdev, priv->rqtn[rqt_ix], in, inlen);

        kvfree(in);

        return err;
}

static void mlx5e_destroy_rqt(struct mlx5e_priv *priv, enum mlx5e_rqt_ix rqt_ix)
{
        mlx5_core_destroy_rqt(priv->mdev, priv->rqtn[rqt_ix]);
}

static void mlx5e_redirect_rqts(struct mlx5e_priv *priv)
{
        mlx5e_redirect_rqt(priv, MLX5E_INDIRECTION_RQT);
        mlx5e_redirect_rqt(priv, MLX5E_SINGLE_RQ_RQT);
}

static void mlx5e_build_tir_ctx_lro(void *tirc, struct mlx5e_priv *priv)
{
        if (!priv->params.lro_en)
                return;

#define ROUGH_MAX_L2_L3_HDR_SZ 256

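        /* lro_max_ip_payload_size is in 256-byte units (hence the shift by
         * 8); the rough header budget keeps the aggregated payload within
         * the LRO WQE buffer.
         */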
        MLX5_SET(tirc, tirc, lro_enable_mask,
                 MLX5_TIRC_LRO_ENABLE_MASK_IPV4_LRO |
                 MLX5_TIRC_LRO_ENABLE_MASK_IPV6_LRO);
        MLX5_SET(tirc, tirc, lro_max_ip_payload_size,
                 (priv->params.lro_wqe_sz -
                  ROUGH_MAX_L2_L3_HDR_SZ) >> 8);
        MLX5_SET(tirc, tirc, lro_timeout_period_usecs,
                 MLX5_CAP_ETH(priv->mdev,
                              lro_timer_supported_periods[2]));
}

void mlx5e_build_tir_ctx_hash(void *tirc, struct mlx5e_priv *priv)
{
        MLX5_SET(tirc, tirc, rx_hash_fn,
                 mlx5e_rx_hash_fn(priv->params.rss_hfunc));
        if (priv->params.rss_hfunc == ETH_RSS_HASH_TOP) {
                void *rss_key = MLX5_ADDR_OF(tirc, tirc,
                                             rx_hash_toeplitz_key);
                size_t len = MLX5_FLD_SZ_BYTES(tirc,
                                               rx_hash_toeplitz_key);

                MLX5_SET(tirc, tirc, rx_hash_symmetric, 1);
                memcpy(rss_key, priv->params.toeplitz_hash_key, len);
        }
}

static int mlx5e_modify_tirs_lro(struct mlx5e_priv *priv)
{
        struct mlx5_core_dev *mdev = priv->mdev;

        void *in;
        void *tirc;
        int inlen;
        int err;
        int tt;

        inlen = MLX5_ST_SZ_BYTES(modify_tir_in);
        in = mlx5_vzalloc(inlen);
        if (!in)
                return -ENOMEM;

        MLX5_SET(modify_tir_in, in, bitmask.lro, 1);
        tirc = MLX5_ADDR_OF(modify_tir_in, in, ctx);

        mlx5e_build_tir_ctx_lro(tirc, priv);

        for (tt = 0; tt < MLX5E_NUM_TT; tt++) {
                err = mlx5_core_modify_tir(mdev, priv->tirn[tt], in, inlen);
                if (err)
                        break;
        }

        kvfree(in);

        return err;
}

static int mlx5e_refresh_tir_self_loopback_enable(struct mlx5_core_dev *mdev,
                                                  u32 tirn)
{
        void *in;
        int inlen;
        int err;

        inlen = MLX5_ST_SZ_BYTES(modify_tir_in);
        in = mlx5_vzalloc(inlen);
        if (!in)
                return -ENOMEM;

        MLX5_SET(modify_tir_in, in, bitmask.self_lb_en, 1);

        err = mlx5_core_modify_tir(mdev, tirn, in, inlen);

        kvfree(in);

        return err;
}

static int mlx5e_refresh_tirs_self_loopback_enable(struct mlx5e_priv *priv)
{
        int err;
        int i;

        for (i = 0; i < MLX5E_NUM_TT; i++) {
                err = mlx5e_refresh_tir_self_loopback_enable(priv->mdev,
                                                             priv->tirn[i]);
                if (err)
                        return err;
        }

        return 0;
}

static int mlx5e_set_dev_port_mtu(struct net_device *netdev)
{
        struct mlx5e_priv *priv = netdev_priv(netdev);
        struct mlx5_core_dev *mdev = priv->mdev;
        int hw_mtu;
        int err;

        err = mlx5_set_port_mtu(mdev, MLX5E_SW2HW_MTU(netdev->mtu), 1);
        if (err)
                return err;

        mlx5_query_port_oper_mtu(mdev, &hw_mtu, 1);

        if (MLX5E_HW2SW_MTU(hw_mtu) != netdev->mtu)
                netdev_warn(netdev, "%s: Port MTU %d is different than netdev mtu %d\n",
                            __func__, MLX5E_HW2SW_MTU(hw_mtu), netdev->mtu);

        netdev->mtu = MLX5E_HW2SW_MTU(hw_mtu);
        return 0;
}

int mlx5e_open_locked(struct net_device *netdev)
{
        struct mlx5e_priv *priv = netdev_priv(netdev);
        int num_txqs;
        int err;

        set_bit(MLX5E_STATE_OPENED, &priv->state);

        num_txqs = priv->params.num_channels * priv->params.num_tc;
        netif_set_real_num_tx_queues(netdev, num_txqs);
        netif_set_real_num_rx_queues(netdev, priv->params.num_channels);

        err = mlx5e_set_dev_port_mtu(netdev);
        if (err)
                goto err_clear_state_opened_flag;

        err = mlx5e_open_channels(priv);
        if (err) {
                netdev_err(netdev, "%s: mlx5e_open_channels failed, %d\n",
                           __func__, err);
                goto err_clear_state_opened_flag;
        }

        err = mlx5e_refresh_tirs_self_loopback_enable(priv);
        if (err) {
                netdev_err(netdev, "%s: mlx5e_refresh_tirs_self_loopback_enable failed, %d\n",
                           __func__, err);
                goto err_close_channels;
        }

        mlx5e_update_carrier(priv);
        mlx5e_redirect_rqts(priv);
        mlx5e_timestamp_init(priv);

        schedule_delayed_work(&priv->update_stats_work, 0);

        return 0;

err_close_channels:
        mlx5e_close_channels(priv);
err_clear_state_opened_flag:
        clear_bit(MLX5E_STATE_OPENED, &priv->state);
        return err;
}

static int mlx5e_open(struct net_device *netdev)
{
        struct mlx5e_priv *priv = netdev_priv(netdev);
        int err;

        mutex_lock(&priv->state_lock);
        err = mlx5e_open_locked(netdev);
        mutex_unlock(&priv->state_lock);

        return err;
}

int mlx5e_close_locked(struct net_device *netdev)
{
        struct mlx5e_priv *priv = netdev_priv(netdev);

        /* May already be CLOSED if a previous configuration operation
         * (e.g. an RX/TX queue size change) that involves a close/open
         * cycle failed.
         */
        if (!test_bit(MLX5E_STATE_OPENED, &priv->state))
                return 0;

        clear_bit(MLX5E_STATE_OPENED, &priv->state);

        mlx5e_timestamp_cleanup(priv);
        mlx5e_redirect_rqts(priv);
        netif_carrier_off(priv->netdev);
        mlx5e_close_channels(priv);

        return 0;
}

static int mlx5e_close(struct net_device *netdev)
{
        struct mlx5e_priv *priv = netdev_priv(netdev);
        int err;

        mutex_lock(&priv->state_lock);
        err = mlx5e_close_locked(netdev);
        mutex_unlock(&priv->state_lock);

        return err;
}

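/* The drop RQ is never posted with receive buffers; while the netdev is
 * closed the RQTs point at it, so RX steering always has a valid target.
 */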
1502static int mlx5e_create_drop_rq(struct mlx5e_priv *priv,
1503                                struct mlx5e_rq *rq,
1504                                struct mlx5e_rq_param *param)
1505{
1506        struct mlx5_core_dev *mdev = priv->mdev;
1507        void *rqc = param->rqc;
1508        void *rqc_wq = MLX5_ADDR_OF(rqc, rqc, wq);
1509        int err;
1510
1511        param->wq.db_numa_node = param->wq.buf_numa_node;
1512
1513        err = mlx5_wq_ll_create(mdev, &param->wq, rqc_wq, &rq->wq,
1514                                &rq->wq_ctrl);
1515        if (err)
1516                return err;
1517
1518        rq->priv = priv;
1519
1520        return 0;
1521}
1522
1523static int mlx5e_create_drop_cq(struct mlx5e_priv *priv,
1524                                struct mlx5e_cq *cq,
1525                                struct mlx5e_cq_param *param)
1526{
1527        struct mlx5_core_dev *mdev = priv->mdev;
1528        struct mlx5_core_cq *mcq = &cq->mcq;
1529        int eqn_not_used;
1530        unsigned int irqn;
1531        int err;
1532
1533        err = mlx5_cqwq_create(mdev, &param->wq, param->cqc, &cq->wq,
1534                               &cq->wq_ctrl);
1535        if (err)
1536                return err;
1537
1538        mlx5_vector2eqn(mdev, param->eq_ix, &eqn_not_used, &irqn);
1539
1540        mcq->cqe_sz     = 64;
1541        mcq->set_ci_db  = cq->wq_ctrl.db.db;
1542        mcq->arm_db     = cq->wq_ctrl.db.db + 1;
1543        *mcq->set_ci_db = 0;
1544        *mcq->arm_db    = 0;
1545        mcq->vector     = param->eq_ix;
1546        mcq->comp       = mlx5e_completion_event;
1547        mcq->event      = mlx5e_cq_error_event;
1548        mcq->irqn       = irqn;
1549        mcq->uar        = &priv->cq_uar;
1550
1551        cq->priv = priv;
1552
1553        return 0;
1554}
1555
1556static int mlx5e_open_drop_rq(struct mlx5e_priv *priv)
1557{
1558        struct mlx5e_cq_param cq_param;
1559        struct mlx5e_rq_param rq_param;
1560        struct mlx5e_rq *rq = &priv->drop_rq;
1561        struct mlx5e_cq *cq = &priv->drop_rq.cq;
1562        int err;
1563
1564        memset(&cq_param, 0, sizeof(cq_param));
1565        memset(&rq_param, 0, sizeof(rq_param));
1566        mlx5e_build_rx_cq_param(priv, &cq_param);
1567        mlx5e_build_rq_param(priv, &rq_param);
1568
1569        err = mlx5e_create_drop_cq(priv, cq, &cq_param);
1570        if (err)
1571                return err;
1572
1573        err = mlx5e_enable_cq(cq, &cq_param);
1574        if (err)
1575                goto err_destroy_cq;
1576
1577        err = mlx5e_create_drop_rq(priv, rq, &rq_param);
1578        if (err)
1579                goto err_disable_cq;
1580
1581        err = mlx5e_enable_rq(rq, &rq_param);
1582        if (err)
1583                goto err_destroy_rq;
1584
1585        return 0;
1586
1587err_destroy_rq:
1588        mlx5e_destroy_rq(&priv->drop_rq);
1589
1590err_disable_cq:
1591        mlx5e_disable_cq(&priv->drop_rq.cq);
1592
1593err_destroy_cq:
1594        mlx5e_destroy_cq(&priv->drop_rq.cq);
1595
1596        return err;
1597}
1598
1599static void mlx5e_close_drop_rq(struct mlx5e_priv *priv)
1600{
1601        mlx5e_disable_rq(&priv->drop_rq);
1602        mlx5e_destroy_rq(&priv->drop_rq);
1603        mlx5e_disable_cq(&priv->drop_rq.cq);
1604        mlx5e_destroy_cq(&priv->drop_rq.cq);
1605}
1606
1607static int mlx5e_create_tis(struct mlx5e_priv *priv, int tc)
1608{
1609        struct mlx5_core_dev *mdev = priv->mdev;
1610        u32 in[MLX5_ST_SZ_DW(create_tis_in)];
1611        void *tisc = MLX5_ADDR_OF(create_tis_in, in, ctx);
1612
1613        memset(in, 0, sizeof(in));
1614
1615        MLX5_SET(tisc, tisc, prio,  tc);
1616        MLX5_SET(tisc, tisc, transport_domain, priv->tdn);
1617
1618        return mlx5_core_create_tis(mdev, in, sizeof(in), &priv->tisn[tc]);
1619}
1620
1621static void mlx5e_destroy_tis(struct mlx5e_priv *priv, int tc)
1622{
1623        mlx5_core_destroy_tis(priv->mdev, priv->tisn[tc]);
1624}
1625
1626static int mlx5e_create_tises(struct mlx5e_priv *priv)
1627{
1628        int err;
1629        int tc;
1630
1631        for (tc = 0; tc < priv->params.num_tc; tc++) {
1632                err = mlx5e_create_tis(priv, tc);
1633                if (err)
1634                        goto err_close_tises;
1635        }
1636
1637        return 0;
1638
1639err_close_tises:
1640        for (tc--; tc >= 0; tc--)
1641                mlx5e_destroy_tis(priv, tc);
1642
1643        return err;
1644}
1645
1646static void mlx5e_destroy_tises(struct mlx5e_priv *priv)
1647{
1648        int tc;
1649
1650        for (tc = 0; tc < priv->params.num_tc; tc++)
1651                mlx5e_destroy_tis(priv, tc);
1652}
1653
static void mlx5e_build_tir_ctx(struct mlx5e_priv *priv, u32 *tirc, int tt)
{
        void *hfso = MLX5_ADDR_OF(tirc, tirc, rx_hash_field_selector_outer);

        MLX5_SET(tirc, tirc, transport_domain, priv->tdn);

#define MLX5_HASH_IP            (MLX5_HASH_FIELD_SEL_SRC_IP   |\
                                 MLX5_HASH_FIELD_SEL_DST_IP)

#define MLX5_HASH_IP_L4PORTS    (MLX5_HASH_FIELD_SEL_SRC_IP   |\
                                 MLX5_HASH_FIELD_SEL_DST_IP   |\
                                 MLX5_HASH_FIELD_SEL_L4_SPORT |\
                                 MLX5_HASH_FIELD_SEL_L4_DPORT)

#define MLX5_HASH_IP_IPSEC_SPI  (MLX5_HASH_FIELD_SEL_SRC_IP   |\
                                 MLX5_HASH_FIELD_SEL_DST_IP   |\
                                 MLX5_HASH_FIELD_SEL_IPSEC_SPI)

        mlx5e_build_tir_ctx_lro(tirc, priv);

        MLX5_SET(tirc, tirc, disp_type, MLX5_TIRC_DISP_TYPE_INDIRECT);

        switch (tt) {
        case MLX5E_TT_ANY:
                MLX5_SET(tirc, tirc, indirect_table,
                         priv->rqtn[MLX5E_SINGLE_RQ_RQT]);
                MLX5_SET(tirc, tirc, rx_hash_fn, MLX5_RX_HASH_FN_INVERTED_XOR8);
                break;
        default:
                MLX5_SET(tirc, tirc, indirect_table,
                         priv->rqtn[MLX5E_INDIRECTION_RQT]);
                mlx5e_build_tir_ctx_hash(tirc, priv);
                break;
        }

        switch (tt) {
        case MLX5E_TT_IPV4_TCP:
                MLX5_SET(rx_hash_field_select, hfso, l3_prot_type,
                         MLX5_L3_PROT_TYPE_IPV4);
                MLX5_SET(rx_hash_field_select, hfso, l4_prot_type,
                         MLX5_L4_PROT_TYPE_TCP);
                MLX5_SET(rx_hash_field_select, hfso, selected_fields,
                         MLX5_HASH_IP_L4PORTS);
                break;

        case MLX5E_TT_IPV6_TCP:
                MLX5_SET(rx_hash_field_select, hfso, l3_prot_type,
                         MLX5_L3_PROT_TYPE_IPV6);
                MLX5_SET(rx_hash_field_select, hfso, l4_prot_type,
                         MLX5_L4_PROT_TYPE_TCP);
                MLX5_SET(rx_hash_field_select, hfso, selected_fields,
                         MLX5_HASH_IP_L4PORTS);
                break;

        case MLX5E_TT_IPV4_UDP:
                MLX5_SET(rx_hash_field_select, hfso, l3_prot_type,
                         MLX5_L3_PROT_TYPE_IPV4);
                MLX5_SET(rx_hash_field_select, hfso, l4_prot_type,
                         MLX5_L4_PROT_TYPE_UDP);
                MLX5_SET(rx_hash_field_select, hfso, selected_fields,
                         MLX5_HASH_IP_L4PORTS);
                break;

        case MLX5E_TT_IPV6_UDP:
                MLX5_SET(rx_hash_field_select, hfso, l3_prot_type,
                         MLX5_L3_PROT_TYPE_IPV6);
                MLX5_SET(rx_hash_field_select, hfso, l4_prot_type,
                         MLX5_L4_PROT_TYPE_UDP);
                MLX5_SET(rx_hash_field_select, hfso, selected_fields,
                         MLX5_HASH_IP_L4PORTS);
                break;

        case MLX5E_TT_IPV4_IPSEC_AH:
                MLX5_SET(rx_hash_field_select, hfso, l3_prot_type,
                         MLX5_L3_PROT_TYPE_IPV4);
                MLX5_SET(rx_hash_field_select, hfso, selected_fields,
                         MLX5_HASH_IP_IPSEC_SPI);
                break;

        case MLX5E_TT_IPV6_IPSEC_AH:
                MLX5_SET(rx_hash_field_select, hfso, l3_prot_type,
                         MLX5_L3_PROT_TYPE_IPV6);
                MLX5_SET(rx_hash_field_select, hfso, selected_fields,
                         MLX5_HASH_IP_IPSEC_SPI);
                break;

        case MLX5E_TT_IPV4_IPSEC_ESP:
                MLX5_SET(rx_hash_field_select, hfso, l3_prot_type,
                         MLX5_L3_PROT_TYPE_IPV4);
                MLX5_SET(rx_hash_field_select, hfso, selected_fields,
                         MLX5_HASH_IP_IPSEC_SPI);
                break;

        case MLX5E_TT_IPV6_IPSEC_ESP:
                MLX5_SET(rx_hash_field_select, hfso, l3_prot_type,
                         MLX5_L3_PROT_TYPE_IPV6);
                MLX5_SET(rx_hash_field_select, hfso, selected_fields,
                         MLX5_HASH_IP_IPSEC_SPI);
                break;

        case MLX5E_TT_IPV4:
                MLX5_SET(rx_hash_field_select, hfso, l3_prot_type,
                         MLX5_L3_PROT_TYPE_IPV4);
                MLX5_SET(rx_hash_field_select, hfso, selected_fields,
                         MLX5_HASH_IP);
                break;

        case MLX5E_TT_IPV6:
                MLX5_SET(rx_hash_field_select, hfso, l3_prot_type,
                         MLX5_L3_PROT_TYPE_IPV6);
                MLX5_SET(rx_hash_field_select, hfso, selected_fields,
                         MLX5_HASH_IP);
                break;
        }
}

static int mlx5e_create_tir(struct mlx5e_priv *priv, int tt)
{
        struct mlx5_core_dev *mdev = priv->mdev;
        u32 *in;
        void *tirc;
        int inlen;
        int err;

        inlen = MLX5_ST_SZ_BYTES(create_tir_in);
        in = mlx5_vzalloc(inlen);
        if (!in)
                return -ENOMEM;

        tirc = MLX5_ADDR_OF(create_tir_in, in, ctx);

        mlx5e_build_tir_ctx(priv, tirc, tt);

        err = mlx5_core_create_tir(mdev, in, inlen, &priv->tirn[tt]);

        kvfree(in);

        return err;
}

static void mlx5e_destroy_tir(struct mlx5e_priv *priv, int tt)
{
        mlx5_core_destroy_tir(priv->mdev, priv->tirn[tt]);
}

static int mlx5e_create_tirs(struct mlx5e_priv *priv)
{
        int err;
        int i;

        for (i = 0; i < MLX5E_NUM_TT; i++) {
                err = mlx5e_create_tir(priv, i);
                if (err)
                        goto err_destroy_tirs;
        }

        return 0;

err_destroy_tirs:
        for (i--; i >= 0; i--)
                mlx5e_destroy_tir(priv, i);

        return err;
}

static void mlx5e_destroy_tirs(struct mlx5e_priv *priv)
{
        int i;

        for (i = 0; i < MLX5E_NUM_TT; i++)
                mlx5e_destroy_tir(priv, i);
}

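/* ndo_get_stats64: copy the software-cached vport counters into @stats.
 * The cache is refreshed by update_stats_work, so no device command is
 * issued on this potentially hot path.
 */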
static struct rtnl_link_stats64 *
mlx5e_get_stats(struct net_device *dev, struct rtnl_link_stats64 *stats)
{
        struct mlx5e_priv *priv = netdev_priv(dev);
        struct mlx5e_vport_stats *vstats = &priv->stats.vport;

        stats->rx_packets = vstats->rx_packets;
        stats->rx_bytes   = vstats->rx_bytes;
        stats->tx_packets = vstats->tx_packets;
        stats->tx_bytes   = vstats->tx_bytes;
        stats->multicast  = vstats->rx_multicast_packets +
                            vstats->tx_multicast_packets;
        stats->tx_errors  = vstats->tx_error_packets;
        stats->rx_errors  = vstats->rx_error_packets;
        stats->tx_dropped = vstats->tx_queue_dropped;
        stats->rx_crc_errors = 0;
        stats->rx_length_errors = 0;

        return stats;
}

static void mlx5e_set_rx_mode(struct net_device *dev)
{
        struct mlx5e_priv *priv = netdev_priv(dev);

        schedule_work(&priv->set_rx_mode_work);
}

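/* ndo_set_mac_address: update dev_addr under the address lock, then kick
 * set_rx_mode_work to reprogram the hardware filters from process context,
 * where device commands are allowed to sleep.
 */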
static int mlx5e_set_mac(struct net_device *netdev, void *addr)
{
        struct mlx5e_priv *priv = netdev_priv(netdev);
        struct sockaddr *saddr = addr;

        if (!is_valid_ether_addr(saddr->sa_data))
                return -EADDRNOTAVAIL;

        netif_addr_lock_bh(netdev);
        ether_addr_copy(netdev->dev_addr, saddr->sa_data);
        netif_addr_unlock_bh(netdev);

        schedule_work(&priv->set_rx_mode_work);

        return 0;
}

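/* ndo_set_features: toggling LRO rewrites the TIR contexts, so it is done
 * under the state lock, bouncing the channels if the interface is open.
 * The VLAN-filter toggle only adds or removes flow steering rules and
 * needs no restart.
 */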
static int mlx5e_set_features(struct net_device *netdev,
                              netdev_features_t features)
{
        struct mlx5e_priv *priv = netdev_priv(netdev);
        int err = 0;
        netdev_features_t changes = features ^ netdev->features;

        mutex_lock(&priv->state_lock);

        if (changes & NETIF_F_LRO) {
                bool was_opened = test_bit(MLX5E_STATE_OPENED, &priv->state);

                if (was_opened)
                        mlx5e_close_locked(priv->netdev);

                priv->params.lro_en = !!(features & NETIF_F_LRO);
                err = mlx5e_modify_tirs_lro(priv);
                if (err)
                        mlx5_core_warn(priv->mdev, "lro modify failed, %d\n",
                                       err);

                if (was_opened)
                        err = mlx5e_open_locked(priv->netdev);
        }

        mutex_unlock(&priv->state_lock);

        if (changes & NETIF_F_HW_VLAN_CTAG_FILTER) {
                if (features & NETIF_F_HW_VLAN_CTAG_FILTER)
                        mlx5e_enable_vlan_filter(priv);
                else
                        mlx5e_disable_vlan_filter(priv);
        }

        return err;
}

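/* ndo_change_mtu: reject MTUs above the port's hardware maximum
 * (translated to a software MTU by MLX5E_HW2SW_MTU), then apply the new
 * value with a close/reopen cycle so the receive buffers are resized.
 */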
static int mlx5e_change_mtu(struct net_device *netdev, int new_mtu)
{
        struct mlx5e_priv *priv = netdev_priv(netdev);
        struct mlx5_core_dev *mdev = priv->mdev;
        bool was_opened;
        int max_mtu;
        int err = 0;

        mlx5_query_port_max_mtu(mdev, &max_mtu, 1);

        max_mtu = MLX5E_HW2SW_MTU(max_mtu);

        if (new_mtu > max_mtu) {
                netdev_err(netdev,
                           "%s: Bad MTU (%d) > (%d) Max\n",
                           __func__, new_mtu, max_mtu);
                return -EINVAL;
        }

        mutex_lock(&priv->state_lock);

        was_opened = test_bit(MLX5E_STATE_OPENED, &priv->state);
        if (was_opened)
                mlx5e_close_locked(netdev);

        netdev->mtu = new_mtu;

        if (was_opened)
                err = mlx5e_open_locked(netdev);

        mutex_unlock(&priv->state_lock);

        return err;
}

static int mlx5e_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
{
        switch (cmd) {
        case SIOCSHWTSTAMP:
                return mlx5e_hwstamp_set(dev, ifr);
        case SIOCGHWTSTAMP:
                return mlx5e_hwstamp_get(dev, ifr);
        default:
                return -EOPNOTSUPP;
        }
}

static int mlx5e_set_vf_mac(struct net_device *dev, int vf, u8 *mac)
{
        struct mlx5e_priv *priv = netdev_priv(dev);
        struct mlx5_core_dev *mdev = priv->mdev;

        return mlx5_eswitch_set_vport_mac(mdev->priv.eswitch, vf + 1, mac);
}

static int mlx5e_set_vf_vlan(struct net_device *dev, int vf, u16 vlan, u8 qos)
{
        struct mlx5e_priv *priv = netdev_priv(dev);
        struct mlx5_core_dev *mdev = priv->mdev;

        return mlx5_eswitch_set_vport_vlan(mdev->priv.eswitch, vf + 1,
                                           vlan, qos);
}

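/* The VF callbacks above and below address e-switch vports, where vport 0
 * is the PF and VF n maps to vport n + 1. The two helpers here translate
 * between the e-switch admin-state enum and the IFLA_VF_LINK_STATE_*
 * values of the rtnetlink VF API, defaulting to auto in both directions.
 */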
static int mlx5_vport_link2ifla(u8 esw_link)
{
        switch (esw_link) {
        case MLX5_ESW_VPORT_ADMIN_STATE_DOWN:
                return IFLA_VF_LINK_STATE_DISABLE;
        case MLX5_ESW_VPORT_ADMIN_STATE_UP:
                return IFLA_VF_LINK_STATE_ENABLE;
        }
        return IFLA_VF_LINK_STATE_AUTO;
}

static int mlx5_ifla_link2vport(u8 ifla_link)
{
        switch (ifla_link) {
        case IFLA_VF_LINK_STATE_DISABLE:
                return MLX5_ESW_VPORT_ADMIN_STATE_DOWN;
        case IFLA_VF_LINK_STATE_ENABLE:
                return MLX5_ESW_VPORT_ADMIN_STATE_UP;
        }
        return MLX5_ESW_VPORT_ADMIN_STATE_AUTO;
}

static int mlx5e_set_vf_link_state(struct net_device *dev, int vf,
                                   int link_state)
{
        struct mlx5e_priv *priv = netdev_priv(dev);
        struct mlx5_core_dev *mdev = priv->mdev;

        return mlx5_eswitch_set_vport_state(mdev->priv.eswitch, vf + 1,
                                            mlx5_ifla_link2vport(link_state));
}

static int mlx5e_get_vf_config(struct net_device *dev,
                               int vf, struct ifla_vf_info *ivi)
{
        struct mlx5e_priv *priv = netdev_priv(dev);
        struct mlx5_core_dev *mdev = priv->mdev;
        int err;

        err = mlx5_eswitch_get_vport_config(mdev->priv.eswitch, vf + 1, ivi);
        if (err)
                return err;
        ivi->linkstate = mlx5_vport_link2ifla(ivi->linkstate);
        return 0;
}

static int mlx5e_get_vf_stats(struct net_device *dev,
                              int vf, struct ifla_vf_stats *vf_stats)
{
        struct mlx5e_priv *priv = netdev_priv(dev);
        struct mlx5_core_dev *mdev = priv->mdev;

        return mlx5_eswitch_get_vport_stats(mdev->priv.eswitch, vf + 1,
                                            vf_stats);
}

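/* Two ndo tables: the SR-IOV variant adds the VF management callbacks and
 * is selected in mlx5e_build_netdev() only when the device is the e-switch
 * (vport group) manager.
 */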
static const struct net_device_ops mlx5e_netdev_ops_basic = {
        .ndo_open                = mlx5e_open,
        .ndo_stop                = mlx5e_close,
        .ndo_start_xmit          = mlx5e_xmit,
        .ndo_get_stats64         = mlx5e_get_stats,
        .ndo_set_rx_mode         = mlx5e_set_rx_mode,
        .ndo_set_mac_address     = mlx5e_set_mac,
        .ndo_vlan_rx_add_vid     = mlx5e_vlan_rx_add_vid,
        .ndo_vlan_rx_kill_vid    = mlx5e_vlan_rx_kill_vid,
        .ndo_set_features        = mlx5e_set_features,
        .ndo_change_mtu          = mlx5e_change_mtu,
        .ndo_do_ioctl            = mlx5e_ioctl,
};

static const struct net_device_ops mlx5e_netdev_ops_sriov = {
        .ndo_open                = mlx5e_open,
        .ndo_stop                = mlx5e_close,
        .ndo_start_xmit          = mlx5e_xmit,
        .ndo_get_stats64         = mlx5e_get_stats,
        .ndo_set_rx_mode         = mlx5e_set_rx_mode,
        .ndo_set_mac_address     = mlx5e_set_mac,
        .ndo_vlan_rx_add_vid     = mlx5e_vlan_rx_add_vid,
        .ndo_vlan_rx_kill_vid    = mlx5e_vlan_rx_kill_vid,
        .ndo_set_features        = mlx5e_set_features,
        .ndo_change_mtu          = mlx5e_change_mtu,
        .ndo_do_ioctl            = mlx5e_ioctl,
        .ndo_set_vf_mac          = mlx5e_set_vf_mac,
        .ndo_set_vf_vlan         = mlx5e_set_vf_vlan,
        .ndo_get_vf_config       = mlx5e_get_vf_config,
        .ndo_set_vf_link_state   = mlx5e_set_vf_link_state,
        .ndo_get_vf_stats        = mlx5e_get_vf_stats,
};

static int mlx5e_check_required_hca_cap(struct mlx5_core_dev *mdev)
{
        if (MLX5_CAP_GEN(mdev, port_type) != MLX5_CAP_PORT_TYPE_ETH)
                return -ENOTSUPP;
        if (!MLX5_CAP_GEN(mdev, eth_net_offloads) ||
            !MLX5_CAP_GEN(mdev, nic_flow_table) ||
            !MLX5_CAP_ETH(mdev, csum_cap) ||
            !MLX5_CAP_ETH(mdev, max_lso_cap) ||
            !MLX5_CAP_ETH(mdev, vlan_cap) ||
            !MLX5_CAP_ETH(mdev, rss_ind_tbl_cap) ||
            MLX5_CAP_FLOWTABLE(mdev,
                               flow_table_properties_nic_receive.max_ft_level)
                               < 3) {
                mlx5_core_warn(mdev,
                               "Not creating net device, some required device capabilities are missing\n");
                return -ENOTSUPP;
        }
        if (!MLX5_CAP_ETH(mdev, self_lb_en_modifiable))
                mlx5_core_warn(mdev, "Self loop back prevention is not supported\n");

        return 0;
}

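/* Maximum inline data that fits in a TX WQE posted through the BlueFlame
 * register: half the register per post, minus the fixed WQE segments, plus
 * the two inline_hdr_start bytes already accounted inside struct
 * mlx5e_tx_wqe. Illustrative numbers only: log_bf_reg_size = 9 gives a
 * 256-byte budget before the sizeof() adjustment.
 */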
u16 mlx5e_get_max_inline_cap(struct mlx5_core_dev *mdev)
{
        int bf_buf_size = (1 << MLX5_CAP_GEN(mdev, log_bf_reg_size)) / 2;

        return bf_buf_size -
               sizeof(struct mlx5e_tx_wqe) +
               2 /*sizeof(mlx5e_tx_wqe.inline_hdr_start)*/;
}

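/* Default RSS indirection: spread table entries across channels
 * round-robin. Illustrative values (not driver defaults): len = 8 with
 * num_channels = 3 fills the table as 0 1 2 0 1 2 0 1.
 */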
void mlx5e_build_default_indir_rqt(u32 *indirection_rqt, int len,
                                   int num_channels)
{
        int i;

        for (i = 0; i < len; i++)
                indirection_rqt[i] = i % num_channels;
}

static void mlx5e_build_netdev_priv(struct mlx5_core_dev *mdev,
                                    struct net_device *netdev,
                                    int num_channels)
{
        struct mlx5e_priv *priv = netdev_priv(netdev);

        priv->params.log_sq_size           =
                MLX5E_PARAMS_DEFAULT_LOG_SQ_SIZE;
        priv->params.log_rq_size           =
                MLX5E_PARAMS_DEFAULT_LOG_RQ_SIZE;
        priv->params.rx_cq_moderation_usec =
                MLX5E_PARAMS_DEFAULT_RX_CQ_MODERATION_USEC;
        priv->params.rx_cq_moderation_pkts =
                MLX5E_PARAMS_DEFAULT_RX_CQ_MODERATION_PKTS;
        priv->params.tx_cq_moderation_usec =
                MLX5E_PARAMS_DEFAULT_TX_CQ_MODERATION_USEC;
        priv->params.tx_cq_moderation_pkts =
                MLX5E_PARAMS_DEFAULT_TX_CQ_MODERATION_PKTS;
        priv->params.tx_max_inline         = mlx5e_get_max_inline_cap(mdev);
        priv->params.min_rx_wqes           =
                MLX5E_PARAMS_DEFAULT_MIN_RX_WQES;
        priv->params.num_tc                = 1;
        priv->params.default_vlan_prio     = 0;
        priv->params.rss_hfunc             = ETH_RSS_HASH_XOR;

        netdev_rss_key_fill(priv->params.toeplitz_hash_key,
                            sizeof(priv->params.toeplitz_hash_key));

        mlx5e_build_default_indir_rqt(priv->params.indirection_rqt,
                                      MLX5E_INDIR_RQT_SIZE, num_channels);

        priv->params.lro_wqe_sz            =
                MLX5E_PARAMS_DEFAULT_LRO_WQE_SZ;

        priv->mdev                         = mdev;
        priv->netdev                       = netdev;
        priv->params.num_channels          = num_channels;
        priv->default_vlan_prio            = priv->params.default_vlan_prio;

        spin_lock_init(&priv->async_events_spinlock);
        mutex_init(&priv->state_lock);

        INIT_WORK(&priv->update_carrier_work, mlx5e_update_carrier_work);
        INIT_WORK(&priv->set_rx_mode_work, mlx5e_set_rx_mode_work);
        INIT_DELAYED_WORK(&priv->update_stats_work, mlx5e_update_stats_work);
}

static void mlx5e_set_netdev_dev_addr(struct net_device *netdev)
{
        struct mlx5e_priv *priv = netdev_priv(netdev);

        mlx5_query_nic_vport_mac_address(priv->mdev, 0, netdev->dev_addr);
        if (is_zero_ether_addr(netdev->dev_addr) &&
            !MLX5_CAP_GEN(priv->mdev, vport_group_manager)) {
                eth_hw_addr_random(netdev);
                mlx5_core_info(priv->mdev, "Assigned random MAC address %pM\n", netdev->dev_addr);
        }
}

static void mlx5e_build_netdev(struct net_device *netdev)
{
        struct mlx5e_priv *priv = netdev_priv(netdev);
        struct mlx5_core_dev *mdev = priv->mdev;

        SET_NETDEV_DEV(netdev, &mdev->pdev->dev);

        if (MLX5_CAP_GEN(mdev, vport_group_manager))
                netdev->netdev_ops = &mlx5e_netdev_ops_sriov;
        else
                netdev->netdev_ops = &mlx5e_netdev_ops_basic;

        netdev->watchdog_timeo    = 15 * HZ;

        netdev->ethtool_ops       = &mlx5e_ethtool_ops;

        netdev->vlan_features    |= NETIF_F_SG;
        netdev->vlan_features    |= NETIF_F_IP_CSUM;
        netdev->vlan_features    |= NETIF_F_IPV6_CSUM;
        netdev->vlan_features    |= NETIF_F_GRO;
        netdev->vlan_features    |= NETIF_F_TSO;
        netdev->vlan_features    |= NETIF_F_TSO6;
        netdev->vlan_features    |= NETIF_F_RXCSUM;
        netdev->vlan_features    |= NETIF_F_RXHASH;

        if (MLX5_CAP_ETH(mdev, lro_cap))
                netdev->vlan_features    |= NETIF_F_LRO;

        netdev->hw_features       = netdev->vlan_features;
        netdev->hw_features      |= NETIF_F_HW_VLAN_CTAG_TX;
        netdev->hw_features      |= NETIF_F_HW_VLAN_CTAG_RX;
        netdev->hw_features      |= NETIF_F_HW_VLAN_CTAG_FILTER;

        netdev->features          = netdev->hw_features;
        if (!priv->params.lro_en)
                netdev->features  &= ~NETIF_F_LRO;

        netdev->features         |= NETIF_F_HIGHDMA;

        netdev->priv_flags       |= IFF_UNICAST_FLT;

        mlx5e_set_netdev_dev_addr(netdev);
}

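/* Create a single physical-address mkey for the whole device:
 * MLX5_ACCESS_MODE_PA with MLX5_MKEY_LEN64 covers the entire address
 * space, so RQ/SQ buffers can be posted by physical address with no
 * per-buffer registration; the 0xffffff QPN conventionally marks the mkey
 * as not bound to any particular QP.
 */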
static int mlx5e_create_mkey(struct mlx5e_priv *priv, u32 pdn,
                             struct mlx5_core_mr *mr)
{
        struct mlx5_core_dev *mdev = priv->mdev;
        struct mlx5_create_mkey_mbox_in *in;
        int err;

        in = mlx5_vzalloc(sizeof(*in));
        if (!in)
                return -ENOMEM;

        in->seg.flags = MLX5_PERM_LOCAL_WRITE |
                        MLX5_PERM_LOCAL_READ  |
                        MLX5_ACCESS_MODE_PA;
        in->seg.flags_pd = cpu_to_be32(pdn | MLX5_MKEY_LEN64);
        in->seg.qpn_mkey7_0 = cpu_to_be32(0xffffff << 8);

        err = mlx5_core_create_mkey(mdev, mr, in, sizeof(*in), NULL, NULL,
                                    NULL);

        kvfree(in);

        return err;
}

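/* .add() callback: allocate the netdev and bring up every per-device
 * resource in dependency order (UAR, PD, transport domain, mkey, TISes,
 * drop RQ, both RQTs, TIRs, flow tables), and only then register_netdev().
 * The error labels unwind in exactly the reverse order.
 */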
static void *mlx5e_create_netdev(struct mlx5_core_dev *mdev)
{
        struct net_device *netdev;
        struct mlx5e_priv *priv;
        int nch = mlx5e_get_max_num_channels(mdev);
        int err;

        if (mlx5e_check_required_hca_cap(mdev))
                return NULL;

        netdev = alloc_etherdev_mqs(sizeof(struct mlx5e_priv), nch, nch);
        if (!netdev) {
                mlx5_core_err(mdev, "alloc_etherdev_mqs() failed\n");
                return NULL;
        }

        mlx5e_build_netdev_priv(mdev, netdev, nch);
        mlx5e_build_netdev(netdev);

        netif_carrier_off(netdev);

        priv = netdev_priv(netdev);

        err = mlx5_alloc_map_uar(mdev, &priv->cq_uar);
        if (err) {
                mlx5_core_err(mdev, "alloc_map uar failed, %d\n", err);
                goto err_free_netdev;
        }

        err = mlx5_core_alloc_pd(mdev, &priv->pdn);
        if (err) {
                mlx5_core_err(mdev, "alloc pd failed, %d\n", err);
                goto err_unmap_free_uar;
        }

        err = mlx5_core_alloc_transport_domain(mdev, &priv->tdn);
        if (err) {
                mlx5_core_err(mdev, "alloc td failed, %d\n", err);
                goto err_dealloc_pd;
        }

        err = mlx5e_create_mkey(priv, priv->pdn, &priv->mr);
        if (err) {
                mlx5_core_err(mdev, "create mkey failed, %d\n", err);
                goto err_dealloc_transport_domain;
        }

        err = mlx5e_create_tises(priv);
        if (err) {
                mlx5_core_warn(mdev, "create tises failed, %d\n", err);
                goto err_destroy_mkey;
        }

        err = mlx5e_open_drop_rq(priv);
        if (err) {
                mlx5_core_err(mdev, "open drop rq failed, %d\n", err);
                goto err_destroy_tises;
        }

        err = mlx5e_create_rqt(priv, MLX5E_INDIRECTION_RQT);
        if (err) {
                mlx5_core_warn(mdev, "create rqt(INDIR) failed, %d\n", err);
                goto err_close_drop_rq;
        }

        err = mlx5e_create_rqt(priv, MLX5E_SINGLE_RQ_RQT);
        if (err) {
                mlx5_core_warn(mdev, "create rqt(SINGLE) failed, %d\n", err);
                goto err_destroy_rqt_indir;
        }

        err = mlx5e_create_tirs(priv);
        if (err) {
                mlx5_core_warn(mdev, "create tirs failed, %d\n", err);
                goto err_destroy_rqt_single;
        }

        err = mlx5e_create_flow_tables(priv);
        if (err) {
                mlx5_core_warn(mdev, "create flow tables failed, %d\n", err);
                goto err_destroy_tirs;
        }

        mlx5e_init_eth_addr(priv);

        err = register_netdev(netdev);
        if (err) {
                mlx5_core_err(mdev, "register_netdev failed, %d\n", err);
                goto err_destroy_flow_tables;
        }

        mlx5e_enable_async_events(priv);
        schedule_work(&priv->set_rx_mode_work);

        return priv;

err_destroy_flow_tables:
        mlx5e_destroy_flow_tables(priv);

err_destroy_tirs:
        mlx5e_destroy_tirs(priv);

err_destroy_rqt_single:
        mlx5e_destroy_rqt(priv, MLX5E_SINGLE_RQ_RQT);

err_destroy_rqt_indir:
        mlx5e_destroy_rqt(priv, MLX5E_INDIRECTION_RQT);

err_close_drop_rq:
        mlx5e_close_drop_rq(priv);

err_destroy_tises:
        mlx5e_destroy_tises(priv);

err_destroy_mkey:
        mlx5_core_destroy_mkey(mdev, &priv->mr);

err_dealloc_transport_domain:
        mlx5_core_dealloc_transport_domain(mdev, priv->tdn);

err_dealloc_pd:
        mlx5_core_dealloc_pd(mdev, priv->pdn);

err_unmap_free_uar:
        mlx5_unmap_free_uar(mdev, &priv->cq_uar);

err_free_netdev:
        free_netdev(netdev);

        return NULL;
}

static void mlx5e_destroy_netdev(struct mlx5_core_dev *mdev, void *vpriv)
{
        struct mlx5e_priv *priv = vpriv;
        struct net_device *netdev = priv->netdev;

        set_bit(MLX5E_STATE_DESTROYING, &priv->state);

        schedule_work(&priv->set_rx_mode_work);
        mlx5e_disable_async_events(priv);
        flush_scheduled_work();
        unregister_netdev(netdev);
        mlx5e_destroy_flow_tables(priv);
        mlx5e_destroy_tirs(priv);
        mlx5e_destroy_rqt(priv, MLX5E_SINGLE_RQ_RQT);
        mlx5e_destroy_rqt(priv, MLX5E_INDIRECTION_RQT);
        mlx5e_close_drop_rq(priv);
        mlx5e_destroy_tises(priv);
        mlx5_core_destroy_mkey(priv->mdev, &priv->mr);
        mlx5_core_dealloc_transport_domain(priv->mdev, priv->tdn);
        mlx5_core_dealloc_pd(priv->mdev, priv->pdn);
        mlx5_unmap_free_uar(priv->mdev, &priv->cq_uar);
        free_netdev(netdev);
}

static void *mlx5e_get_netdev(void *vpriv)
{
        struct mlx5e_priv *priv = vpriv;

        return priv->netdev;
}

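/* Glue to the mlx5 core: once registered, the core calls .add()/.remove()
 * for every mlx5 device that appears or disappears, and .event() for
 * asynchronous device events.
 */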
static struct mlx5_interface mlx5e_interface = {
        .add       = mlx5e_create_netdev,
        .remove    = mlx5e_destroy_netdev,
        .event     = mlx5e_async_event,
        .protocol  = MLX5_INTERFACE_PROTOCOL_ETH,
        .get_dev   = mlx5e_get_netdev,
};

void mlx5e_init(void)
{
        mlx5_register_interface(&mlx5e_interface);
}

void mlx5e_cleanup(void)
{
        mlx5_unregister_interface(&mlx5e_interface);
}
