linux/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
/*
 * Copyright (c) 2015, Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/mlx5/flow_table.h>
#include "en.h"

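/*
 * Creation parameters for the per-channel objects: each pairs the
 * firmware context (rqc/sqc/cqc) with the work queue parameters used
 * when the queue buffers are allocated.
 */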
struct mlx5e_rq_param {
        u32                        rqc[MLX5_ST_SZ_DW(rqc)];
        struct mlx5_wq_param       wq;
};

struct mlx5e_sq_param {
        u32                        sqc[MLX5_ST_SZ_DW(sqc)];
        struct mlx5_wq_param       wq;
        u16                        max_inline;
};

struct mlx5e_cq_param {
        u32                        cqc[MLX5_ST_SZ_DW(cqc)];
        struct mlx5_wq_param       wq;
        u16                        eq_ix;
};

struct mlx5e_channel_param {
        struct mlx5e_rq_param      rq;
        struct mlx5e_sq_param      sq;
        struct mlx5e_cq_param      rx_cq;
        struct mlx5e_cq_param      tx_cq;
};

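/* Mirror the firmware's vport state in the netdev carrier state. */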
static void mlx5e_update_carrier(struct mlx5e_priv *priv)
{
        struct mlx5_core_dev *mdev = priv->mdev;
        u8 port_state;

        port_state = mlx5_query_vport_state(mdev,
                MLX5_QUERY_VPORT_STATE_IN_OP_MOD_VNIC_VPORT);

        if (port_state == VPORT_STATE_UP)
                netif_carrier_on(priv->netdev);
        else
                netif_carrier_off(priv->netdev);
}

static void mlx5e_update_carrier_work(struct work_struct *work)
{
        struct mlx5e_priv *priv = container_of(work, struct mlx5e_priv,
                                               update_carrier_work);

        mutex_lock(&priv->state_lock);
        if (test_bit(MLX5E_STATE_OPENED, &priv->state))
                mlx5e_update_carrier(priv);
        mutex_unlock(&priv->state_lock);
}

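/*
 * Read the IEEE 802.3, RFC 2863 and RFC 2819 counter groups from the
 * PPCNT register, one access_reg query per group.
 */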
static void mlx5e_update_pport_counters(struct mlx5e_priv *priv)
{
        struct mlx5_core_dev *mdev = priv->mdev;
        struct mlx5e_pport_stats *s = &priv->stats.pport;
        u32 *in;
        u32 *out;
        int sz = MLX5_ST_SZ_BYTES(ppcnt_reg);

        in  = mlx5_vzalloc(sz);
        out = mlx5_vzalloc(sz);
        if (!in || !out)
                goto free_out;

        MLX5_SET(ppcnt_reg, in, local_port, 1);

        MLX5_SET(ppcnt_reg, in, grp, MLX5_IEEE_802_3_COUNTERS_GROUP);
        mlx5_core_access_reg(mdev, in, sz, out,
                             sz, MLX5_REG_PPCNT, 0, 0);
        memcpy(s->IEEE_802_3_counters,
               MLX5_ADDR_OF(ppcnt_reg, out, counter_set),
               sizeof(s->IEEE_802_3_counters));

        MLX5_SET(ppcnt_reg, in, grp, MLX5_RFC_2863_COUNTERS_GROUP);
        mlx5_core_access_reg(mdev, in, sz, out,
                             sz, MLX5_REG_PPCNT, 0, 0);
        memcpy(s->RFC_2863_counters,
               MLX5_ADDR_OF(ppcnt_reg, out, counter_set),
               sizeof(s->RFC_2863_counters));

        MLX5_SET(ppcnt_reg, in, grp, MLX5_RFC_2819_COUNTERS_GROUP);
        mlx5_core_access_reg(mdev, in, sz, out,
                             sz, MLX5_REG_PPCNT, 0, 0);
        memcpy(s->RFC_2819_counters,
               MLX5_ADDR_OF(ppcnt_reg, out, counter_set),
               sizeof(s->RFC_2819_counters));

free_out:
        kvfree(in);
        kvfree(out);
}

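/*
 * Fold the per-ring SW counters into the vport stats, then query the
 * HW counters with QUERY_VPORT_COUNTER and derive the totals.
 */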
void mlx5e_update_stats(struct mlx5e_priv *priv)
{
        struct mlx5_core_dev *mdev = priv->mdev;
        struct mlx5e_vport_stats *s = &priv->stats.vport;
        struct mlx5e_rq_stats *rq_stats;
        struct mlx5e_sq_stats *sq_stats;
        u32 in[MLX5_ST_SZ_DW(query_vport_counter_in)];
        u32 *out;
        int outlen = MLX5_ST_SZ_BYTES(query_vport_counter_out);
        u64 tx_offload_none;
        int i, j;

        out = mlx5_vzalloc(outlen);
        if (!out)
                return;

        /* Collect the SW counters first, then the HW counters, for consistency */
        s->tso_packets          = 0;
        s->tso_bytes            = 0;
        s->tx_queue_stopped     = 0;
        s->tx_queue_wake        = 0;
        s->tx_queue_dropped     = 0;
        tx_offload_none         = 0;
        s->lro_packets          = 0;
        s->lro_bytes            = 0;
        s->rx_csum_none         = 0;
        s->rx_csum_sw           = 0;
        s->rx_wqe_err           = 0;
        for (i = 0; i < priv->params.num_channels; i++) {
                rq_stats = &priv->channel[i]->rq.stats;

                s->lro_packets  += rq_stats->lro_packets;
                s->lro_bytes    += rq_stats->lro_bytes;
                s->rx_csum_none += rq_stats->csum_none;
                s->rx_csum_sw   += rq_stats->csum_sw;
                s->rx_wqe_err   += rq_stats->wqe_err;

                for (j = 0; j < priv->params.num_tc; j++) {
                        sq_stats = &priv->channel[i]->sq[j].stats;

                        s->tso_packets          += sq_stats->tso_packets;
                        s->tso_bytes            += sq_stats->tso_bytes;
                        s->tx_queue_stopped     += sq_stats->stopped;
                        s->tx_queue_wake        += sq_stats->wake;
                        s->tx_queue_dropped     += sq_stats->dropped;
                        tx_offload_none         += sq_stats->csum_offload_none;
                }
        }

        /* HW counters */
        memset(in, 0, sizeof(in));

        MLX5_SET(query_vport_counter_in, in, opcode,
                 MLX5_CMD_OP_QUERY_VPORT_COUNTER);
        MLX5_SET(query_vport_counter_in, in, op_mod, 0);
        MLX5_SET(query_vport_counter_in, in, other_vport, 0);

        memset(out, 0, outlen);

        if (mlx5_cmd_exec(mdev, in, sizeof(in), out, outlen))
                goto free_out;

#define MLX5_GET_CTR(p, x) \
        MLX5_GET64(query_vport_counter_out, p, x)

        s->rx_error_packets     =
                MLX5_GET_CTR(out, received_errors.packets);
        s->rx_error_bytes       =
                MLX5_GET_CTR(out, received_errors.octets);
        s->tx_error_packets     =
                MLX5_GET_CTR(out, transmit_errors.packets);
        s->tx_error_bytes       =
                MLX5_GET_CTR(out, transmit_errors.octets);

        s->rx_unicast_packets   =
                MLX5_GET_CTR(out, received_eth_unicast.packets);
        s->rx_unicast_bytes     =
                MLX5_GET_CTR(out, received_eth_unicast.octets);
        s->tx_unicast_packets   =
                MLX5_GET_CTR(out, transmitted_eth_unicast.packets);
        s->tx_unicast_bytes     =
                MLX5_GET_CTR(out, transmitted_eth_unicast.octets);

        s->rx_multicast_packets =
                MLX5_GET_CTR(out, received_eth_multicast.packets);
        s->rx_multicast_bytes   =
                MLX5_GET_CTR(out, received_eth_multicast.octets);
        s->tx_multicast_packets =
                MLX5_GET_CTR(out, transmitted_eth_multicast.packets);
        s->tx_multicast_bytes   =
                MLX5_GET_CTR(out, transmitted_eth_multicast.octets);

        s->rx_broadcast_packets =
                MLX5_GET_CTR(out, received_eth_broadcast.packets);
        s->rx_broadcast_bytes   =
                MLX5_GET_CTR(out, received_eth_broadcast.octets);
        s->tx_broadcast_packets =
                MLX5_GET_CTR(out, transmitted_eth_broadcast.packets);
        s->tx_broadcast_bytes   =
                MLX5_GET_CTR(out, transmitted_eth_broadcast.octets);

        s->rx_packets =
                s->rx_unicast_packets +
                s->rx_multicast_packets +
                s->rx_broadcast_packets;
        s->rx_bytes =
                s->rx_unicast_bytes +
                s->rx_multicast_bytes +
                s->rx_broadcast_bytes;
        s->tx_packets =
                s->tx_unicast_packets +
                s->tx_multicast_packets +
                s->tx_broadcast_packets;
        s->tx_bytes =
                s->tx_unicast_bytes +
                s->tx_multicast_bytes +
                s->tx_broadcast_bytes;

        /* Update calculated offload counters */
        s->tx_csum_offload = s->tx_packets - tx_offload_none;
        s->rx_csum_good    = s->rx_packets - s->rx_csum_none -
                               s->rx_csum_sw;

        mlx5e_update_pport_counters(priv);
free_out:
        kvfree(out);
}

static void mlx5e_update_stats_work(struct work_struct *work)
{
        struct delayed_work *dwork = to_delayed_work(work);
        struct mlx5e_priv *priv = container_of(dwork, struct mlx5e_priv,
                                               update_stats_work);
        mutex_lock(&priv->state_lock);
        if (test_bit(MLX5E_STATE_OPENED, &priv->state)) {
                mlx5e_update_stats(priv);
                schedule_delayed_work(dwork,
                                      msecs_to_jiffies(
                                              MLX5E_UPDATE_STATS_INTERVAL));
        }
        mutex_unlock(&priv->state_lock);
}

static void __mlx5e_async_event(struct mlx5e_priv *priv,
                                enum mlx5_dev_event event)
{
        switch (event) {
        case MLX5_DEV_EVENT_PORT_UP:
        case MLX5_DEV_EVENT_PORT_DOWN:
                schedule_work(&priv->update_carrier_work);
                break;

        default:
                break;
        }
}

static void mlx5e_async_event(struct mlx5_core_dev *mdev, void *vpriv,
                              enum mlx5_dev_event event, unsigned long param)
{
        struct mlx5e_priv *priv = vpriv;

        spin_lock(&priv->async_events_spinlock);
        if (test_bit(MLX5E_STATE_ASYNC_EVENTS_ENABLE, &priv->state))
                __mlx5e_async_event(priv, event);
        spin_unlock(&priv->async_events_spinlock);
}

static void mlx5e_enable_async_events(struct mlx5e_priv *priv)
{
        set_bit(MLX5E_STATE_ASYNC_EVENTS_ENABLE, &priv->state);
}

static void mlx5e_disable_async_events(struct mlx5e_priv *priv)
{
        spin_lock_irq(&priv->async_events_spinlock);
        clear_bit(MLX5E_STATE_ASYNC_EVENTS_ENABLE, &priv->state);
        spin_unlock_irq(&priv->async_events_spinlock);
}

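/*
 * The HW MTU accounts for the Ethernet header, one VLAN tag and the
 * FCS, which the SW (netdev) MTU does not include.
 */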
#define MLX5E_HW2SW_MTU(hwmtu) (hwmtu - (ETH_HLEN + VLAN_HLEN + ETH_FCS_LEN))
#define MLX5E_SW2HW_MTU(swmtu) (swmtu + (ETH_HLEN + VLAN_HLEN + ETH_FCS_LEN))

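/*
 * Allocate the RQ work queue and its skb shadow array, and prefill
 * every WQE with the receive buffer byte count and memory key.
 */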
static int mlx5e_create_rq(struct mlx5e_channel *c,
                           struct mlx5e_rq_param *param,
                           struct mlx5e_rq *rq)
{
        struct mlx5e_priv *priv = c->priv;
        struct mlx5_core_dev *mdev = priv->mdev;
        void *rqc = param->rqc;
        void *rqc_wq = MLX5_ADDR_OF(rqc, rqc, wq);
        int wq_sz;
        int err;
        int i;

        param->wq.db_numa_node = cpu_to_node(c->cpu);

        err = mlx5_wq_ll_create(mdev, &param->wq, rqc_wq, &rq->wq,
                                &rq->wq_ctrl);
        if (err)
                return err;

        rq->wq.db = &rq->wq.db[MLX5_RCV_DBR];

        wq_sz = mlx5_wq_ll_get_size(&rq->wq);
        rq->skb = kzalloc_node(wq_sz * sizeof(*rq->skb), GFP_KERNEL,
                               cpu_to_node(c->cpu));
        if (!rq->skb) {
                err = -ENOMEM;
                goto err_rq_wq_destroy;
        }

        rq->wqe_sz = (priv->params.lro_en) ? priv->params.lro_wqe_sz :
                                             MLX5E_SW2HW_MTU(priv->netdev->mtu);
        rq->wqe_sz = SKB_DATA_ALIGN(rq->wqe_sz + MLX5E_NET_IP_ALIGN);

        for (i = 0; i < wq_sz; i++) {
                struct mlx5e_rx_wqe *wqe = mlx5_wq_ll_get_wqe(&rq->wq, i);
                u32 byte_count = rq->wqe_sz - MLX5E_NET_IP_ALIGN;

                wqe->data.lkey       = c->mkey_be;
                wqe->data.byte_count =
                        cpu_to_be32(byte_count | MLX5_HW_START_PADDING);
        }

        rq->pdev    = c->pdev;
        rq->netdev  = c->netdev;
        rq->channel = c;
        rq->ix      = c->ix;
        rq->priv    = c->priv;

        return 0;

err_rq_wq_destroy:
        mlx5_wq_destroy(&rq->wq_ctrl);

        return err;
}

static void mlx5e_destroy_rq(struct mlx5e_rq *rq)
{
        kfree(rq->skb);
        mlx5_wq_destroy(&rq->wq_ctrl);
}

static int mlx5e_enable_rq(struct mlx5e_rq *rq, struct mlx5e_rq_param *param)
{
        struct mlx5e_priv *priv = rq->priv;
        struct mlx5_core_dev *mdev = priv->mdev;

        void *in;
        void *rqc;
        void *wq;
        int inlen;
        int err;

        inlen = MLX5_ST_SZ_BYTES(create_rq_in) +
                sizeof(u64) * rq->wq_ctrl.buf.npages;
        in = mlx5_vzalloc(inlen);
        if (!in)
                return -ENOMEM;

        rqc = MLX5_ADDR_OF(create_rq_in, in, ctx);
        wq  = MLX5_ADDR_OF(rqc, rqc, wq);

        memcpy(rqc, param->rqc, sizeof(param->rqc));

        MLX5_SET(rqc,  rqc, cqn,                rq->cq.mcq.cqn);
        MLX5_SET(rqc,  rqc, state,              MLX5_RQC_STATE_RST);
        MLX5_SET(rqc,  rqc, flush_in_error_en,  1);
        MLX5_SET(wq,   wq,  log_wq_pg_sz,       rq->wq_ctrl.buf.page_shift -
                                                MLX5_ADAPTER_PAGE_SHIFT);
        MLX5_SET64(wq, wq,  dbr_addr,           rq->wq_ctrl.db.dma);

        mlx5_fill_page_array(&rq->wq_ctrl.buf,
                             (__be64 *)MLX5_ADDR_OF(wq, wq, pas));

        err = mlx5_core_create_rq(mdev, in, inlen, &rq->rqn);

        kvfree(in);

        return err;
}

static int mlx5e_modify_rq(struct mlx5e_rq *rq, int curr_state, int next_state)
{
        struct mlx5e_channel *c = rq->channel;
        struct mlx5e_priv *priv = c->priv;
        struct mlx5_core_dev *mdev = priv->mdev;

        void *in;
        void *rqc;
        int inlen;
        int err;

        inlen = MLX5_ST_SZ_BYTES(modify_rq_in);
        in = mlx5_vzalloc(inlen);
        if (!in)
                return -ENOMEM;

        rqc = MLX5_ADDR_OF(modify_rq_in, in, ctx);

        MLX5_SET(modify_rq_in, in, rq_state, curr_state);
        MLX5_SET(rqc, rqc, state, next_state);

        err = mlx5_core_modify_rq(mdev, rq->rqn, in, inlen);

        kvfree(in);

        return err;
}

static void mlx5e_disable_rq(struct mlx5e_rq *rq)
{
        mlx5_core_destroy_rq(rq->priv->mdev, rq->rqn);
}

static int mlx5e_wait_for_min_rx_wqes(struct mlx5e_rq *rq)
{
        struct mlx5e_channel *c = rq->channel;
        struct mlx5e_priv *priv = c->priv;
        struct mlx5_wq_ll *wq = &rq->wq;
        int i;

        for (i = 0; i < 1000; i++) {
                if (wq->cur_sz >= priv->params.min_rx_wqes)
                        return 0;

                msleep(20);
        }

        return -ETIMEDOUT;
}

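/*
 * RQ bring-up: allocate, issue CREATE_RQ (in RST state), move the RQ
 * RST -> RDY, then use a NOP on SQ 0 to kick the NAPI poll that posts
 * the first RX WQEs.
 */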
static int mlx5e_open_rq(struct mlx5e_channel *c,
                         struct mlx5e_rq_param *param,
                         struct mlx5e_rq *rq)
{
        int err;

        err = mlx5e_create_rq(c, param, rq);
        if (err)
                return err;

        err = mlx5e_enable_rq(rq, param);
        if (err)
                goto err_destroy_rq;

        err = mlx5e_modify_rq(rq, MLX5_RQC_STATE_RST, MLX5_RQC_STATE_RDY);
        if (err)
                goto err_disable_rq;

        set_bit(MLX5E_RQ_STATE_POST_WQES_ENABLE, &rq->state);
        mlx5e_send_nop(&c->sq[0], true); /* trigger mlx5e_post_rx_wqes() */

        return 0;

err_disable_rq:
        mlx5e_disable_rq(rq);
err_destroy_rq:
        mlx5e_destroy_rq(rq);

        return err;
}

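/*
 * RQ teardown: stop posting WQEs, move the RQ to ERR so HW flushes
 * the outstanding WQEs, wait for it to drain, then destroy it.
 */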
static void mlx5e_close_rq(struct mlx5e_rq *rq)
{
        clear_bit(MLX5E_RQ_STATE_POST_WQES_ENABLE, &rq->state);
        napi_synchronize(&rq->channel->napi); /* prevent mlx5e_post_rx_wqes */

        mlx5e_modify_rq(rq, MLX5_RQC_STATE_RDY, MLX5_RQC_STATE_ERR);
        while (!mlx5_wq_ll_is_empty(&rq->wq))
                msleep(20);

        /* avoid destroying rq before mlx5e_poll_rx_cq() is done with it */
        napi_synchronize(&rq->channel->napi);

        mlx5e_disable_rq(rq);
        mlx5e_destroy_rq(rq);
}

static void mlx5e_free_sq_db(struct mlx5e_sq *sq)
{
        kfree(sq->dma_fifo);
        kfree(sq->skb);
}

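/*
 * wq_sz and MLX5_SEND_WQEBB_NUM_DS are powers of two, so df_sz stays
 * a power of two and df_sz - 1 works as a cheap wrap-around mask for
 * the DMA fifo.
 */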
static int mlx5e_alloc_sq_db(struct mlx5e_sq *sq, int numa)
{
        int wq_sz = mlx5_wq_cyc_get_size(&sq->wq);
        int df_sz = wq_sz * MLX5_SEND_WQEBB_NUM_DS;

        sq->skb = kzalloc_node(wq_sz * sizeof(*sq->skb), GFP_KERNEL, numa);
        sq->dma_fifo = kzalloc_node(df_sz * sizeof(*sq->dma_fifo), GFP_KERNEL,
                                    numa);

        if (!sq->skb || !sq->dma_fifo) {
                mlx5e_free_sq_db(sq);
                return -ENOMEM;
        }

        sq->dma_fifo_mask = df_sz - 1;

        return 0;
}

static int mlx5e_create_sq(struct mlx5e_channel *c,
                           int tc,
                           struct mlx5e_sq_param *param,
                           struct mlx5e_sq *sq)
{
        struct mlx5e_priv *priv = c->priv;
        struct mlx5_core_dev *mdev = priv->mdev;

        void *sqc = param->sqc;
        void *sqc_wq = MLX5_ADDR_OF(sqc, sqc, wq);
        int txq_ix;
        int err;

        err = mlx5_alloc_map_uar(mdev, &sq->uar);
        if (err)
                return err;

        param->wq.db_numa_node = cpu_to_node(c->cpu);

        err = mlx5_wq_cyc_create(mdev, &param->wq, sqc_wq, &sq->wq,
                                 &sq->wq_ctrl);
        if (err)
                goto err_unmap_free_uar;

        sq->wq.db       = &sq->wq.db[MLX5_SND_DBR];
        sq->uar_map     = sq->uar.map;
        sq->uar_bf_map  = sq->uar.bf_map;
        sq->bf_buf_size = (1 << MLX5_CAP_GEN(mdev, log_bf_reg_size)) / 2;
        sq->max_inline  = param->max_inline;

        err = mlx5e_alloc_sq_db(sq, cpu_to_node(c->cpu));
        if (err)
                goto err_sq_wq_destroy;

        txq_ix = c->ix + tc * priv->params.num_channels;
        sq->txq = netdev_get_tx_queue(priv->netdev, txq_ix);

        sq->pdev      = c->pdev;
        sq->mkey_be   = c->mkey_be;
        sq->channel   = c;
        sq->tc        = tc;
        sq->edge      = (sq->wq.sz_m1 + 1) - MLX5_SEND_WQE_MAX_WQEBBS;
        sq->bf_budget = MLX5E_SQ_BF_BUDGET;
        priv->txq_to_sq_map[txq_ix] = sq;

        return 0;

err_sq_wq_destroy:
        mlx5_wq_destroy(&sq->wq_ctrl);

err_unmap_free_uar:
        mlx5_unmap_free_uar(mdev, &sq->uar);

        return err;
}

static void mlx5e_destroy_sq(struct mlx5e_sq *sq)
{
        struct mlx5e_channel *c = sq->channel;
        struct mlx5e_priv *priv = c->priv;

        mlx5e_free_sq_db(sq);
        mlx5_wq_destroy(&sq->wq_ctrl);
        mlx5_unmap_free_uar(priv->mdev, &sq->uar);
}

static int mlx5e_enable_sq(struct mlx5e_sq *sq, struct mlx5e_sq_param *param)
{
        struct mlx5e_channel *c = sq->channel;
        struct mlx5e_priv *priv = c->priv;
        struct mlx5_core_dev *mdev = priv->mdev;

        void *in;
        void *sqc;
        void *wq;
        int inlen;
        int err;

        inlen = MLX5_ST_SZ_BYTES(create_sq_in) +
                sizeof(u64) * sq->wq_ctrl.buf.npages;
        in = mlx5_vzalloc(inlen);
        if (!in)
                return -ENOMEM;

        sqc = MLX5_ADDR_OF(create_sq_in, in, ctx);
        wq = MLX5_ADDR_OF(sqc, sqc, wq);

        memcpy(sqc, param->sqc, sizeof(param->sqc));

        MLX5_SET(sqc,  sqc, tis_num_0,          priv->tisn[sq->tc]);
        MLX5_SET(sqc,  sqc, cqn,                c->sq[sq->tc].cq.mcq.cqn);
        MLX5_SET(sqc,  sqc, state,              MLX5_SQC_STATE_RST);
        MLX5_SET(sqc,  sqc, tis_lst_sz,         1);
        MLX5_SET(sqc,  sqc, flush_in_error_en,  1);

        MLX5_SET(wq,   wq, wq_type,       MLX5_WQ_TYPE_CYCLIC);
        MLX5_SET(wq,   wq, uar_page,      sq->uar.index);
        MLX5_SET(wq,   wq, log_wq_pg_sz,  sq->wq_ctrl.buf.page_shift -
                                          MLX5_ADAPTER_PAGE_SHIFT);
        MLX5_SET64(wq, wq, dbr_addr,      sq->wq_ctrl.db.dma);

        mlx5_fill_page_array(&sq->wq_ctrl.buf,
                             (__be64 *)MLX5_ADDR_OF(wq, wq, pas));

        err = mlx5_core_create_sq(mdev, in, inlen, &sq->sqn);

        kvfree(in);

        return err;
}

static int mlx5e_modify_sq(struct mlx5e_sq *sq, int curr_state, int next_state)
{
        struct mlx5e_channel *c = sq->channel;
        struct mlx5e_priv *priv = c->priv;
        struct mlx5_core_dev *mdev = priv->mdev;

        void *in;
        void *sqc;
        int inlen;
        int err;

        inlen = MLX5_ST_SZ_BYTES(modify_sq_in);
        in = mlx5_vzalloc(inlen);
        if (!in)
                return -ENOMEM;

        sqc = MLX5_ADDR_OF(modify_sq_in, in, ctx);

        MLX5_SET(modify_sq_in, in, sq_state, curr_state);
        MLX5_SET(sqc, sqc, state, next_state);

        err = mlx5_core_modify_sq(mdev, sq->sqn, in, inlen);

        kvfree(in);

        return err;
}

static void mlx5e_disable_sq(struct mlx5e_sq *sq)
{
        struct mlx5e_channel *c = sq->channel;
        struct mlx5e_priv *priv = c->priv;
        struct mlx5_core_dev *mdev = priv->mdev;

        mlx5_core_destroy_sq(mdev, sq->sqn);
}

static int mlx5e_open_sq(struct mlx5e_channel *c,
                         int tc,
                         struct mlx5e_sq_param *param,
                         struct mlx5e_sq *sq)
{
        int err;

        err = mlx5e_create_sq(c, tc, param, sq);
        if (err)
                return err;

        err = mlx5e_enable_sq(sq, param);
        if (err)
                goto err_destroy_sq;

        err = mlx5e_modify_sq(sq, MLX5_SQC_STATE_RST, MLX5_SQC_STATE_RDY);
        if (err)
                goto err_disable_sq;

        set_bit(MLX5E_SQ_STATE_WAKE_TXQ_ENABLE, &sq->state);
        netdev_tx_reset_queue(sq->txq);
        netif_tx_start_queue(sq->txq);

        return 0;

err_disable_sq:
        mlx5e_disable_sq(sq);
err_destroy_sq:
        mlx5e_destroy_sq(sq);

        return err;
}

static inline void netif_tx_disable_queue(struct netdev_queue *txq)
{
        __netif_tx_lock_bh(txq);
        netif_tx_stop_queue(txq);
        __netif_tx_unlock_bh(txq);
}

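/*
 * SQ teardown: stop the txq, post a final NOP so HW sees every
 * pending WQE, move the SQ to ERR, and wait until cc catches up with
 * pc before destroying it.
 */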
static void mlx5e_close_sq(struct mlx5e_sq *sq)
{
        clear_bit(MLX5E_SQ_STATE_WAKE_TXQ_ENABLE, &sq->state);
        napi_synchronize(&sq->channel->napi); /* prevent netif_tx_wake_queue */
        netif_tx_disable_queue(sq->txq);

        /* ensure hw is notified of all pending wqes */
        if (mlx5e_sq_has_room_for(sq, 1))
                mlx5e_send_nop(sq, true);

        mlx5e_modify_sq(sq, MLX5_SQC_STATE_RDY, MLX5_SQC_STATE_ERR);
        while (sq->cc != sq->pc) /* wait till sq is empty */
                msleep(20);

        /* avoid destroying sq before mlx5e_poll_tx_cq() is done with it */
        napi_synchronize(&sq->channel->napi);

        mlx5e_disable_sq(sq);
        mlx5e_destroy_sq(sq);
}

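/*
 * Initialize every CQE with an invalid opcode/ownership byte (0xf1)
 * so that stale entries are never mistaken for valid completions.
 */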
static int mlx5e_create_cq(struct mlx5e_channel *c,
                           struct mlx5e_cq_param *param,
                           struct mlx5e_cq *cq)
{
        struct mlx5e_priv *priv = c->priv;
        struct mlx5_core_dev *mdev = priv->mdev;
        struct mlx5_core_cq *mcq = &cq->mcq;
        int eqn_not_used;
        int irqn;
        int err;
        u32 i;

        param->wq.buf_numa_node = cpu_to_node(c->cpu);
        param->wq.db_numa_node  = cpu_to_node(c->cpu);
        param->eq_ix   = c->ix;

        err = mlx5_cqwq_create(mdev, &param->wq, param->cqc, &cq->wq,
                               &cq->wq_ctrl);
        if (err)
                return err;

        mlx5_vector2eqn(mdev, param->eq_ix, &eqn_not_used, &irqn);

        cq->napi        = &c->napi;

        mcq->cqe_sz     = 64;
        mcq->set_ci_db  = cq->wq_ctrl.db.db;
        mcq->arm_db     = cq->wq_ctrl.db.db + 1;
        *mcq->set_ci_db = 0;
        *mcq->arm_db    = 0;
        mcq->vector     = param->eq_ix;
        mcq->comp       = mlx5e_completion_event;
        mcq->event      = mlx5e_cq_error_event;
        mcq->irqn       = irqn;
        mcq->uar        = &priv->cq_uar;

        for (i = 0; i < mlx5_cqwq_get_size(&cq->wq); i++) {
                struct mlx5_cqe64 *cqe = mlx5_cqwq_get_wqe(&cq->wq, i);

                cqe->op_own = 0xf1;
        }

        cq->channel = c;
        cq->priv = priv;

        return 0;
}

static void mlx5e_destroy_cq(struct mlx5e_cq *cq)
{
        mlx5_wq_destroy(&cq->wq_ctrl);
}

static int mlx5e_enable_cq(struct mlx5e_cq *cq, struct mlx5e_cq_param *param)
{
        struct mlx5e_priv *priv = cq->priv;
        struct mlx5_core_dev *mdev = priv->mdev;
        struct mlx5_core_cq *mcq = &cq->mcq;

        void *in;
        void *cqc;
        int inlen;
        int irqn_not_used;
        int eqn;
        int err;

        inlen = MLX5_ST_SZ_BYTES(create_cq_in) +
                sizeof(u64) * cq->wq_ctrl.buf.npages;
        in = mlx5_vzalloc(inlen);
        if (!in)
                return -ENOMEM;

        cqc = MLX5_ADDR_OF(create_cq_in, in, cq_context);

        memcpy(cqc, param->cqc, sizeof(param->cqc));

        mlx5_fill_page_array(&cq->wq_ctrl.buf,
                             (__be64 *)MLX5_ADDR_OF(create_cq_in, in, pas));

        mlx5_vector2eqn(mdev, param->eq_ix, &eqn, &irqn_not_used);

        MLX5_SET(cqc,   cqc, c_eqn,         eqn);
        MLX5_SET(cqc,   cqc, uar_page,      mcq->uar->index);
        MLX5_SET(cqc,   cqc, log_page_size, cq->wq_ctrl.buf.page_shift -
                                            MLX5_ADAPTER_PAGE_SHIFT);
        MLX5_SET64(cqc, cqc, dbr_addr,      cq->wq_ctrl.db.dma);

        err = mlx5_core_create_cq(mdev, mcq, in, inlen);

        kvfree(in);

        if (err)
                return err;

        mlx5e_cq_arm(cq);

        return 0;
}

static void mlx5e_disable_cq(struct mlx5e_cq *cq)
{
        struct mlx5e_priv *priv = cq->priv;
        struct mlx5_core_dev *mdev = priv->mdev;

        mlx5_core_destroy_cq(mdev, &cq->mcq);
}

static int mlx5e_open_cq(struct mlx5e_channel *c,
                         struct mlx5e_cq_param *param,
                         struct mlx5e_cq *cq,
                         u16 moderation_usecs,
                         u16 moderation_frames)
{
        int err;
        struct mlx5e_priv *priv = c->priv;
        struct mlx5_core_dev *mdev = priv->mdev;

        err = mlx5e_create_cq(c, param, cq);
        if (err)
                return err;

        err = mlx5e_enable_cq(cq, param);
        if (err)
                goto err_destroy_cq;

        err = mlx5_core_modify_cq_moderation(mdev, &cq->mcq,
                                             moderation_usecs,
                                             moderation_frames);
        if (err)
                goto err_destroy_cq;

        return 0;

err_destroy_cq:
        mlx5e_destroy_cq(cq);

        return err;
}

static void mlx5e_close_cq(struct mlx5e_cq *cq)
{
        mlx5e_disable_cq(cq);
        mlx5e_destroy_cq(cq);
}

static int mlx5e_get_cpu(struct mlx5e_priv *priv, int ix)
{
        return cpumask_first(priv->mdev->priv.irq_info[ix].mask);
}

static int mlx5e_open_tx_cqs(struct mlx5e_channel *c,
                             struct mlx5e_channel_param *cparam)
{
        struct mlx5e_priv *priv = c->priv;
        int err;
        int tc;

        for (tc = 0; tc < c->num_tc; tc++) {
                err = mlx5e_open_cq(c, &cparam->tx_cq, &c->sq[tc].cq,
                                    priv->params.tx_cq_moderation_usec,
                                    priv->params.tx_cq_moderation_pkts);
                if (err)
                        goto err_close_tx_cqs;
        }

        return 0;

err_close_tx_cqs:
        for (tc--; tc >= 0; tc--)
                mlx5e_close_cq(&c->sq[tc].cq);

        return err;
}

static void mlx5e_close_tx_cqs(struct mlx5e_channel *c)
{
        int tc;

        for (tc = 0; tc < c->num_tc; tc++)
                mlx5e_close_cq(&c->sq[tc].cq);
}

static int mlx5e_open_sqs(struct mlx5e_channel *c,
                          struct mlx5e_channel_param *cparam)
{
        int err;
        int tc;

        for (tc = 0; tc < c->num_tc; tc++) {
                err = mlx5e_open_sq(c, tc, &cparam->sq, &c->sq[tc]);
                if (err)
                        goto err_close_sqs;
        }

        return 0;

err_close_sqs:
        for (tc--; tc >= 0; tc--)
                mlx5e_close_sq(&c->sq[tc]);

        return err;
}

static void mlx5e_close_sqs(struct mlx5e_channel *c)
{
        int tc;

        for (tc = 0; tc < c->num_tc; tc++)
                mlx5e_close_sq(&c->sq[tc]);
}

static void mlx5e_build_channeltc_to_txq_map(struct mlx5e_priv *priv, int ix)
{
        int i;

        for (i = 0; i < MLX5E_MAX_NUM_TC; i++)
                priv->channeltc_to_txq_map[ix][i] =
                        ix + i * priv->params.num_channels;
}

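/*
 * A channel bundles one RQ, num_tc SQs, their CQs and a single NAPI
 * context, all allocated on the node of the CPU that services the
 * channel's IRQ.
 */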
static int mlx5e_open_channel(struct mlx5e_priv *priv, int ix,
                              struct mlx5e_channel_param *cparam,
                              struct mlx5e_channel **cp)
{
        struct net_device *netdev = priv->netdev;
        int cpu = mlx5e_get_cpu(priv, ix);
        struct mlx5e_channel *c;
        int err;

        c = kzalloc_node(sizeof(*c), GFP_KERNEL, cpu_to_node(cpu));
        if (!c)
                return -ENOMEM;

        c->priv     = priv;
        c->ix       = ix;
        c->cpu      = cpu;
        c->pdev     = &priv->mdev->pdev->dev;
        c->netdev   = priv->netdev;
        c->mkey_be  = cpu_to_be32(priv->mr.key);
        c->num_tc   = priv->params.num_tc;

        mlx5e_build_channeltc_to_txq_map(priv, ix);

        netif_napi_add(netdev, &c->napi, mlx5e_napi_poll, 64);

        err = mlx5e_open_tx_cqs(c, cparam);
        if (err)
                goto err_napi_del;

        err = mlx5e_open_cq(c, &cparam->rx_cq, &c->rq.cq,
                            priv->params.rx_cq_moderation_usec,
                            priv->params.rx_cq_moderation_pkts);
        if (err)
                goto err_close_tx_cqs;

        napi_enable(&c->napi);

        err = mlx5e_open_sqs(c, cparam);
        if (err)
                goto err_disable_napi;

        err = mlx5e_open_rq(c, &cparam->rq, &c->rq);
        if (err)
                goto err_close_sqs;

        netif_set_xps_queue(netdev, get_cpu_mask(c->cpu), ix);
        *cp = c;

        return 0;

err_close_sqs:
        mlx5e_close_sqs(c);

err_disable_napi:
        napi_disable(&c->napi);
        mlx5e_close_cq(&c->rq.cq);

err_close_tx_cqs:
        mlx5e_close_tx_cqs(c);

err_napi_del:
        netif_napi_del(&c->napi);
        kfree(c);

        return err;
}

static void mlx5e_close_channel(struct mlx5e_channel *c)
{
        mlx5e_close_rq(&c->rq);
        mlx5e_close_sqs(c);
        napi_disable(&c->napi);
        mlx5e_close_cq(&c->rq.cq);
        mlx5e_close_tx_cqs(c);
        netif_napi_del(&c->napi);
        kfree(c);
}

static void mlx5e_build_rq_param(struct mlx5e_priv *priv,
                                 struct mlx5e_rq_param *param)
{
        void *rqc = param->rqc;
        void *wq = MLX5_ADDR_OF(rqc, rqc, wq);

        MLX5_SET(wq, wq, wq_type,          MLX5_WQ_TYPE_LINKED_LIST);
        MLX5_SET(wq, wq, end_padding_mode, MLX5_WQ_END_PAD_MODE_ALIGN);
        MLX5_SET(wq, wq, log_wq_stride,    ilog2(sizeof(struct mlx5e_rx_wqe)));
        MLX5_SET(wq, wq, log_wq_sz,        priv->params.log_rq_size);
        MLX5_SET(wq, wq, pd,               priv->pdn);

        param->wq.buf_numa_node = dev_to_node(&priv->mdev->pdev->dev);
        param->wq.linear = 1;
}

static void mlx5e_build_sq_param(struct mlx5e_priv *priv,
                                 struct mlx5e_sq_param *param)
{
        void *sqc = param->sqc;
        void *wq = MLX5_ADDR_OF(sqc, sqc, wq);

        MLX5_SET(wq, wq, log_wq_sz,     priv->params.log_sq_size);
        MLX5_SET(wq, wq, log_wq_stride, ilog2(MLX5_SEND_WQE_BB));
        MLX5_SET(wq, wq, pd,            priv->pdn);

        param->wq.buf_numa_node = dev_to_node(&priv->mdev->pdev->dev);
        param->max_inline = priv->params.tx_max_inline;
}

static void mlx5e_build_common_cq_param(struct mlx5e_priv *priv,
                                        struct mlx5e_cq_param *param)
{
        void *cqc = param->cqc;

        MLX5_SET(cqc, cqc, uar_page, priv->cq_uar.index);
}

static void mlx5e_build_rx_cq_param(struct mlx5e_priv *priv,
                                    struct mlx5e_cq_param *param)
{
        void *cqc = param->cqc;

        MLX5_SET(cqc, cqc, log_cq_size,  priv->params.log_rq_size);

        mlx5e_build_common_cq_param(priv, param);
}

static void mlx5e_build_tx_cq_param(struct mlx5e_priv *priv,
                                    struct mlx5e_cq_param *param)
{
        void *cqc = param->cqc;

        MLX5_SET(cqc, cqc, log_cq_size,  priv->params.log_sq_size);

        mlx5e_build_common_cq_param(priv, param);
}

static void mlx5e_build_channel_param(struct mlx5e_priv *priv,
                                      struct mlx5e_channel_param *cparam)
{
        memset(cparam, 0, sizeof(*cparam));

        mlx5e_build_rq_param(priv, &cparam->rq);
        mlx5e_build_sq_param(priv, &cparam->sq);
        mlx5e_build_rx_cq_param(priv, &cparam->rx_cq);
        mlx5e_build_tx_cq_param(priv, &cparam->tx_cq);
}

static int mlx5e_open_channels(struct mlx5e_priv *priv)
{
        struct mlx5e_channel_param cparam;
        int nch = priv->params.num_channels;
        int err = -ENOMEM;
        int i;
        int j;

        priv->channel = kcalloc(nch, sizeof(struct mlx5e_channel *),
                                GFP_KERNEL);

        priv->txq_to_sq_map = kcalloc(nch * priv->params.num_tc,
                                      sizeof(struct mlx5e_sq *), GFP_KERNEL);

        if (!priv->channel || !priv->txq_to_sq_map)
                goto err_free_txq_to_sq_map;

        mlx5e_build_channel_param(priv, &cparam);
        for (i = 0; i < nch; i++) {
                err = mlx5e_open_channel(priv, i, &cparam, &priv->channel[i]);
                if (err)
                        goto err_close_channels;
        }

        for (j = 0; j < nch; j++) {
                err = mlx5e_wait_for_min_rx_wqes(&priv->channel[j]->rq);
                if (err)
                        goto err_close_channels;
        }

        return 0;

err_close_channels:
        for (i--; i >= 0; i--)
                mlx5e_close_channel(priv->channel[i]);

err_free_txq_to_sq_map:
        kfree(priv->txq_to_sq_map);
        kfree(priv->channel);

        return err;
}

static void mlx5e_close_channels(struct mlx5e_priv *priv)
{
        int i;

        for (i = 0; i < priv->params.num_channels; i++)
                mlx5e_close_channel(priv->channel[i]);

        kfree(priv->txq_to_sq_map);
        kfree(priv->channel);
}

static int mlx5e_rx_hash_fn(int hfunc)
{
        return (hfunc == ETH_RSS_HASH_TOP) ?
               MLX5_RX_HASH_FN_TOEPLITZ :
               MLX5_RX_HASH_FN_INVERTED_XOR8;
}

static int mlx5e_bits_invert(unsigned long a, int size)
{
        int inv = 0;
        int i;

        for (i = 0; i < size; i++)
                inv |= (test_bit(size - i - 1, &a) ? 1 : 0) << i;

        return inv;
}

static void mlx5e_fill_indir_rqt_rqns(struct mlx5e_priv *priv, void *rqtc)
{
        int i;

        for (i = 0; i < MLX5E_INDIR_RQT_SIZE; i++) {
                int ix = i;

                if (priv->params.rss_hfunc == ETH_RSS_HASH_XOR)
                        ix = mlx5e_bits_invert(i, MLX5E_LOG_INDIR_RQT_SIZE);

                ix = priv->params.indirection_rqt[ix];
                ix = ix % priv->params.num_channels;
                MLX5_SET(rqtc, rqtc, rq_num[i],
                         test_bit(MLX5E_STATE_OPENED, &priv->state) ?
                         priv->channel[ix]->rq.rqn :
                         priv->drop_rq.rqn);
        }
}

static void mlx5e_fill_rqt_rqns(struct mlx5e_priv *priv, void *rqtc,
                                enum mlx5e_rqt_ix rqt_ix)
{
        switch (rqt_ix) {
        case MLX5E_INDIRECTION_RQT:
                mlx5e_fill_indir_rqt_rqns(priv, rqtc);

                break;

        default: /* MLX5E_SINGLE_RQ_RQT */
                MLX5_SET(rqtc, rqtc, rq_num[0],
                         test_bit(MLX5E_STATE_OPENED, &priv->state) ?
                         priv->channel[0]->rq.rqn :
                         priv->drop_rq.rqn);

                break;
        }
}

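/*
 * Create an RQ table: a single-entry table for MLX5E_SINGLE_RQ_RQT,
 * or an MLX5E_INDIR_RQT_SIZE-entry indirection table for RSS.
 */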
static int mlx5e_create_rqt(struct mlx5e_priv *priv, enum mlx5e_rqt_ix rqt_ix)
{
        struct mlx5_core_dev *mdev = priv->mdev;
        u32 *in;
        void *rqtc;
        int inlen;
        int sz;
        int err;

        sz = (rqt_ix == MLX5E_SINGLE_RQ_RQT) ? 1 : MLX5E_INDIR_RQT_SIZE;

        inlen = MLX5_ST_SZ_BYTES(create_rqt_in) + sizeof(u32) * sz;
        in = mlx5_vzalloc(inlen);
        if (!in)
                return -ENOMEM;

        rqtc = MLX5_ADDR_OF(create_rqt_in, in, rqt_context);

        MLX5_SET(rqtc, rqtc, rqt_actual_size, sz);
        MLX5_SET(rqtc, rqtc, rqt_max_size, sz);

        mlx5e_fill_rqt_rqns(priv, rqtc, rqt_ix);

        err = mlx5_core_create_rqt(mdev, in, inlen, &priv->rqtn[rqt_ix]);

        kvfree(in);

        return err;
}

int mlx5e_redirect_rqt(struct mlx5e_priv *priv, enum mlx5e_rqt_ix rqt_ix)
{
        struct mlx5_core_dev *mdev = priv->mdev;
        u32 *in;
        void *rqtc;
        int inlen;
        int sz;
        int err;

        sz = (rqt_ix == MLX5E_SINGLE_RQ_RQT) ? 1 : MLX5E_INDIR_RQT_SIZE;

        inlen = MLX5_ST_SZ_BYTES(modify_rqt_in) + sizeof(u32) * sz;
        in = mlx5_vzalloc(inlen);
        if (!in)
                return -ENOMEM;

        rqtc = MLX5_ADDR_OF(modify_rqt_in, in, ctx);

        MLX5_SET(rqtc, rqtc, rqt_actual_size, sz);

        mlx5e_fill_rqt_rqns(priv, rqtc, rqt_ix);

        MLX5_SET(modify_rqt_in, in, bitmask.rqn_list, 1);

        err = mlx5_core_modify_rqt(mdev, priv->rqtn[rqt_ix], in, inlen);

        kvfree(in);

        return err;
}

static void mlx5e_destroy_rqt(struct mlx5e_priv *priv, enum mlx5e_rqt_ix rqt_ix)
{
        mlx5_core_destroy_rqt(priv->mdev, priv->rqtn[rqt_ix]);
}

static void mlx5e_redirect_rqts(struct mlx5e_priv *priv)
{
        mlx5e_redirect_rqt(priv, MLX5E_INDIRECTION_RQT);
        mlx5e_redirect_rqt(priv, MLX5E_SINGLE_RQ_RQT);
}

static void mlx5e_build_tir_ctx_lro(void *tirc, struct mlx5e_priv *priv)
{
        if (!priv->params.lro_en)
                return;

#define ROUGH_MAX_L2_L3_HDR_SZ 256

        MLX5_SET(tirc, tirc, lro_enable_mask,
                 MLX5_TIRC_LRO_ENABLE_MASK_IPV4_LRO |
                 MLX5_TIRC_LRO_ENABLE_MASK_IPV6_LRO);
        MLX5_SET(tirc, tirc, lro_max_ip_payload_size,
                 (priv->params.lro_wqe_sz -
                  ROUGH_MAX_L2_L3_HDR_SZ) >> 8);
        MLX5_SET(tirc, tirc, lro_timeout_period_usecs,
                 MLX5_CAP_ETH(priv->mdev,
                              lro_timer_supported_periods[2]));
}

static int mlx5e_modify_tir_lro(struct mlx5e_priv *priv, int tt)
{
        struct mlx5_core_dev *mdev = priv->mdev;

        void *in;
        void *tirc;
        int inlen;
        int err;

        inlen = MLX5_ST_SZ_BYTES(modify_tir_in);
        in = mlx5_vzalloc(inlen);
        if (!in)
                return -ENOMEM;

        MLX5_SET(modify_tir_in, in, bitmask.lro, 1);
        tirc = MLX5_ADDR_OF(modify_tir_in, in, ctx);

        mlx5e_build_tir_ctx_lro(tirc, priv);

        err = mlx5_core_modify_tir(mdev, priv->tirn[tt], in, inlen);

        kvfree(in);

        return err;
}

static int mlx5e_set_dev_port_mtu(struct net_device *netdev)
{
        struct mlx5e_priv *priv = netdev_priv(netdev);
        struct mlx5_core_dev *mdev = priv->mdev;
        int hw_mtu;
        int err;

        err = mlx5_set_port_mtu(mdev, MLX5E_SW2HW_MTU(netdev->mtu), 1);
        if (err)
                return err;

        mlx5_query_port_oper_mtu(mdev, &hw_mtu, 1);

        if (MLX5E_HW2SW_MTU(hw_mtu) != netdev->mtu)
                netdev_warn(netdev, "%s: Port MTU %d is different than netdev mtu %d\n",
                            __func__, MLX5E_HW2SW_MTU(hw_mtu), netdev->mtu);

        netdev->mtu = MLX5E_HW2SW_MTU(hw_mtu);
        return 0;
}

int mlx5e_open_locked(struct net_device *netdev)
{
        struct mlx5e_priv *priv = netdev_priv(netdev);
        int num_txqs;
        int err;

        set_bit(MLX5E_STATE_OPENED, &priv->state);

        num_txqs = priv->params.num_channels * priv->params.num_tc;
        netif_set_real_num_tx_queues(netdev, num_txqs);
        netif_set_real_num_rx_queues(netdev, priv->params.num_channels);

        err = mlx5e_set_dev_port_mtu(netdev);
        if (err)
                return err;

        err = mlx5e_open_channels(priv);
        if (err) {
                netdev_err(netdev, "%s: mlx5e_open_channels failed, %d\n",
                           __func__, err);
                return err;
        }

        mlx5e_update_carrier(priv);
        mlx5e_redirect_rqts(priv);

        schedule_delayed_work(&priv->update_stats_work, 0);

        return 0;
}

static int mlx5e_open(struct net_device *netdev)
{
        struct mlx5e_priv *priv = netdev_priv(netdev);
        int err;

        mutex_lock(&priv->state_lock);
        err = mlx5e_open_locked(netdev);
        mutex_unlock(&priv->state_lock);

        return err;
}

int mlx5e_close_locked(struct net_device *netdev)
{
        struct mlx5e_priv *priv = netdev_priv(netdev);

        clear_bit(MLX5E_STATE_OPENED, &priv->state);

        mlx5e_redirect_rqts(priv);
        netif_carrier_off(priv->netdev);
        mlx5e_close_channels(priv);

        return 0;
}

static int mlx5e_close(struct net_device *netdev)
{
        struct mlx5e_priv *priv = netdev_priv(netdev);
        int err;

        mutex_lock(&priv->state_lock);
        err = mlx5e_close_locked(netdev);
        mutex_unlock(&priv->state_lock);

        return err;
}

static int mlx5e_create_drop_rq(struct mlx5e_priv *priv,
                                struct mlx5e_rq *rq,
                                struct mlx5e_rq_param *param)
{
        struct mlx5_core_dev *mdev = priv->mdev;
        void *rqc = param->rqc;
        void *rqc_wq = MLX5_ADDR_OF(rqc, rqc, wq);
        int err;

        param->wq.db_numa_node = param->wq.buf_numa_node;

        err = mlx5_wq_ll_create(mdev, &param->wq, rqc_wq, &rq->wq,
                                &rq->wq_ctrl);
        if (err)
                return err;

        rq->priv = priv;

        return 0;
}

static int mlx5e_create_drop_cq(struct mlx5e_priv *priv,
                                struct mlx5e_cq *cq,
                                struct mlx5e_cq_param *param)
{
        struct mlx5_core_dev *mdev = priv->mdev;
        struct mlx5_core_cq *mcq = &cq->mcq;
        int eqn_not_used;
        int irqn;
        int err;

        err = mlx5_cqwq_create(mdev, &param->wq, param->cqc, &cq->wq,
                               &cq->wq_ctrl);
        if (err)
                return err;

        mlx5_vector2eqn(mdev, param->eq_ix, &eqn_not_used, &irqn);

        mcq->cqe_sz     = 64;
        mcq->set_ci_db  = cq->wq_ctrl.db.db;
        mcq->arm_db     = cq->wq_ctrl.db.db + 1;
        *mcq->set_ci_db = 0;
        *mcq->arm_db    = 0;
        mcq->vector     = param->eq_ix;
        mcq->comp       = mlx5e_completion_event;
        mcq->event      = mlx5e_cq_error_event;
        mcq->irqn       = irqn;
        mcq->uar        = &priv->cq_uar;

        cq->priv = priv;

        return 0;
}

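/*
 * No WQEs are ever posted to the drop RQ; it only gives the RQ tables
 * a valid rqn to point at (and thus a drop target) while the channels
 * are closed.
 */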
static int mlx5e_open_drop_rq(struct mlx5e_priv *priv)
{
        struct mlx5e_cq_param cq_param;
        struct mlx5e_rq_param rq_param;
        struct mlx5e_rq *rq = &priv->drop_rq;
        struct mlx5e_cq *cq = &priv->drop_rq.cq;
        int err;

        memset(&cq_param, 0, sizeof(cq_param));
        memset(&rq_param, 0, sizeof(rq_param));
        mlx5e_build_rx_cq_param(priv, &cq_param);
        mlx5e_build_rq_param(priv, &rq_param);

        err = mlx5e_create_drop_cq(priv, cq, &cq_param);
        if (err)
                return err;

        err = mlx5e_enable_cq(cq, &cq_param);
        if (err)
                goto err_destroy_cq;

        err = mlx5e_create_drop_rq(priv, rq, &rq_param);
        if (err)
                goto err_disable_cq;

        err = mlx5e_enable_rq(rq, &rq_param);
        if (err)
                goto err_destroy_rq;

        return 0;

err_destroy_rq:
        mlx5e_destroy_rq(&priv->drop_rq);

err_disable_cq:
        mlx5e_disable_cq(&priv->drop_rq.cq);

err_destroy_cq:
        mlx5e_destroy_cq(&priv->drop_rq.cq);

        return err;
}

static void mlx5e_close_drop_rq(struct mlx5e_priv *priv)
{
        mlx5e_disable_rq(&priv->drop_rq);
        mlx5e_destroy_rq(&priv->drop_rq);
        mlx5e_disable_cq(&priv->drop_rq.cq);
        mlx5e_destroy_cq(&priv->drop_rq.cq);
}

static int mlx5e_create_tis(struct mlx5e_priv *priv, int tc)
{
        struct mlx5_core_dev *mdev = priv->mdev;
        u32 in[MLX5_ST_SZ_DW(create_tis_in)];
        void *tisc = MLX5_ADDR_OF(create_tis_in, in, ctx);

        memset(in, 0, sizeof(in));

        MLX5_SET(tisc, tisc, prio,  tc);
        MLX5_SET(tisc, tisc, transport_domain, priv->tdn);

        return mlx5_core_create_tis(mdev, in, sizeof(in), &priv->tisn[tc]);
}

static void mlx5e_destroy_tis(struct mlx5e_priv *priv, int tc)
{
        mlx5_core_destroy_tis(priv->mdev, priv->tisn[tc]);
}

static int mlx5e_create_tises(struct mlx5e_priv *priv)
{
        int err;
        int tc;

        for (tc = 0; tc < priv->params.num_tc; tc++) {
                err = mlx5e_create_tis(priv, tc);
                if (err)
                        goto err_close_tises;
        }

        return 0;

err_close_tises:
        for (tc--; tc >= 0; tc--)
                mlx5e_destroy_tis(priv, tc);

        return err;
}

static void mlx5e_destroy_tises(struct mlx5e_priv *priv)
{
        int tc;

        for (tc = 0; tc < priv->params.num_tc; tc++)
                mlx5e_destroy_tis(priv, tc);
}

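/*
 * Configure a TIR per traffic type: MLX5E_TT_ANY spreads over the
 * single-RQ table, everything else RSS-hashes over the indirection
 * table using the fields selected below.
 */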
1576static void mlx5e_build_tir_ctx(struct mlx5e_priv *priv, u32 *tirc, int tt)
1577{
1578        void *hfso = MLX5_ADDR_OF(tirc, tirc, rx_hash_field_selector_outer);
1579
1580        MLX5_SET(tirc, tirc, transport_domain, priv->tdn);
1581
1582#define MLX5_HASH_IP            (MLX5_HASH_FIELD_SEL_SRC_IP   |\
1583                                 MLX5_HASH_FIELD_SEL_DST_IP)
1584
1585#define MLX5_HASH_IP_L4PORTS    (MLX5_HASH_FIELD_SEL_SRC_IP   |\
1586                                 MLX5_HASH_FIELD_SEL_DST_IP   |\
1587                                 MLX5_HASH_FIELD_SEL_L4_SPORT |\
1588                                 MLX5_HASH_FIELD_SEL_L4_DPORT)
1589
1590#define MLX5_HASH_IP_IPSEC_SPI  (MLX5_HASH_FIELD_SEL_SRC_IP   |\
1591                                 MLX5_HASH_FIELD_SEL_DST_IP   |\
1592                                 MLX5_HASH_FIELD_SEL_IPSEC_SPI)
1593
1594        mlx5e_build_tir_ctx_lro(tirc, priv);
1595
1596        MLX5_SET(tirc, tirc, disp_type, MLX5_TIRC_DISP_TYPE_INDIRECT);
1597
1598        switch (tt) {
1599        case MLX5E_TT_ANY:
1600                MLX5_SET(tirc, tirc, indirect_table,
1601                         priv->rqtn[MLX5E_SINGLE_RQ_RQT]);
1602                MLX5_SET(tirc, tirc, rx_hash_fn, MLX5_RX_HASH_FN_INVERTED_XOR8);
1603                break;
1604        default:
1605                MLX5_SET(tirc, tirc, indirect_table,
1606                         priv->rqtn[MLX5E_INDIRECTION_RQT]);
1607                MLX5_SET(tirc, tirc, rx_hash_fn,
1608                         mlx5e_rx_hash_fn(priv->params.rss_hfunc));
1609                if (priv->params.rss_hfunc == ETH_RSS_HASH_TOP) {
1610                        void *rss_key = MLX5_ADDR_OF(tirc, tirc,
1611                                                     rx_hash_toeplitz_key);
1612                        size_t len = MLX5_FLD_SZ_BYTES(tirc,
1613                                                       rx_hash_toeplitz_key);
1614
1615                        MLX5_SET(tirc, tirc, rx_hash_symmetric, 1);
1616                        memcpy(rss_key, priv->params.toeplitz_hash_key, len);
1617                }
1618                break;
1619        }
1620
1621        switch (tt) {
1622        case MLX5E_TT_IPV4_TCP:
1623                MLX5_SET(rx_hash_field_select, hfso, l3_prot_type,
1624                         MLX5_L3_PROT_TYPE_IPV4);
1625                MLX5_SET(rx_hash_field_select, hfso, l4_prot_type,
1626                         MLX5_L4_PROT_TYPE_TCP);
1627                MLX5_SET(rx_hash_field_select, hfso, selected_fields,
1628                         MLX5_HASH_IP_L4PORTS);
1629                break;
1630
1631        case MLX5E_TT_IPV6_TCP:
1632                MLX5_SET(rx_hash_field_select, hfso, l3_prot_type,
1633                         MLX5_L3_PROT_TYPE_IPV6);
1634                MLX5_SET(rx_hash_field_select, hfso, l4_prot_type,
1635                         MLX5_L4_PROT_TYPE_TCP);
1636                MLX5_SET(rx_hash_field_select, hfso, selected_fields,
1637                         MLX5_HASH_IP_L4PORTS);
1638                break;
1639
1640        case MLX5E_TT_IPV4_UDP:
1641                MLX5_SET(rx_hash_field_select, hfso, l3_prot_type,
1642                         MLX5_L3_PROT_TYPE_IPV4);
1643                MLX5_SET(rx_hash_field_select, hfso, l4_prot_type,
1644                         MLX5_L4_PROT_TYPE_UDP);
1645                MLX5_SET(rx_hash_field_select, hfso, selected_fields,
1646                         MLX5_HASH_IP_L4PORTS);
1647                break;
1648
1649        case MLX5E_TT_IPV6_UDP:
1650                MLX5_SET(rx_hash_field_select, hfso, l3_prot_type,
1651                         MLX5_L3_PROT_TYPE_IPV6);
1652                MLX5_SET(rx_hash_field_select, hfso, l4_prot_type,
1653                         MLX5_L4_PROT_TYPE_UDP);
1654                MLX5_SET(rx_hash_field_select, hfso, selected_fields,
1655                         MLX5_HASH_IP_L4PORTS);
1656                break;
1657
1658        case MLX5E_TT_IPV4_IPSEC_AH:
1659                MLX5_SET(rx_hash_field_select, hfso, l3_prot_type,
1660                         MLX5_L3_PROT_TYPE_IPV4);
1661                MLX5_SET(rx_hash_field_select, hfso, selected_fields,
1662                         MLX5_HASH_IP_IPSEC_SPI);
1663                break;
1664
1665        case MLX5E_TT_IPV6_IPSEC_AH:
1666                MLX5_SET(rx_hash_field_select, hfso, l3_prot_type,
1667                         MLX5_L3_PROT_TYPE_IPV6);
1668                MLX5_SET(rx_hash_field_select, hfso, selected_fields,
1669                         MLX5_HASH_IP_IPSEC_SPI);
1670                break;
1671
1672        case MLX5E_TT_IPV4_IPSEC_ESP:
1673                MLX5_SET(rx_hash_field_select, hfso, l3_prot_type,
1674                         MLX5_L3_PROT_TYPE_IPV4);
1675                MLX5_SET(rx_hash_field_select, hfso, selected_fields,
1676                         MLX5_HASH_IP_IPSEC_SPI);
1677                break;
1678
1679        case MLX5E_TT_IPV6_IPSEC_ESP:
1680                MLX5_SET(rx_hash_field_select, hfso, l3_prot_type,
1681                         MLX5_L3_PROT_TYPE_IPV6);
1682                MLX5_SET(rx_hash_field_select, hfso, selected_fields,
1683                         MLX5_HASH_IP_IPSEC_SPI);
1684                break;
1685
1686        case MLX5E_TT_IPV4:
1687                MLX5_SET(rx_hash_field_select, hfso, l3_prot_type,
1688                         MLX5_L3_PROT_TYPE_IPV4);
1689                MLX5_SET(rx_hash_field_select, hfso, selected_fields,
1690                         MLX5_HASH_IP);
1691                break;
1692
1693        case MLX5E_TT_IPV6:
1694                MLX5_SET(rx_hash_field_select, hfso, l3_prot_type,
1695                         MLX5_L3_PROT_TYPE_IPV6);
1696                MLX5_SET(rx_hash_field_select, hfso, selected_fields,
1697                         MLX5_HASH_IP);
1698                break;
1699        }
1700}
1701
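/*
 * Build and issue a CREATE_TIR command for traffic type @tt; the resulting
 * TIR number is stored in priv->tirn[tt].  The command buffer may be large,
 * so it is vzalloc'ed rather than placed on the stack.
 */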
1702static int mlx5e_create_tir(struct mlx5e_priv *priv, int tt)
1703{
1704        struct mlx5_core_dev *mdev = priv->mdev;
1705        u32 *in;
1706        void *tirc;
1707        int inlen;
1708        int err;
1709
1710        inlen = MLX5_ST_SZ_BYTES(create_tir_in);
1711        in = mlx5_vzalloc(inlen);
1712        if (!in)
1713                return -ENOMEM;
1714
1715        tirc = MLX5_ADDR_OF(create_tir_in, in, ctx);
1716
1717        mlx5e_build_tir_ctx(priv, tirc, tt);
1718
1719        err = mlx5_core_create_tir(mdev, in, inlen, &priv->tirn[tt]);
1720
1721        kvfree(in);
1722
1723        return err;
1724}
1725
1726static void mlx5e_destroy_tir(struct mlx5e_priv *priv, int tt)
1727{
1728        mlx5_core_destroy_tir(priv->mdev, priv->tirn[tt]);
1729}
1730
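/*
 * Create one TIR per traffic type, unwinding the TIRs already created if
 * any command fails.
 */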
1731static int mlx5e_create_tirs(struct mlx5e_priv *priv)
1732{
1733        int err;
1734        int i;
1735
1736        for (i = 0; i < MLX5E_NUM_TT; i++) {
1737                err = mlx5e_create_tir(priv, i);
1738                if (err)
1739                        goto err_destroy_tirs;
1740        }
1741
1742        return 0;
1743
1744err_destroy_tirs:
1745        for (i--; i >= 0; i--)
1746                mlx5e_destroy_tir(priv, i);
1747
1748        return err;
1749}
1750
1751static void mlx5e_destroy_tirs(struct mlx5e_priv *priv)
1752{
1753        int i;
1754
1755        for (i = 0; i < MLX5E_NUM_TT; i++)
1756                mlx5e_destroy_tir(priv, i);
1757}
1758
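/*
 * Fill netdev stats from the cached vport counters (refreshed periodically
 * by update_stats_work); note that "multicast" accumulates both the RX and
 * TX multicast packet counts.
 */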
1759static struct rtnl_link_stats64 *
1760mlx5e_get_stats(struct net_device *dev, struct rtnl_link_stats64 *stats)
1761{
1762        struct mlx5e_priv *priv = netdev_priv(dev);
1763        struct mlx5e_vport_stats *vstats = &priv->stats.vport;
1764
1765        stats->rx_packets = vstats->rx_packets;
1766        stats->rx_bytes   = vstats->rx_bytes;
1767        stats->tx_packets = vstats->tx_packets;
1768        stats->tx_bytes   = vstats->tx_bytes;
1769        stats->multicast  = vstats->rx_multicast_packets +
1770                            vstats->tx_multicast_packets;
1771        stats->tx_errors  = vstats->tx_error_packets;
1772        stats->rx_errors  = vstats->rx_error_packets;
1773        stats->tx_dropped = vstats->tx_queue_dropped;
1774        stats->rx_crc_errors = 0;
1775        stats->rx_length_errors = 0;
1776
1777        return stats;
1778}
1779
1780static void mlx5e_set_rx_mode(struct net_device *dev)
1781{
1782        struct mlx5e_priv *priv = netdev_priv(dev);
1783
1784        schedule_work(&priv->set_rx_mode_work);
1785}
1786
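/*
 * Update dev_addr under the address lock, then let set_rx_mode_work push
 * the new unicast address to the hardware outside the atomic context.
 */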
1787static int mlx5e_set_mac(struct net_device *netdev, void *addr)
1788{
1789        struct mlx5e_priv *priv = netdev_priv(netdev);
1790        struct sockaddr *saddr = addr;
1791
1792        if (!is_valid_ether_addr(saddr->sa_data))
1793                return -EADDRNOTAVAIL;
1794
1795        netif_addr_lock_bh(netdev);
1796        ether_addr_copy(netdev->dev_addr, saddr->sa_data);
1797        netif_addr_unlock_bh(netdev);
1798
1799        schedule_work(&priv->set_rx_mode_work);
1800
1801        return 0;
1802}
1803
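/*
 * Toggling LRO affects the TIR contexts and the RQ buffer layout, so the
 * channels are bounced under state_lock; VLAN filtering is flipped
 * directly in the flow tables without a restart.
 */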
1804static int mlx5e_set_features(struct net_device *netdev,
1805                              netdev_features_t features)
1806{
1807        struct mlx5e_priv *priv = netdev_priv(netdev);
1808        int err = 0;
1809        netdev_features_t changes = features ^ netdev->features;
1810
1811        mutex_lock(&priv->state_lock);
1812
1813        if (changes & NETIF_F_LRO) {
1814                bool was_opened = test_bit(MLX5E_STATE_OPENED, &priv->state);
1815
1816                if (was_opened)
1817                        mlx5e_close_locked(priv->netdev);
1818
1819                priv->params.lro_en = !!(features & NETIF_F_LRO);
1820                mlx5e_modify_tir_lro(priv, MLX5E_TT_IPV4_TCP);
1821                mlx5e_modify_tir_lro(priv, MLX5E_TT_IPV6_TCP);
1822
1823                if (was_opened)
1824                        err = mlx5e_open_locked(priv->netdev);
1825        }
1826
1827        mutex_unlock(&priv->state_lock);
1828
1829        if (changes & NETIF_F_HW_VLAN_CTAG_FILTER) {
1830                if (features & NETIF_F_HW_VLAN_CTAG_FILTER)
1831                        mlx5e_enable_vlan_filter(priv);
1832                else
1833                        mlx5e_disable_vlan_filter(priv);
1834        }
1835
1836        return err;
1837}
1838
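/*
 * Validate the requested MTU against the port maximum, then bounce the
 * channels (if open) so the RQs are re-created for the new buffer size.
 */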
1839static int mlx5e_change_mtu(struct net_device *netdev, int new_mtu)
1840{
1841        struct mlx5e_priv *priv = netdev_priv(netdev);
1842        struct mlx5_core_dev *mdev = priv->mdev;
1843        bool was_opened;
1844        int max_mtu;
1845        int err = 0;
1846
1847        mlx5_query_port_max_mtu(mdev, &max_mtu, 1);
1848
1849        if (new_mtu > max_mtu) {
1850                netdev_err(netdev,
1851                           "%s: bad MTU %d, port max MTU is %d\n",
1852                           __func__, new_mtu, max_mtu);
1853                return -EINVAL;
1854        }
1855
1856        mutex_lock(&priv->state_lock);
1857
1858        was_opened = test_bit(MLX5E_STATE_OPENED, &priv->state);
1859        if (was_opened)
1860                mlx5e_close_locked(netdev);
1861
1862        netdev->mtu = new_mtu;
1863
1864        if (was_opened)
1865                err = mlx5e_open_locked(netdev);
1866
1867        mutex_unlock(&priv->state_lock);
1868
1869        return err;
1870}
1871
1872static struct net_device_ops mlx5e_netdev_ops = {
1873        .ndo_open                = mlx5e_open,
1874        .ndo_stop                = mlx5e_close,
1875        .ndo_start_xmit          = mlx5e_xmit,
1876        .ndo_get_stats64         = mlx5e_get_stats,
1877        .ndo_set_rx_mode         = mlx5e_set_rx_mode,
1878        .ndo_set_mac_address     = mlx5e_set_mac,
1879        .ndo_vlan_rx_add_vid     = mlx5e_vlan_rx_add_vid,
1880        .ndo_vlan_rx_kill_vid    = mlx5e_vlan_rx_kill_vid,
1881        .ndo_set_features        = mlx5e_set_features,
1882        .ndo_change_mtu          = mlx5e_change_mtu,
1883};
1884
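/*
 * Refuse to create a net device unless the HCA is an Ethernet port with
 * the offload, flow-table and RSS capabilities this driver depends on.
 */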
1885static int mlx5e_check_required_hca_cap(struct mlx5_core_dev *mdev)
1886{
1887        if (MLX5_CAP_GEN(mdev, port_type) != MLX5_CAP_PORT_TYPE_ETH)
1888                return -ENOTSUPP;
1889        if (!MLX5_CAP_GEN(mdev, eth_net_offloads) ||
1890            !MLX5_CAP_GEN(mdev, nic_flow_table) ||
1891            !MLX5_CAP_ETH(mdev, csum_cap) ||
1892            !MLX5_CAP_ETH(mdev, max_lso_cap) ||
1893            !MLX5_CAP_ETH(mdev, vlan_cap) ||
1894            !MLX5_CAP_ETH(mdev, rss_ind_tbl_cap) ||
1895            MLX5_CAP_FLOWTABLE(mdev,
1896                               flow_table_properties_nic_receive.max_ft_level)
1897                               < 3) {
1898                mlx5_core_warn(mdev,
1899                               "Not creating net device, some required device capabilities are missing\n");
1900                return -ENOTSUPP;
1901        }
1902        return 0;
1903}
1904
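/*
 * The max inline size is bounded by half the BlueFlame register
 * (presumably because the 2^log_bf_reg_size region holds two alternating
 * buffers), minus the non-inline part of the TX WQE.  For example, with a
 * hypothetical log_bf_reg_size of 9: bf_buf_size = (1 << 9) / 2 = 256, and
 * the returned cap is 256 - sizeof(struct mlx5e_tx_wqe) + 2.
 */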
1905u16 mlx5e_get_max_inline_cap(struct mlx5_core_dev *mdev)
1906{
1907        int bf_buf_size = (1 << MLX5_CAP_GEN(mdev, log_bf_reg_size)) / 2;
1908
1909        return bf_buf_size -
1910               sizeof(struct mlx5e_tx_wqe) +
1911               2 /*sizeof(mlx5e_tx_wqe.inline_hdr_start)*/;
1912}
1913
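/*
 * Initialize the software parameters to their defaults: ring sizes, CQ
 * moderation, RSS (XOR hash with an indirection table that round-robins
 * over the channels), locks and the deferred work items.
 */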
1914static void mlx5e_build_netdev_priv(struct mlx5_core_dev *mdev,
1915                                    struct net_device *netdev,
1916                                    int num_channels)
1917{
1918        struct mlx5e_priv *priv = netdev_priv(netdev);
1919        int i;
1920
1921        priv->params.log_sq_size           =
1922                MLX5E_PARAMS_DEFAULT_LOG_SQ_SIZE;
1923        priv->params.log_rq_size           =
1924                MLX5E_PARAMS_DEFAULT_LOG_RQ_SIZE;
1925        priv->params.rx_cq_moderation_usec =
1926                MLX5E_PARAMS_DEFAULT_RX_CQ_MODERATION_USEC;
1927        priv->params.rx_cq_moderation_pkts =
1928                MLX5E_PARAMS_DEFAULT_RX_CQ_MODERATION_PKTS;
1929        priv->params.tx_cq_moderation_usec =
1930                MLX5E_PARAMS_DEFAULT_TX_CQ_MODERATION_USEC;
1931        priv->params.tx_cq_moderation_pkts =
1932                MLX5E_PARAMS_DEFAULT_TX_CQ_MODERATION_PKTS;
1933        priv->params.tx_max_inline         = mlx5e_get_max_inline_cap(mdev);
1934        priv->params.min_rx_wqes           =
1935                MLX5E_PARAMS_DEFAULT_MIN_RX_WQES;
1936        priv->params.num_tc                = 1;
1937        priv->params.default_vlan_prio     = 0;
1938        priv->params.rss_hfunc             = ETH_RSS_HASH_XOR;
1939
1940        netdev_rss_key_fill(priv->params.toeplitz_hash_key,
1941                            sizeof(priv->params.toeplitz_hash_key));
1942
1943        for (i = 0; i < MLX5E_INDIR_RQT_SIZE; i++)
1944                priv->params.indirection_rqt[i] = i % num_channels;
1945
1946        priv->params.lro_wqe_sz            =
1947                MLX5E_PARAMS_DEFAULT_LRO_WQE_SZ;
1948
1949        priv->mdev                         = mdev;
1950        priv->netdev                       = netdev;
1951        priv->params.num_channels          = num_channels;
1952        priv->default_vlan_prio            = priv->params.default_vlan_prio;
1953
1954        spin_lock_init(&priv->async_events_spinlock);
1955        mutex_init(&priv->state_lock);
1956
1957        INIT_WORK(&priv->update_carrier_work, mlx5e_update_carrier_work);
1958        INIT_WORK(&priv->set_rx_mode_work, mlx5e_set_rx_mode_work);
1959        INIT_DELAYED_WORK(&priv->update_stats_work, mlx5e_update_stats_work);
1960}
1961
1962static void mlx5e_set_netdev_dev_addr(struct net_device *netdev)
1963{
1964        struct mlx5e_priv *priv = netdev_priv(netdev);
1965
1966        mlx5_query_nic_vport_mac_address(priv->mdev, netdev->dev_addr);
1967}
1968
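/*
 * Set up netdev ops and feature flags.  Note that mlx5e_netdev_ops is a
 * shared (non-const) template: ndo_select_queue is patched in when more
 * than one TC is configured.
 */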
1969static void mlx5e_build_netdev(struct net_device *netdev)
1970{
1971        struct mlx5e_priv *priv = netdev_priv(netdev);
1972        struct mlx5_core_dev *mdev = priv->mdev;
1973
1974        SET_NETDEV_DEV(netdev, &mdev->pdev->dev);
1975
1976        if (priv->params.num_tc > 1)
1977                mlx5e_netdev_ops.ndo_select_queue = mlx5e_select_queue;
1978
1979        netdev->netdev_ops        = &mlx5e_netdev_ops;
1980        netdev->watchdog_timeo    = 15 * HZ;
1981
1982        netdev->ethtool_ops       = &mlx5e_ethtool_ops;
1983
1984        netdev->vlan_features    |= NETIF_F_SG;
1985        netdev->vlan_features    |= NETIF_F_IP_CSUM;
1986        netdev->vlan_features    |= NETIF_F_IPV6_CSUM;
1987        netdev->vlan_features    |= NETIF_F_GRO;
1988        netdev->vlan_features    |= NETIF_F_TSO;
1989        netdev->vlan_features    |= NETIF_F_TSO6;
1990        netdev->vlan_features    |= NETIF_F_RXCSUM;
1991        netdev->vlan_features    |= NETIF_F_RXHASH;
1992
1993        if (MLX5_CAP_ETH(mdev, lro_cap))
1994                netdev->vlan_features    |= NETIF_F_LRO;
1995
1996        netdev->hw_features       = netdev->vlan_features;
1997        netdev->hw_features      |= NETIF_F_HW_VLAN_CTAG_RX;
1998        netdev->hw_features      |= NETIF_F_HW_VLAN_CTAG_FILTER;
1999
2000        netdev->features          = netdev->hw_features;
2001        if (!priv->params.lro_en)
2002                netdev->features  &= ~NETIF_F_LRO;
2003
2004        netdev->features         |= NETIF_F_HIGHDMA;
2005
2006        netdev->priv_flags       |= IFF_UNICAST_FLT;
2007
2008        mlx5e_set_netdev_dev_addr(netdev);
2009}
2010
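/*
 * Create a physical-address memory key with local read/write access that
 * spans the whole address space (MLX5_MKEY_LEN64), so data-path buffers
 * can be addressed without per-buffer registration.
 */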
2011static int mlx5e_create_mkey(struct mlx5e_priv *priv, u32 pdn,
2012                             struct mlx5_core_mr *mr)
2013{
2014        struct mlx5_core_dev *mdev = priv->mdev;
2015        struct mlx5_create_mkey_mbox_in *in;
2016        int err;
2017
2018        in = mlx5_vzalloc(sizeof(*in));
2019        if (!in)
2020                return -ENOMEM;
2021
2022        in->seg.flags = MLX5_PERM_LOCAL_WRITE |
2023                        MLX5_PERM_LOCAL_READ  |
2024                        MLX5_ACCESS_MODE_PA;
2025        in->seg.flags_pd = cpu_to_be32(pdn | MLX5_MKEY_LEN64);
2026        in->seg.qpn_mkey7_0 = cpu_to_be32(0xffffff << 8);
2027
2028        err = mlx5_core_create_mkey(mdev, mr, in, sizeof(*in), NULL, NULL,
2029                                    NULL);
2030
2031        kvfree(in);
2032
2033        return err;
2034}
2035
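/*
 * Probe path: allocate the net device and create the device-wide resources
 * in dependency order (UAR, PD, transport domain, mkey, TISes, drop RQ,
 * RQTs, TIRs, flow tables) before registering the netdev.  Each failure
 * unwinds everything created so far.
 */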
2036static void *mlx5e_create_netdev(struct mlx5_core_dev *mdev)
2037{
2038        struct net_device *netdev;
2039        struct mlx5e_priv *priv;
2040        int nch = min_t(int, mdev->priv.eq_table.num_comp_vectors,
2041                        MLX5E_MAX_NUM_CHANNELS);
2042        int err;
2043
2044        if (mlx5e_check_required_hca_cap(mdev))
2045                return NULL;
2046
2047        netdev = alloc_etherdev_mqs(sizeof(struct mlx5e_priv), nch, nch);
2048        if (!netdev) {
2049                mlx5_core_err(mdev, "alloc_etherdev_mqs() failed\n");
2050                return NULL;
2051        }
2052
2053        mlx5e_build_netdev_priv(mdev, netdev, nch);
2054        mlx5e_build_netdev(netdev);
2055
2056        netif_carrier_off(netdev);
2057
2058        priv = netdev_priv(netdev);
2059
2060        err = mlx5_alloc_map_uar(mdev, &priv->cq_uar);
2061        if (err) {
2062                mlx5_core_err(mdev, "alloc_map uar failed, %d\n", err);
2063                goto err_free_netdev;
2064        }
2065
2066        err = mlx5_core_alloc_pd(mdev, &priv->pdn);
2067        if (err) {
2068                mlx5_core_err(mdev, "alloc pd failed, %d\n", err);
2069                goto err_unmap_free_uar;
2070        }
2071
2072        err = mlx5_alloc_transport_domain(mdev, &priv->tdn);
2073        if (err) {
2074                mlx5_core_err(mdev, "alloc td failed, %d\n", err);
2075                goto err_dealloc_pd;
2076        }
2077
2078        err = mlx5e_create_mkey(priv, priv->pdn, &priv->mr);
2079        if (err) {
2080                mlx5_core_err(mdev, "create mkey failed, %d\n", err);
2081                goto err_dealloc_transport_domain;
2082        }
2083
2084        err = mlx5e_create_tises(priv);
2085        if (err) {
2086                mlx5_core_warn(mdev, "create tises failed, %d\n", err);
2087                goto err_destroy_mkey;
2088        }
2089
2090        err = mlx5e_open_drop_rq(priv);
2091        if (err) {
2092                mlx5_core_err(mdev, "open drop rq failed, %d\n", err);
2093                goto err_destroy_tises;
2094        }
2095
2096        err = mlx5e_create_rqt(priv, MLX5E_INDIRECTION_RQT);
2097        if (err) {
2098                mlx5_core_warn(mdev, "create rqt(INDIR) failed, %d\n", err);
2099                goto err_close_drop_rq;
2100        }
2101
2102        err = mlx5e_create_rqt(priv, MLX5E_SINGLE_RQ_RQT);
2103        if (err) {
2104                mlx5_core_warn(mdev, "create rqt(SINGLE) failed, %d\n", err);
2105                goto err_destroy_rqt_indir;
2106        }
2107
2108        err = mlx5e_create_tirs(priv);
2109        if (err) {
2110                mlx5_core_warn(mdev, "create tirs failed, %d\n", err);
2111                goto err_destroy_rqt_single;
2112        }
2113
2114        err = mlx5e_create_flow_tables(priv);
2115        if (err) {
2116                mlx5_core_warn(mdev, "create flow tables failed, %d\n", err);
2117                goto err_destroy_tirs;
2118        }
2119
2120        mlx5e_init_eth_addr(priv);
2121
2122        err = register_netdev(netdev);
2123        if (err) {
2124                mlx5_core_err(mdev, "register_netdev failed, %d\n", err);
2125                goto err_destroy_flow_tables;
2126        }
2127
2128        mlx5e_enable_async_events(priv);
2129        schedule_work(&priv->set_rx_mode_work);
2130
2131        return priv;
2132
2133err_destroy_flow_tables:
2134        mlx5e_destroy_flow_tables(priv);
2135
2136err_destroy_tirs:
2137        mlx5e_destroy_tirs(priv);
2138
2139err_destroy_rqt_single:
2140        mlx5e_destroy_rqt(priv, MLX5E_SINGLE_RQ_RQT);
2141
2142err_destroy_rqt_indir:
2143        mlx5e_destroy_rqt(priv, MLX5E_INDIRECTION_RQT);
2144
2145err_close_drop_rq:
2146        mlx5e_close_drop_rq(priv);
2147
2148err_destroy_tises:
2149        mlx5e_destroy_tises(priv);
2150
2151err_destroy_mkey:
2152        mlx5_core_destroy_mkey(mdev, &priv->mr);
2153
2154err_dealloc_transport_domain:
2155        mlx5_dealloc_transport_domain(mdev, priv->tdn);
2156
2157err_dealloc_pd:
2158        mlx5_core_dealloc_pd(mdev, priv->pdn);
2159
2160err_unmap_free_uar:
2161        mlx5_unmap_free_uar(mdev, &priv->cq_uar);
2162
2163err_free_netdev:
2164        free_netdev(netdev);
2165
2166        return NULL;
2167}
2168
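/*
 * Remove path: mirror of mlx5e_create_netdev.  The DESTROYING bit tells
 * the deferred works not to re-arm; pending works are flushed before the
 * netdev is unregistered and resources are destroyed in reverse order of
 * creation.
 */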
2169static void mlx5e_destroy_netdev(struct mlx5_core_dev *mdev, void *vpriv)
2170{
2171        struct mlx5e_priv *priv = vpriv;
2172        struct net_device *netdev = priv->netdev;
2173
2174        set_bit(MLX5E_STATE_DESTROYING, &priv->state);
2175
2176        schedule_work(&priv->set_rx_mode_work);
2177        mlx5e_disable_async_events(priv);
2178        flush_scheduled_work();
2179        unregister_netdev(netdev);
2180        mlx5e_destroy_flow_tables(priv);
2181        mlx5e_destroy_tirs(priv);
2182        mlx5e_destroy_rqt(priv, MLX5E_SINGLE_RQ_RQT);
2183        mlx5e_destroy_rqt(priv, MLX5E_INDIRECTION_RQT);
2184        mlx5e_close_drop_rq(priv);
2185        mlx5e_destroy_tises(priv);
2186        mlx5_core_destroy_mkey(priv->mdev, &priv->mr);
2187        mlx5_dealloc_transport_domain(priv->mdev, priv->tdn);
2188        mlx5_core_dealloc_pd(priv->mdev, priv->pdn);
2189        mlx5_unmap_free_uar(priv->mdev, &priv->cq_uar);
2190        free_netdev(netdev);
2191}
2192
2193static void *mlx5e_get_netdev(void *vpriv)
2194{
2195        struct mlx5e_priv *priv = vpriv;
2196
2197        return priv->netdev;
2198}
2199
2200static struct mlx5_interface mlx5e_interface = {
2201        .add       = mlx5e_create_netdev,
2202        .remove    = mlx5e_destroy_netdev,
2203        .event     = mlx5e_async_event,
2204        .protocol  = MLX5_INTERFACE_PROTOCOL_ETH,
2205        .get_dev   = mlx5e_get_netdev,
2206};
2207
2208void mlx5e_init(void)
2209{
2210        mlx5_register_interface(&mlx5e_interface);
2211}
2212
2213void mlx5e_cleanup(void)
2214{
2215        mlx5_unregister_interface(&mlx5e_interface);
2216}
2217