/* linux/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.c */
/*
 * Copyright (c) 2017, Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <rdma/ib_verbs.h>
#include <linux/mlx5/fs.h>
#include "en.h"
#include "en/params.h"
#include "ipoib.h"

#define IB_DEFAULT_Q_KEY   0xb1b
#define MLX5I_PARAMS_DEFAULT_LOG_RQ_SIZE 9

static int mlx5i_open(struct net_device *netdev);
static int mlx5i_close(struct net_device *netdev);
static int mlx5i_change_mtu(struct net_device *netdev, int new_mtu);

  46static const struct net_device_ops mlx5i_netdev_ops = {
  47        .ndo_open                = mlx5i_open,
  48        .ndo_stop                = mlx5i_close,
  49        .ndo_get_stats64         = mlx5i_get_stats,
  50        .ndo_init                = mlx5i_dev_init,
  51        .ndo_uninit              = mlx5i_dev_cleanup,
  52        .ndo_change_mtu          = mlx5i_change_mtu,
  53        .ndo_eth_ioctl            = mlx5i_ioctl,
  54};
  55
/* IPoIB mlx5 netdev profile */

/* Apply IPoIB-specific overrides on top of the generic NIC params. */
static void mlx5i_build_nic_params(struct mlx5_core_dev *mdev,
                                   struct mlx5e_params *params)
{
        /* Override RQ params as IPoIB supports only LINKED LIST RQ for now */
        MLX5E_SET_PFLAG(params, MLX5E_PFLAG_RX_STRIDING_RQ, false);
        mlx5e_set_rq_type(mdev, params);
        mlx5e_init_rq_type_params(mdev, params);

        /* RQ size in ipoib by default is 512 */
        params->log_rq_mtu_frames = is_kdump_kernel() ?
                MLX5E_PARAMS_MINIMUM_LOG_RQ_SIZE :
                MLX5I_PARAMS_DEFAULT_LOG_RQ_SIZE;

        /* No packet merging; hard MTU must cover the GRH + IPoIB header. */
        params->packet_merge.type = MLX5E_PACKET_MERGE_NONE;
        params->hard_mtu = MLX5_IB_GRH_BYTES + MLX5_IPOIB_HARD_LEN;
        params->tunneled_offload_en = false;
}

/* Called directly after IPoIB netdevice was created to initialize SW structs */
int mlx5i_init(struct mlx5_core_dev *mdev, struct net_device *netdev)
{
        struct mlx5e_priv *priv  = mlx5i_epriv(netdev);

        netif_carrier_off(netdev);
        mlx5e_set_netdev_mtu_boundaries(priv);
        /* Start with the largest MTU the boundaries above allow. */
        netdev->mtu = netdev->max_mtu;

        /* Build the generic NIC params first, then apply IPoIB overrides. */
        mlx5e_build_nic_params(priv, NULL, netdev->mtu);
        mlx5i_build_nic_params(mdev, &priv->channels.params);

        mlx5e_timestamp_init(priv);

        /* netdev init */
        netdev->hw_features    |= NETIF_F_SG;
        netdev->hw_features    |= NETIF_F_IP_CSUM;
        netdev->hw_features    |= NETIF_F_IPV6_CSUM;
        netdev->hw_features    |= NETIF_F_GRO;
        netdev->hw_features    |= NETIF_F_TSO;
        netdev->hw_features    |= NETIF_F_TSO6;
        netdev->hw_features    |= NETIF_F_RXCSUM;
        netdev->hw_features    |= NETIF_F_RXHASH;

        netdev->netdev_ops = &mlx5i_netdev_ops;
        netdev->ethtool_ops = &mlx5i_ethtool_ops;

        return 0;
}

/* Called directly before IPoIB netdevice is destroyed to cleanup SW structs */
void mlx5i_cleanup(struct mlx5e_priv *priv)
{
        /* Counterpart of the mlx5e_priv_init() done at setup time. */
        mlx5e_priv_cleanup(priv);
}

 111static void mlx5i_grp_sw_update_stats(struct mlx5e_priv *priv)
 112{
 113        struct rtnl_link_stats64 s = {};
 114        int i, j;
 115
 116        for (i = 0; i < priv->stats_nch; i++) {
 117                struct mlx5e_channel_stats *channel_stats;
 118                struct mlx5e_rq_stats *rq_stats;
 119
 120                channel_stats = priv->channel_stats[i];
 121                rq_stats = &channel_stats->rq;
 122
 123                s.rx_packets += rq_stats->packets;
 124                s.rx_bytes   += rq_stats->bytes;
 125
 126                for (j = 0; j < priv->max_opened_tc; j++) {
 127                        struct mlx5e_sq_stats *sq_stats = &channel_stats->sq[j];
 128
 129                        s.tx_packets           += sq_stats->packets;
 130                        s.tx_bytes             += sq_stats->bytes;
 131                        s.tx_dropped           += sq_stats->dropped;
 132                }
 133        }
 134
 135        memset(&priv->stats.sw, 0, sizeof(s));
 136
 137        priv->stats.sw.rx_packets = s.rx_packets;
 138        priv->stats.sw.rx_bytes = s.rx_bytes;
 139        priv->stats.sw.tx_packets = s.tx_packets;
 140        priv->stats.sw.tx_bytes = s.tx_bytes;
 141        priv->stats.sw.tx_queue_dropped = s.tx_dropped;
 142}
 143
 144void mlx5i_get_stats(struct net_device *dev, struct rtnl_link_stats64 *stats)
 145{
 146        struct mlx5e_priv     *priv   = mlx5i_epriv(dev);
 147        struct mlx5e_sw_stats *sstats = &priv->stats.sw;
 148
 149        mlx5i_grp_sw_update_stats(priv);
 150
 151        stats->rx_packets = sstats->rx_packets;
 152        stats->rx_bytes   = sstats->rx_bytes;
 153        stats->tx_packets = sstats->tx_packets;
 154        stats->tx_bytes   = sstats->tx_bytes;
 155        stats->tx_dropped = sstats->tx_queue_dropped;
 156}
 157
/* Walk the underlay QP through RESET -> INIT -> RTR -> RTS so it can carry
 * traffic.  If any transition fails, the QP is parked in the ERROR state
 * and the failing command's status is returned.
 */
int mlx5i_init_underlay_qp(struct mlx5e_priv *priv)
{
        struct mlx5_core_dev *mdev = priv->mdev;
        struct mlx5i_priv *ipriv = priv->ppriv;
        int ret;

        /* RESET -> INIT: program pkey index, port and the default qkey. */
        {
                u32 in[MLX5_ST_SZ_DW(rst2init_qp_in)] = {};
                u32 *qpc;

                qpc = MLX5_ADDR_OF(rst2init_qp_in, in, qpc);

                MLX5_SET(qpc, qpc, pm_state, MLX5_QP_PM_MIGRATED);
                MLX5_SET(qpc, qpc, primary_address_path.pkey_index,
                         ipriv->pkey_index);
                MLX5_SET(qpc, qpc, primary_address_path.vhca_port_num, 1);
                MLX5_SET(qpc, qpc, q_key, IB_DEFAULT_Q_KEY);

                MLX5_SET(rst2init_qp_in, in, opcode, MLX5_CMD_OP_RST2INIT_QP);
                MLX5_SET(rst2init_qp_in, in, qpn, ipriv->qpn);
                ret = mlx5_cmd_exec_in(mdev, rst2init_qp, in);
                if (ret)
                        goto err_qp_modify_to_err;
        }
        /* INIT -> RTR. */
        {
                u32 in[MLX5_ST_SZ_DW(init2rtr_qp_in)] = {};

                MLX5_SET(init2rtr_qp_in, in, opcode, MLX5_CMD_OP_INIT2RTR_QP);
                MLX5_SET(init2rtr_qp_in, in, qpn, ipriv->qpn);
                ret = mlx5_cmd_exec_in(mdev, init2rtr_qp, in);
                if (ret)
                        goto err_qp_modify_to_err;
        }
        /* RTR -> RTS. */
        {
                u32 in[MLX5_ST_SZ_DW(rtr2rts_qp_in)] = {};

                MLX5_SET(rtr2rts_qp_in, in, opcode, MLX5_CMD_OP_RTR2RTS_QP);
                MLX5_SET(rtr2rts_qp_in, in, qpn, ipriv->qpn);
                ret = mlx5_cmd_exec_in(mdev, rtr2rts_qp, in);
                if (ret)
                        goto err_qp_modify_to_err;
        }
        return 0;

err_qp_modify_to_err:
        /* Best effort move to ERROR; the original failure is what we return. */
        {
                u32 in[MLX5_ST_SZ_DW(qp_2err_in)] = {};

                MLX5_SET(qp_2err_in, in, opcode, MLX5_CMD_OP_2ERR_QP);
                MLX5_SET(qp_2err_in, in, qpn, ipriv->qpn);
                mlx5_cmd_exec_in(mdev, qp_2err, in);
        }
        return ret;
}

/* Move the underlay QP back to RESET so it can be re-initialized later. */
void mlx5i_uninit_underlay_qp(struct mlx5e_priv *priv)
{
        struct mlx5i_priv *ipriv = priv->ppriv;
        struct mlx5_core_dev *mdev = priv->mdev;
        u32 in[MLX5_ST_SZ_DW(qp_2rst_in)] = {};

        MLX5_SET(qp_2rst_in, in, opcode, MLX5_CMD_OP_2RST_QP);
        MLX5_SET(qp_2rst_in, in, qpn, ipriv->qpn);
        mlx5_cmd_exec_in(mdev, qp_2rst, in);
}

#define MLX5_QP_ENHANCED_ULP_STATELESS_MODE 2

/* Create the UD underlay QP that backs this IPoIB netdev.  On success the
 * new QP number is stored in ipriv->qpn.
 */
int mlx5i_create_underlay_qp(struct mlx5e_priv *priv)
{
        const unsigned char *dev_addr = priv->netdev->dev_addr;
        u32 out[MLX5_ST_SZ_DW(create_qp_out)] = {};
        u32 in[MLX5_ST_SZ_DW(create_qp_in)] = {};
        struct mlx5i_priv *ipriv = priv->ppriv;
        void *addr_path;
        int qpn = 0;
        int ret = 0;
        void *qpc;

        /* Request the QPN taken from bytes 1..3 of the HW address (the same
         * bytes mlx5i_dev_init() writes); only when FW supports it.
         */
        if (MLX5_CAP_GEN(priv->mdev, mkey_by_name)) {
                qpn = (dev_addr[1] << 16) + (dev_addr[2] << 8) + dev_addr[3];
                MLX5_SET(create_qp_in, in, input_qpn, qpn);
        }

        qpc = MLX5_ADDR_OF(create_qp_in, in, qpc);
        MLX5_SET(qpc, qpc, ts_format, mlx5_get_qp_default_ts(priv->mdev));
        MLX5_SET(qpc, qpc, st, MLX5_QP_ST_UD);
        MLX5_SET(qpc, qpc, pm_state, MLX5_QP_PM_MIGRATED);
        MLX5_SET(qpc, qpc, ulp_stateless_offload_mode,
                 MLX5_QP_ENHANCED_ULP_STATELESS_MODE);

        /* Primary address path: vHCA port 1, GRH present. */
        addr_path = MLX5_ADDR_OF(qpc, qpc, primary_address_path);
        MLX5_SET(ads, addr_path, vhca_port_num, 1);
        MLX5_SET(ads, addr_path, grh, 1);

        MLX5_SET(create_qp_in, in, opcode, MLX5_CMD_OP_CREATE_QP);
        ret = mlx5_cmd_exec_inout(priv->mdev, create_qp, in, out);
        if (ret)
                return ret;

        ipriv->qpn = MLX5_GET(create_qp_out, out, qpn);

        return 0;
}

/* Issue DESTROY_QP for the given underlay QP number. */
void mlx5i_destroy_underlay_qp(struct mlx5_core_dev *mdev, u32 qpn)
{
        u32 in[MLX5_ST_SZ_DW(destroy_qp_in)] = {};

        MLX5_SET(destroy_qp_in, in, opcode, MLX5_CMD_OP_DESTROY_QP);
        MLX5_SET(destroy_qp_in, in, qpn, qpn);
        mlx5_cmd_exec_in(mdev, destroy_qp, in);
}

/* Refresh the TIRs after an RX configuration change.  NOTE(review): both
 * boolean flags of mlx5e_refresh_tirs() are enabled here — presumably the
 * loopback-related options; confirm against its definition.
 */
int mlx5i_update_nic_rx(struct mlx5e_priv *priv)
{
        return mlx5e_refresh_tirs(priv, true, true);
}

/* Create a TIS whose context is bound to the given underlay QP number. */
int mlx5i_create_tis(struct mlx5_core_dev *mdev, u32 underlay_qpn, u32 *tisn)
{
        u32 in[MLX5_ST_SZ_DW(create_tis_in)] = {};
        void *tisc;

        tisc = MLX5_ADDR_OF(create_tis_in, in, ctx);

        MLX5_SET(tisc, tisc, underlay_qpn, underlay_qpn);

        return mlx5e_create_tis(mdev, in, tisn);
}

/* Profile .init_tx: create the underlay QP, then a TIS bound to it.
 * The QP is destroyed again if TIS creation fails.
 */
static int mlx5i_init_tx(struct mlx5e_priv *priv)
{
        struct mlx5i_priv *ipriv = priv->ppriv;
        int err;

        err = mlx5i_create_underlay_qp(priv);
        if (err) {
                mlx5_core_warn(priv->mdev, "create underlay QP failed, %d\n", err);
                return err;
        }

        err = mlx5i_create_tis(priv->mdev, ipriv->qpn, &priv->tisn[0][0]);
        if (err) {
                mlx5_core_warn(priv->mdev, "create tis failed, %d\n", err);
                goto err_destroy_underlay_qp;
        }

        return 0;

err_destroy_underlay_qp:
        mlx5i_destroy_underlay_qp(priv->mdev, ipriv->qpn);
        return err;
}

/* Profile .cleanup_tx: tear down in reverse creation order (TIS, then QP). */
static void mlx5i_cleanup_tx(struct mlx5e_priv *priv)
{
        struct mlx5i_priv *ipriv = priv->ppriv;

        mlx5e_destroy_tis(priv->mdev, priv->tisn[0][0]);
        mlx5i_destroy_underlay_qp(priv->mdev, ipriv->qpn);
}

/* Build the RX flow-steering pipeline: aRFS tables (best effort), the TTC
 * table (mandatory), and the ethtool steering state.
 */
static int mlx5i_create_flow_steering(struct mlx5e_priv *priv)
{
        int err;

        priv->fs.ns = mlx5_get_flow_namespace(priv->mdev,
                                               MLX5_FLOW_NAMESPACE_KERNEL);

        if (!priv->fs.ns)
                return -EINVAL;

        /* aRFS failure is non-fatal: just withdraw the NTUPLE feature. */
        err = mlx5e_arfs_create_tables(priv);
        if (err) {
                netdev_err(priv->netdev, "Failed to create arfs tables, err=%d\n",
                           err);
                priv->netdev->hw_features &= ~NETIF_F_NTUPLE;
        }

        err = mlx5e_create_ttc_table(priv);
        if (err) {
                netdev_err(priv->netdev, "Failed to create ttc table, err=%d\n",
                           err);
                goto err_destroy_arfs_tables;
        }

        mlx5e_ethtool_init_steering(priv);

        return 0;

err_destroy_arfs_tables:
        mlx5e_arfs_destroy_tables(priv);

        return err;
}

/* Tear down the RX flow-steering pipeline built by
 * mlx5i_create_flow_steering().
 */
static void mlx5i_destroy_flow_steering(struct mlx5e_priv *priv)
{
        mlx5e_destroy_ttc_table(priv);
        mlx5e_arfs_destroy_tables(priv);
        mlx5e_ethtool_cleanup_steering(priv);
}

/* Profile .init_rx: allocate RX resources, queue counters, the drop RQ,
 * the RX resource tables and the flow steering.  Error labels fall
 * through, undoing the steps in reverse order.
 */
static int mlx5i_init_rx(struct mlx5e_priv *priv)
{
        struct mlx5_core_dev *mdev = priv->mdev;
        int err;

        priv->rx_res = mlx5e_rx_res_alloc();
        if (!priv->rx_res)
                return -ENOMEM;

        mlx5e_create_q_counters(priv);

        err = mlx5e_open_drop_rq(priv, &priv->drop_rq);
        if (err) {
                mlx5_core_err(mdev, "open drop rq failed, %d\n", err);
                goto err_destroy_q_counters;
        }

        err = mlx5e_rx_res_init(priv->rx_res, priv->mdev, 0,
                                priv->max_nch, priv->drop_rq.rqn,
                                &priv->channels.params.packet_merge,
                                priv->channels.params.num_channels);
        if (err)
                goto err_close_drop_rq;

        err = mlx5i_create_flow_steering(priv);
        if (err)
                goto err_destroy_rx_res;

        return 0;

err_destroy_rx_res:
        mlx5e_rx_res_destroy(priv->rx_res);
err_close_drop_rq:
        mlx5e_close_drop_rq(&priv->drop_rq);
err_destroy_q_counters:
        mlx5e_destroy_q_counters(priv);
        mlx5e_rx_res_free(priv->rx_res);
        priv->rx_res = NULL;
        return err;
}

/* Profile .cleanup_rx: undo mlx5i_init_rx() in reverse order. */
static void mlx5i_cleanup_rx(struct mlx5e_priv *priv)
{
        mlx5i_destroy_flow_steering(priv);
        mlx5e_rx_res_destroy(priv->rx_res);
        mlx5e_close_drop_rq(&priv->drop_rq);
        mlx5e_destroy_q_counters(priv);
        mlx5e_rx_res_free(priv->rx_res);
        priv->rx_res = NULL;
}

/* Stats groups exported for the IPoIB netdev (e.g. via ethtool). */
/* The stats groups order is opposite to the update_stats() order calls */
static mlx5e_stats_grp_t mlx5i_stats_grps[] = {
        &MLX5E_STATS_GRP(sw),
        &MLX5E_STATS_GRP(qcnt),
        &MLX5E_STATS_GRP(vnic_env),
        &MLX5E_STATS_GRP(vport),
        &MLX5E_STATS_GRP(802_3),
        &MLX5E_STATS_GRP(2863),
        &MLX5E_STATS_GRP(2819),
        &MLX5E_STATS_GRP(phy),
        &MLX5E_STATS_GRP(pcie),
        &MLX5E_STATS_GRP(per_prio),
        &MLX5E_STATS_GRP(pme),
        &MLX5E_STATS_GRP(channels),
        &MLX5E_STATS_GRP(per_port_buff_congest),
};

/* Number of entries in mlx5i_stats_grps (priv is unused here). */
static unsigned int mlx5i_stats_grps_num(struct mlx5e_priv *priv)
{
        return ARRAY_SIZE(mlx5i_stats_grps);
}

/* mlx5e profile used when this device is not a sub (pkey) interface;
 * see mlx5_get_profile().
 */
static const struct mlx5e_profile mlx5i_nic_profile = {
        .init              = mlx5i_init,
        .cleanup           = mlx5i_cleanup,
        .init_tx           = mlx5i_init_tx,
        .cleanup_tx        = mlx5i_cleanup_tx,
        .init_rx           = mlx5i_init_rx,
        .cleanup_rx        = mlx5i_cleanup_rx,
        .enable            = NULL, /* mlx5i_enable */
        .disable           = NULL, /* mlx5i_disable */
        .update_rx         = mlx5i_update_nic_rx,
        .update_stats      = NULL, /* mlx5i_update_stats */
        .update_carrier    = NULL, /* no HW update in IB link */
        .rx_handlers       = &mlx5i_rx_handlers,
        .max_tc            = MLX5I_MAX_NUM_TC,
        .rq_groups         = MLX5E_NUM_RQ_GROUPS(REGULAR),
        .stats_grps        = mlx5i_stats_grps,
        .stats_grps_num    = mlx5i_stats_grps_num,
};

/* mlx5i netdev NDOs */

 456static int mlx5i_change_mtu(struct net_device *netdev, int new_mtu)
 457{
 458        struct mlx5e_priv *priv = mlx5i_epriv(netdev);
 459        struct mlx5e_params new_params;
 460        int err = 0;
 461
 462        mutex_lock(&priv->state_lock);
 463
 464        new_params = priv->channels.params;
 465        new_params.sw_mtu = new_mtu;
 466
 467        err = mlx5e_safe_switch_params(priv, &new_params, NULL, NULL, true);
 468        if (err)
 469                goto out;
 470
 471        netdev->mtu = new_params.sw_mtu;
 472
 473out:
 474        mutex_unlock(&priv->state_lock);
 475        return err;
 476}
 477
 478int mlx5i_dev_init(struct net_device *dev)
 479{
 480        struct mlx5e_priv    *priv   = mlx5i_epriv(dev);
 481        struct mlx5i_priv    *ipriv  = priv->ppriv;
 482        u8 addr_mod[3];
 483
 484        /* Set dev address using underlay QP */
 485        addr_mod[0] = (ipriv->qpn >> 16) & 0xff;
 486        addr_mod[1] = (ipriv->qpn >>  8) & 0xff;
 487        addr_mod[2] = (ipriv->qpn) & 0xff;
 488        dev_addr_mod(dev, 1, addr_mod, sizeof(addr_mod));
 489
 490        /* Add QPN to net-device mapping to HT */
 491        mlx5i_pkey_add_qpn(dev, ipriv->qpn);
 492
 493        return 0;
 494}
 495
 496int mlx5i_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
 497{
 498        struct mlx5e_priv *priv = mlx5i_epriv(dev);
 499
 500        switch (cmd) {
 501        case SIOCSHWTSTAMP:
 502                return mlx5e_hwstamp_set(priv, ifr);
 503        case SIOCGHWTSTAMP:
 504                return mlx5e_hwstamp_get(priv, ifr);
 505        default:
 506                return -EOPNOTSUPP;
 507        }
 508}
 509
/* .ndo_uninit: reset the underlay QP and drop the QPN -> netdev mapping. */
void mlx5i_dev_cleanup(struct net_device *dev)
{
        struct mlx5e_priv    *priv   = mlx5i_epriv(dev);
        struct mlx5i_priv    *ipriv = priv->ppriv;

        mlx5i_uninit_underlay_qp(priv);

        /* Delete QPN to net-device mapping from HT */
        mlx5i_pkey_del_qpn(dev, ipriv->qpn);
}

/* .ndo_open: bring the underlay QP to RTS, attach it to the RX steering,
 * then open and activate the channels.  On failure each step is undone in
 * reverse order under the state lock.
 */
static int mlx5i_open(struct net_device *netdev)
{
        struct mlx5e_priv *epriv = mlx5i_epriv(netdev);
        struct mlx5i_priv *ipriv = epriv->ppriv;
        struct mlx5_core_dev *mdev = epriv->mdev;
        int err;

        mutex_lock(&epriv->state_lock);

        set_bit(MLX5E_STATE_OPENED, &epriv->state);

        err = mlx5i_init_underlay_qp(epriv);
        if (err) {
                mlx5_core_warn(mdev, "prepare underlay qp state failed, %d\n", err);
                goto err_clear_state_opened_flag;
        }

        err = mlx5_fs_add_rx_underlay_qpn(mdev, ipriv->qpn);
        if (err) {
                mlx5_core_warn(mdev, "attach underlay qp to ft failed, %d\n", err);
                goto err_reset_qp;
        }

        err = mlx5e_open_channels(epriv, &epriv->channels);
        if (err)
                goto err_remove_fs_underlay_qp;

        epriv->profile->update_rx(epriv);
        mlx5e_activate_priv_channels(epriv);

        mutex_unlock(&epriv->state_lock);
        return 0;

err_remove_fs_underlay_qp:
        mlx5_fs_remove_rx_underlay_qpn(mdev, ipriv->qpn);
err_reset_qp:
        mlx5i_uninit_underlay_qp(epriv);
err_clear_state_opened_flag:
        clear_bit(MLX5E_STATE_OPENED, &epriv->state);
        mutex_unlock(&epriv->state_lock);
        return err;
}

/* .ndo_stop: deactivate and close the channels, detach and reset the
 * underlay QP.  A no-op if the device is not in the OPENED state.
 */
static int mlx5i_close(struct net_device *netdev)
{
        struct mlx5e_priv *epriv = mlx5i_epriv(netdev);
        struct mlx5i_priv *ipriv = epriv->ppriv;
        struct mlx5_core_dev *mdev = epriv->mdev;

        /* May already be CLOSED in case a previous configuration operation
         * (e.g RX/TX queue size change) that involves close&open failed.
         */
        mutex_lock(&epriv->state_lock);

        if (!test_bit(MLX5E_STATE_OPENED, &epriv->state))
                goto unlock;

        clear_bit(MLX5E_STATE_OPENED, &epriv->state);

        netif_carrier_off(epriv->netdev);
        mlx5_fs_remove_rx_underlay_qpn(mdev, ipriv->qpn);
        mlx5e_deactivate_priv_channels(epriv);
        mlx5e_close_channels(&epriv->channels);
        mlx5i_uninit_underlay_qp(epriv);
unlock:
        mutex_unlock(&epriv->state_lock);
        return 0;
}

/* IPoIB RDMA netdev callbacks */

/* Attach the underlay QP to the multicast group GID; on request, also
 * remember the qkey for later transmits (used by mlx5i_xmit()).
 */
static int mlx5i_attach_mcast(struct net_device *netdev, struct ib_device *hca,
                              union ib_gid *gid, u16 lid, int set_qkey,
                              u32 qkey)
{
        struct mlx5e_priv    *epriv = mlx5i_epriv(netdev);
        struct mlx5_core_dev *mdev  = epriv->mdev;
        struct mlx5i_priv    *ipriv = epriv->ppriv;
        int err;

        mlx5_core_dbg(mdev, "attaching QPN 0x%x, MGID %pI6\n", ipriv->qpn,
                      gid->raw);
        err = mlx5_core_attach_mcg(mdev, gid, ipriv->qpn);
        if (err)
                mlx5_core_warn(mdev, "failed attaching QPN 0x%x, MGID %pI6\n",
                               ipriv->qpn, gid->raw);

        /* The qkey is stored even if the attach above failed. */
        if (set_qkey) {
                mlx5_core_dbg(mdev, "%s setting qkey 0x%x\n",
                              netdev->name, qkey);
                ipriv->qkey = qkey;
        }

        return err;
}

/* Detach the underlay QP from the multicast group GID. */
static int mlx5i_detach_mcast(struct net_device *netdev, struct ib_device *hca,
                              union ib_gid *gid, u16 lid)
{
        struct mlx5e_priv    *epriv = mlx5i_epriv(netdev);
        struct mlx5_core_dev *mdev  = epriv->mdev;
        struct mlx5i_priv    *ipriv = epriv->ppriv;
        int err;

        mlx5_core_dbg(mdev, "detaching QPN 0x%x, MGID %pI6\n", ipriv->qpn,
                      gid->raw);

        err = mlx5_core_detach_mcg(mdev, gid, ipriv->qpn);
        if (err)
                mlx5_core_dbg(mdev, "failed detaching QPN 0x%x, MGID %pI6\n",
                              ipriv->qpn, gid->raw);

        return err;
}

/* rdma_netdev send callback: transmit on the SQ selected by the skb's
 * queue mapping, using the AH and destination QPN supplied by the IPoIB
 * layer plus the qkey saved in mlx5i_attach_mcast()/set by the caller.
 * Always reports NETDEV_TX_OK.
 */
static int mlx5i_xmit(struct net_device *dev, struct sk_buff *skb,
                      struct ib_ah *address, u32 dqpn)
{
        struct mlx5e_priv *epriv = mlx5i_epriv(dev);
        struct mlx5e_txqsq *sq   = epriv->txq2sq[skb_get_queue_mapping(skb)];
        struct mlx5_ib_ah *mah   = to_mah(address);
        struct mlx5i_priv *ipriv = epriv->ppriv;

        mlx5i_sq_xmit(sq, skb, &mah->av, dqpn, ipriv->qkey, netdev_xmit_more());

        return NETDEV_TX_OK;
}

 648static void mlx5i_set_pkey_index(struct net_device *netdev, int id)
 649{
 650        struct mlx5i_priv *ipriv = netdev_priv(netdev);
 651
 652        ipriv->pkey_index = (u16)id;
 653}
 654
 655static int mlx5i_check_required_hca_cap(struct mlx5_core_dev *mdev)
 656{
 657        if (MLX5_CAP_GEN(mdev, port_type) != MLX5_CAP_PORT_TYPE_IB)
 658                return -EOPNOTSUPP;
 659
 660        if (!MLX5_CAP_GEN(mdev, ipoib_enhanced_offloads)) {
 661                mlx5_core_warn(mdev, "IPoIB enhanced offloads are not supported\n");
 662                return -EOPNOTSUPP;
 663        }
 664
 665        return 0;
 666}
 667
/* netdev priv_destructor: detach the netdev and release profile state.
 * Parent (non-sub) interfaces also drop the QPN hash table and the shared
 * mdev resources they created in mlx5_rdma_setup_rn().
 */
static void mlx5_rdma_netdev_free(struct net_device *netdev)
{
        struct mlx5e_priv *priv = mlx5i_epriv(netdev);
        struct mlx5_core_dev *mdev = priv->mdev;
        struct mlx5i_priv *ipriv = priv->ppriv;
        const struct mlx5e_profile *profile = priv->profile;

        mlx5e_detach_netdev(priv);
        profile->cleanup(priv);

        if (!ipriv->sub_interface) {
                mlx5i_pkey_qpn_ht_cleanup(netdev);
                mlx5e_destroy_mdev_resources(mdev);
        }
}

 684static bool mlx5_is_sub_interface(struct mlx5_core_dev *mdev)
 685{
 686        return mdev->mlx5e_res.hw_objs.pdn != 0;
 687}
 688
 689static const struct mlx5e_profile *mlx5_get_profile(struct mlx5_core_dev *mdev)
 690{
 691        if (mlx5_is_sub_interface(mdev))
 692                return mlx5i_pkey_get_profile();
 693        return &mlx5i_nic_profile;
 694}
 695
 696static int mlx5_rdma_setup_rn(struct ib_device *ibdev, u32 port_num,
 697                              struct net_device *netdev, void *param)
 698{
 699        struct mlx5_core_dev *mdev = (struct mlx5_core_dev *)param;
 700        const struct mlx5e_profile *prof = mlx5_get_profile(mdev);
 701        struct mlx5i_priv *ipriv;
 702        struct mlx5e_priv *epriv;
 703        struct rdma_netdev *rn;
 704        int err;
 705
 706        ipriv = netdev_priv(netdev);
 707        epriv = mlx5i_epriv(netdev);
 708
 709        ipriv->sub_interface = mlx5_is_sub_interface(mdev);
 710        if (!ipriv->sub_interface) {
 711                err = mlx5i_pkey_qpn_ht_init(netdev);
 712                if (err) {
 713                        mlx5_core_warn(mdev, "allocate qpn_to_netdev ht failed\n");
 714                        return err;
 715                }
 716
 717                /* This should only be called once per mdev */
 718                err = mlx5e_create_mdev_resources(mdev);
 719                if (err)
 720                        goto destroy_ht;
 721        }
 722
 723        err = mlx5e_priv_init(epriv, prof, netdev, mdev);
 724        if (err)
 725                goto destroy_mdev_resources;
 726
 727        epriv->profile = prof;
 728        epriv->ppriv = ipriv;
 729
 730        prof->init(mdev, netdev);
 731
 732        err = mlx5e_attach_netdev(epriv);
 733        if (err)
 734                goto detach;
 735        netif_carrier_off(netdev);
 736
 737        /* set rdma_netdev func pointers */
 738        rn = &ipriv->rn;
 739        rn->hca  = ibdev;
 740        rn->send = mlx5i_xmit;
 741        rn->attach_mcast = mlx5i_attach_mcast;
 742        rn->detach_mcast = mlx5i_detach_mcast;
 743        rn->set_id = mlx5i_set_pkey_index;
 744
 745        netdev->priv_destructor = mlx5_rdma_netdev_free;
 746        netdev->needs_free_netdev = 1;
 747
 748        return 0;
 749
 750detach:
 751        prof->cleanup(epriv);
 752        if (ipriv->sub_interface)
 753                return err;
 754destroy_mdev_resources:
 755        mlx5e_destroy_mdev_resources(mdev);
 756destroy_ht:
 757        mlx5i_pkey_qpn_ht_cleanup(netdev);
 758        return err;
 759}
 760
 761int mlx5_rdma_rn_get_params(struct mlx5_core_dev *mdev,
 762                            struct ib_device *device,
 763                            struct rdma_netdev_alloc_params *params)
 764{
 765        int nch;
 766        int rc;
 767
 768        rc = mlx5i_check_required_hca_cap(mdev);
 769        if (rc)
 770                return rc;
 771
 772        nch = mlx5e_get_max_num_channels(mdev);
 773
 774        *params = (struct rdma_netdev_alloc_params){
 775                .sizeof_priv = sizeof(struct mlx5i_priv) +
 776                               sizeof(struct mlx5e_priv),
 777                .txqs = nch * MLX5E_MAX_NUM_TC,
 778                .rxqs = nch,
 779                .param = mdev,
 780                .initialize_rdma_netdev = mlx5_rdma_setup_rn,
 781        };
 782
 783        return 0;
 784}
 785EXPORT_SYMBOL(mlx5_rdma_rn_get_params);
 786