linux/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.c
/*
 * Copyright (c) 2017, Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <rdma/ib_verbs.h>
#include <linux/mlx5/fs.h>
#include "en.h"
#include "ipoib.h"

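/*
 * 0xb1b is the well-known default IPoIB Q_Key (the same value the core IPoIB
 * ULP uses unless the subnet manager assigns another one), and a log RQ size
 * of 9 gives the 512-entry receive queue noted below.
 */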
#define IB_DEFAULT_Q_KEY   0xb1b
#define MLX5I_PARAMS_DEFAULT_LOG_RQ_SIZE 9

static int mlx5i_open(struct net_device *netdev);
static int mlx5i_close(struct net_device *netdev);
static int mlx5i_change_mtu(struct net_device *netdev, int new_mtu);

static const struct net_device_ops mlx5i_netdev_ops = {
        .ndo_open                = mlx5i_open,
        .ndo_stop                = mlx5i_close,
        .ndo_init                = mlx5i_dev_init,
        .ndo_uninit              = mlx5i_dev_cleanup,
        .ndo_change_mtu          = mlx5i_change_mtu,
        .ndo_do_ioctl            = mlx5i_ioctl,
};

/* IPoIB mlx5 netdev profile */
static void mlx5i_build_nic_params(struct mlx5_core_dev *mdev,
                                   struct mlx5e_params *params)
{
        /* Override RQ params as IPoIB supports only LINKED LIST RQ for now */
        MLX5E_SET_PFLAG(params, MLX5E_PFLAG_RX_STRIDING_RQ, false);
        mlx5e_set_rq_type(mdev, params);
        mlx5e_init_rq_type_params(mdev, params);

        /* RQ size in ipoib by default is 512 */
        params->log_rq_mtu_frames = is_kdump_kernel() ?
                MLX5E_PARAMS_MINIMUM_LOG_RQ_SIZE :
                MLX5I_PARAMS_DEFAULT_LOG_RQ_SIZE;

        params->lro_en = false;
        params->hard_mtu = MLX5_IB_GRH_BYTES + MLX5_IPOIB_HARD_LEN;
}

/* Called directly after IPoIB netdevice was created to initialize SW structs */
void mlx5i_init(struct mlx5_core_dev *mdev,
                struct net_device *netdev,
                const struct mlx5e_profile *profile,
                void *ppriv)
{
        struct mlx5e_priv *priv  = mlx5i_epriv(netdev);

        /* priv init */
        priv->mdev        = mdev;
        priv->netdev      = netdev;
        priv->profile     = profile;
        priv->ppriv       = ppriv;
        mutex_init(&priv->state_lock);

        mlx5e_build_nic_params(mdev, &priv->channels.params,
                               profile->max_nch(mdev), netdev->mtu);
        mlx5i_build_nic_params(mdev, &priv->channels.params);

        mlx5e_timestamp_init(priv);

        /* netdev init */
        netdev->hw_features    |= NETIF_F_SG;
        netdev->hw_features    |= NETIF_F_IP_CSUM;
        netdev->hw_features    |= NETIF_F_IPV6_CSUM;
        netdev->hw_features    |= NETIF_F_GRO;
        netdev->hw_features    |= NETIF_F_TSO;
        netdev->hw_features    |= NETIF_F_TSO6;
        netdev->hw_features    |= NETIF_F_RXCSUM;
        netdev->hw_features    |= NETIF_F_RXHASH;

        netdev->netdev_ops = &mlx5i_netdev_ops;
        netdev->ethtool_ops = &mlx5i_ethtool_ops;
}

/* Called directly before IPoIB netdevice is destroyed to cleanup SW structs */
static void mlx5i_cleanup(struct mlx5e_priv *priv)
{
        /* Do nothing .. */
}

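/*
 * Drive the underlay QP through the standard IB state machine
 * (RESET -> INIT -> RTR -> RTS) before it can send and receive. The context
 * is cleared after RST2INIT, so INIT2RTR and RTR2RTS are issued with an
 * empty (all-default) context; on any failure the QP is moved to the error
 * state instead.
 */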
int mlx5i_init_underlay_qp(struct mlx5e_priv *priv)
{
        struct mlx5_core_dev *mdev = priv->mdev;
        struct mlx5i_priv *ipriv = priv->ppriv;
        struct mlx5_core_qp *qp = &ipriv->qp;
        struct mlx5_qp_context *context;
        int ret;

        /* QP states */
        context = kzalloc(sizeof(*context), GFP_KERNEL);
        if (!context)
                return -ENOMEM;

        context->flags = cpu_to_be32(MLX5_QP_PM_MIGRATED << 11);
        context->pri_path.port = 1;
        context->pri_path.pkey_index = cpu_to_be16(ipriv->pkey_index);
        context->qkey = cpu_to_be32(IB_DEFAULT_Q_KEY);

        ret = mlx5_core_qp_modify(mdev, MLX5_CMD_OP_RST2INIT_QP, 0, context, qp);
        if (ret) {
                mlx5_core_err(mdev, "Failed to modify qp RST2INIT, err: %d\n", ret);
                goto err_qp_modify_to_err;
        }
        memset(context, 0, sizeof(*context));

        ret = mlx5_core_qp_modify(mdev, MLX5_CMD_OP_INIT2RTR_QP, 0, context, qp);
        if (ret) {
                mlx5_core_err(mdev, "Failed to modify qp INIT2RTR, err: %d\n", ret);
                goto err_qp_modify_to_err;
        }

        ret = mlx5_core_qp_modify(mdev, MLX5_CMD_OP_RTR2RTS_QP, 0, context, qp);
        if (ret) {
                mlx5_core_err(mdev, "Failed to modify qp RTR2RTS, err: %d\n", ret);
                goto err_qp_modify_to_err;
        }

        kfree(context);
        return 0;

err_qp_modify_to_err:
        mlx5_core_qp_modify(mdev, MLX5_CMD_OP_2ERR_QP, 0, context, qp);
        kfree(context);
        return ret;
}

void mlx5i_uninit_underlay_qp(struct mlx5e_priv *priv)
{
        struct mlx5i_priv *ipriv = priv->ppriv;
        struct mlx5_core_dev *mdev = priv->mdev;
        struct mlx5_qp_context context;
        int err;

        err = mlx5_core_qp_modify(mdev, MLX5_CMD_OP_2RST_QP, 0, &context,
                                  &ipriv->qp);
        if (err)
                mlx5_core_err(mdev, "Failed to modify qp 2RST, err: %d\n", err);
}

#define MLX5_QP_ENHANCED_ULP_STATELESS_MODE 2

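/*
 * The underlay QP is an unreliable-datagram (UD) QP created in "enhanced ULP
 * stateless offload" mode, which is presumably what lets the regular mlx5e
 * TX/RX machinery apply Ethernet-style stateless offloads (checksum, LSO,
 * RSS) to IPoIB traffic.
 */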
int mlx5i_create_underlay_qp(struct mlx5_core_dev *mdev, struct mlx5_core_qp *qp)
{
        u32 *in = NULL;
        void *addr_path;
        int ret = 0;
        int inlen;
        void *qpc;

        inlen = MLX5_ST_SZ_BYTES(create_qp_in);
        in = kvzalloc(inlen, GFP_KERNEL);
        if (!in)
                return -ENOMEM;

        qpc = MLX5_ADDR_OF(create_qp_in, in, qpc);
        MLX5_SET(qpc, qpc, st, MLX5_QP_ST_UD);
        MLX5_SET(qpc, qpc, pm_state, MLX5_QP_PM_MIGRATED);
        MLX5_SET(qpc, qpc, ulp_stateless_offload_mode,
                 MLX5_QP_ENHANCED_ULP_STATELESS_MODE);

        addr_path = MLX5_ADDR_OF(qpc, qpc, primary_address_path);
        MLX5_SET(ads, addr_path, vhca_port_num, 1);
        MLX5_SET(ads, addr_path, grh, 1);

        ret = mlx5_core_create_qp(mdev, qp, in, inlen);
        if (ret) {
                mlx5_core_err(mdev, "Failed creating IPoIB QP err : %d\n", ret);
                goto out;
        }

out:
        kvfree(in);
        return ret;
}

void mlx5i_destroy_underlay_qp(struct mlx5_core_dev *mdev, struct mlx5_core_qp *qp)
{
        mlx5_core_destroy_qp(mdev, qp);
}

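/*
 * TX side setup: the underlay QP is created first and its QPN is then passed
 * to mlx5e_create_tis(), so the transport interface send (TIS) object is
 * bound to that QP; IPoIB uses a single TIS (tc 0) here.
 */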
static int mlx5i_init_tx(struct mlx5e_priv *priv)
{
        struct mlx5i_priv *ipriv = priv->ppriv;
        int err;

        err = mlx5i_create_underlay_qp(priv->mdev, &ipriv->qp);
        if (err) {
                mlx5_core_warn(priv->mdev, "create underlay QP failed, %d\n", err);
                return err;
        }

        err = mlx5e_create_tis(priv->mdev, 0 /* tc */, ipriv->qp.qpn, &priv->tisn[0]);
        if (err) {
                mlx5_core_warn(priv->mdev, "create tis failed, %d\n", err);
                goto err_destroy_underlay_qp;
        }

        return 0;

err_destroy_underlay_qp:
        mlx5i_destroy_underlay_qp(priv->mdev, &ipriv->qp);
        return err;
}

static void mlx5i_cleanup_tx(struct mlx5e_priv *priv)
{
        struct mlx5i_priv *ipriv = priv->ppriv;

        mlx5e_destroy_tis(priv->mdev, priv->tisn[0]);
        mlx5i_destroy_underlay_qp(priv->mdev, &ipriv->qp);
}

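/*
 * RX flow steering reuses the mlx5e kernel flow namespace: aRFS tables
 * (optional; NETIF_F_NTUPLE is simply dropped if they cannot be created),
 * then the inner and outer traffic-type classifier (TTC) tables that spread
 * flows over the indirect TIRs.
 */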
static int mlx5i_create_flow_steering(struct mlx5e_priv *priv)
{
        struct ttc_params ttc_params = {};
        int tt, err;

        priv->fs.ns = mlx5_get_flow_namespace(priv->mdev,
                                               MLX5_FLOW_NAMESPACE_KERNEL);

        if (!priv->fs.ns)
                return -EINVAL;

        err = mlx5e_arfs_create_tables(priv);
        if (err) {
                netdev_err(priv->netdev, "Failed to create arfs tables, err=%d\n",
                           err);
                priv->netdev->hw_features &= ~NETIF_F_NTUPLE;
        }

        mlx5e_set_ttc_basic_params(priv, &ttc_params);
        mlx5e_set_inner_ttc_ft_params(&ttc_params);
        for (tt = 0; tt < MLX5E_NUM_INDIR_TIRS; tt++)
                ttc_params.indir_tirn[tt] = priv->inner_indir_tir[tt].tirn;

        err = mlx5e_create_inner_ttc_table(priv, &ttc_params, &priv->fs.inner_ttc);
        if (err) {
                netdev_err(priv->netdev, "Failed to create inner ttc table, err=%d\n",
                           err);
                goto err_destroy_arfs_tables;
        }

        mlx5e_set_ttc_ft_params(&ttc_params);
        for (tt = 0; tt < MLX5E_NUM_INDIR_TIRS; tt++)
                ttc_params.indir_tirn[tt] = priv->indir_tir[tt].tirn;

        err = mlx5e_create_ttc_table(priv, &ttc_params, &priv->fs.ttc);
        if (err) {
                netdev_err(priv->netdev, "Failed to create ttc table, err=%d\n",
                           err);
                goto err_destroy_inner_ttc_table;
        }

        return 0;

err_destroy_inner_ttc_table:
        mlx5e_destroy_inner_ttc_table(priv, &priv->fs.inner_ttc);
err_destroy_arfs_tables:
        mlx5e_arfs_destroy_tables(priv);

        return err;
}

static void mlx5i_destroy_flow_steering(struct mlx5e_priv *priv)
{
        mlx5e_destroy_ttc_table(priv, &priv->fs.ttc);
        mlx5e_destroy_inner_ttc_table(priv, &priv->fs.inner_ttc);
        mlx5e_arfs_destroy_tables(priv);
}

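/*
 * RX side setup follows the usual mlx5e ordering: RQ tables (indirect then
 * direct), TIRs on top of them (indirect then direct), and finally the flow
 * steering tables that point at those TIRs. mlx5i_cleanup_rx() tears the
 * same objects down in reverse order.
 */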
static int mlx5i_init_rx(struct mlx5e_priv *priv)
{
        int err;

        err = mlx5e_create_indirect_rqt(priv);
        if (err)
                return err;

        err = mlx5e_create_direct_rqts(priv);
        if (err)
                goto err_destroy_indirect_rqts;

        err = mlx5e_create_indirect_tirs(priv);
        if (err)
                goto err_destroy_direct_rqts;

        err = mlx5e_create_direct_tirs(priv);
        if (err)
                goto err_destroy_indirect_tirs;

        err = mlx5i_create_flow_steering(priv);
        if (err)
                goto err_destroy_direct_tirs;

        return 0;

err_destroy_direct_tirs:
        mlx5e_destroy_direct_tirs(priv);
err_destroy_indirect_tirs:
        mlx5e_destroy_indirect_tirs(priv);
err_destroy_direct_rqts:
        mlx5e_destroy_direct_rqts(priv);
err_destroy_indirect_rqts:
        mlx5e_destroy_rqt(priv, &priv->indir_rqt);
        return err;
}

static void mlx5i_cleanup_rx(struct mlx5e_priv *priv)
{
        mlx5i_destroy_flow_steering(priv);
        mlx5e_destroy_direct_tirs(priv);
        mlx5e_destroy_indirect_tirs(priv);
        mlx5e_destroy_direct_rqts(priv);
        mlx5e_destroy_rqt(priv, &priv->indir_rqt);
}

static const struct mlx5e_profile mlx5i_nic_profile = {
        .init              = mlx5i_init,
        .cleanup           = mlx5i_cleanup,
        .init_tx           = mlx5i_init_tx,
        .cleanup_tx        = mlx5i_cleanup_tx,
        .init_rx           = mlx5i_init_rx,
        .cleanup_rx        = mlx5i_cleanup_rx,
        .enable            = NULL, /* mlx5i_enable */
        .disable           = NULL, /* mlx5i_disable */
        .update_stats      = NULL, /* mlx5i_update_stats */
        .max_nch           = mlx5e_get_max_num_channels,
        .update_carrier    = NULL, /* no HW update in IB link */
        .rx_handlers.handle_rx_cqe       = mlx5i_handle_rx_cqe,
        .rx_handlers.handle_rx_cqe_mpwqe = NULL, /* Not supported */
        .max_tc            = MLX5I_MAX_NUM_TC,
};

/* mlx5i netdev NDOs */

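/*
 * MTU changes take priv->state_lock. If the interface is closed only the
 * software MTU is recorded; if it is open, a fresh set of channels is opened
 * with the new MTU and swapped in via mlx5e_switch_priv_channels(), so the
 * old channels keep running until the new ones are ready.
 */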
static int mlx5i_change_mtu(struct net_device *netdev, int new_mtu)
{
        struct mlx5e_priv *priv = mlx5i_epriv(netdev);
        struct mlx5e_channels new_channels = {};
        struct mlx5e_params *params;
        int err = 0;

        mutex_lock(&priv->state_lock);

        params = &priv->channels.params;

        if (!test_bit(MLX5E_STATE_OPENED, &priv->state)) {
                params->sw_mtu = new_mtu;
                netdev->mtu = params->sw_mtu;
                goto out;
        }

        new_channels.params = *params;
        new_channels.params.sw_mtu = new_mtu;
        err = mlx5e_open_channels(priv, &new_channels);
        if (err)
                goto out;

        mlx5e_switch_priv_channels(priv, &new_channels, NULL);
        netdev->mtu = new_channels.params.sw_mtu;

out:
        mutex_unlock(&priv->state_lock);
        return err;
}

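/*
 * Bytes 1-3 of the 20-byte IPoIB hardware address carry the 24-bit QP number
 * (byte 0 holds flags and the remaining 16 bytes the port GID, filled in by
 * the IPoIB ULP), so only the QPN portion is written here.
 */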
int mlx5i_dev_init(struct net_device *dev)
{
        struct mlx5e_priv    *priv   = mlx5i_epriv(dev);
        struct mlx5i_priv    *ipriv  = priv->ppriv;

        /* Set dev address using underlay QP */
        dev->dev_addr[1] = (ipriv->qp.qpn >> 16) & 0xff;
        dev->dev_addr[2] = (ipriv->qp.qpn >>  8) & 0xff;
        dev->dev_addr[3] = (ipriv->qp.qpn) & 0xff;

        /* Add QPN to net-device mapping to HT */
        mlx5i_pkey_add_qpn(dev, ipriv->qp.qpn);

        return 0;
}

int mlx5i_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
{
        struct mlx5e_priv *priv = mlx5i_epriv(dev);

        switch (cmd) {
        case SIOCSHWTSTAMP:
                return mlx5e_hwstamp_set(priv, ifr);
        case SIOCGHWTSTAMP:
                return mlx5e_hwstamp_get(priv, ifr);
        default:
                return -EOPNOTSUPP;
        }
}

void mlx5i_dev_cleanup(struct net_device *dev)
{
        struct mlx5e_priv    *priv   = mlx5i_epriv(dev);
        struct mlx5i_priv    *ipriv = priv->ppriv;

        mlx5i_uninit_underlay_qp(priv);

        /* Delete QPN to net-device mapping from HT */
        mlx5i_pkey_del_qpn(dev, ipriv->qp.qpn);
}

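/*
 * ndo_open: bring the underlay QP to RTS, attach its QPN to the RX flow
 * tables, then open and activate the mlx5e channels. The error unwind below
 * undoes these steps in reverse and clears MLX5E_STATE_OPENED again.
 */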
static int mlx5i_open(struct net_device *netdev)
{
        struct mlx5e_priv *epriv = mlx5i_epriv(netdev);
        struct mlx5i_priv *ipriv = epriv->ppriv;
        struct mlx5_core_dev *mdev = epriv->mdev;
        int err;

        mutex_lock(&epriv->state_lock);

        set_bit(MLX5E_STATE_OPENED, &epriv->state);

        err = mlx5i_init_underlay_qp(epriv);
        if (err) {
                mlx5_core_warn(mdev, "prepare underlay qp state failed, %d\n", err);
                goto err_clear_state_opened_flag;
        }

        err = mlx5_fs_add_rx_underlay_qpn(mdev, ipriv->qp.qpn);
        if (err) {
                mlx5_core_warn(mdev, "attach underlay qp to ft failed, %d\n", err);
                goto err_reset_qp;
        }

        err = mlx5e_open_channels(epriv, &epriv->channels);
        if (err)
                goto err_remove_fs_underlay_qp;

        mlx5e_refresh_tirs(epriv, false);
        mlx5e_activate_priv_channels(epriv);

        mutex_unlock(&epriv->state_lock);
        return 0;

err_remove_fs_underlay_qp:
        mlx5_fs_remove_rx_underlay_qpn(mdev, ipriv->qp.qpn);
err_reset_qp:
        mlx5i_uninit_underlay_qp(epriv);
err_clear_state_opened_flag:
        clear_bit(MLX5E_STATE_OPENED, &epriv->state);
        mutex_unlock(&epriv->state_lock);
        return err;
}

static int mlx5i_close(struct net_device *netdev)
{
        struct mlx5e_priv *epriv = mlx5i_epriv(netdev);
        struct mlx5i_priv *ipriv = epriv->ppriv;
        struct mlx5_core_dev *mdev = epriv->mdev;

        /* May already be CLOSED in case a previous configuration operation
         * (e.g. an RX/TX queue size change) that involves close & open failed.
         */
        mutex_lock(&epriv->state_lock);

        if (!test_bit(MLX5E_STATE_OPENED, &epriv->state))
                goto unlock;

        clear_bit(MLX5E_STATE_OPENED, &epriv->state);

        netif_carrier_off(epriv->netdev);
        mlx5_fs_remove_rx_underlay_qpn(mdev, ipriv->qp.qpn);
        mlx5i_uninit_underlay_qp(epriv);
        mlx5e_deactivate_priv_channels(epriv);
        mlx5e_close_channels(&epriv->channels);
unlock:
        mutex_unlock(&epriv->state_lock);
        return 0;
}

/* IPoIB RDMA netdev callbacks */
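/*
 * Multicast attach/detach requests from the IPoIB ULP are translated into
 * mlx5_core_attach_mcg()/mlx5_core_detach_mcg() calls on the underlay QPN;
 * the Q_Key handed in on attach is cached for use on the send path.
 */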
static int mlx5i_attach_mcast(struct net_device *netdev, struct ib_device *hca,
                              union ib_gid *gid, u16 lid, int set_qkey,
                              u32 qkey)
{
        struct mlx5e_priv    *epriv = mlx5i_epriv(netdev);
        struct mlx5_core_dev *mdev  = epriv->mdev;
        struct mlx5i_priv    *ipriv = epriv->ppriv;
        int err;

        mlx5_core_dbg(mdev, "attaching QPN 0x%x, MGID %pI6\n", ipriv->qp.qpn, gid->raw);
        err = mlx5_core_attach_mcg(mdev, gid, ipriv->qp.qpn);
        if (err)
                mlx5_core_warn(mdev, "failed attaching QPN 0x%x, MGID %pI6\n",
                               ipriv->qp.qpn, gid->raw);

        if (set_qkey) {
                mlx5_core_dbg(mdev, "%s setting qkey 0x%x\n",
                              netdev->name, qkey);
                ipriv->qkey = qkey;
        }

        return err;
}

static int mlx5i_detach_mcast(struct net_device *netdev, struct ib_device *hca,
                              union ib_gid *gid, u16 lid)
{
        struct mlx5e_priv    *epriv = mlx5i_epriv(netdev);
        struct mlx5_core_dev *mdev  = epriv->mdev;
        struct mlx5i_priv    *ipriv = epriv->ppriv;
        int err;

        mlx5_core_dbg(mdev, "detaching QPN 0x%x, MGID %pI6\n", ipriv->qp.qpn, gid->raw);

        err = mlx5_core_detach_mcg(mdev, gid, ipriv->qp.qpn);
        if (err)
                mlx5_core_dbg(mdev, "failed detaching QPN 0x%x, MGID %pI6\n",
                              ipriv->qp.qpn, gid->raw);

        return err;
}

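/*
 * Send path entry point used by the IPoIB ULP: the skb's queue mapping picks
 * the mlx5e SQ, and the destination is described by the address vector taken
 * from the ib_ah plus the destination QPN and the cached Q_Key.
 */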
static int mlx5i_xmit(struct net_device *dev, struct sk_buff *skb,
                      struct ib_ah *address, u32 dqpn)
{
        struct mlx5e_priv *epriv = mlx5i_epriv(dev);
        struct mlx5e_txqsq *sq   = epriv->txq2sq[skb_get_queue_mapping(skb)];
        struct mlx5_ib_ah *mah   = to_mah(address);
        struct mlx5i_priv *ipriv = epriv->ppriv;

        return mlx5i_sq_xmit(sq, skb, &mah->av, dqpn, ipriv->qkey);
}

static void mlx5i_set_pkey_index(struct net_device *netdev, int id)
{
        struct mlx5i_priv *ipriv = netdev_priv(netdev);

        ipriv->pkey_index = (u16)id;
}

static int mlx5i_check_required_hca_cap(struct mlx5_core_dev *mdev)
{
        if (MLX5_CAP_GEN(mdev, port_type) != MLX5_CAP_PORT_TYPE_IB)
                return -EOPNOTSUPP;

        if (!MLX5_CAP_GEN(mdev, ipoib_enhanced_offloads)) {
                mlx5_core_warn(mdev, "IPoIB enhanced offloads are not supported\n");
                return -EOPNOTSUPP;
        }

        return 0;
}

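/*
 * Entry point used by the IB/IPoIB stack to allocate an accelerated IPoIB
 * netdev. A parent interface uses mlx5i_nic_profile; a child (PKEY)
 * interface is detected by the already-allocated PD on the mdev and gets the
 * lighter pkey profile. The returned net_device doubles as a rdma_netdev
 * whose callbacks (send, attach/detach_mcast, set_id) are wired up here.
 */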
struct net_device *mlx5_rdma_netdev_alloc(struct mlx5_core_dev *mdev,
                                          struct ib_device *ibdev,
                                          const char *name,
                                          void (*setup)(struct net_device *))
{
        const struct mlx5e_profile *profile;
        struct net_device *netdev;
        struct mlx5i_priv *ipriv;
        struct mlx5e_priv *epriv;
        struct rdma_netdev *rn;
        bool sub_interface;
        int nch;
        int err;

        if (mlx5i_check_required_hca_cap(mdev)) {
                mlx5_core_warn(mdev, "Accelerated mode is not supported\n");
                return ERR_PTR(-EOPNOTSUPP);
        }

        /* TODO: Need to find a better way to check if this is a child device */
        sub_interface = (mdev->mlx5e_res.pdn != 0);

        if (sub_interface)
                profile = mlx5i_pkey_get_profile();
        else
                profile = &mlx5i_nic_profile;

        nch = profile->max_nch(mdev);

        netdev = alloc_netdev_mqs(sizeof(struct mlx5i_priv) + sizeof(struct mlx5e_priv),
                                  name, NET_NAME_UNKNOWN,
                                  setup,
                                  nch * MLX5E_MAX_NUM_TC,
                                  nch);
        if (!netdev) {
                mlx5_core_warn(mdev, "alloc_netdev_mqs failed\n");
                return NULL;
        }

        ipriv = netdev_priv(netdev);
        epriv = mlx5i_epriv(netdev);

        epriv->wq = create_singlethread_workqueue("mlx5i");
        if (!epriv->wq)
                goto err_free_netdev;

        ipriv->sub_interface = sub_interface;
        if (!ipriv->sub_interface) {
                err = mlx5i_pkey_qpn_ht_init(netdev);
                if (err) {
                        mlx5_core_warn(mdev, "allocate qpn_to_netdev ht failed\n");
                        goto destroy_wq;
                }

                /* This should only be called once per mdev */
                err = mlx5e_create_mdev_resources(mdev);
                if (err)
                        goto destroy_ht;
        }

        profile->init(mdev, netdev, profile, ipriv);

        mlx5e_attach_netdev(epriv);
        netif_carrier_off(netdev);

        /* set rdma_netdev func pointers */
        rn = &ipriv->rn;
        rn->hca  = ibdev;
        rn->send = mlx5i_xmit;
        rn->attach_mcast = mlx5i_attach_mcast;
        rn->detach_mcast = mlx5i_detach_mcast;
        rn->set_id = mlx5i_set_pkey_index;

        return netdev;

destroy_ht:
        mlx5i_pkey_qpn_ht_cleanup(netdev);
destroy_wq:
        destroy_workqueue(epriv->wq);
err_free_netdev:
        free_netdev(netdev);

        return NULL;
}
EXPORT_SYMBOL(mlx5_rdma_netdev_alloc);

void mlx5_rdma_netdev_free(struct net_device *netdev)
{
        struct mlx5e_priv *priv = mlx5i_epriv(netdev);
        struct mlx5i_priv *ipriv = priv->ppriv;
        const struct mlx5e_profile *profile = priv->profile;

        mlx5e_detach_netdev(priv);
        profile->cleanup(priv);
        destroy_workqueue(priv->wq);

        if (!ipriv->sub_interface) {
                mlx5i_pkey_qpn_ht_cleanup(netdev);
                mlx5e_destroy_mdev_resources(priv->mdev);
        }
        free_netdev(netdev);
}
EXPORT_SYMBOL(mlx5_rdma_netdev_free);
