linux/drivers/net/ethernet/mellanox/mlx5/core/sriov.c
/*
 * Copyright (c) 2014, Mellanox Technologies inc.  All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/pci.h>
#include <linux/mlx5/driver.h>
#include <linux/mlx5/vport.h>
#include "mlx5_core.h"
#include "eswitch.h"

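/* Report whether SR-IOV is currently enabled on this device (num_vfs != 0). */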
bool mlx5_sriov_is_enabled(struct mlx5_core_dev *dev)
{
        struct mlx5_core_sriov *sriov = &dev->priv.sriov;

        return !!sriov->num_vfs;
}

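/*
 * Re-apply the node GUID, port GUID and state policy that were previously
 * configured for this VF by writing them back through the HCA vport context.
 * Used when a VF is enabled on an IB port.
 */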
static int sriov_restore_guids(struct mlx5_core_dev *dev, int vf)
{
        struct mlx5_core_sriov *sriov = &dev->priv.sriov;
        struct mlx5_hca_vport_context *in;
        int err = 0;

        /* Restore sriov guid and policy settings */
        if (sriov->vfs_ctx[vf].node_guid ||
            sriov->vfs_ctx[vf].port_guid ||
            sriov->vfs_ctx[vf].policy != MLX5_POLICY_INVALID) {
                in = kzalloc(sizeof(*in), GFP_KERNEL);
                if (!in)
                        return -ENOMEM;

                in->node_guid = sriov->vfs_ctx[vf].node_guid;
                in->port_guid = sriov->vfs_ctx[vf].port_guid;
                in->policy = sriov->vfs_ctx[vf].policy;
                in->field_select =
                        !!(in->port_guid) * MLX5_HCA_VPORT_SEL_PORT_GUID |
                        !!(in->node_guid) * MLX5_HCA_VPORT_SEL_NODE_GUID |
                        !!(in->policy) * MLX5_HCA_VPORT_SEL_STATE_POLICY;

                err = mlx5_core_modify_hca_vport_context(dev, 1, 1, vf + 1, in);
                if (err)
                        mlx5_core_warn(dev, "modify vport context failed, unable to restore VF %d settings\n", vf);

                kfree(in);
        }

        return err;
}

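/*
 * Device-level SR-IOV enable: bring up the eswitch in legacy mode, enable the
 * HCA for each VF and, on IB ports, restore any saved per-VF GUID/policy
 * settings. Failures on individual VFs are logged and skipped rather than
 * aborting the whole operation.
 */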
static int mlx5_device_enable_sriov(struct mlx5_core_dev *dev, int num_vfs)
{
        struct mlx5_core_sriov *sriov = &dev->priv.sriov;
        int err;
        int vf;

        if (sriov->enabled_vfs) {
                mlx5_core_warn(dev,
                               "failed to enable SRIOV on device, already enabled with %d vfs\n",
                               sriov->enabled_vfs);
                return -EBUSY;
        }

        err = mlx5_eswitch_enable_sriov(dev->priv.eswitch, num_vfs, SRIOV_LEGACY);
        if (err) {
                mlx5_core_warn(dev,
                               "failed to enable eswitch SRIOV (%d)\n", err);
                return err;
        }

        for (vf = 0; vf < num_vfs; vf++) {
                err = mlx5_core_enable_hca(dev, vf + 1);
                if (err) {
                        mlx5_core_warn(dev, "failed to enable VF %d (%d)\n", vf, err);
                        continue;
                }
                sriov->vfs_ctx[vf].enabled = 1;
                sriov->enabled_vfs++;
                if (MLX5_CAP_GEN(dev, port_type) == MLX5_CAP_PORT_TYPE_IB) {
                        err = sriov_restore_guids(dev, vf);
                        if (err) {
                                mlx5_core_warn(dev,
                                               "failed to restore VF %d settings, err %d\n",
                                               vf, err);
                                continue;
                        }
                }
                mlx5_core_dbg(dev, "successfully enabled VF %d\n", vf);
        }

        return 0;
}

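/*
 * Device-level SR-IOV disable: disable the HCA for every enabled VF, tear
 * down eswitch SR-IOV and wait for the firmware to release the pages that
 * were handed to the VFs.
 */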
static void mlx5_device_disable_sriov(struct mlx5_core_dev *dev)
{
        struct mlx5_core_sriov *sriov = &dev->priv.sriov;
        int err;
        int vf;

        if (!sriov->enabled_vfs)
                goto out;

        for (vf = 0; vf < sriov->num_vfs; vf++) {
                if (!sriov->vfs_ctx[vf].enabled)
                        continue;
                err = mlx5_core_disable_hca(dev, vf + 1);
                if (err) {
                        mlx5_core_warn(dev, "failed to disable VF %d\n", vf);
                        continue;
                }
                sriov->vfs_ctx[vf].enabled = 0;
                sriov->enabled_vfs--;
        }

out:
        mlx5_eswitch_disable_sriov(dev->priv.eswitch);

        if (mlx5_wait_for_vf_pages(dev))
                mlx5_core_warn(dev, "timeout reclaiming VFs pages\n");
}

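/* PCI-level SR-IOV enable; refuses if VFs are already instantiated. */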
static int mlx5_pci_enable_sriov(struct pci_dev *pdev, int num_vfs)
{
        struct mlx5_core_dev *dev  = pci_get_drvdata(pdev);
        int err = 0;

        if (pci_num_vf(pdev)) {
                mlx5_core_warn(dev, "Unable to enable pci sriov, already enabled\n");
                return -EBUSY;
        }

        err = pci_enable_sriov(pdev, num_vfs);
        if (err)
                mlx5_core_warn(dev, "pci_enable_sriov failed : %d\n", err);

        return err;
}

static void mlx5_pci_disable_sriov(struct pci_dev *pdev)
{
        pci_disable_sriov(pdev);
}

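/*
 * Enable SR-IOV in two stages: device/eswitch setup first, then PCI VF
 * instantiation. If the PCI stage fails, the device stage is unwound.
 */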
static int mlx5_sriov_enable(struct pci_dev *pdev, int num_vfs)
{
        struct mlx5_core_dev *dev  = pci_get_drvdata(pdev);
        struct mlx5_core_sriov *sriov = &dev->priv.sriov;
        int err = 0;

        err = mlx5_device_enable_sriov(dev, num_vfs);
        if (err) {
                mlx5_core_warn(dev, "mlx5_device_enable_sriov failed : %d\n", err);
                return err;
        }

        err = mlx5_pci_enable_sriov(pdev, num_vfs);
        if (err) {
                mlx5_core_warn(dev, "mlx5_pci_enable_sriov failed : %d\n", err);
                mlx5_device_disable_sriov(dev);
                return err;
        }

        sriov->num_vfs = num_vfs;

        return 0;
}

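/* Disable SR-IOV in reverse order: PCI level first, then the device level. */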
static void mlx5_sriov_disable(struct pci_dev *pdev)
{
        struct mlx5_core_dev *dev  = pci_get_drvdata(pdev);
        struct mlx5_core_sriov *sriov = &dev->priv.sriov;

        mlx5_pci_disable_sriov(pdev);
        mlx5_device_disable_sriov(dev);
        sriov->num_vfs = 0;
}

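/*
 * Entry point for changing the VF count, intended to be used as the PCI
 * driver's .sriov_configure callback (typically reached via the PF's
 * sriov_numvfs sysfs attribute). Only the PF may configure SR-IOV, and LAG
 * is forbidden while VFs are enabled. Returns the number of VFs enabled on
 * success or a negative errno on failure.
 */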
int mlx5_core_sriov_configure(struct pci_dev *pdev, int num_vfs)
{
        struct mlx5_core_dev *dev  = pci_get_drvdata(pdev);
        int err = 0;

        mlx5_core_dbg(dev, "requested num_vfs %d\n", num_vfs);
        if (!mlx5_core_is_pf(dev))
                return -EPERM;

        if (num_vfs) {
                int ret;

                ret = mlx5_lag_forbid(dev);
                if (ret && (ret != -ENODEV))
                        return ret;
        }

        if (num_vfs) {
                err = mlx5_sriov_enable(pdev, num_vfs);
        } else {
                mlx5_sriov_disable(pdev);
                mlx5_lag_allow(dev);
        }

        return err ? err : num_vfs;
}

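/*
 * Restore device-level SR-IOV state on the PF attach/load path when VFs are
 * already instantiated at the PCI level.
 */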
int mlx5_sriov_attach(struct mlx5_core_dev *dev)
{
        struct mlx5_core_sriov *sriov = &dev->priv.sriov;

        if (!mlx5_core_is_pf(dev) || !sriov->num_vfs)
                return 0;

        /* If SR-IOV VFs exist at the PCI level, enable them at the device level */
        return mlx5_device_enable_sriov(dev, sriov->num_vfs);
}

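/*
 * Tear down device-level SR-IOV state on the PF detach/unload path. PCI-level
 * VFs and the recorded num_vfs are left intact so they can be re-enabled on
 * attach.
 */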
void mlx5_sriov_detach(struct mlx5_core_dev *dev)
{
        if (!mlx5_core_is_pf(dev))
                return;

        mlx5_device_disable_sriov(dev);
}

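/*
 * Allocate the per-VF context array, sized for the maximum number of VFs the
 * PF supports, and record how many VFs are already instantiated.
 */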
int mlx5_sriov_init(struct mlx5_core_dev *dev)
{
        struct mlx5_core_sriov *sriov = &dev->priv.sriov;
        struct pci_dev *pdev = dev->pdev;
        int total_vfs;

        if (!mlx5_core_is_pf(dev))
                return 0;

        total_vfs = pci_sriov_get_totalvfs(pdev);
        sriov->num_vfs = pci_num_vf(pdev);
        sriov->vfs_ctx = kcalloc(total_vfs, sizeof(*sriov->vfs_ctx), GFP_KERNEL);
        if (!sriov->vfs_ctx)
                return -ENOMEM;

        return 0;
}

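/* Free the per-VF context array allocated by mlx5_sriov_init(). */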
void mlx5_sriov_cleanup(struct mlx5_core_dev *dev)
{
        struct mlx5_core_sriov *sriov = &dev->priv.sriov;

        if (!mlx5_core_is_pf(dev))
                return;

        kfree(sriov->vfs_ctx);
}