/*
 * Copyright (c) 2014, Mellanox Technologies inc.  All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/pci.h>
#include <linux/mlx5/driver.h>
#include <linux/mlx5/vport.h>
#include "mlx5_core.h"
#include "eswitch.h"

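/* True when any VFs are currently configured on this PF. */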
bool mlx5_sriov_is_enabled(struct mlx5_core_dev *dev)
{
        struct mlx5_core_sriov *sriov = &dev->priv.sriov;

        return !!sriov->num_vfs;
}

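/*
 * Re-apply the node GUID, port GUID and state policy previously stored for
 * this VF in sriov->vfs_ctx (e.g. by the IB VF-management callbacks), so the
 * settings are restored when the VF HCA is enabled. Fields that were never
 * set are left out of field_select and remain untouched in firmware.
 */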
static int sriov_restore_guids(struct mlx5_core_dev *dev, int vf)
{
        struct mlx5_core_sriov *sriov = &dev->priv.sriov;
        struct mlx5_hca_vport_context *in;
        int err = 0;

        /* Restore SR-IOV GUID and policy settings */
        if (sriov->vfs_ctx[vf].node_guid ||
            sriov->vfs_ctx[vf].port_guid ||
            sriov->vfs_ctx[vf].policy != MLX5_POLICY_INVALID) {
                in = kzalloc(sizeof(*in), GFP_KERNEL);
                if (!in)
                        return -ENOMEM;

                in->node_guid = sriov->vfs_ctx[vf].node_guid;
                in->port_guid = sriov->vfs_ctx[vf].port_guid;
                in->policy = sriov->vfs_ctx[vf].policy;
                /* Select only the fields that were actually configured */
                in->field_select =
                        !!(in->port_guid) * MLX5_HCA_VPORT_SEL_PORT_GUID |
                        !!(in->node_guid) * MLX5_HCA_VPORT_SEL_NODE_GUID |
                        !!(in->policy) * MLX5_HCA_VPORT_SEL_STATE_POLICY;

                err = mlx5_core_modify_hca_vport_context(dev, 1, 1, vf + 1, in);
                if (err)
                        mlx5_core_warn(dev, "modify vport context failed, unable to restore VF %d settings\n", vf);

                kfree(in);
        }

        return err;
}

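/*
 * Device-level SRIOV enable: bring up the eswitch in legacy mode when this
 * function is the eswitch manager, then enable each VF HCA in firmware and,
 * on IB ports, restore any saved GUID/policy settings. A failure on one VF
 * is logged and skipped rather than aborting the whole operation.
 */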
static int mlx5_device_enable_sriov(struct mlx5_core_dev *dev, int num_vfs)
{
        struct mlx5_core_sriov *sriov = &dev->priv.sriov;
        int err;
        int vf;

        if (sriov->enabled_vfs) {
                mlx5_core_warn(dev,
                               "failed to enable SRIOV on device, already enabled with %d vfs\n",
                               sriov->enabled_vfs);
                return -EBUSY;
        }

        if (!MLX5_ESWITCH_MANAGER(dev))
                goto enable_vfs_hca;

        err = mlx5_eswitch_enable_sriov(dev->priv.eswitch, num_vfs, SRIOV_LEGACY);
        if (err) {
                mlx5_core_warn(dev,
                               "failed to enable eswitch SRIOV (%d)\n", err);
                return err;
        }

enable_vfs_hca:
        for (vf = 0; vf < num_vfs; vf++) {
                err = mlx5_core_enable_hca(dev, vf + 1);
                if (err) {
                        mlx5_core_warn(dev, "failed to enable VF %d (%d)\n", vf, err);
                        continue;
                }
                sriov->vfs_ctx[vf].enabled = 1;
                sriov->enabled_vfs++;
                if (MLX5_CAP_GEN(dev, port_type) == MLX5_CAP_PORT_TYPE_IB) {
                        err = sriov_restore_guids(dev, vf);
                        if (err) {
                                mlx5_core_warn(dev,
                                               "failed to restore VF %d settings, err %d\n",
                                               vf, err);
                                continue;
                        }
                }
                mlx5_core_dbg(dev, "successfully enabled VF %d\n", vf);
        }

        return 0;
}

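/*
 * Device-level SRIOV disable: disable every VF HCA that was enabled, tear
 * down the eswitch SRIOV configuration and wait for firmware to give back
 * the pages it allocated on behalf of the VFs.
 */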
static void mlx5_device_disable_sriov(struct mlx5_core_dev *dev)
{
        struct mlx5_core_sriov *sriov = &dev->priv.sriov;
        int err;
        int vf;

        if (!sriov->enabled_vfs)
                goto out;

        for (vf = 0; vf < sriov->num_vfs; vf++) {
                if (!sriov->vfs_ctx[vf].enabled)
                        continue;
                err = mlx5_core_disable_hca(dev, vf + 1);
                if (err) {
                        mlx5_core_warn(dev, "failed to disable VF %d\n", vf);
                        continue;
                }
                sriov->vfs_ctx[vf].enabled = 0;
                sriov->enabled_vfs--;
        }

out:
        if (MLX5_ESWITCH_MANAGER(dev))
                mlx5_eswitch_disable_sriov(dev->priv.eswitch);

        if (mlx5_wait_for_vf_pages(dev))
                mlx5_core_warn(dev, "timeout reclaiming VFs pages\n");
}

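/* PCI-level SRIOV enable; refuses to run if VFs are already enabled. */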
static int mlx5_pci_enable_sriov(struct pci_dev *pdev, int num_vfs)
{
        struct mlx5_core_dev *dev  = pci_get_drvdata(pdev);
        int err = 0;

        if (pci_num_vf(pdev)) {
                mlx5_core_warn(dev, "Unable to enable pci sriov, already enabled\n");
                return -EBUSY;
        }

        err = pci_enable_sriov(pdev, num_vfs);
        if (err)
                mlx5_core_warn(dev, "pci_enable_sriov failed : %d\n", err);

        return err;
}

static void mlx5_pci_disable_sriov(struct pci_dev *pdev)
{
        pci_disable_sriov(pdev);
}

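/*
 * Enable SRIOV at the device level before exposing the VFs at the PCI level;
 * unwind the device-level state if the PCI enable fails, and record num_vfs
 * on success.
 */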
static int mlx5_sriov_enable(struct pci_dev *pdev, int num_vfs)
{
        struct mlx5_core_dev *dev  = pci_get_drvdata(pdev);
        struct mlx5_core_sriov *sriov = &dev->priv.sriov;
        int err = 0;

        err = mlx5_device_enable_sriov(dev, num_vfs);
        if (err) {
                mlx5_core_warn(dev, "mlx5_device_enable_sriov failed : %d\n", err);
                return err;
        }

        err = mlx5_pci_enable_sriov(pdev, num_vfs);
        if (err) {
                mlx5_core_warn(dev, "mlx5_pci_enable_sriov failed : %d\n", err);
                mlx5_device_disable_sriov(dev);
                return err;
        }

        sriov->num_vfs = num_vfs;

        return 0;
}

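/* Teardown in reverse order: PCI level first, then device level. */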
static void mlx5_sriov_disable(struct pci_dev *pdev)
{
        struct mlx5_core_dev *dev  = pci_get_drvdata(pdev);
        struct mlx5_core_sriov *sriov = &dev->priv.sriov;

        mlx5_pci_disable_sriov(pdev);
        mlx5_device_disable_sriov(dev);
        sriov->num_vfs = 0;
}

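/*
 * sriov_configure callback of the PCI driver (reached via the sriov_numvfs
 * sysfs attribute). Only valid on the PF. A non-zero num_vfs enables SRIOV,
 * with LAG forbidden for the duration; zero disables it and allows LAG
 * again. Returns the number of VFs enabled, or a negative errno.
 */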
int mlx5_core_sriov_configure(struct pci_dev *pdev, int num_vfs)
{
        struct mlx5_core_dev *dev  = pci_get_drvdata(pdev);
        int err = 0;

        mlx5_core_dbg(dev, "requested num_vfs %d\n", num_vfs);
        if (!mlx5_core_is_pf(dev))
                return -EPERM;

        if (num_vfs) {
                int ret;

                ret = mlx5_lag_forbid(dev);
                if (ret && (ret != -ENODEV))
                        return ret;
        }

        if (num_vfs) {
                err = mlx5_sriov_enable(pdev, num_vfs);
        } else {
                mlx5_sriov_disable(pdev);
                mlx5_lag_allow(dev);
        }

        return err ? err : num_vfs;
}

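/*
 * Re-enable device-level SRIOV after the core device comes back (e.g. on
 * attach/reload) if VFs still exist at the PCI level.
 */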
int mlx5_sriov_attach(struct mlx5_core_dev *dev)
{
        struct mlx5_core_sriov *sriov = &dev->priv.sriov;

        if (!mlx5_core_is_pf(dev) || !sriov->num_vfs)
                return 0;

        /* If SR-IOV VFs exist at the PCI level, enable them at the device level */
        return mlx5_device_enable_sriov(dev, sriov->num_vfs);
}

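/* Counterpart of mlx5_sriov_attach: drop device-level SRIOV state on detach. */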
void mlx5_sriov_detach(struct mlx5_core_dev *dev)
{
        if (!mlx5_core_is_pf(dev))
                return;

        mlx5_device_disable_sriov(dev);
}

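/*
 * Allocate the per-VF context array, sized for the maximum number of VFs the
 * device supports, and pick up any VFs already enabled at the PCI level
 * (e.g. after a driver reload with VFs left configured).
 */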
int mlx5_sriov_init(struct mlx5_core_dev *dev)
{
        struct mlx5_core_sriov *sriov = &dev->priv.sriov;
        struct pci_dev *pdev = dev->pdev;
        int total_vfs;

        if (!mlx5_core_is_pf(dev))
                return 0;

        total_vfs = pci_sriov_get_totalvfs(pdev);
        sriov->num_vfs = pci_num_vf(pdev);
        sriov->vfs_ctx = kcalloc(total_vfs, sizeof(*sriov->vfs_ctx), GFP_KERNEL);
        if (!sriov->vfs_ctx)
                return -ENOMEM;

        return 0;
}

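/* Free the per-VF context array allocated in mlx5_sriov_init(). */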
void mlx5_sriov_cleanup(struct mlx5_core_dev *dev)
{
        struct mlx5_core_sriov *sriov = &dev->priv.sriov;

        if (!mlx5_core_is_pf(dev))
                return;

        kfree(sriov->vfs_ctx);
}