linux/drivers/infiniband/hw/mlx5/ib_virt.c
/*
 * Copyright (c) 2016, Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

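/*
 * SR-IOV virtual function (VF) management for mlx5_ib: querying and
 * setting a VF's link-state policy, reading its IB traffic counters,
 * and assigning its node and port GUIDs from the PF.
 */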
#include <linux/module.h>
#include <linux/mlx5/vport.h>
#include "mlx5_ib.h"

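/* Map a device port-state policy to its IFLA_VF_LINK_STATE_* value. */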
static inline u32 mlx_to_net_policy(enum port_state_policy mlx_policy)
{
        switch (mlx_policy) {
        case MLX5_POLICY_DOWN:
                return IFLA_VF_LINK_STATE_DISABLE;
        case MLX5_POLICY_UP:
                return IFLA_VF_LINK_STATE_ENABLE;
        case MLX5_POLICY_FOLLOW:
                return IFLA_VF_LINK_STATE_AUTO;
        default:
                return __IFLA_VF_LINK_STATE_MAX;
        }
}

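/*
 * Query the HCA vport context of a VF (vport vf + 1) and report its
 * link-state policy through the ifla_vf_info structure.
 */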
int mlx5_ib_get_vf_config(struct ib_device *device, int vf, u8 port,
                          struct ifla_vf_info *info)
{
        struct mlx5_ib_dev *dev = to_mdev(device);
        struct mlx5_core_dev *mdev = dev->mdev;
        struct mlx5_hca_vport_context *rep;
        int err;

        rep = kzalloc(sizeof(*rep), GFP_KERNEL);
        if (!rep)
                return -ENOMEM;

        err = mlx5_query_hca_vport_context(mdev, 1, 1, vf + 1, rep);
        if (err) {
                mlx5_ib_warn(dev, "failed to query port policy for vf %d (%d)\n",
                             vf, err);
                goto free;
        }
        memset(info, 0, sizeof(*info));
        info->linkstate = mlx_to_net_policy(rep->policy);
        if (info->linkstate == __IFLA_VF_LINK_STATE_MAX)
                err = -EINVAL;

free:
        kfree(rep);
        return err;
}

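/* Map an IFLA_VF_LINK_STATE_* value back to a device port-state policy. */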
static inline enum port_state_policy net_to_mlx_policy(int policy)
{
        switch (policy) {
        case IFLA_VF_LINK_STATE_DISABLE:
                return MLX5_POLICY_DOWN;
        case IFLA_VF_LINK_STATE_ENABLE:
                return MLX5_POLICY_UP;
        case IFLA_VF_LINK_STATE_AUTO:
                return MLX5_POLICY_FOLLOW;
        default:
                return MLX5_POLICY_INVALID;
        }
}

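/*
 * Program the requested link-state policy into the VF's HCA vport
 * context and cache it in the driver's per-VF state on success.
 */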
int mlx5_ib_set_vf_link_state(struct ib_device *device, int vf,
                              u8 port, int state)
{
        struct mlx5_ib_dev *dev = to_mdev(device);
        struct mlx5_core_dev *mdev = dev->mdev;
        struct mlx5_hca_vport_context *in;
        struct mlx5_vf_context *vfs_ctx = mdev->priv.sriov.vfs_ctx;
        int err;

        in = kzalloc(sizeof(*in), GFP_KERNEL);
        if (!in)
                return -ENOMEM;

        in->policy = net_to_mlx_policy(state);
        if (in->policy == MLX5_POLICY_INVALID) {
                err = -EINVAL;
                goto out;
        }
        in->field_select = MLX5_HCA_VPORT_SEL_STATE_POLICY;
        err = mlx5_core_modify_hca_vport_context(mdev, 1, 1, vf + 1, in);
        if (!err)
                vfs_ctx[vf].policy = in->policy;

out:
        kfree(in);
        return err;
}

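/*
 * Read the VF's vport counters and report the IB unicast/multicast
 * packet and octet counts through the ifla_vf_stats structure.
 */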
int mlx5_ib_get_vf_stats(struct ib_device *device, int vf,
                         u8 port, struct ifla_vf_stats *stats)
{
        int out_sz = MLX5_ST_SZ_BYTES(query_vport_counter_out);
        struct mlx5_core_dev *mdev;
        struct mlx5_ib_dev *dev;
        void *out;
        int err;

        dev = to_mdev(device);
        mdev = dev->mdev;

        out = kzalloc(out_sz, GFP_KERNEL);
        if (!out)
                return -ENOMEM;

        err = mlx5_core_query_vport_counter(mdev, true, vf, port, out, out_sz);
        if (err)
                goto ex;

        stats->rx_packets = MLX5_GET64_PR(query_vport_counter_out, out, received_ib_unicast.packets);
        stats->tx_packets = MLX5_GET64_PR(query_vport_counter_out, out, transmitted_ib_unicast.packets);
        stats->rx_bytes = MLX5_GET64_PR(query_vport_counter_out, out, received_ib_unicast.octets);
        stats->tx_bytes = MLX5_GET64_PR(query_vport_counter_out, out, transmitted_ib_unicast.octets);
        stats->multicast = MLX5_GET64_PR(query_vport_counter_out, out, received_ib_multicast.packets);

ex:
        kfree(out);
        return err;
}

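/* Set the node GUID in the VF's HCA vport context and cache it on success. */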
static int set_vf_node_guid(struct ib_device *device, int vf, u8 port, u64 guid)
{
        struct mlx5_ib_dev *dev = to_mdev(device);
        struct mlx5_core_dev *mdev = dev->mdev;
        struct mlx5_hca_vport_context *in;
        struct mlx5_vf_context *vfs_ctx = mdev->priv.sriov.vfs_ctx;
        int err;

        in = kzalloc(sizeof(*in), GFP_KERNEL);
        if (!in)
                return -ENOMEM;

        in->field_select = MLX5_HCA_VPORT_SEL_NODE_GUID;
        in->node_guid = guid;
        err = mlx5_core_modify_hca_vport_context(mdev, 1, 1, vf + 1, in);
        if (!err)
                vfs_ctx[vf].node_guid = guid;
        kfree(in);
        return err;
}

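/* Set the port GUID in the VF's HCA vport context and cache it on success. */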
static int set_vf_port_guid(struct ib_device *device, int vf, u8 port, u64 guid)
{
        struct mlx5_ib_dev *dev = to_mdev(device);
        struct mlx5_core_dev *mdev = dev->mdev;
        struct mlx5_hca_vport_context *in;
        struct mlx5_vf_context *vfs_ctx = mdev->priv.sriov.vfs_ctx;
        int err;

        in = kzalloc(sizeof(*in), GFP_KERNEL);
        if (!in)
                return -ENOMEM;

        in->field_select = MLX5_HCA_VPORT_SEL_PORT_GUID;
        in->port_guid = guid;
        err = mlx5_core_modify_hca_vport_context(mdev, 1, 1, vf + 1, in);
        if (!err)
                vfs_ctx[vf].port_guid = guid;
        kfree(in);
        return err;
}

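/* Route a GUID assignment to the node or port GUID setter based on type. */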
int mlx5_ib_set_vf_guid(struct ib_device *device, int vf, u8 port,
                        u64 guid, int type)
{
        if (type == IFLA_VF_IB_NODE_GUID)
                return set_vf_node_guid(device, vf, port, guid);
        else if (type == IFLA_VF_IB_PORT_GUID)
                return set_vf_port_guid(device, vf, port, guid);

        return -EINVAL;
}