linux/drivers/net/ethernet/mellanox/mlx5/core/sf/dev/dev.c
// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
/* Copyright (c) 2020 Mellanox Technologies Ltd */

#include <linux/mlx5/driver.h>
#include <linux/mlx5/device.h>
#include "mlx5_core.h"
#include "dev.h"
#include "sf/vhca_event.h"
#include "sf/sf.h"
#include "sf/mlx5_ifc_vhca_event.h"
#include "ecpf.h"

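/* Per parent-device table of SF (sub-function) auxiliary devices, indexed by
 * SF index. base_address and sf_bar_length describe how each SF's BAR space
 * is carved out of the parent PCI device's BAR 2.
 */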
struct mlx5_sf_dev_table {
        struct xarray devices;
        unsigned int max_sfs;
        phys_addr_t base_address;
        u64 sf_bar_length;
        struct notifier_block nb;
        struct mlx5_core_dev *dev;
};

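/* SF auxiliary devices can be created only when the HCA supports SFs and
 * VHCA state events.
 */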
static bool mlx5_sf_dev_supported(const struct mlx5_core_dev *dev)
{
        return MLX5_CAP_GEN(dev, sf) && mlx5_vhca_event_supported(dev);
}

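/* Return true while at least one SF auxiliary device is present on this
 * parent device.
 */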
bool mlx5_sf_dev_allocated(const struct mlx5_core_dev *dev)
{
        struct mlx5_sf_dev_table *table = dev->priv.sf_dev_table;

        if (!mlx5_sf_dev_supported(dev))
                return false;

        return !xa_empty(&table->devices);
}

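/* Expose the SF number of the auxiliary device to user space through a
 * read-only 'sfnum' sysfs attribute.
 */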
static ssize_t sfnum_show(struct device *dev, struct device_attribute *attr, char *buf)
{
        struct auxiliary_device *adev = container_of(dev, struct auxiliary_device, dev);
        struct mlx5_sf_dev *sf_dev = container_of(adev, struct mlx5_sf_dev, adev);

        return scnprintf(buf, PAGE_SIZE, "%u\n", sf_dev->sfnum);
}
static DEVICE_ATTR_RO(sfnum);

static struct attribute *sf_device_attrs[] = {
        &dev_attr_sfnum.attr,
        NULL,
};

static const struct attribute_group sf_attr_group = {
        .attrs = sf_device_attrs,
};

static const struct attribute_group *sf_attr_groups[2] = {
        &sf_attr_group,
        NULL
};

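/* Release callback invoked by the driver core when the last reference to
 * the auxiliary device is dropped; only then is it safe to free sf_dev.
 */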
static void mlx5_sf_dev_release(struct device *device)
{
        struct auxiliary_device *adev = container_of(device, struct auxiliary_device, dev);
        struct mlx5_sf_dev *sf_dev = container_of(adev, struct mlx5_sf_dev, adev);

        mlx5_adev_idx_free(adev->id);
        kfree(sf_dev);
}

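/* Unregister the auxiliary device; the final reference drop ends up in
 * mlx5_sf_dev_release().
 */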
static void mlx5_sf_dev_remove(struct mlx5_sf_dev *sf_dev)
{
        auxiliary_device_delete(&sf_dev->adev);
        auxiliary_device_uninit(&sf_dev->adev);
}

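/* Create and register an auxiliary device for the SF at @sf_index so that an
 * SF driver can bind to it. The SF's BAR base is carved out of the parent's
 * BAR 2 at sf_index * sf_bar_length.
 */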
static void mlx5_sf_dev_add(struct mlx5_core_dev *dev, u16 sf_index, u32 sfnum)
{
        struct mlx5_sf_dev_table *table = dev->priv.sf_dev_table;
        struct mlx5_sf_dev *sf_dev;
        struct pci_dev *pdev;
        int err;
        int id;

        id = mlx5_adev_idx_alloc();
        if (id < 0) {
                err = id;
                goto add_err;
        }

        sf_dev = kzalloc(sizeof(*sf_dev), GFP_KERNEL);
        if (!sf_dev) {
                mlx5_adev_idx_free(id);
                err = -ENOMEM;
                goto add_err;
        }
        pdev = dev->pdev;
        sf_dev->adev.id = id;
        sf_dev->adev.name = MLX5_SF_DEV_ID_NAME;
        sf_dev->adev.dev.release = mlx5_sf_dev_release;
        sf_dev->adev.dev.parent = &pdev->dev;
        sf_dev->adev.dev.groups = sf_attr_groups;
        sf_dev->sfnum = sfnum;
        sf_dev->parent_mdev = dev;

        if (!table->max_sfs) {
                mlx5_adev_idx_free(id);
                kfree(sf_dev);
                err = -EOPNOTSUPP;
                goto add_err;
        }
        sf_dev->bar_base_addr = table->base_address + (sf_index * table->sf_bar_length);

        err = auxiliary_device_init(&sf_dev->adev);
        if (err) {
                mlx5_adev_idx_free(id);
                kfree(sf_dev);
                goto add_err;
        }

        err = auxiliary_device_add(&sf_dev->adev);
        if (err) {
                put_device(&sf_dev->adev.dev);
                goto add_err;
        }

        err = xa_insert(&table->devices, sf_index, sf_dev, GFP_KERNEL);
        if (err)
                goto xa_err;
        return;

xa_err:
        mlx5_sf_dev_remove(sf_dev);
add_err:
        mlx5_core_err(dev, "SF DEV: fail device add for index=%d sfnum=%d err=%d\n",
                      sf_index, sfnum, err);
}

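/* Remove the SF from the table and unregister its auxiliary device. */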
static void mlx5_sf_dev_del(struct mlx5_core_dev *dev, struct mlx5_sf_dev *sf_dev, u16 sf_index)
{
        struct mlx5_sf_dev_table *table = dev->priv.sf_dev_table;

        xa_erase(&table->devices, sf_index);
        mlx5_sf_dev_remove(sf_dev);
}

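/* VHCA state change handler. Map the event's function_id to an SF index and
 * create or destroy the corresponding auxiliary device: ACTIVE creates one,
 * while INVALID, ALLOCATED and TEARDOWN_REQUEST remove an existing one.
 */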
static int
mlx5_sf_dev_state_change_handler(struct notifier_block *nb, unsigned long event_code, void *data)
{
        struct mlx5_sf_dev_table *table = container_of(nb, struct mlx5_sf_dev_table, nb);
        const struct mlx5_vhca_state_event *event = data;
        struct mlx5_sf_dev *sf_dev;
        u16 max_functions;
        u16 sf_index;
        u16 base_id;

        max_functions = mlx5_sf_max_functions(table->dev);
        if (!max_functions)
                return 0;

        base_id = MLX5_CAP_GEN(table->dev, sf_base_id);
        if (event->function_id < base_id || event->function_id >= (base_id + max_functions))
                return 0;

        sf_index = event->function_id - base_id;
        sf_dev = xa_load(&table->devices, sf_index);
        switch (event->new_vhca_state) {
        case MLX5_VHCA_STATE_INVALID:
        case MLX5_VHCA_STATE_ALLOCATED:
                if (sf_dev)
                        mlx5_sf_dev_del(table->dev, sf_dev, sf_index);
                break;
        case MLX5_VHCA_STATE_TEARDOWN_REQUEST:
                if (sf_dev)
                        mlx5_sf_dev_del(table->dev, sf_dev, sf_index);
                else
                        mlx5_core_err(table->dev,
                                      "SF DEV: teardown state for invalid dev index=%d fn_id=0x%x\n",
                                      sf_index, event->sw_function_id);
                break;
        case MLX5_VHCA_STATE_ACTIVE:
                if (!sf_dev)
                        mlx5_sf_dev_add(table->dev, sf_index, event->sw_function_id);
                break;
        default:
                break;
        }
        return 0;
}

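/* Arm the VHCA context of every SF in this device's SF range so that their
 * state changes are delivered as VHCA events.
 */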
static int mlx5_sf_dev_vhca_arm_all(struct mlx5_sf_dev_table *table)
{
        struct mlx5_core_dev *dev = table->dev;
        u16 max_functions;
        u16 function_id;
        int err = 0;
        int i;

        max_functions = mlx5_sf_max_functions(dev);
        function_id = MLX5_CAP_GEN(dev, sf_base_id);
        /* Arm the vhca context as the vhca event notifier */
        for (i = 0; i < max_functions; i++) {
                err = mlx5_vhca_event_arm(dev, function_id);
                if (err)
                        return err;

                function_id++;
        }
        return 0;
}

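/* Allocate the SF device table, size each SF's BAR slice from device
 * capabilities and register for VHCA state events so that SF auxiliary
 * devices are created and destroyed on demand. Failure is logged but is not
 * fatal to the parent device.
 */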
void mlx5_sf_dev_table_create(struct mlx5_core_dev *dev)
{
        struct mlx5_sf_dev_table *table;
        unsigned int max_sfs;
        int err;

        if (!mlx5_sf_dev_supported(dev) || !mlx5_vhca_event_supported(dev))
                return;

        table = kzalloc(sizeof(*table), GFP_KERNEL);
        if (!table) {
                err = -ENOMEM;
                goto table_err;
        }

        table->nb.notifier_call = mlx5_sf_dev_state_change_handler;
        table->dev = dev;
        if (MLX5_CAP_GEN(dev, max_num_sf))
                max_sfs = MLX5_CAP_GEN(dev, max_num_sf);
        else
                max_sfs = 1 << MLX5_CAP_GEN(dev, log_max_sf);
        table->sf_bar_length = 1 << (MLX5_CAP_GEN(dev, log_min_sf_size) + 12);
        table->base_address = pci_resource_start(dev->pdev, 2);
        table->max_sfs = max_sfs;
        xa_init(&table->devices);
        dev->priv.sf_dev_table = table;

        err = mlx5_vhca_event_notifier_register(dev, &table->nb);
        if (err)
                goto vhca_err;
        err = mlx5_sf_dev_vhca_arm_all(table);
        if (err)
                goto arm_err;
        mlx5_core_dbg(dev, "SF DEV: max sf devices=%d\n", max_sfs);
        return;

arm_err:
        mlx5_vhca_event_notifier_unregister(dev, &table->nb);
vhca_err:
        table->max_sfs = 0;
        kfree(table);
        dev->priv.sf_dev_table = NULL;
table_err:
        mlx5_core_err(dev, "SF DEV table create err = %d\n", err);
}

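/* Remove all SF auxiliary devices that are still registered in the table. */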
static void mlx5_sf_dev_destroy_all(struct mlx5_sf_dev_table *table)
{
        struct mlx5_sf_dev *sf_dev;
        unsigned long index;

        xa_for_each(&table->devices, index, sf_dev) {
                xa_erase(&table->devices, index);
                mlx5_sf_dev_remove(sf_dev);
        }
}

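/* Tear down the SF device table: stop VHCA event delivery first, then remove
 * any remaining SF auxiliary devices.
 */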
void mlx5_sf_dev_table_destroy(struct mlx5_core_dev *dev)
{
        struct mlx5_sf_dev_table *table = dev->priv.sf_dev_table;

        if (!table)
                return;

        mlx5_vhca_event_notifier_unregister(dev, &table->nb);

        /* Now that event handler is not running, it is safe to destroy
         * the sf device without race.
         */
        mlx5_sf_dev_destroy_all(table);

        WARN_ON(!xa_empty(&table->devices));
        kfree(table);
        dev->priv.sf_dev_table = NULL;
}