linux/drivers/net/ethernet/mellanox/mlx5/core/dev.c
/*
 * Copyright (c) 2013-2015, Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/mlx5/driver.h>
#include "mlx5_core.h"

static LIST_HEAD(intf_list);
static LIST_HEAD(mlx5_dev_list);
/* protects intf_list and mlx5_dev_list */
static DEFINE_MUTEX(mlx5_intf_mutex);

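/* One instance of a registered interface running on one core device;
 * ->context holds whatever the interface's ->add() callback returned.
 */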
struct mlx5_device_context {
        struct list_head        list;
        struct mlx5_interface  *intf;
        void                   *context;
        unsigned long           state;
};

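/* An event queued while priv->is_accum_events is set, to be replayed by
 * delayed_event_release() once the new interface can receive it.
 */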
struct mlx5_delayed_event {
        struct list_head        list;
        struct mlx5_core_dev    *dev;
        enum mlx5_dev_event     event;
        unsigned long           param;
};

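/* dev_ctx->state bits */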
enum {
        MLX5_INTERFACE_ADDED,
        MLX5_INTERFACE_ATTACHED,
};

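/* Queue an event for later replay.  Runs under priv->ctx_lock (see
 * mlx5_core_event()), hence the GFP_ATOMIC allocation.
 */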
static void add_delayed_event(struct mlx5_priv *priv,
                              struct mlx5_core_dev *dev,
                              enum mlx5_dev_event event,
                              unsigned long param)
{
        struct mlx5_delayed_event *delayed_event;

        delayed_event = kzalloc(sizeof(*delayed_event), GFP_ATOMIC);
        if (!delayed_event) {
                mlx5_core_err(dev, "no memory, dropping event %d\n", event);
                return;
        }

        mlx5_core_dbg(dev, "Accumulating event %d\n", event);
        delayed_event->dev = dev;
        delayed_event->event = event;
        delayed_event->param = param;
        list_add_tail(&delayed_event->list, &priv->waiting_events_list);
}

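/* Stop accumulating, deliver any queued events to the new context while
 * still under ctx_lock, then free them after dropping the lock.
 */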
static void delayed_event_release(struct mlx5_device_context *dev_ctx,
                                  struct mlx5_priv *priv)
{
        struct mlx5_core_dev *dev = container_of(priv, struct mlx5_core_dev, priv);
        struct mlx5_delayed_event *de;
        struct mlx5_delayed_event *n;
        struct list_head temp;

        INIT_LIST_HEAD(&temp);

        spin_lock_irq(&priv->ctx_lock);

        priv->is_accum_events = false;
        list_splice_init(&priv->waiting_events_list, &temp);
        if (!dev_ctx->context)
                goto out;
        list_for_each_entry_safe(de, n, &temp, list)
                dev_ctx->intf->event(dev, dev_ctx->context, de->event, de->param);

out:
        spin_unlock_irq(&priv->ctx_lock);

        list_for_each_entry_safe(de, n, &temp, list) {
                list_del(&de->list);
                kfree(de);
        }
}

/* Start accumulating events that may arrive after mlx5_ib has called
 * ib_register_device() but before that interface is added to the events
 * list.
 */
static void delayed_event_start(struct mlx5_priv *priv)
{
        spin_lock_irq(&priv->ctx_lock);
        priv->is_accum_events = true;
        spin_unlock_irq(&priv->ctx_lock);
}

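/* Instantiate @intf on the device behind @priv: call ->add(), record the
 * ADDED/ATTACHED state, hook up the optional page-fault handler, and replay
 * events accumulated while the interface was being added.
 */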
void mlx5_add_device(struct mlx5_interface *intf, struct mlx5_priv *priv)
{
        struct mlx5_device_context *dev_ctx;
        struct mlx5_core_dev *dev = container_of(priv, struct mlx5_core_dev, priv);

        if (!mlx5_lag_intf_add(intf, priv))
                return;

        dev_ctx = kzalloc(sizeof(*dev_ctx), GFP_KERNEL);
        if (!dev_ctx)
                return;

        dev_ctx->intf = intf;

        delayed_event_start(priv);

        dev_ctx->context = intf->add(dev);
        set_bit(MLX5_INTERFACE_ADDED, &dev_ctx->state);
        if (intf->attach)
                set_bit(MLX5_INTERFACE_ATTACHED, &dev_ctx->state);

        if (dev_ctx->context) {
                spin_lock_irq(&priv->ctx_lock);
                list_add_tail(&dev_ctx->list, &priv->ctx_list);

#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
                if (dev_ctx->intf->pfault) {
                        if (priv->pfault) {
                                mlx5_core_err(dev, "multiple page fault handlers not supported\n");
                        } else {
                                priv->pfault_ctx = dev_ctx->context;
                                priv->pfault = dev_ctx->intf->pfault;
                        }
                }
#endif
                spin_unlock_irq(&priv->ctx_lock);
        }

        delayed_event_release(dev_ctx, priv);

        if (!dev_ctx->context)
                kfree(dev_ctx);
}

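/* Find the context pairing @intf with @priv; callers hold mlx5_intf_mutex. */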
static struct mlx5_device_context *mlx5_get_device(struct mlx5_interface *intf,
                                                   struct mlx5_priv *priv)
{
        struct mlx5_device_context *dev_ctx;

        list_for_each_entry(dev_ctx, &priv->ctx_list, list)
                if (dev_ctx->intf == intf)
                        return dev_ctx;
        return NULL;
}

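/* Tear down one interface instance: unhook its page-fault handler and wait
 * out in-flight faults via SRCU, unlink the context, then call ->remove().
 */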
void mlx5_remove_device(struct mlx5_interface *intf, struct mlx5_priv *priv)
{
        struct mlx5_device_context *dev_ctx;
        struct mlx5_core_dev *dev = container_of(priv, struct mlx5_core_dev, priv);

        dev_ctx = mlx5_get_device(intf, priv);
        if (!dev_ctx)
                return;

#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
        spin_lock_irq(&priv->ctx_lock);
        if (priv->pfault == dev_ctx->intf->pfault)
                priv->pfault = NULL;
        spin_unlock_irq(&priv->ctx_lock);

        synchronize_srcu(&priv->pfault_srcu);
#endif

        spin_lock_irq(&priv->ctx_lock);
        list_del(&dev_ctx->list);
        spin_unlock_irq(&priv->ctx_lock);

        if (test_bit(MLX5_INTERFACE_ADDED, &dev_ctx->state))
                intf->remove(dev, dev_ctx->context);

        kfree(dev_ctx);
}

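/* Re-attach an existing interface instance; interfaces that provide no
 * ->attach() callback fall back to a full ->add().
 */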
static void mlx5_attach_interface(struct mlx5_interface *intf, struct mlx5_priv *priv)
{
        struct mlx5_device_context *dev_ctx;
        struct mlx5_core_dev *dev = container_of(priv, struct mlx5_core_dev, priv);

        dev_ctx = mlx5_get_device(intf, priv);
        if (!dev_ctx)
                return;

        delayed_event_start(priv);
        if (intf->attach) {
                if (test_bit(MLX5_INTERFACE_ATTACHED, &dev_ctx->state))
                        goto out;
                intf->attach(dev, dev_ctx->context);
                set_bit(MLX5_INTERFACE_ATTACHED, &dev_ctx->state);
        } else {
                if (test_bit(MLX5_INTERFACE_ADDED, &dev_ctx->state))
                        goto out;
                dev_ctx->context = intf->add(dev);
                set_bit(MLX5_INTERFACE_ADDED, &dev_ctx->state);
        }

out:
        delayed_event_release(dev_ctx, priv);
}

void mlx5_attach_device(struct mlx5_core_dev *dev)
{
        struct mlx5_priv *priv = &dev->priv;
        struct mlx5_interface *intf;

        mutex_lock(&mlx5_intf_mutex);
        list_for_each_entry(intf, &intf_list, list)
                mlx5_attach_interface(intf, priv);
        mutex_unlock(&mlx5_intf_mutex);
}

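/* Counterpart of mlx5_attach_interface(); interfaces that provide no
 * ->detach() callback fall back to a full ->remove().
 */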
static void mlx5_detach_interface(struct mlx5_interface *intf, struct mlx5_priv *priv)
{
        struct mlx5_device_context *dev_ctx;
        struct mlx5_core_dev *dev = container_of(priv, struct mlx5_core_dev, priv);

        dev_ctx = mlx5_get_device(intf, priv);
        if (!dev_ctx)
                return;

        if (intf->detach) {
                if (!test_bit(MLX5_INTERFACE_ATTACHED, &dev_ctx->state))
                        return;
                intf->detach(dev, dev_ctx->context);
                clear_bit(MLX5_INTERFACE_ATTACHED, &dev_ctx->state);
        } else {
                if (!test_bit(MLX5_INTERFACE_ADDED, &dev_ctx->state))
                        return;
                intf->remove(dev, dev_ctx->context);
                clear_bit(MLX5_INTERFACE_ADDED, &dev_ctx->state);
        }
}

void mlx5_detach_device(struct mlx5_core_dev *dev)
{
        struct mlx5_priv *priv = &dev->priv;
        struct mlx5_interface *intf;

        mutex_lock(&mlx5_intf_mutex);
        list_for_each_entry(intf, &intf_list, list)
                mlx5_detach_interface(intf, priv);
        mutex_unlock(&mlx5_intf_mutex);
}

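/* Report whether @dev is currently on the global mlx5_dev_list. */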
bool mlx5_device_registered(struct mlx5_core_dev *dev)
{
        struct mlx5_priv *priv;
        bool found = false;

        mutex_lock(&mlx5_intf_mutex);
        list_for_each_entry(priv, &mlx5_dev_list, dev_list)
                if (priv == &dev->priv)
                        found = true;
        mutex_unlock(&mlx5_intf_mutex);

        return found;
}

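/* Add @dev to the global device list and instantiate every registered
 * interface on it.
 */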
int mlx5_register_device(struct mlx5_core_dev *dev)
{
        struct mlx5_priv *priv = &dev->priv;
        struct mlx5_interface *intf;

        mutex_lock(&mlx5_intf_mutex);
        list_add_tail(&priv->dev_list, &mlx5_dev_list);
        list_for_each_entry(intf, &intf_list, list)
                mlx5_add_device(intf, priv);
        mutex_unlock(&mlx5_intf_mutex);

        return 0;
}

void mlx5_unregister_device(struct mlx5_core_dev *dev)
{
        struct mlx5_priv *priv = &dev->priv;
        struct mlx5_interface *intf;

        mutex_lock(&mlx5_intf_mutex);
        list_for_each_entry(intf, &intf_list, list)
                mlx5_remove_device(intf, priv);
        list_del(&priv->dev_list);
        mutex_unlock(&mlx5_intf_mutex);
}

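/* Register a new interface and instantiate it on every existing device;
 * ->add() and ->remove() are mandatory.
 */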
int mlx5_register_interface(struct mlx5_interface *intf)
{
        struct mlx5_priv *priv;

        if (!intf->add || !intf->remove)
                return -EINVAL;

        mutex_lock(&mlx5_intf_mutex);
        list_add_tail(&intf->list, &intf_list);
        list_for_each_entry(priv, &mlx5_dev_list, dev_list)
                mlx5_add_device(intf, priv);
        mutex_unlock(&mlx5_intf_mutex);

        return 0;
}
EXPORT_SYMBOL(mlx5_register_interface);

void mlx5_unregister_interface(struct mlx5_interface *intf)
{
        struct mlx5_priv *priv;

        mutex_lock(&mlx5_intf_mutex);
        list_for_each_entry(priv, &mlx5_dev_list, dev_list)
                mlx5_remove_device(intf, priv);
        list_del(&intf->list);
        mutex_unlock(&mlx5_intf_mutex);
}
EXPORT_SYMBOL(mlx5_unregister_interface);

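/* Bounce the interface bound to @protocol on @mdev: remove it, then add it
 * back, all under mlx5_intf_mutex.
 */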
void mlx5_reload_interface(struct mlx5_core_dev *mdev, int protocol)
{
        mutex_lock(&mlx5_intf_mutex);
        mlx5_remove_dev_by_protocol(mdev, protocol);
        mlx5_add_dev_by_protocol(mdev, protocol);
        mutex_unlock(&mlx5_intf_mutex);
}

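/* Return the protocol-specific device (via the interface's ->get_dev())
 * for the interface registered with @protocol, or NULL if none matches.
 */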
void *mlx5_get_protocol_dev(struct mlx5_core_dev *mdev, int protocol)
{
        struct mlx5_priv *priv = &mdev->priv;
        struct mlx5_device_context *dev_ctx;
        unsigned long flags;
        void *result = NULL;

        spin_lock_irqsave(&priv->ctx_lock, flags);

        list_for_each_entry(dev_ctx, &mdev->priv.ctx_list, list)
                if ((dev_ctx->intf->protocol == protocol) &&
                    dev_ctx->intf->get_dev) {
                        result = dev_ctx->intf->get_dev(dev_ctx->context);
                        break;
                }

        spin_unlock_irqrestore(&priv->ctx_lock, flags);

        return result;
}
EXPORT_SYMBOL(mlx5_get_protocol_dev);

/* Must be called with intf_mutex held */
void mlx5_add_dev_by_protocol(struct mlx5_core_dev *dev, int protocol)
{
        struct mlx5_interface *intf;

        list_for_each_entry(intf, &intf_list, list)
                if (intf->protocol == protocol) {
                        mlx5_add_device(intf, &dev->priv);
                        break;
                }
}

/* Must be called with intf_mutex held */
void mlx5_remove_dev_by_protocol(struct mlx5_core_dev *dev, int protocol)
{
        struct mlx5_interface *intf;

        list_for_each_entry(intf, &intf_list, list)
                if (intf->protocol == protocol) {
                        mlx5_remove_device(intf, &dev->priv);
                        break;
                }
}

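/* Bus number in the high byte, slot in the low byte; PCI_SLOT() discards
 * the function bits, so all functions of one physical device share an ID.
 */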
static u16 mlx5_gen_pci_id(struct mlx5_core_dev *dev)
{
        return (u16)((dev->pdev->bus->number << 8) |
                     PCI_SLOT(dev->pdev->devfn));
}

/* Must be called with intf_mutex held */
struct mlx5_core_dev *mlx5_get_next_phys_dev(struct mlx5_core_dev *dev)
{
        u16 pci_id = mlx5_gen_pci_id(dev);
        struct mlx5_core_dev *res = NULL;
        struct mlx5_core_dev *tmp_dev;
        struct mlx5_priv *priv;

        list_for_each_entry(priv, &mlx5_dev_list, dev_list) {
                tmp_dev = container_of(priv, struct mlx5_core_dev, priv);
                if ((dev != tmp_dev) && (mlx5_gen_pci_id(tmp_dev) == pci_id)) {
                        res = tmp_dev;
                        break;
                }
        }

        return res;
}

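/* Fan a device event out to every added/attached interface instance,
 * queueing it first when events are being accumulated for a new interface.
 */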
void mlx5_core_event(struct mlx5_core_dev *dev, enum mlx5_dev_event event,
                     unsigned long param)
{
        struct mlx5_priv *priv = &dev->priv;
        struct mlx5_device_context *dev_ctx;
        unsigned long flags;

        spin_lock_irqsave(&priv->ctx_lock, flags);

        if (priv->is_accum_events)
                add_delayed_event(priv, dev, event, param);

        /* After mlx5_detach_device(), dev_ctx->intf is still set and dev_ctx
         * is still on priv->ctx_list.  In that case, only notify a dev_ctx
         * if its ADDED or ATTACHED bit is set.
         */
        list_for_each_entry(dev_ctx, &priv->ctx_list, list)
                if (dev_ctx->intf->event &&
                    (test_bit(MLX5_INTERFACE_ADDED, &dev_ctx->state) ||
                     test_bit(MLX5_INTERFACE_ATTACHED, &dev_ctx->state)))
                        dev_ctx->intf->event(dev, dev_ctx->context, event, param);

        spin_unlock_irqrestore(&priv->ctx_lock, flags);
}

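/* Dispatch a page fault to the registered handler; SRCU keeps the handler
 * alive against a concurrent mlx5_remove_device().
 */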
#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
void mlx5_core_page_fault(struct mlx5_core_dev *dev,
                          struct mlx5_pagefault *pfault)
{
        struct mlx5_priv *priv = &dev->priv;
        int srcu_idx;

        srcu_idx = srcu_read_lock(&priv->pfault_srcu);
        if (priv->pfault)
                priv->pfault(dev, priv->pfault_ctx, pfault);
        srcu_read_unlock(&priv->pfault_srcu, srcu_idx);
}
#endif

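/* Wrappers exposing mlx5_intf_mutex to other core files as the device-list
 * lock.
 */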
void mlx5_dev_list_lock(void)
{
        mutex_lock(&mlx5_intf_mutex);
}

void mlx5_dev_list_unlock(void)
{
        mutex_unlock(&mlx5_intf_mutex);
}

int mlx5_dev_list_trylock(void)
{
        return mutex_trylock(&mlx5_intf_mutex);
}