linux/drivers/net/ethernet/mellanox/mlx5/core/eq.c
/*
 * Copyright (c) 2013-2015, Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/mlx5/driver.h>
#include <linux/mlx5/cmd.h>
#include "mlx5_core.h"
#ifdef CONFIG_MLX5_CORE_EN
#include "eswitch.h"
#endif

enum {
        MLX5_EQE_SIZE           = sizeof(struct mlx5_eqe),
        MLX5_EQE_OWNER_INIT_VAL = 0x1,
};

enum {
        MLX5_EQ_STATE_ARMED             = 0x9,
        MLX5_EQ_STATE_FIRED             = 0xa,
        MLX5_EQ_STATE_ALWAYS_ARMED      = 0xb,
};

enum {
        MLX5_NUM_SPARE_EQE      = 0x80,
        MLX5_NUM_ASYNC_EQE      = 0x100,
        MLX5_NUM_CMD_EQE        = 32,
};

enum {
        MLX5_EQ_DOORBEL_OFFSET  = 0x40,
};

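/* Default set of asynchronous events delivered on the async EQ.
 * Additional bits (e.g. page faults, NIC vport changes) are OR'ed in at
 * EQ-creation time in mlx5_start_eqs() when the relevant capabilities
 * are present.
 */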
#define MLX5_ASYNC_EVENT_MASK ((1ull << MLX5_EVENT_TYPE_PATH_MIG)           | \
                               (1ull << MLX5_EVENT_TYPE_COMM_EST)           | \
                               (1ull << MLX5_EVENT_TYPE_SQ_DRAINED)         | \
                               (1ull << MLX5_EVENT_TYPE_CQ_ERROR)           | \
                               (1ull << MLX5_EVENT_TYPE_WQ_CATAS_ERROR)     | \
                               (1ull << MLX5_EVENT_TYPE_PATH_MIG_FAILED)    | \
                               (1ull << MLX5_EVENT_TYPE_WQ_INVAL_REQ_ERROR) | \
                               (1ull << MLX5_EVENT_TYPE_WQ_ACCESS_ERROR)    | \
                               (1ull << MLX5_EVENT_TYPE_PORT_CHANGE)        | \
                               (1ull << MLX5_EVENT_TYPE_SRQ_CATAS_ERROR)    | \
                               (1ull << MLX5_EVENT_TYPE_SRQ_LAST_WQE)       | \
                               (1ull << MLX5_EVENT_TYPE_SRQ_RQ_LIMIT))

struct map_eq_in {
        u64     mask;
        u32     reserved;
        u32     unmap_eqn;
};

struct cre_des_eq {
        u8      reserved[15];
        u8      eqn;
};

static int mlx5_cmd_destroy_eq(struct mlx5_core_dev *dev, u8 eqn)
{
        struct mlx5_destroy_eq_mbox_in in;
        struct mlx5_destroy_eq_mbox_out out;
        int err;

        memset(&in, 0, sizeof(in));
        memset(&out, 0, sizeof(out));
        in.hdr.opcode = cpu_to_be16(MLX5_CMD_OP_DESTROY_EQ);
        in.eqn = eqn;
        err = mlx5_cmd_exec(dev, &in, sizeof(in), &out, sizeof(out));
        if (err)
                goto ex;

        if (out.hdr.status)
                err = mlx5_cmd_status_to_err(&out.hdr);

ex:
        return err;
}

static struct mlx5_eqe *get_eqe(struct mlx5_eq *eq, u32 entry)
{
        return mlx5_buf_offset(&eq->buf, entry * MLX5_EQE_SIZE);
}

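/* An EQE is owned by software when its ownership bit matches the wrap
 * parity of the consumer index: the expected bit flips each time the
 * queue wraps, so a mismatch means hardware has not written this entry
 * yet and polling must stop.
 */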
static struct mlx5_eqe *next_eqe_sw(struct mlx5_eq *eq)
{
        struct mlx5_eqe *eqe = get_eqe(eq, eq->cons_index & (eq->nent - 1));

        return ((eqe->owner & 1) ^ !!(eq->cons_index & eq->nent)) ? NULL : eqe;
}

static const char *eqe_type_str(u8 type)
{
        switch (type) {
        case MLX5_EVENT_TYPE_COMP:
                return "MLX5_EVENT_TYPE_COMP";
        case MLX5_EVENT_TYPE_PATH_MIG:
                return "MLX5_EVENT_TYPE_PATH_MIG";
        case MLX5_EVENT_TYPE_COMM_EST:
                return "MLX5_EVENT_TYPE_COMM_EST";
        case MLX5_EVENT_TYPE_SQ_DRAINED:
                return "MLX5_EVENT_TYPE_SQ_DRAINED";
        case MLX5_EVENT_TYPE_SRQ_LAST_WQE:
                return "MLX5_EVENT_TYPE_SRQ_LAST_WQE";
        case MLX5_EVENT_TYPE_SRQ_RQ_LIMIT:
                return "MLX5_EVENT_TYPE_SRQ_RQ_LIMIT";
        case MLX5_EVENT_TYPE_CQ_ERROR:
                return "MLX5_EVENT_TYPE_CQ_ERROR";
        case MLX5_EVENT_TYPE_WQ_CATAS_ERROR:
                return "MLX5_EVENT_TYPE_WQ_CATAS_ERROR";
        case MLX5_EVENT_TYPE_PATH_MIG_FAILED:
                return "MLX5_EVENT_TYPE_PATH_MIG_FAILED";
        case MLX5_EVENT_TYPE_WQ_INVAL_REQ_ERROR:
                return "MLX5_EVENT_TYPE_WQ_INVAL_REQ_ERROR";
        case MLX5_EVENT_TYPE_WQ_ACCESS_ERROR:
                return "MLX5_EVENT_TYPE_WQ_ACCESS_ERROR";
        case MLX5_EVENT_TYPE_SRQ_CATAS_ERROR:
                return "MLX5_EVENT_TYPE_SRQ_CATAS_ERROR";
        case MLX5_EVENT_TYPE_INTERNAL_ERROR:
                return "MLX5_EVENT_TYPE_INTERNAL_ERROR";
        case MLX5_EVENT_TYPE_PORT_CHANGE:
                return "MLX5_EVENT_TYPE_PORT_CHANGE";
        case MLX5_EVENT_TYPE_GPIO_EVENT:
                return "MLX5_EVENT_TYPE_GPIO_EVENT";
        case MLX5_EVENT_TYPE_REMOTE_CONFIG:
                return "MLX5_EVENT_TYPE_REMOTE_CONFIG";
        case MLX5_EVENT_TYPE_DB_BF_CONGESTION:
                return "MLX5_EVENT_TYPE_DB_BF_CONGESTION";
        case MLX5_EVENT_TYPE_STALL_EVENT:
                return "MLX5_EVENT_TYPE_STALL_EVENT";
        case MLX5_EVENT_TYPE_CMD:
                return "MLX5_EVENT_TYPE_CMD";
        case MLX5_EVENT_TYPE_PAGE_REQUEST:
                return "MLX5_EVENT_TYPE_PAGE_REQUEST";
        case MLX5_EVENT_TYPE_PAGE_FAULT:
                return "MLX5_EVENT_TYPE_PAGE_FAULT";
        default:
                return "Unrecognized event";
        }
}

static enum mlx5_dev_event port_subtype_event(u8 subtype)
{
        switch (subtype) {
        case MLX5_PORT_CHANGE_SUBTYPE_DOWN:
                return MLX5_DEV_EVENT_PORT_DOWN;
        case MLX5_PORT_CHANGE_SUBTYPE_ACTIVE:
                return MLX5_DEV_EVENT_PORT_UP;
        case MLX5_PORT_CHANGE_SUBTYPE_INITIALIZED:
                return MLX5_DEV_EVENT_PORT_INITIALIZED;
        case MLX5_PORT_CHANGE_SUBTYPE_LID:
                return MLX5_DEV_EVENT_LID_CHANGE;
        case MLX5_PORT_CHANGE_SUBTYPE_PKEY:
                return MLX5_DEV_EVENT_PKEY_CHANGE;
        case MLX5_PORT_CHANGE_SUBTYPE_GUID:
                return MLX5_DEV_EVENT_GUID_CHANGE;
        case MLX5_PORT_CHANGE_SUBTYPE_CLIENT_REREG:
                return MLX5_DEV_EVENT_CLIENT_REREG;
        }
        return -1;
}

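/* Publish the current consumer index to hardware through the EQ
 * doorbell.  With arm != 0 the EQ is also re-armed so that the next
 * event triggers an interrupt; with arm == 0 only the consumer index is
 * updated.
 */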
static void eq_update_ci(struct mlx5_eq *eq, int arm)
{
        __be32 __iomem *addr = eq->doorbell + (arm ? 0 : 2);
        u32 val = (eq->cons_index & 0xffffff) | (eq->eqn << 24);
        __raw_writel((__force u32) cpu_to_be32(val), addr);
        /* We still want ordering, just not swabbing, so add a barrier */
        mb();
}

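/* Core EQ polling loop: consume software-owned EQEs, dispatch each one
 * to the matching completion or async handler, and update the consumer
 * index at least every MLX5_NUM_SPARE_EQE entries so the HCA never sees
 * the queue as overflowed.  Returns nonzero if any EQE was handled.
 */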
static int mlx5_eq_int(struct mlx5_core_dev *dev, struct mlx5_eq *eq)
{
        struct mlx5_eqe *eqe;
        int eqes_found = 0;
        int set_ci = 0;
        u32 cqn = -1;
        u32 rsn;
        u8 port;

        while ((eqe = next_eqe_sw(eq))) {
                /*
                 * Make sure we read EQ entry contents after we've
                 * checked the ownership bit.
                 */
                dma_rmb();

                mlx5_core_dbg(eq->dev, "eqn %d, eqe type %s\n",
                              eq->eqn, eqe_type_str(eqe->type));
                switch (eqe->type) {
                case MLX5_EVENT_TYPE_COMP:
                        cqn = be32_to_cpu(eqe->data.comp.cqn) & 0xffffff;
                        mlx5_cq_completion(dev, cqn);
                        break;

                case MLX5_EVENT_TYPE_PATH_MIG:
                case MLX5_EVENT_TYPE_COMM_EST:
                case MLX5_EVENT_TYPE_SQ_DRAINED:
                case MLX5_EVENT_TYPE_SRQ_LAST_WQE:
                case MLX5_EVENT_TYPE_WQ_CATAS_ERROR:
                case MLX5_EVENT_TYPE_PATH_MIG_FAILED:
                case MLX5_EVENT_TYPE_WQ_INVAL_REQ_ERROR:
                case MLX5_EVENT_TYPE_WQ_ACCESS_ERROR:
                        rsn = be32_to_cpu(eqe->data.qp_srq.qp_srq_n) & 0xffffff;
                        rsn |= (eqe->data.qp_srq.type << MLX5_USER_INDEX_LEN);
                        mlx5_core_dbg(dev, "event %s(%d) arrived on resource 0x%x\n",
                                      eqe_type_str(eqe->type), eqe->type, rsn);
                        mlx5_rsc_event(dev, rsn, eqe->type);
                        break;

                case MLX5_EVENT_TYPE_SRQ_RQ_LIMIT:
                case MLX5_EVENT_TYPE_SRQ_CATAS_ERROR:
                        rsn = be32_to_cpu(eqe->data.qp_srq.qp_srq_n) & 0xffffff;
                        mlx5_core_dbg(dev, "SRQ event %s(%d): srqn 0x%x\n",
                                      eqe_type_str(eqe->type), eqe->type, rsn);
                        mlx5_srq_event(dev, rsn, eqe->type);
                        break;

                case MLX5_EVENT_TYPE_CMD:
                        mlx5_cmd_comp_handler(dev, be32_to_cpu(eqe->data.cmd.vector));
                        break;

                case MLX5_EVENT_TYPE_PORT_CHANGE:
                        port = (eqe->data.port.port >> 4) & 0xf;
                        switch (eqe->sub_type) {
                        case MLX5_PORT_CHANGE_SUBTYPE_DOWN:
                        case MLX5_PORT_CHANGE_SUBTYPE_ACTIVE:
                        case MLX5_PORT_CHANGE_SUBTYPE_LID:
                        case MLX5_PORT_CHANGE_SUBTYPE_PKEY:
                        case MLX5_PORT_CHANGE_SUBTYPE_GUID:
                        case MLX5_PORT_CHANGE_SUBTYPE_CLIENT_REREG:
                        case MLX5_PORT_CHANGE_SUBTYPE_INITIALIZED:
                                if (dev->event)
                                        dev->event(dev, port_subtype_event(eqe->sub_type),
                                                   (unsigned long)port);
                                break;
                        default:
                                mlx5_core_warn(dev, "Port event with unrecognized subtype: port %d, sub_type %d\n",
                                               port, eqe->sub_type);
                        }
                        break;
                case MLX5_EVENT_TYPE_CQ_ERROR:
                        cqn = be32_to_cpu(eqe->data.cq_err.cqn) & 0xffffff;
                        mlx5_core_warn(dev, "CQ error on CQN 0x%x, syndrome 0x%x\n",
                                       cqn, eqe->data.cq_err.syndrome);
                        mlx5_cq_event(dev, cqn, eqe->type);
                        break;

                case MLX5_EVENT_TYPE_PAGE_REQUEST:
                        {
                                u16 func_id = be16_to_cpu(eqe->data.req_pages.func_id);
                                s32 npages = be32_to_cpu(eqe->data.req_pages.num_pages);

                                mlx5_core_dbg(dev, "page request for func 0x%x, npages %d\n",
                                              func_id, npages);
                                mlx5_core_req_pages_handler(dev, func_id, npages);
                        }
                        break;

#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
                case MLX5_EVENT_TYPE_PAGE_FAULT:
                        mlx5_eq_pagefault(dev, eqe);
                        break;
#endif

#ifdef CONFIG_MLX5_CORE_EN
                case MLX5_EVENT_TYPE_NIC_VPORT_CHANGE:
                        mlx5_eswitch_vport_event(dev->priv.eswitch, eqe);
                        break;
#endif
                default:
                        mlx5_core_warn(dev, "Unhandled event 0x%x on EQ 0x%x\n",
                                       eqe->type, eq->eqn);
                        break;
                }

                ++eq->cons_index;
                eqes_found = 1;
                ++set_ci;

                /* The HCA will think the queue has overflowed if we
                 * don't tell it we've been processing events.  We
                 * create our EQs with MLX5_NUM_SPARE_EQE extra
                 * entries, so we must update our consumer index at
                 * least that often.
                 */
                if (unlikely(set_ci >= MLX5_NUM_SPARE_EQE)) {
                        eq_update_ci(eq, 0);
                        set_ci = 0;
                }
        }

        eq_update_ci(eq, 1);

        if (cqn != -1)
                tasklet_schedule(&eq->tasklet_ctx.task);

        return eqes_found;
}

static irqreturn_t mlx5_msix_handler(int irq, void *eq_ptr)
{
        struct mlx5_eq *eq = eq_ptr;
        struct mlx5_core_dev *dev = eq->dev;

        mlx5_eq_int(dev, eq);

        /* MSI-X vectors always belong to us */
        return IRQ_HANDLED;
}

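/* Hand every EQE to hardware by writing the initial ownership value, so
 * that no entry is mistaken for a software-owned one before the HCA has
 * actually written it.
 */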
static void init_eq_buf(struct mlx5_eq *eq)
{
        struct mlx5_eqe *eqe;
        int i;

        for (i = 0; i < eq->nent; i++) {
                eqe = get_eqe(eq, i);
                eqe->owner = MLX5_EQE_OWNER_INIT_VAL;
        }
}

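/* Allocate an EQ buffer, create the EQ in firmware with the requested
 * event mask, hook it up to the MSI-X vector at vecidx and register it
 * with debugfs.  The queue is sized to nent plus MLX5_NUM_SPARE_EQE
 * entries, rounded up to a power of two, and is left armed on return.
 */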
int mlx5_create_map_eq(struct mlx5_core_dev *dev, struct mlx5_eq *eq, u8 vecidx,
                       int nent, u64 mask, const char *name, struct mlx5_uar *uar)
{
        struct mlx5_priv *priv = &dev->priv;
        struct mlx5_create_eq_mbox_in *in;
        struct mlx5_create_eq_mbox_out out;
        int err;
        int inlen;

        eq->nent = roundup_pow_of_two(nent + MLX5_NUM_SPARE_EQE);
        eq->cons_index = 0;
        err = mlx5_buf_alloc(dev, eq->nent * MLX5_EQE_SIZE, &eq->buf);
        if (err)
                return err;

        init_eq_buf(eq);

        inlen = sizeof(*in) + sizeof(in->pas[0]) * eq->buf.npages;
        in = mlx5_vzalloc(inlen);
        if (!in) {
                err = -ENOMEM;
                goto err_buf;
        }
        memset(&out, 0, sizeof(out));

        mlx5_fill_page_array(&eq->buf, in->pas);

        in->hdr.opcode = cpu_to_be16(MLX5_CMD_OP_CREATE_EQ);
        in->ctx.log_sz_usr_page = cpu_to_be32(ilog2(eq->nent) << 24 | uar->index);
        in->ctx.intr = vecidx;
        in->ctx.log_page_size = eq->buf.page_shift - MLX5_ADAPTER_PAGE_SHIFT;
        in->events_mask = cpu_to_be64(mask);

        err = mlx5_cmd_exec(dev, in, inlen, &out, sizeof(out));
        if (err)
                goto err_in;

        if (out.hdr.status) {
                err = mlx5_cmd_status_to_err(&out.hdr);
                goto err_in;
        }

        snprintf(priv->irq_info[vecidx].name, MLX5_MAX_IRQ_NAME, "%s@pci:%s",
                 name, pci_name(dev->pdev));

        eq->eqn = out.eq_number;
        eq->irqn = priv->msix_arr[vecidx].vector;
        eq->dev = dev;
        eq->doorbell = uar->map + MLX5_EQ_DOORBEL_OFFSET;
        err = request_irq(eq->irqn, mlx5_msix_handler, 0,
                          priv->irq_info[vecidx].name, eq);
        if (err)
                goto err_eq;

        err = mlx5_debug_eq_add(dev, eq);
        if (err)
                goto err_irq;

        INIT_LIST_HEAD(&eq->tasklet_ctx.list);
        INIT_LIST_HEAD(&eq->tasklet_ctx.process_list);
        spin_lock_init(&eq->tasklet_ctx.lock);
        tasklet_init(&eq->tasklet_ctx.task, mlx5_cq_tasklet_cb,
                     (unsigned long)&eq->tasklet_ctx);

        /* EQs are created in ARMED state */
        eq_update_ci(eq, 1);

        kvfree(in);
        return 0;

err_irq:
        free_irq(priv->msix_arr[vecidx].vector, eq);

err_eq:
        mlx5_cmd_destroy_eq(dev, eq->eqn);

err_in:
        kvfree(in);

err_buf:
        mlx5_buf_free(dev, &eq->buf);
        return err;
}
EXPORT_SYMBOL_GPL(mlx5_create_map_eq);

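/* Tear down an EQ created by mlx5_create_map_eq(): unregister it from
 * debugfs, release its interrupt, destroy it in firmware and free the
 * backing buffer.  A failure to destroy the EQ in firmware is reported,
 * but the remaining cleanup still runs.
 */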
int mlx5_destroy_unmap_eq(struct mlx5_core_dev *dev, struct mlx5_eq *eq)
{
        int err;

        mlx5_debug_eq_remove(dev, eq);
        free_irq(eq->irqn, eq);
        err = mlx5_cmd_destroy_eq(dev, eq->eqn);
        if (err)
                mlx5_core_warn(dev, "failed to destroy a previously created eq: eqn %d\n",
                               eq->eqn);
        synchronize_irq(eq->irqn);
        tasklet_disable(&eq->tasklet_ctx.task);
        mlx5_buf_free(dev, &eq->buf);

        return err;
}
EXPORT_SYMBOL_GPL(mlx5_destroy_unmap_eq);

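/* Note: the vecidx argument is currently ignored; the async EQ's MSI-X
 * vector is returned for every caller.
 */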
u32 mlx5_get_msix_vec(struct mlx5_core_dev *dev, int vecidx)
{
        return dev->priv.msix_arr[MLX5_EQ_VEC_ASYNC].vector;
}

int mlx5_eq_init(struct mlx5_core_dev *dev)
{
        int err;

        spin_lock_init(&dev->priv.eq_table.lock);

        err = mlx5_eq_debugfs_init(dev);

        return err;
}

void mlx5_eq_cleanup(struct mlx5_core_dev *dev)
{
        mlx5_eq_debugfs_cleanup(dev);
}

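/* Bring up the three core EQs in order: the command EQ (after which the
 * command interface switches from polling to event mode), the async EQ
 * with the full async event mask, and the pages EQ used for firmware
 * page requests.  On failure, the EQs created so far are torn down.
 */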
int mlx5_start_eqs(struct mlx5_core_dev *dev)
{
        struct mlx5_eq_table *table = &dev->priv.eq_table;
        u32 async_event_mask = MLX5_ASYNC_EVENT_MASK;
        int err;

        if (MLX5_CAP_GEN(dev, pg))
                async_event_mask |= (1ull << MLX5_EVENT_TYPE_PAGE_FAULT);

        if (MLX5_CAP_GEN(dev, port_type) == MLX5_CAP_PORT_TYPE_ETH &&
            MLX5_CAP_GEN(dev, vport_group_manager) &&
            mlx5_core_is_pf(dev))
                async_event_mask |= (1ull << MLX5_EVENT_TYPE_NIC_VPORT_CHANGE);

        err = mlx5_create_map_eq(dev, &table->cmd_eq, MLX5_EQ_VEC_CMD,
                                 MLX5_NUM_CMD_EQE, 1ull << MLX5_EVENT_TYPE_CMD,
                                 "mlx5_cmd_eq", &dev->priv.uuari.uars[0]);
        if (err) {
                mlx5_core_warn(dev, "failed to create cmd EQ %d\n", err);
                return err;
        }

        mlx5_cmd_use_events(dev);

        err = mlx5_create_map_eq(dev, &table->async_eq, MLX5_EQ_VEC_ASYNC,
                                 MLX5_NUM_ASYNC_EQE, async_event_mask,
                                 "mlx5_async_eq", &dev->priv.uuari.uars[0]);
        if (err) {
                mlx5_core_warn(dev, "failed to create async EQ %d\n", err);
                goto err1;
        }

        err = mlx5_create_map_eq(dev, &table->pages_eq,
                                 MLX5_EQ_VEC_PAGES,
                                 /* TODO: sriov max_vf + */ 1,
                                 1 << MLX5_EVENT_TYPE_PAGE_REQUEST, "mlx5_pages_eq",
                                 &dev->priv.uuari.uars[0]);
        if (err) {
                mlx5_core_warn(dev, "failed to create pages EQ %d\n", err);
                goto err2;
        }

        return err;

err2:
        mlx5_destroy_unmap_eq(dev, &table->async_eq);

err1:
        mlx5_cmd_use_polling(dev);
        mlx5_destroy_unmap_eq(dev, &table->cmd_eq);
        return err;
}

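/* Destroy the core EQs in reverse order of creation.  The command
 * interface is switched back to polling before the command EQ goes
 * away; if destroying the command EQ fails, event mode is restored.
 */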
int mlx5_stop_eqs(struct mlx5_core_dev *dev)
{
        struct mlx5_eq_table *table = &dev->priv.eq_table;
        int err;

        err = mlx5_destroy_unmap_eq(dev, &table->pages_eq);
        if (err)
                return err;

        mlx5_destroy_unmap_eq(dev, &table->async_eq);
        mlx5_cmd_use_polling(dev);

        err = mlx5_destroy_unmap_eq(dev, &table->cmd_eq);
        if (err)
                mlx5_cmd_use_events(dev);

        return err;
}

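/* Query an EQ's context from firmware into the caller-supplied mailbox
 * output buffer.
 */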
int mlx5_core_eq_query(struct mlx5_core_dev *dev, struct mlx5_eq *eq,
                       struct mlx5_query_eq_mbox_out *out, int outlen)
{
        struct mlx5_query_eq_mbox_in in;
        int err;

        memset(&in, 0, sizeof(in));
        memset(out, 0, outlen);
        in.hdr.opcode = cpu_to_be16(MLX5_CMD_OP_QUERY_EQ);
        in.eqn = eq->eqn;
        err = mlx5_cmd_exec(dev, &in, sizeof(in), out, outlen);
        if (err)
                return err;

        if (out->hdr.status)
                err = mlx5_cmd_status_to_err(&out->hdr);

        return err;
}
EXPORT_SYMBOL_GPL(mlx5_core_eq_query);