linux/drivers/net/ethernet/mellanox/mlx5/core/eq.c
/*
 * Copyright (c) 2013-2015, Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/mlx5/driver.h>
#include <linux/mlx5/cmd.h>
#include "mlx5_core.h"

enum {
	MLX5_EQE_SIZE		= sizeof(struct mlx5_eqe),
	MLX5_EQE_OWNER_INIT_VAL	= 0x1,
};

enum {
	MLX5_EQ_STATE_ARMED		= 0x9,
	MLX5_EQ_STATE_FIRED		= 0xa,
	MLX5_EQ_STATE_ALWAYS_ARMED	= 0xb,
};

enum {
	MLX5_NUM_SPARE_EQE	= 0x80,
	MLX5_NUM_ASYNC_EQE	= 0x100,
	MLX5_NUM_CMD_EQE	= 32,
};

enum {
	MLX5_EQ_DOORBEL_OFFSET	= 0x40,
};

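/*
 * Default event mask for the async EQ: one bit per MLX5_EVENT_TYPE_* value
 * the EQ is subscribed to.  mlx5_start_eqs() below extends this mask with
 * MLX5_EVENT_TYPE_PAGE_FAULT when the device reports the pg capability.
 */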
#define MLX5_ASYNC_EVENT_MASK ((1ull << MLX5_EVENT_TYPE_PATH_MIG)	     | \
			       (1ull << MLX5_EVENT_TYPE_COMM_EST)	     | \
			       (1ull << MLX5_EVENT_TYPE_SQ_DRAINED)	     | \
			       (1ull << MLX5_EVENT_TYPE_CQ_ERROR)	     | \
			       (1ull << MLX5_EVENT_TYPE_WQ_CATAS_ERROR)	     | \
			       (1ull << MLX5_EVENT_TYPE_PATH_MIG_FAILED)     | \
			       (1ull << MLX5_EVENT_TYPE_WQ_INVAL_REQ_ERROR)  | \
			       (1ull << MLX5_EVENT_TYPE_WQ_ACCESS_ERROR)     | \
			       (1ull << MLX5_EVENT_TYPE_PORT_CHANGE)	     | \
			       (1ull << MLX5_EVENT_TYPE_SRQ_CATAS_ERROR)     | \
			       (1ull << MLX5_EVENT_TYPE_SRQ_LAST_WQE)	     | \
			       (1ull << MLX5_EVENT_TYPE_SRQ_RQ_LIMIT))

struct map_eq_in {
	u64	mask;
	u32	reserved;
	u32	unmap_eqn;
};

struct cre_des_eq {
	u8	reserved[15];
	u8	eqn;
};

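/*
 * Issue a DESTROY_EQ mailbox command for the given EQ number, translating a
 * firmware status in the output mailbox into an errno where applicable.
 */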
static int mlx5_cmd_destroy_eq(struct mlx5_core_dev *dev, u8 eqn)
{
	struct mlx5_destroy_eq_mbox_in in;
	struct mlx5_destroy_eq_mbox_out out;
	int err;

	memset(&in, 0, sizeof(in));
	memset(&out, 0, sizeof(out));
	in.hdr.opcode = cpu_to_be16(MLX5_CMD_OP_DESTROY_EQ);
	in.eqn = eqn;
	err = mlx5_cmd_exec(dev, &in, sizeof(in), &out, sizeof(out));
	if (!err)
		goto ex;

	if (out.hdr.status)
		err = mlx5_cmd_status_to_err(&out.hdr);

ex:
	return err;
}

static struct mlx5_eqe *get_eqe(struct mlx5_eq *eq, u32 entry)
{
	return mlx5_buf_offset(&eq->buf, entry * MLX5_EQE_SIZE);
}

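/*
 * Return the EQE at the current consumer index if hardware has handed it to
 * software, or NULL if the queue is empty.  Ownership alternates on every
 * pass through the queue: the entry is valid when its owner bit matches the
 * pass-parity bit of the consumer index (cons_index & nent).
 */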
static struct mlx5_eqe *next_eqe_sw(struct mlx5_eq *eq)
{
	struct mlx5_eqe *eqe = get_eqe(eq, eq->cons_index & (eq->nent - 1));

	return ((eqe->owner & 1) ^ !!(eq->cons_index & eq->nent)) ? NULL : eqe;
}

static const char *eqe_type_str(u8 type)
{
	switch (type) {
	case MLX5_EVENT_TYPE_COMP:
		return "MLX5_EVENT_TYPE_COMP";
	case MLX5_EVENT_TYPE_PATH_MIG:
		return "MLX5_EVENT_TYPE_PATH_MIG";
	case MLX5_EVENT_TYPE_COMM_EST:
		return "MLX5_EVENT_TYPE_COMM_EST";
	case MLX5_EVENT_TYPE_SQ_DRAINED:
		return "MLX5_EVENT_TYPE_SQ_DRAINED";
	case MLX5_EVENT_TYPE_SRQ_LAST_WQE:
		return "MLX5_EVENT_TYPE_SRQ_LAST_WQE";
	case MLX5_EVENT_TYPE_SRQ_RQ_LIMIT:
		return "MLX5_EVENT_TYPE_SRQ_RQ_LIMIT";
	case MLX5_EVENT_TYPE_CQ_ERROR:
		return "MLX5_EVENT_TYPE_CQ_ERROR";
	case MLX5_EVENT_TYPE_WQ_CATAS_ERROR:
		return "MLX5_EVENT_TYPE_WQ_CATAS_ERROR";
	case MLX5_EVENT_TYPE_PATH_MIG_FAILED:
		return "MLX5_EVENT_TYPE_PATH_MIG_FAILED";
	case MLX5_EVENT_TYPE_WQ_INVAL_REQ_ERROR:
		return "MLX5_EVENT_TYPE_WQ_INVAL_REQ_ERROR";
	case MLX5_EVENT_TYPE_WQ_ACCESS_ERROR:
		return "MLX5_EVENT_TYPE_WQ_ACCESS_ERROR";
	case MLX5_EVENT_TYPE_SRQ_CATAS_ERROR:
		return "MLX5_EVENT_TYPE_SRQ_CATAS_ERROR";
	case MLX5_EVENT_TYPE_INTERNAL_ERROR:
		return "MLX5_EVENT_TYPE_INTERNAL_ERROR";
	case MLX5_EVENT_TYPE_PORT_CHANGE:
		return "MLX5_EVENT_TYPE_PORT_CHANGE";
	case MLX5_EVENT_TYPE_GPIO_EVENT:
		return "MLX5_EVENT_TYPE_GPIO_EVENT";
	case MLX5_EVENT_TYPE_REMOTE_CONFIG:
		return "MLX5_EVENT_TYPE_REMOTE_CONFIG";
	case MLX5_EVENT_TYPE_DB_BF_CONGESTION:
		return "MLX5_EVENT_TYPE_DB_BF_CONGESTION";
	case MLX5_EVENT_TYPE_STALL_EVENT:
		return "MLX5_EVENT_TYPE_STALL_EVENT";
	case MLX5_EVENT_TYPE_CMD:
		return "MLX5_EVENT_TYPE_CMD";
	case MLX5_EVENT_TYPE_PAGE_REQUEST:
		return "MLX5_EVENT_TYPE_PAGE_REQUEST";
	case MLX5_EVENT_TYPE_PAGE_FAULT:
		return "MLX5_EVENT_TYPE_PAGE_FAULT";
	default:
		return "Unrecognized event";
	}
}

static enum mlx5_dev_event port_subtype_event(u8 subtype)
{
	switch (subtype) {
	case MLX5_PORT_CHANGE_SUBTYPE_DOWN:
		return MLX5_DEV_EVENT_PORT_DOWN;
	case MLX5_PORT_CHANGE_SUBTYPE_ACTIVE:
		return MLX5_DEV_EVENT_PORT_UP;
	case MLX5_PORT_CHANGE_SUBTYPE_INITIALIZED:
		return MLX5_DEV_EVENT_PORT_INITIALIZED;
	case MLX5_PORT_CHANGE_SUBTYPE_LID:
		return MLX5_DEV_EVENT_LID_CHANGE;
	case MLX5_PORT_CHANGE_SUBTYPE_PKEY:
		return MLX5_DEV_EVENT_PKEY_CHANGE;
	case MLX5_PORT_CHANGE_SUBTYPE_GUID:
		return MLX5_DEV_EVENT_GUID_CHANGE;
	case MLX5_PORT_CHANGE_SUBTYPE_CLIENT_REREG:
		return MLX5_DEV_EVENT_CLIENT_REREG;
	}
	return -1;
}

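/*
 * Ring the EQ consumer index doorbell: EQ number in the top byte, consumer
 * index in the low 24 bits.  When @arm is set the write is directed to the
 * arm half of the doorbell (offset 0), rearming the EQ for further
 * interrupts; otherwise only the consumer index is updated.
 */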
static void eq_update_ci(struct mlx5_eq *eq, int arm)
{
	__be32 __iomem *addr = eq->doorbell + (arm ? 0 : 2);
	u32 val = (eq->cons_index & 0xffffff) | (eq->eqn << 24);
	__raw_writel((__force u32) cpu_to_be32(val), addr);
	/* We still want ordering, just not swabbing, so add a barrier */
	mb();
}

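/*
 * Poll the EQ and dispatch every pending EQE: completion events go to the CQ
 * layer, QP/SRQ and command events to their handlers, while port, CQ error
 * and page request events are handled inline.  The consumer index is pushed
 * to hardware at least every MLX5_NUM_SPARE_EQE entries, and the EQ is
 * rearmed once the queue has been drained.
 */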
static int mlx5_eq_int(struct mlx5_core_dev *dev, struct mlx5_eq *eq)
{
	struct mlx5_eqe *eqe;
	int eqes_found = 0;
	int set_ci = 0;
	u32 cqn;
	u32 rsn;
	u8 port;

	while ((eqe = next_eqe_sw(eq))) {
		/*
		 * Make sure we read EQ entry contents after we've
		 * checked the ownership bit.
		 */
		dma_rmb();

		mlx5_core_dbg(eq->dev, "eqn %d, eqe type %s\n",
			      eq->eqn, eqe_type_str(eqe->type));
		switch (eqe->type) {
		case MLX5_EVENT_TYPE_COMP:
			cqn = be32_to_cpu(eqe->data.comp.cqn) & 0xffffff;
			mlx5_cq_completion(dev, cqn);
			break;

		case MLX5_EVENT_TYPE_PATH_MIG:
		case MLX5_EVENT_TYPE_COMM_EST:
		case MLX5_EVENT_TYPE_SQ_DRAINED:
		case MLX5_EVENT_TYPE_SRQ_LAST_WQE:
		case MLX5_EVENT_TYPE_WQ_CATAS_ERROR:
		case MLX5_EVENT_TYPE_PATH_MIG_FAILED:
		case MLX5_EVENT_TYPE_WQ_INVAL_REQ_ERROR:
		case MLX5_EVENT_TYPE_WQ_ACCESS_ERROR:
			rsn = be32_to_cpu(eqe->data.qp_srq.qp_srq_n) & 0xffffff;
			mlx5_core_dbg(dev, "event %s(%d) arrived on resource 0x%x\n",
				      eqe_type_str(eqe->type), eqe->type, rsn);
			mlx5_rsc_event(dev, rsn, eqe->type);
			break;

		case MLX5_EVENT_TYPE_SRQ_RQ_LIMIT:
		case MLX5_EVENT_TYPE_SRQ_CATAS_ERROR:
			rsn = be32_to_cpu(eqe->data.qp_srq.qp_srq_n) & 0xffffff;
			mlx5_core_dbg(dev, "SRQ event %s(%d): srqn 0x%x\n",
				      eqe_type_str(eqe->type), eqe->type, rsn);
			mlx5_srq_event(dev, rsn, eqe->type);
			break;

		case MLX5_EVENT_TYPE_CMD:
			mlx5_cmd_comp_handler(dev, be32_to_cpu(eqe->data.cmd.vector));
			break;

		case MLX5_EVENT_TYPE_PORT_CHANGE:
			port = (eqe->data.port.port >> 4) & 0xf;
			switch (eqe->sub_type) {
			case MLX5_PORT_CHANGE_SUBTYPE_DOWN:
			case MLX5_PORT_CHANGE_SUBTYPE_ACTIVE:
			case MLX5_PORT_CHANGE_SUBTYPE_LID:
			case MLX5_PORT_CHANGE_SUBTYPE_PKEY:
			case MLX5_PORT_CHANGE_SUBTYPE_GUID:
			case MLX5_PORT_CHANGE_SUBTYPE_CLIENT_REREG:
			case MLX5_PORT_CHANGE_SUBTYPE_INITIALIZED:
				if (dev->event)
					dev->event(dev, port_subtype_event(eqe->sub_type),
						   (unsigned long)port);
				break;
			default:
				mlx5_core_warn(dev, "Port event with unrecognized subtype: port %d, sub_type %d\n",
					       port, eqe->sub_type);
			}
			break;
		case MLX5_EVENT_TYPE_CQ_ERROR:
			cqn = be32_to_cpu(eqe->data.cq_err.cqn) & 0xffffff;
			mlx5_core_warn(dev, "CQ error on CQN 0x%x, syndrome 0x%x\n",
				       cqn, eqe->data.cq_err.syndrome);
			mlx5_cq_event(dev, cqn, eqe->type);
			break;

		case MLX5_EVENT_TYPE_PAGE_REQUEST:
			{
				u16 func_id = be16_to_cpu(eqe->data.req_pages.func_id);
				s32 npages = be32_to_cpu(eqe->data.req_pages.num_pages);

				mlx5_core_dbg(dev, "page request for func 0x%x, npages %d\n",
					      func_id, npages);
				mlx5_core_req_pages_handler(dev, func_id, npages);
			}
			break;

#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
		case MLX5_EVENT_TYPE_PAGE_FAULT:
			mlx5_eq_pagefault(dev, eqe);
			break;
#endif

		default:
			mlx5_core_warn(dev, "Unhandled event 0x%x on EQ 0x%x\n",
				       eqe->type, eq->eqn);
			break;
		}

		++eq->cons_index;
		eqes_found = 1;
		++set_ci;

		/* The HCA will think the queue has overflowed if we
		 * don't tell it we've been processing events.  We
		 * create our EQs with MLX5_NUM_SPARE_EQE extra
		 * entries, so we must update our consumer index at
		 * least that often.
		 */
		if (unlikely(set_ci >= MLX5_NUM_SPARE_EQE)) {
			eq_update_ci(eq, 0);
			set_ci = 0;
		}
	}

	eq_update_ci(eq, 1);

	return eqes_found;
}

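/*
 * MSI-X interrupt handler.  Each EQ is bound to its own vector, so the EQ
 * pointer arrives as the dev_id cookie registered in mlx5_create_map_eq().
 */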
static irqreturn_t mlx5_msix_handler(int irq, void *eq_ptr)
{
	struct mlx5_eq *eq = eq_ptr;
	struct mlx5_core_dev *dev = eq->dev;

	mlx5_eq_int(dev, eq);

	/* MSI-X vectors always belong to us */
	return IRQ_HANDLED;
}

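/*
 * Give every entry the initial owner value so that the whole queue appears
 * hardware-owned until the device writes its first pass of events.
 */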
static void init_eq_buf(struct mlx5_eq *eq)
{
	struct mlx5_eqe *eqe;
	int i;

	for (i = 0; i < eq->nent; i++) {
		eqe = get_eqe(eq, i);
		eqe->owner = MLX5_EQE_OWNER_INIT_VAL;
	}
}

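/*
 * Create an EQ and map it to an MSI-X vector: allocate and initialize the
 * queue buffer (rounded up to a power of two, with MLX5_NUM_SPARE_EQE slack),
 * issue the CREATE_EQ command with the requested event mask, request the
 * interrupt, register the EQ with debugfs and leave it armed.
 */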
int mlx5_create_map_eq(struct mlx5_core_dev *dev, struct mlx5_eq *eq, u8 vecidx,
		       int nent, u64 mask, const char *name, struct mlx5_uar *uar)
{
	struct mlx5_priv *priv = &dev->priv;
	struct mlx5_create_eq_mbox_in *in;
	struct mlx5_create_eq_mbox_out out;
	int err;
	int inlen;

	eq->nent = roundup_pow_of_two(nent + MLX5_NUM_SPARE_EQE);
	err = mlx5_buf_alloc(dev, eq->nent * MLX5_EQE_SIZE, &eq->buf);
	if (err)
		return err;

	init_eq_buf(eq);

	inlen = sizeof(*in) + sizeof(in->pas[0]) * eq->buf.npages;
	in = mlx5_vzalloc(inlen);
	if (!in) {
		err = -ENOMEM;
		goto err_buf;
	}
	memset(&out, 0, sizeof(out));

	mlx5_fill_page_array(&eq->buf, in->pas);

	in->hdr.opcode = cpu_to_be16(MLX5_CMD_OP_CREATE_EQ);
	in->ctx.log_sz_usr_page = cpu_to_be32(ilog2(eq->nent) << 24 | uar->index);
	in->ctx.intr = vecidx;
	in->ctx.log_page_size = eq->buf.page_shift - MLX5_ADAPTER_PAGE_SHIFT;
	in->events_mask = cpu_to_be64(mask);

	err = mlx5_cmd_exec(dev, in, inlen, &out, sizeof(out));
	if (err)
		goto err_in;

	if (out.hdr.status) {
		err = mlx5_cmd_status_to_err(&out.hdr);
		goto err_in;
	}

	snprintf(priv->irq_info[vecidx].name, MLX5_MAX_IRQ_NAME, "%s@pci:%s",
		 name, pci_name(dev->pdev));

	eq->eqn = out.eq_number;
	eq->irqn = vecidx;
	eq->dev = dev;
	eq->doorbell = uar->map + MLX5_EQ_DOORBEL_OFFSET;
	err = request_irq(priv->msix_arr[vecidx].vector, mlx5_msix_handler, 0,
			  priv->irq_info[vecidx].name, eq);
	if (err)
		goto err_eq;

	err = mlx5_debug_eq_add(dev, eq);
	if (err)
		goto err_irq;

	/* EQs are created in ARMED state
	 */
	eq_update_ci(eq, 1);

	kvfree(in);
	return 0;

err_irq:
	free_irq(priv->msix_arr[vecidx].vector, eq);

err_eq:
	mlx5_cmd_destroy_eq(dev, eq->eqn);

err_in:
	kvfree(in);

err_buf:
	mlx5_buf_free(dev, &eq->buf);
	return err;
}
EXPORT_SYMBOL_GPL(mlx5_create_map_eq);

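/*
 * Tear down an EQ created by mlx5_create_map_eq(): remove its debugfs entry,
 * free the interrupt, destroy the EQ in firmware and release the buffer.
 */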
int mlx5_destroy_unmap_eq(struct mlx5_core_dev *dev, struct mlx5_eq *eq)
{
	int err;

	mlx5_debug_eq_remove(dev, eq);
	free_irq(dev->priv.msix_arr[eq->irqn].vector, eq);
	err = mlx5_cmd_destroy_eq(dev, eq->eqn);
	if (err)
		mlx5_core_warn(dev, "failed to destroy a previously created eq: eqn %d\n",
			       eq->eqn);
	synchronize_irq(dev->priv.msix_arr[eq->irqn].vector);
	mlx5_buf_free(dev, &eq->buf);

	return err;
}
EXPORT_SYMBOL_GPL(mlx5_destroy_unmap_eq);

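/* Set up the EQ table lock and the debugfs directory used by the EQs. */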
int mlx5_eq_init(struct mlx5_core_dev *dev)
{
	int err;

	spin_lock_init(&dev->priv.eq_table.lock);

	err = mlx5_eq_debugfs_init(dev);

	return err;
}

void mlx5_eq_cleanup(struct mlx5_core_dev *dev)
{
	mlx5_eq_debugfs_cleanup(dev);
}

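/*
 * Bring up the three core EQs: the command EQ (after which the command
 * interface switches from polling to event-driven completions), the async EQ
 * carrying MLX5_ASYNC_EVENT_MASK events, and the pages EQ for page requests.
 * On failure the EQs created so far are torn down in reverse order.
 */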
int mlx5_start_eqs(struct mlx5_core_dev *dev)
{
	struct mlx5_eq_table *table = &dev->priv.eq_table;
	u32 async_event_mask = MLX5_ASYNC_EVENT_MASK;
	int err;

	if (MLX5_CAP_GEN(dev, pg))
		async_event_mask |= (1ull << MLX5_EVENT_TYPE_PAGE_FAULT);

	err = mlx5_create_map_eq(dev, &table->cmd_eq, MLX5_EQ_VEC_CMD,
				 MLX5_NUM_CMD_EQE, 1ull << MLX5_EVENT_TYPE_CMD,
				 "mlx5_cmd_eq", &dev->priv.uuari.uars[0]);
	if (err) {
		mlx5_core_warn(dev, "failed to create cmd EQ %d\n", err);
		return err;
	}

	mlx5_cmd_use_events(dev);

	err = mlx5_create_map_eq(dev, &table->async_eq, MLX5_EQ_VEC_ASYNC,
				 MLX5_NUM_ASYNC_EQE, async_event_mask,
				 "mlx5_async_eq", &dev->priv.uuari.uars[0]);
	if (err) {
		mlx5_core_warn(dev, "failed to create async EQ %d\n", err);
		goto err1;
	}

	err = mlx5_create_map_eq(dev, &table->pages_eq,
				 MLX5_EQ_VEC_PAGES,
				 /* TODO: sriov max_vf + */ 1,
				 1 << MLX5_EVENT_TYPE_PAGE_REQUEST, "mlx5_pages_eq",
				 &dev->priv.uuari.uars[0]);
	if (err) {
		mlx5_core_warn(dev, "failed to create pages EQ %d\n", err);
		goto err2;
	}

	return err;

err2:
	mlx5_destroy_unmap_eq(dev, &table->async_eq);

err1:
	mlx5_cmd_use_polling(dev);
	mlx5_destroy_unmap_eq(dev, &table->cmd_eq);
	return err;
}

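/*
 * Destroy the core EQs in reverse creation order, switching the command
 * interface back to polling before the command EQ goes away.
 */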
int mlx5_stop_eqs(struct mlx5_core_dev *dev)
{
	struct mlx5_eq_table *table = &dev->priv.eq_table;
	int err;

	err = mlx5_destroy_unmap_eq(dev, &table->pages_eq);
	if (err)
		return err;

	mlx5_destroy_unmap_eq(dev, &table->async_eq);
	mlx5_cmd_use_polling(dev);

	err = mlx5_destroy_unmap_eq(dev, &table->cmd_eq);
	if (err)
		mlx5_cmd_use_events(dev);

	return err;
}

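/*
 * Query an EQ's context from firmware with the QUERY_EQ command; the caller
 * supplies the output mailbox of @outlen bytes.
 */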
int mlx5_core_eq_query(struct mlx5_core_dev *dev, struct mlx5_eq *eq,
		       struct mlx5_query_eq_mbox_out *out, int outlen)
{
	struct mlx5_query_eq_mbox_in in;
	int err;

	memset(&in, 0, sizeof(in));
	memset(out, 0, outlen);
	in.hdr.opcode = cpu_to_be16(MLX5_CMD_OP_QUERY_EQ);
	in.eqn = eq->eqn;
	err = mlx5_cmd_exec(dev, &in, sizeof(in), out, outlen);
	if (err)
		return err;

	if (out->hdr.status)
		err = mlx5_cmd_status_to_err(&out->hdr);

	return err;
}
EXPORT_SYMBOL_GPL(mlx5_core_eq_query);