linux/drivers/net/ethernet/mellanox/mlx5/core/srq.c
/*
 * Copyright (c) 2013-2015, Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/mlx5/driver.h>
#include <linux/mlx5/cmd.h>
#include <linux/mlx5/srq.h>
#include <rdma/ib_verbs.h>
#include "mlx5_core.h"
#include <linux/mlx5/transobj.h>

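/*
 * Dispatch an asynchronous event to the SRQ's event handler.  A reference
 * is held while the handler runs so the SRQ cannot be freed under it.
 */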
void mlx5_srq_event(struct mlx5_core_dev *dev, u32 srqn, int event_type)
{
        struct mlx5_srq_table *table = &dev->priv.srq_table;
        struct mlx5_core_srq *srq;

        spin_lock(&table->lock);

        srq = radix_tree_lookup(&table->tree, srqn);
        if (srq)
                atomic_inc(&srq->refcount);

        spin_unlock(&table->lock);

        if (!srq) {
                mlx5_core_warn(dev, "Async event for bogus SRQ 0x%08x\n", srqn);
                return;
        }

        srq->event(srq, event_type);

        if (atomic_dec_and_test(&srq->refcount))
                complete(&srq->free);
}

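/*
 * Size in bytes of the PAS (physical address) array needed to map the
 * receive buffer, taking the page offset into account.
 */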
static int get_pas_size(struct mlx5_srq_attr *in)
{
        u32 log_page_size = in->log_page_size + 12;
        u32 log_srq_size  = in->log_size;
        u32 log_rq_stride = in->wqe_shift;
        u32 page_offset   = in->page_offset;
        u32 po_quanta     = 1 << (log_page_size - 6);
        u32 rq_sz         = 1 << (log_srq_size + 4 + log_rq_stride);
        u32 page_size     = 1 << log_page_size;
        u32 rq_sz_po      = rq_sz + (page_offset * po_quanta);
        u32 rq_num_pas    = (rq_sz_po + page_size - 1) / page_size;

        return rq_num_pas * sizeof(u64);
}

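/* Fill a WQ context (used by RMPs) from the generic SRQ attributes. */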
static void set_wq(void *wq, struct mlx5_srq_attr *in)
{
        MLX5_SET(wq,   wq, wq_signature,  !!(in->flags
                 & MLX5_SRQ_FLAG_WQ_SIG));
        MLX5_SET(wq,   wq, log_wq_pg_sz,  in->log_page_size);
        MLX5_SET(wq,   wq, log_wq_stride, in->wqe_shift + 4);
        MLX5_SET(wq,   wq, log_wq_sz,     in->log_size);
        MLX5_SET(wq,   wq, page_offset,   in->page_offset);
        MLX5_SET(wq,   wq, lwm,           in->lwm);
        MLX5_SET(wq,   wq, pd,            in->pd);
        MLX5_SET64(wq, wq, dbr_addr,      in->db_record);
}

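/* Fill an SRQ context (legacy and XRC) from the generic SRQ attributes. */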
static void set_srqc(void *srqc, struct mlx5_srq_attr *in)
{
        MLX5_SET(srqc,   srqc, wq_signature,  !!(in->flags
                 & MLX5_SRQ_FLAG_WQ_SIG));
        MLX5_SET(srqc,   srqc, log_page_size, in->log_page_size);
        MLX5_SET(srqc,   srqc, log_rq_stride, in->wqe_shift);
        MLX5_SET(srqc,   srqc, log_srq_size,  in->log_size);
        MLX5_SET(srqc,   srqc, page_offset,   in->page_offset);
        MLX5_SET(srqc,   srqc, lwm,           in->lwm);
        MLX5_SET(srqc,   srqc, pd,            in->pd);
        MLX5_SET64(srqc, srqc, dbr_addr,      in->db_record);
        MLX5_SET(srqc,   srqc, xrcd,          in->xrcd);
        MLX5_SET(srqc,   srqc, cqn,           in->cqn);
}

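/* Read a WQ context back into generic SRQ attributes. */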
static void get_wq(void *wq, struct mlx5_srq_attr *in)
{
        if (MLX5_GET(wq, wq, wq_signature))
                in->flags |= MLX5_SRQ_FLAG_WQ_SIG;
        in->log_page_size = MLX5_GET(wq,   wq, log_wq_pg_sz);
        in->wqe_shift     = MLX5_GET(wq,   wq, log_wq_stride) - 4;
        in->log_size      = MLX5_GET(wq,   wq, log_wq_sz);
        in->page_offset   = MLX5_GET(wq,   wq, page_offset);
        in->lwm           = MLX5_GET(wq,   wq, lwm);
        in->pd            = MLX5_GET(wq,   wq, pd);
        in->db_record     = MLX5_GET64(wq, wq, dbr_addr);
}

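/* Read an SRQ context back into generic SRQ attributes. */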
static void get_srqc(void *srqc, struct mlx5_srq_attr *in)
{
        if (MLX5_GET(srqc, srqc, wq_signature))
                in->flags |= MLX5_SRQ_FLAG_WQ_SIG;
        in->log_page_size = MLX5_GET(srqc,   srqc, log_page_size);
        in->wqe_shift     = MLX5_GET(srqc,   srqc, log_rq_stride);
        in->log_size      = MLX5_GET(srqc,   srqc, log_srq_size);
        in->page_offset   = MLX5_GET(srqc,   srqc, page_offset);
        in->lwm           = MLX5_GET(srqc,   srqc, lwm);
        in->pd            = MLX5_GET(srqc,   srqc, pd);
        in->db_record     = MLX5_GET64(srqc, srqc, dbr_addr);
}

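/* Look up an SRQ by its number; a reference is taken on the returned SRQ. */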
struct mlx5_core_srq *mlx5_core_get_srq(struct mlx5_core_dev *dev, u32 srqn)
{
        struct mlx5_srq_table *table = &dev->priv.srq_table;
        struct mlx5_core_srq *srq;

        spin_lock(&table->lock);

        srq = radix_tree_lookup(&table->tree, srqn);
        if (srq)
                atomic_inc(&srq->refcount);

        spin_unlock(&table->lock);

        return srq;
}
EXPORT_SYMBOL(mlx5_core_get_srq);

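/* Firmware commands for the legacy SRQ object, used when dev->issi == 0. */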
static int create_srq_cmd(struct mlx5_core_dev *dev, struct mlx5_core_srq *srq,
                          struct mlx5_srq_attr *in)
{
        u32 create_out[MLX5_ST_SZ_DW(create_srq_out)] = {0};
        void *create_in;
        void *srqc;
        void *pas;
        int pas_size;
        int inlen;
        int err;

        pas_size  = get_pas_size(in);
        inlen     = MLX5_ST_SZ_BYTES(create_srq_in) + pas_size;
        create_in = mlx5_vzalloc(inlen);
        if (!create_in)
                return -ENOMEM;

        srqc = MLX5_ADDR_OF(create_srq_in, create_in, srq_context_entry);
        pas = MLX5_ADDR_OF(create_srq_in, create_in, pas);

        set_srqc(srqc, in);
        memcpy(pas, in->pas, pas_size);

        MLX5_SET(create_srq_in, create_in, opcode,
                 MLX5_CMD_OP_CREATE_SRQ);

        err = mlx5_cmd_exec(dev, create_in, inlen, create_out,
                            sizeof(create_out));
        kvfree(create_in);
        if (!err)
                srq->srqn = MLX5_GET(create_srq_out, create_out, srqn);

        return err;
}

static int destroy_srq_cmd(struct mlx5_core_dev *dev,
                           struct mlx5_core_srq *srq)
{
        u32 srq_in[MLX5_ST_SZ_DW(destroy_srq_in)] = {0};
        u32 srq_out[MLX5_ST_SZ_DW(destroy_srq_out)] = {0};

        MLX5_SET(destroy_srq_in, srq_in, opcode,
                 MLX5_CMD_OP_DESTROY_SRQ);
        MLX5_SET(destroy_srq_in, srq_in, srqn, srq->srqn);

        return mlx5_cmd_exec(dev, srq_in, sizeof(srq_in),
                             srq_out, sizeof(srq_out));
}

static int arm_srq_cmd(struct mlx5_core_dev *dev, struct mlx5_core_srq *srq,
                       u16 lwm, int is_srq)
{
        /* The arm_srq command layouts are missing; the identical
         * arm_xrc_srq ones are used instead.
         */
        u32 srq_in[MLX5_ST_SZ_DW(arm_xrc_srq_in)] = {0};
        u32 srq_out[MLX5_ST_SZ_DW(arm_xrc_srq_out)] = {0};

        MLX5_SET(arm_xrc_srq_in, srq_in, opcode,   MLX5_CMD_OP_ARM_XRC_SRQ);
        MLX5_SET(arm_xrc_srq_in, srq_in, xrc_srqn, srq->srqn);
        MLX5_SET(arm_xrc_srq_in, srq_in, lwm,      lwm);

        return mlx5_cmd_exec(dev, srq_in, sizeof(srq_in),
                             srq_out, sizeof(srq_out));
}

static int query_srq_cmd(struct mlx5_core_dev *dev, struct mlx5_core_srq *srq,
                         struct mlx5_srq_attr *out)
{
        u32 srq_in[MLX5_ST_SZ_DW(query_srq_in)] = {0};
        u32 *srq_out;
        void *srqc;
        int err;

        srq_out = mlx5_vzalloc(MLX5_ST_SZ_BYTES(query_srq_out));
        if (!srq_out)
                return -ENOMEM;

        MLX5_SET(query_srq_in, srq_in, opcode,
                 MLX5_CMD_OP_QUERY_SRQ);
        MLX5_SET(query_srq_in, srq_in, srqn, srq->srqn);
        err = mlx5_cmd_exec(dev, srq_in, sizeof(srq_in),
                            srq_out, MLX5_ST_SZ_BYTES(query_srq_out));
        if (err)
                goto out;

        srqc = MLX5_ADDR_OF(query_srq_out, srq_out, srq_context_entry);
        get_srqc(srqc, out);
        if (MLX5_GET(srqc, srqc, state) != MLX5_SRQC_STATE_GOOD)
                out->flags |= MLX5_SRQ_FLAG_ERR;
out:
        kvfree(srq_out);
        return err;
}

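/*
 * Firmware commands for the XRC SRQ object, used on devices with non-zero
 * ISSI for MLX5_RES_XSRQ resources.
 */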
static int create_xrc_srq_cmd(struct mlx5_core_dev *dev,
                              struct mlx5_core_srq *srq,
                              struct mlx5_srq_attr *in)
{
        u32 create_out[MLX5_ST_SZ_DW(create_xrc_srq_out)] = {0};
        void *create_in;
        void *xrc_srqc;
        void *pas;
        int pas_size;
        int inlen;
        int err;

        pas_size  = get_pas_size(in);
        inlen     = MLX5_ST_SZ_BYTES(create_xrc_srq_in) + pas_size;
        create_in = mlx5_vzalloc(inlen);
        if (!create_in)
                return -ENOMEM;

        xrc_srqc = MLX5_ADDR_OF(create_xrc_srq_in, create_in,
                                xrc_srq_context_entry);
        pas      = MLX5_ADDR_OF(create_xrc_srq_in, create_in, pas);

        set_srqc(xrc_srqc, in);
        MLX5_SET(xrc_srqc, xrc_srqc, user_index, in->user_index);
        memcpy(pas, in->pas, pas_size);
        MLX5_SET(create_xrc_srq_in, create_in, opcode,
                 MLX5_CMD_OP_CREATE_XRC_SRQ);

        err = mlx5_cmd_exec(dev, create_in, inlen, create_out,
                            sizeof(create_out));
        if (err)
                goto out;

        srq->srqn = MLX5_GET(create_xrc_srq_out, create_out, xrc_srqn);
out:
        kvfree(create_in);
        return err;
}

static int destroy_xrc_srq_cmd(struct mlx5_core_dev *dev,
                               struct mlx5_core_srq *srq)
{
        u32 xrcsrq_in[MLX5_ST_SZ_DW(destroy_xrc_srq_in)]   = {0};
        u32 xrcsrq_out[MLX5_ST_SZ_DW(destroy_xrc_srq_out)] = {0};

        MLX5_SET(destroy_xrc_srq_in, xrcsrq_in, opcode,
                 MLX5_CMD_OP_DESTROY_XRC_SRQ);
        MLX5_SET(destroy_xrc_srq_in, xrcsrq_in, xrc_srqn, srq->srqn);

        return mlx5_cmd_exec(dev, xrcsrq_in, sizeof(xrcsrq_in),
                             xrcsrq_out, sizeof(xrcsrq_out));
}

static int arm_xrc_srq_cmd(struct mlx5_core_dev *dev,
                           struct mlx5_core_srq *srq, u16 lwm)
{
        u32 xrcsrq_in[MLX5_ST_SZ_DW(arm_xrc_srq_in)]   = {0};
        u32 xrcsrq_out[MLX5_ST_SZ_DW(arm_xrc_srq_out)] = {0};

        MLX5_SET(arm_xrc_srq_in, xrcsrq_in, opcode,   MLX5_CMD_OP_ARM_XRC_SRQ);
        MLX5_SET(arm_xrc_srq_in, xrcsrq_in, op_mod,
                 MLX5_ARM_XRC_SRQ_IN_OP_MOD_XRC_SRQ);
        MLX5_SET(arm_xrc_srq_in, xrcsrq_in, xrc_srqn, srq->srqn);
        MLX5_SET(arm_xrc_srq_in, xrcsrq_in, lwm,      lwm);

        return mlx5_cmd_exec(dev, xrcsrq_in, sizeof(xrcsrq_in),
                             xrcsrq_out, sizeof(xrcsrq_out));
}

static int query_xrc_srq_cmd(struct mlx5_core_dev *dev,
                             struct mlx5_core_srq *srq,
                             struct mlx5_srq_attr *out)
{
        u32 xrcsrq_in[MLX5_ST_SZ_DW(query_xrc_srq_in)] = {0};
        u32 *xrcsrq_out;
        void *xrc_srqc;
        int err;

        xrcsrq_out = mlx5_vzalloc(MLX5_ST_SZ_BYTES(query_xrc_srq_out));
        if (!xrcsrq_out)
                return -ENOMEM;

        MLX5_SET(query_xrc_srq_in, xrcsrq_in, opcode,
                 MLX5_CMD_OP_QUERY_XRC_SRQ);
        MLX5_SET(query_xrc_srq_in, xrcsrq_in, xrc_srqn, srq->srqn);

        err = mlx5_cmd_exec(dev, xrcsrq_in, sizeof(xrcsrq_in), xrcsrq_out,
                            MLX5_ST_SZ_BYTES(query_xrc_srq_out));
        if (err)
                goto out;

        xrc_srqc = MLX5_ADDR_OF(query_xrc_srq_out, xrcsrq_out,
                                xrc_srq_context_entry);
        get_srqc(xrc_srqc, out);
        if (MLX5_GET(xrc_srqc, xrc_srqc, state) != MLX5_XRC_SRQC_STATE_GOOD)
                out->flags |= MLX5_SRQ_FLAG_ERR;

out:
        kvfree(xrcsrq_out);
        return err;
}

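/*
 * RMP (Receive Memory Pool) based implementation, used on devices with
 * non-zero ISSI for non-XRC SRQs.
 */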
static int create_rmp_cmd(struct mlx5_core_dev *dev, struct mlx5_core_srq *srq,
                          struct mlx5_srq_attr *in)
{
        void *create_in;
        void *rmpc;
        void *wq;
        int pas_size;
        int inlen;
        int err;

        pas_size = get_pas_size(in);
        inlen = MLX5_ST_SZ_BYTES(create_rmp_in) + pas_size;
        create_in = mlx5_vzalloc(inlen);
        if (!create_in)
                return -ENOMEM;

        rmpc = MLX5_ADDR_OF(create_rmp_in, create_in, ctx);
        wq = MLX5_ADDR_OF(rmpc, rmpc, wq);

        MLX5_SET(rmpc, rmpc, state, MLX5_RMPC_STATE_RDY);
        set_wq(wq, in);
        memcpy(MLX5_ADDR_OF(rmpc, rmpc, wq.pas), in->pas, pas_size);

        err = mlx5_core_create_rmp(dev, create_in, inlen, &srq->srqn);

        kvfree(create_in);
        return err;
}

static int destroy_rmp_cmd(struct mlx5_core_dev *dev,
                           struct mlx5_core_srq *srq)
{
        return mlx5_core_destroy_rmp(dev, srq->srqn);
}

static int arm_rmp_cmd(struct mlx5_core_dev *dev,
                       struct mlx5_core_srq *srq,
                       u16 lwm)
{
        void *in;
        void *rmpc;
        void *wq;
        void *bitmask;
        int err;

        in = mlx5_vzalloc(MLX5_ST_SZ_BYTES(modify_rmp_in));
        if (!in)
                return -ENOMEM;

        rmpc =    MLX5_ADDR_OF(modify_rmp_in,   in,   ctx);
        bitmask = MLX5_ADDR_OF(modify_rmp_in,   in,   bitmask);
        wq   =    MLX5_ADDR_OF(rmpc,            rmpc, wq);

        MLX5_SET(modify_rmp_in, in,      rmp_state, MLX5_RMPC_STATE_RDY);
        MLX5_SET(modify_rmp_in, in,      rmpn,      srq->srqn);
        MLX5_SET(wq,            wq,      lwm,       lwm);
        MLX5_SET(rmp_bitmask,   bitmask, lwm,       1);
        MLX5_SET(rmpc, rmpc, state, MLX5_RMPC_STATE_RDY);

        err = mlx5_core_modify_rmp(dev, in, MLX5_ST_SZ_BYTES(modify_rmp_in));

        kvfree(in);
        return err;
}

static int query_rmp_cmd(struct mlx5_core_dev *dev, struct mlx5_core_srq *srq,
                         struct mlx5_srq_attr *out)
{
        u32 *rmp_out;
        void *rmpc;
        int err;

        rmp_out = mlx5_vzalloc(MLX5_ST_SZ_BYTES(query_rmp_out));
        if (!rmp_out)
                return -ENOMEM;

        err = mlx5_core_query_rmp(dev, srq->srqn, rmp_out);
        if (err)
                goto out;

        rmpc = MLX5_ADDR_OF(query_rmp_out, rmp_out, rmp_context);
        get_wq(MLX5_ADDR_OF(rmpc, rmpc, wq), out);
        if (MLX5_GET(rmpc, rmpc, state) != MLX5_RMPC_STATE_RDY)
                out->flags |= MLX5_SRQ_FLAG_ERR;

out:
        kvfree(rmp_out);
        return err;
}

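/*
 * Pick the command set for this device: legacy SRQ commands on ISSI 0,
 * otherwise XRC SRQ or RMP commands depending on the resource type.
 */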
static int create_srq_split(struct mlx5_core_dev *dev,
                            struct mlx5_core_srq *srq,
                            struct mlx5_srq_attr *in)
{
        if (!dev->issi)
                return create_srq_cmd(dev, srq, in);
        else if (srq->common.res == MLX5_RES_XSRQ)
                return create_xrc_srq_cmd(dev, srq, in);
        else
                return create_rmp_cmd(dev, srq, in);
}

static int destroy_srq_split(struct mlx5_core_dev *dev,
                             struct mlx5_core_srq *srq)
{
        if (!dev->issi)
                return destroy_srq_cmd(dev, srq);
        else if (srq->common.res == MLX5_RES_XSRQ)
                return destroy_xrc_srq_cmd(dev, srq);
        else
                return destroy_rmp_cmd(dev, srq);
}

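/*
 * Create an SRQ and insert it into the radix tree so that asynchronous
 * events can find it by number.
 */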
int mlx5_core_create_srq(struct mlx5_core_dev *dev, struct mlx5_core_srq *srq,
                         struct mlx5_srq_attr *in)
{
        int err;
        struct mlx5_srq_table *table = &dev->priv.srq_table;

        if (in->type == IB_SRQT_XRC)
                srq->common.res = MLX5_RES_XSRQ;
        else
                srq->common.res = MLX5_RES_SRQ;

        err = create_srq_split(dev, srq, in);
        if (err)
                return err;

        atomic_set(&srq->refcount, 1);
        init_completion(&srq->free);

        spin_lock_irq(&table->lock);
        err = radix_tree_insert(&table->tree, srq->srqn, srq);
        spin_unlock_irq(&table->lock);
        if (err) {
                mlx5_core_warn(dev, "err %d, srqn 0x%x\n", err, srq->srqn);
                goto err_destroy_srq_split;
        }

        return 0;

err_destroy_srq_split:
        destroy_srq_split(dev, srq);

        return err;
}
EXPORT_SYMBOL(mlx5_core_create_srq);

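/*
 * Remove the SRQ from the table, destroy it in firmware and wait until the
 * last reference has been dropped.
 */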
int mlx5_core_destroy_srq(struct mlx5_core_dev *dev, struct mlx5_core_srq *srq)
{
        struct mlx5_srq_table *table = &dev->priv.srq_table;
        struct mlx5_core_srq *tmp;
        int err;

        spin_lock_irq(&table->lock);
        tmp = radix_tree_delete(&table->tree, srq->srqn);
        spin_unlock_irq(&table->lock);
        if (!tmp) {
                mlx5_core_warn(dev, "srq 0x%x not found in tree\n", srq->srqn);
                return -EINVAL;
        }
        if (tmp != srq) {
                mlx5_core_warn(dev, "corruption on srqn 0x%x\n", srq->srqn);
                return -EINVAL;
        }

        err = destroy_srq_split(dev, srq);
        if (err)
                return err;

        if (atomic_dec_and_test(&srq->refcount))
                complete(&srq->free);
        wait_for_completion(&srq->free);

        return 0;
}
EXPORT_SYMBOL(mlx5_core_destroy_srq);

int mlx5_core_query_srq(struct mlx5_core_dev *dev, struct mlx5_core_srq *srq,
                        struct mlx5_srq_attr *out)
{
        if (!dev->issi)
                return query_srq_cmd(dev, srq, out);
        else if (srq->common.res == MLX5_RES_XSRQ)
                return query_xrc_srq_cmd(dev, srq, out);
        else
                return query_rmp_cmd(dev, srq, out);
}
EXPORT_SYMBOL(mlx5_core_query_srq);

int mlx5_core_arm_srq(struct mlx5_core_dev *dev, struct mlx5_core_srq *srq,
                      u16 lwm, int is_srq)
{
        if (!dev->issi)
                return arm_srq_cmd(dev, srq, lwm, is_srq);
        else if (srq->common.res == MLX5_RES_XSRQ)
                return arm_xrc_srq_cmd(dev, srq, lwm);
        else
                return arm_rmp_cmd(dev, srq, lwm);
}
EXPORT_SYMBOL(mlx5_core_arm_srq);

void mlx5_init_srq_table(struct mlx5_core_dev *dev)
{
        struct mlx5_srq_table *table = &dev->priv.srq_table;

        memset(table, 0, sizeof(*table));
        spin_lock_init(&table->lock);
        INIT_RADIX_TREE(&table->tree, GFP_ATOMIC);
}

void mlx5_cleanup_srq_table(struct mlx5_core_dev *dev)
{
        /* nothing */
}