   1// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
   2/*
   3 * Copyright (c) 2013-2020, Mellanox Technologies inc. All rights reserved.
   4 */
   5
   6#include <linux/gfp.h>
   7#include <linux/mlx5/qp.h>
   8#include <linux/mlx5/driver.h>
   9#include "mlx5_ib.h"
  10#include "qp.h"
  11
  12static int mlx5_core_drain_dct(struct mlx5_ib_dev *dev,
  13                               struct mlx5_core_dct *dct);
  14
  15static struct mlx5_core_rsc_common *
  16mlx5_get_rsc(struct mlx5_qp_table *table, u32 rsn)
  17{
  18        struct mlx5_core_rsc_common *common;
  19        unsigned long flags;
  20
  21        spin_lock_irqsave(&table->lock, flags);
  22
  23        common = radix_tree_lookup(&table->tree, rsn);
  24        if (common)
  25                refcount_inc(&common->refcount);
  26
  27        spin_unlock_irqrestore(&table->lock, flags);
  28
  29        return common;
  30}
  31
  32void mlx5_core_put_rsc(struct mlx5_core_rsc_common *common)
  33{
  34        if (refcount_dec_and_test(&common->refcount))
  35                complete(&common->free);
  36}
  37
  38static u64 qp_allowed_event_types(void)
  39{
  40        u64 mask;
  41
  42        mask = BIT(MLX5_EVENT_TYPE_PATH_MIG) |
  43               BIT(MLX5_EVENT_TYPE_COMM_EST) |
  44               BIT(MLX5_EVENT_TYPE_SQ_DRAINED) |
  45               BIT(MLX5_EVENT_TYPE_SRQ_LAST_WQE) |
  46               BIT(MLX5_EVENT_TYPE_WQ_CATAS_ERROR) |
  47               BIT(MLX5_EVENT_TYPE_PATH_MIG_FAILED) |
  48               BIT(MLX5_EVENT_TYPE_WQ_INVAL_REQ_ERROR) |
  49               BIT(MLX5_EVENT_TYPE_WQ_ACCESS_ERROR);
  50
  51        return mask;
  52}
  53
  54static u64 rq_allowed_event_types(void)
  55{
  56        u64 mask;
  57
  58        mask = BIT(MLX5_EVENT_TYPE_SRQ_LAST_WQE) |
  59               BIT(MLX5_EVENT_TYPE_WQ_CATAS_ERROR);
  60
  61        return mask;
  62}
  63
  64static u64 sq_allowed_event_types(void)
  65{
  66        return BIT(MLX5_EVENT_TYPE_WQ_CATAS_ERROR);
  67}
  68
  69static u64 dct_allowed_event_types(void)
  70{
  71        return BIT(MLX5_EVENT_TYPE_DCT_DRAINED);
  72}
  73
  74static bool is_event_type_allowed(int rsc_type, int event_type)
  75{
  76        switch (rsc_type) {
  77        case MLX5_EVENT_QUEUE_TYPE_QP:
  78                return BIT(event_type) & qp_allowed_event_types();
  79        case MLX5_EVENT_QUEUE_TYPE_RQ:
  80                return BIT(event_type) & rq_allowed_event_types();
  81        case MLX5_EVENT_QUEUE_TYPE_SQ:
  82                return BIT(event_type) & sq_allowed_event_types();
  83        case MLX5_EVENT_QUEUE_TYPE_DCT:
  84                return BIT(event_type) & dct_allowed_event_types();
  85        default:
  86                WARN(1, "Event arrived for unknown resource type");
  87                return false;
  88        }
  89}
  90
/*
 * Dispatch an mlx5 hardware event to the QP/RQ/SQ/DCT it belongs to.
 *
 * The RSN (resource serial number) is the 24-bit resource number from the
 * EQE with the resource type placed in the bits above MLX5_USER_INDEX_LEN.
 * The resource is looked up with a temporary reference held so it cannot
 * be freed while the event is being delivered.
 */
static int rsc_event_notifier(struct notifier_block *nb,
                              unsigned long type, void *data)
{
        struct mlx5_core_rsc_common *common;
        struct mlx5_qp_table *table;
        struct mlx5_core_dct *dct;
        u8 event_type = (u8)type;
        struct mlx5_core_qp *qp;
        struct mlx5_eqe *eqe;
        u32 rsn;

        /* Build the RSN from the EQE; ignore event types we don't handle. */
        switch (event_type) {
        case MLX5_EVENT_TYPE_DCT_DRAINED:
                eqe = data;
                rsn = be32_to_cpu(eqe->data.dct.dctn) & 0xffffff;
                rsn |= (MLX5_RES_DCT << MLX5_USER_INDEX_LEN);
                break;
        case MLX5_EVENT_TYPE_PATH_MIG:
        case MLX5_EVENT_TYPE_COMM_EST:
        case MLX5_EVENT_TYPE_SQ_DRAINED:
        case MLX5_EVENT_TYPE_SRQ_LAST_WQE:
        case MLX5_EVENT_TYPE_WQ_CATAS_ERROR:
        case MLX5_EVENT_TYPE_PATH_MIG_FAILED:
        case MLX5_EVENT_TYPE_WQ_INVAL_REQ_ERROR:
        case MLX5_EVENT_TYPE_WQ_ACCESS_ERROR:
                eqe = data;
                /* qp_srq.type distinguishes QP vs RQ vs SQ for these events */
                rsn = be32_to_cpu(eqe->data.qp_srq.qp_srq_n) & 0xffffff;
                rsn |= (eqe->data.qp_srq.type << MLX5_USER_INDEX_LEN);
                break;
        default:
                return NOTIFY_DONE;
        }

        table = container_of(nb, struct mlx5_qp_table, nb);
        common = mlx5_get_rsc(table, rsn);
        if (!common)
                /* Resource already gone (or never tracked) - nothing to do. */
                return NOTIFY_OK;

        /* Drop events that are not valid for this kind of resource. */
        if (!is_event_type_allowed((rsn >> MLX5_USER_INDEX_LEN), event_type))
                goto out;

        switch (common->res) {
        case MLX5_RES_QP:
        case MLX5_RES_RQ:
        case MLX5_RES_SQ:
                qp = (struct mlx5_core_qp *)common;
                qp->event(qp, event_type);
                break;
        case MLX5_RES_DCT:
                dct = (struct mlx5_core_dct *)common;
                /* Wake the waiter in _mlx5_core_destroy_dct(). */
                if (event_type == MLX5_EVENT_TYPE_DCT_DRAINED)
                        complete(&dct->drained);
                break;
        default:
                break;
        }
out:
        /* Release the temporary reference taken by mlx5_get_rsc(). */
        mlx5_core_put_rsc(common);

        return NOTIFY_OK;
}
 152
 153static int create_resource_common(struct mlx5_ib_dev *dev,
 154                                  struct mlx5_core_qp *qp, int rsc_type)
 155{
 156        struct mlx5_qp_table *table = &dev->qp_table;
 157        int err;
 158
 159        qp->common.res = rsc_type;
 160        spin_lock_irq(&table->lock);
 161        err = radix_tree_insert(&table->tree,
 162                                qp->qpn | (rsc_type << MLX5_USER_INDEX_LEN),
 163                                qp);
 164        spin_unlock_irq(&table->lock);
 165        if (err)
 166                return err;
 167
 168        refcount_set(&qp->common.refcount, 1);
 169        init_completion(&qp->common.free);
 170        qp->pid = current->pid;
 171
 172        return 0;
 173}
 174
/*
 * Remove @qp from the resource table and wait until every concurrent user
 * (e.g. the event notifier, which takes a temporary reference through
 * mlx5_get_rsc()) has dropped its reference.
 *
 * The order matters: delete from the tree first so no new references can
 * be taken, then drop the initial reference from create_resource_common(),
 * then block until the last put completes ->free.
 */
static void destroy_resource_common(struct mlx5_ib_dev *dev,
                                    struct mlx5_core_qp *qp)
{
        struct mlx5_qp_table *table = &dev->qp_table;
        unsigned long flags;

        spin_lock_irqsave(&table->lock, flags);
        radix_tree_delete(&table->tree,
                          qp->qpn | (qp->common.res << MLX5_USER_INDEX_LEN));
        spin_unlock_irqrestore(&table->lock, flags);
        mlx5_core_put_rsc((struct mlx5_core_rsc_common *)qp);
        wait_for_completion(&qp->common.free);
}
 188
/*
 * Drain and destroy a DCT object in firmware.
 *
 * @need_cleanup: remove the DCT from the resource table as well; the
 *                create-error path passes false because the entry was
 *                never inserted.
 *
 * The DCT must be drained before destroy; the drain completion is
 * signalled by the DCT_DRAINED event in rsc_event_notifier(). When the
 * device is in internal-error state the drain command cannot succeed, so
 * we skip straight to destroy instead of failing.
 */
static int _mlx5_core_destroy_dct(struct mlx5_ib_dev *dev,
                                  struct mlx5_core_dct *dct, bool need_cleanup)
{
        u32 in[MLX5_ST_SZ_DW(destroy_dct_in)] = {};
        struct mlx5_core_qp *qp = &dct->mqp;
        int err;

        err = mlx5_core_drain_dct(dev, dct);
        if (err) {
                if (dev->mdev->state == MLX5_DEVICE_STATE_INTERNAL_ERROR)
                        goto destroy;

                return err;
        }
        /* Wait for the DCT_DRAINED event before destroying the object. */
        wait_for_completion(&dct->drained);
destroy:
        if (need_cleanup)
                destroy_resource_common(dev, &dct->mqp);
        MLX5_SET(destroy_dct_in, in, opcode, MLX5_CMD_OP_DESTROY_DCT);
        MLX5_SET(destroy_dct_in, in, dctn, qp->qpn);
        MLX5_SET(destroy_dct_in, in, uid, qp->uid);
        err = mlx5_cmd_exec_in(dev->mdev, destroy_dct, in);
        return err;
}
 213
 214int mlx5_core_create_dct(struct mlx5_ib_dev *dev, struct mlx5_core_dct *dct,
 215                         u32 *in, int inlen, u32 *out, int outlen)
 216{
 217        struct mlx5_core_qp *qp = &dct->mqp;
 218        int err;
 219
 220        init_completion(&dct->drained);
 221        MLX5_SET(create_dct_in, in, opcode, MLX5_CMD_OP_CREATE_DCT);
 222
 223        err = mlx5_cmd_exec(dev->mdev, in, inlen, out, outlen);
 224        if (err)
 225                return err;
 226
 227        qp->qpn = MLX5_GET(create_dct_out, out, dctn);
 228        qp->uid = MLX5_GET(create_dct_in, in, uid);
 229        err = create_resource_common(dev, qp, MLX5_RES_DCT);
 230        if (err)
 231                goto err_cmd;
 232
 233        return 0;
 234err_cmd:
 235        _mlx5_core_destroy_dct(dev, dct, false);
 236        return err;
 237}
 238
 239int mlx5_qpc_create_qp(struct mlx5_ib_dev *dev, struct mlx5_core_qp *qp,
 240                       u32 *in, int inlen, u32 *out)
 241{
 242        u32 din[MLX5_ST_SZ_DW(destroy_qp_in)] = {};
 243        int err;
 244
 245        MLX5_SET(create_qp_in, in, opcode, MLX5_CMD_OP_CREATE_QP);
 246
 247        err = mlx5_cmd_exec(dev->mdev, in, inlen, out,
 248                            MLX5_ST_SZ_BYTES(create_qp_out));
 249        if (err)
 250                return err;
 251
 252        qp->uid = MLX5_GET(create_qp_in, in, uid);
 253        qp->qpn = MLX5_GET(create_qp_out, out, qpn);
 254
 255        err = create_resource_common(dev, qp, MLX5_RES_QP);
 256        if (err)
 257                goto err_cmd;
 258
 259        mlx5_debug_qp_add(dev->mdev, qp);
 260
 261        return 0;
 262
 263err_cmd:
 264        MLX5_SET(destroy_qp_in, din, opcode, MLX5_CMD_OP_DESTROY_QP);
 265        MLX5_SET(destroy_qp_in, din, qpn, qp->qpn);
 266        MLX5_SET(destroy_qp_in, din, uid, qp->uid);
 267        mlx5_cmd_exec_in(dev->mdev, destroy_qp, din);
 268        return err;
 269}
 270
 271static int mlx5_core_drain_dct(struct mlx5_ib_dev *dev,
 272                               struct mlx5_core_dct *dct)
 273{
 274        u32 in[MLX5_ST_SZ_DW(drain_dct_in)] = {};
 275        struct mlx5_core_qp *qp = &dct->mqp;
 276
 277        MLX5_SET(drain_dct_in, in, opcode, MLX5_CMD_OP_DRAIN_DCT);
 278        MLX5_SET(drain_dct_in, in, dctn, qp->qpn);
 279        MLX5_SET(drain_dct_in, in, uid, qp->uid);
 280        return mlx5_cmd_exec_in(dev->mdev, drain_dct, in);
 281}
 282
/*
 * Drain and destroy a DCT, including removing it from the resource table
 * (full cleanup, unlike the create-error path which passes false).
 */
int mlx5_core_destroy_dct(struct mlx5_ib_dev *dev,
                          struct mlx5_core_dct *dct)
{
        return _mlx5_core_destroy_dct(dev, dct, true);
}
 288
 289int mlx5_core_destroy_qp(struct mlx5_ib_dev *dev, struct mlx5_core_qp *qp)
 290{
 291        u32 in[MLX5_ST_SZ_DW(destroy_qp_in)] = {};
 292
 293        mlx5_debug_qp_remove(dev->mdev, qp);
 294
 295        destroy_resource_common(dev, qp);
 296
 297        MLX5_SET(destroy_qp_in, in, opcode, MLX5_CMD_OP_DESTROY_QP);
 298        MLX5_SET(destroy_qp_in, in, qpn, qp->qpn);
 299        MLX5_SET(destroy_qp_in, in, uid, qp->uid);
 300        mlx5_cmd_exec_in(dev->mdev, destroy_qp, in);
 301        return 0;
 302}
 303
 304int mlx5_core_set_delay_drop(struct mlx5_ib_dev *dev,
 305                             u32 timeout_usec)
 306{
 307        u32 in[MLX5_ST_SZ_DW(set_delay_drop_params_in)] = {};
 308
 309        MLX5_SET(set_delay_drop_params_in, in, opcode,
 310                 MLX5_CMD_OP_SET_DELAY_DROP_PARAMS);
 311        MLX5_SET(set_delay_drop_params_in, in, delay_drop_timeout,
 312                 timeout_usec / 100);
 313        return mlx5_cmd_exec_in(dev->mdev, set_delay_drop_params, in);
 314}
 315
/* Firmware command mailbox: input/output buffers and their byte lengths. */
struct mbox_info {
        u32 *in;    /* command input buffer, kzalloc'ed by mbox_alloc() */
        u32 *out;   /* command output buffer, kzalloc'ed by mbox_alloc() */
        int inlen;
        int outlen;
};
 322
 323static int mbox_alloc(struct mbox_info *mbox, int inlen, int outlen)
 324{
 325        mbox->inlen  = inlen;
 326        mbox->outlen = outlen;
 327        mbox->in = kzalloc(mbox->inlen, GFP_KERNEL);
 328        mbox->out = kzalloc(mbox->outlen, GFP_KERNEL);
 329        if (!mbox->in || !mbox->out) {
 330                kfree(mbox->in);
 331                kfree(mbox->out);
 332                return -ENOMEM;
 333        }
 334
 335        return 0;
 336}
 337
/* Release the buffers allocated by mbox_alloc(); NULL pointers are fine. */
static void mbox_free(struct mbox_info *mbox)
{
        kfree(mbox->in);
        kfree(mbox->out);
}
 343
 344static int get_ece_from_mbox(void *out, u16 opcode)
 345{
 346        int ece = 0;
 347
 348        switch (opcode) {
 349        case MLX5_CMD_OP_INIT2INIT_QP:
 350                ece = MLX5_GET(init2init_qp_out, out, ece);
 351                break;
 352        case MLX5_CMD_OP_INIT2RTR_QP:
 353                ece = MLX5_GET(init2rtr_qp_out, out, ece);
 354                break;
 355        case MLX5_CMD_OP_RTR2RTS_QP:
 356                ece = MLX5_GET(rtr2rts_qp_out, out, ece);
 357                break;
 358        case MLX5_CMD_OP_RTS2RTS_QP:
 359                ece = MLX5_GET(rts2rts_qp_out, out, ece);
 360                break;
 361        case MLX5_CMD_OP_RST2INIT_QP:
 362                ece = MLX5_GET(rst2init_qp_out, out, ece);
 363                break;
 364        default:
 365                break;
 366        }
 367
 368        return ece;
 369}
 370
/*
 * Allocate and fill the command mailbox for a modify-QP transition.
 *
 * Every transition gets opcode/qpn/uid; QPC-carrying transitions also get
 * the optional-parameter mask and a copy of @qpc, and the transitions that
 * support it get the ECE value. Returns -ENOMEM on allocation failure and
 * -EINVAL for an unsupported opcode; on success the caller owns the
 * buffers in @mbox and must release them with mbox_free().
 */
static int modify_qp_mbox_alloc(struct mlx5_core_dev *dev, u16 opcode, int qpn,
                                u32 opt_param_mask, void *qpc,
                                struct mbox_info *mbox, u16 uid, u32 ece)
{
        mbox->out = NULL;
        mbox->in = NULL;

/* Allocate in/out buffers sized for the given command layout. */
#define MBOX_ALLOC(mbox, typ)  \
        mbox_alloc(mbox, MLX5_ST_SZ_BYTES(typ##_in), MLX5_ST_SZ_BYTES(typ##_out))

/* Common header fields present in every modify-QP command. */
#define MOD_QP_IN_SET(typ, in, _opcode, _qpn, _uid)                            \
        do {                                                                   \
                MLX5_SET(typ##_in, in, opcode, _opcode);                       \
                MLX5_SET(typ##_in, in, qpn, _qpn);                             \
                MLX5_SET(typ##_in, in, uid, _uid);                             \
        } while (0)

/* Header plus optional-parameter mask and the QP context itself. */
#define MOD_QP_IN_SET_QPC(typ, in, _opcode, _qpn, _opt_p, _qpc, _uid)          \
        do {                                                                   \
                MOD_QP_IN_SET(typ, in, _opcode, _qpn, _uid);                   \
                MLX5_SET(typ##_in, in, opt_param_mask, _opt_p);                \
                memcpy(MLX5_ADDR_OF(typ##_in, in, qpc), _qpc,                  \
                       MLX5_ST_SZ_BYTES(qpc));                                 \
        } while (0)

        switch (opcode) {
        /* 2RST & 2ERR */
        case MLX5_CMD_OP_2RST_QP:
                if (MBOX_ALLOC(mbox, qp_2rst))
                        return -ENOMEM;
                MOD_QP_IN_SET(qp_2rst, mbox->in, opcode, qpn, uid);
                break;
        case MLX5_CMD_OP_2ERR_QP:
                if (MBOX_ALLOC(mbox, qp_2err))
                        return -ENOMEM;
                MOD_QP_IN_SET(qp_2err, mbox->in, opcode, qpn, uid);
                break;

        /* MODIFY with QPC */
        case MLX5_CMD_OP_RST2INIT_QP:
                if (MBOX_ALLOC(mbox, rst2init_qp))
                        return -ENOMEM;
                MOD_QP_IN_SET_QPC(rst2init_qp, mbox->in, opcode, qpn,
                                  opt_param_mask, qpc, uid);
                MLX5_SET(rst2init_qp_in, mbox->in, ece, ece);
                break;
        case MLX5_CMD_OP_INIT2RTR_QP:
                if (MBOX_ALLOC(mbox, init2rtr_qp))
                        return -ENOMEM;
                MOD_QP_IN_SET_QPC(init2rtr_qp, mbox->in, opcode, qpn,
                                  opt_param_mask, qpc, uid);
                MLX5_SET(init2rtr_qp_in, mbox->in, ece, ece);
                break;
        case MLX5_CMD_OP_RTR2RTS_QP:
                if (MBOX_ALLOC(mbox, rtr2rts_qp))
                        return -ENOMEM;
                MOD_QP_IN_SET_QPC(rtr2rts_qp, mbox->in, opcode, qpn,
                                  opt_param_mask, qpc, uid);
                MLX5_SET(rtr2rts_qp_in, mbox->in, ece, ece);
                break;
        case MLX5_CMD_OP_RTS2RTS_QP:
                if (MBOX_ALLOC(mbox, rts2rts_qp))
                        return -ENOMEM;
                MOD_QP_IN_SET_QPC(rts2rts_qp, mbox->in, opcode, qpn,
                                  opt_param_mask, qpc, uid);
                MLX5_SET(rts2rts_qp_in, mbox->in, ece, ece);
                break;
        case MLX5_CMD_OP_SQERR2RTS_QP:
                if (MBOX_ALLOC(mbox, sqerr2rts_qp))
                        return -ENOMEM;
                MOD_QP_IN_SET_QPC(sqerr2rts_qp, mbox->in, opcode, qpn,
                                  opt_param_mask, qpc, uid);
                break;
        case MLX5_CMD_OP_SQD_RTS_QP:
                if (MBOX_ALLOC(mbox, sqd2rts_qp))
                        return -ENOMEM;
                MOD_QP_IN_SET_QPC(sqd2rts_qp, mbox->in, opcode, qpn,
                                  opt_param_mask, qpc, uid);
                break;
        case MLX5_CMD_OP_INIT2INIT_QP:
                if (MBOX_ALLOC(mbox, init2init_qp))
                        return -ENOMEM;
                MOD_QP_IN_SET_QPC(init2init_qp, mbox->in, opcode, qpn,
                                  opt_param_mask, qpc, uid);
                MLX5_SET(init2init_qp_in, mbox->in, ece, ece);
                break;
        default:
                return -EINVAL;
        }
        return 0;
}
 462
 463int mlx5_core_qp_modify(struct mlx5_ib_dev *dev, u16 opcode, u32 opt_param_mask,
 464                        void *qpc, struct mlx5_core_qp *qp, u32 *ece)
 465{
 466        struct mbox_info mbox;
 467        int err;
 468
 469        err = modify_qp_mbox_alloc(dev->mdev, opcode, qp->qpn, opt_param_mask,
 470                                   qpc, &mbox, qp->uid, (ece) ? *ece : 0);
 471        if (err)
 472                return err;
 473
 474        err = mlx5_cmd_exec(dev->mdev, mbox.in, mbox.inlen, mbox.out,
 475                            mbox.outlen);
 476
 477        if (ece)
 478                *ece = get_ece_from_mbox(mbox.out, opcode);
 479
 480        mbox_free(&mbox);
 481        return err;
 482}
 483
 484int mlx5_init_qp_table(struct mlx5_ib_dev *dev)
 485{
 486        struct mlx5_qp_table *table = &dev->qp_table;
 487
 488        spin_lock_init(&table->lock);
 489        INIT_RADIX_TREE(&table->tree, GFP_ATOMIC);
 490        mlx5_qp_debugfs_init(dev->mdev);
 491
 492        table->nb.notifier_call = rsc_event_notifier;
 493        mlx5_notifier_register(dev->mdev, &table->nb);
 494
 495        return 0;
 496}
 497
 498void mlx5_cleanup_qp_table(struct mlx5_ib_dev *dev)
 499{
 500        struct mlx5_qp_table *table = &dev->qp_table;
 501
 502        mlx5_notifier_unregister(dev->mdev, &table->nb);
 503        mlx5_qp_debugfs_cleanup(dev->mdev);
 504}
 505
 506int mlx5_core_qp_query(struct mlx5_ib_dev *dev, struct mlx5_core_qp *qp,
 507                       u32 *out, int outlen)
 508{
 509        u32 in[MLX5_ST_SZ_DW(query_qp_in)] = {};
 510
 511        MLX5_SET(query_qp_in, in, opcode, MLX5_CMD_OP_QUERY_QP);
 512        MLX5_SET(query_qp_in, in, qpn, qp->qpn);
 513        return mlx5_cmd_exec(dev->mdev, in, sizeof(in), out, outlen);
 514}
 515
 516int mlx5_core_dct_query(struct mlx5_ib_dev *dev, struct mlx5_core_dct *dct,
 517                        u32 *out, int outlen)
 518{
 519        u32 in[MLX5_ST_SZ_DW(query_dct_in)] = {};
 520        struct mlx5_core_qp *qp = &dct->mqp;
 521
 522        MLX5_SET(query_dct_in, in, opcode, MLX5_CMD_OP_QUERY_DCT);
 523        MLX5_SET(query_dct_in, in, dctn, qp->qpn);
 524
 525        return mlx5_cmd_exec(dev->mdev, (void *)&in, sizeof(in), (void *)out,
 526                             outlen);
 527}
 528
 529int mlx5_core_xrcd_alloc(struct mlx5_ib_dev *dev, u32 *xrcdn)
 530{
 531        u32 out[MLX5_ST_SZ_DW(alloc_xrcd_out)] = {};
 532        u32 in[MLX5_ST_SZ_DW(alloc_xrcd_in)] = {};
 533        int err;
 534
 535        MLX5_SET(alloc_xrcd_in, in, opcode, MLX5_CMD_OP_ALLOC_XRCD);
 536        err = mlx5_cmd_exec_inout(dev->mdev, alloc_xrcd, in, out);
 537        if (!err)
 538                *xrcdn = MLX5_GET(alloc_xrcd_out, out, xrcd);
 539        return err;
 540}
 541
 542int mlx5_core_xrcd_dealloc(struct mlx5_ib_dev *dev, u32 xrcdn)
 543{
 544        u32 in[MLX5_ST_SZ_DW(dealloc_xrcd_in)] = {};
 545
 546        MLX5_SET(dealloc_xrcd_in, in, opcode, MLX5_CMD_OP_DEALLOC_XRCD);
 547        MLX5_SET(dealloc_xrcd_in, in, xrcd, xrcdn);
 548        return mlx5_cmd_exec_in(dev->mdev, dealloc_xrcd, in);
 549}
 550
 551static void destroy_rq_tracked(struct mlx5_ib_dev *dev, u32 rqn, u16 uid)
 552{
 553        u32 in[MLX5_ST_SZ_DW(destroy_rq_in)] = {};
 554
 555        MLX5_SET(destroy_rq_in, in, opcode, MLX5_CMD_OP_DESTROY_RQ);
 556        MLX5_SET(destroy_rq_in, in, rqn, rqn);
 557        MLX5_SET(destroy_rq_in, in, uid, uid);
 558        mlx5_cmd_exec_in(dev->mdev, destroy_rq, in);
 559}
 560
 561int mlx5_core_create_rq_tracked(struct mlx5_ib_dev *dev, u32 *in, int inlen,
 562                                struct mlx5_core_qp *rq)
 563{
 564        int err;
 565        u32 rqn;
 566
 567        err = mlx5_core_create_rq(dev->mdev, in, inlen, &rqn);
 568        if (err)
 569                return err;
 570
 571        rq->uid = MLX5_GET(create_rq_in, in, uid);
 572        rq->qpn = rqn;
 573        err = create_resource_common(dev, rq, MLX5_RES_RQ);
 574        if (err)
 575                goto err_destroy_rq;
 576
 577        return 0;
 578
 579err_destroy_rq:
 580        destroy_rq_tracked(dev, rq->qpn, rq->uid);
 581
 582        return err;
 583}
 584
/*
 * Untrack and destroy an RQ created by mlx5_core_create_rq_tracked().
 * The table entry goes first so no new event references can be taken
 * while the hardware object is destroyed. Always returns 0.
 */
int mlx5_core_destroy_rq_tracked(struct mlx5_ib_dev *dev,
                                 struct mlx5_core_qp *rq)
{
        destroy_resource_common(dev, rq);
        destroy_rq_tracked(dev, rq->qpn, rq->uid);
        return 0;
}
 592
 593static void destroy_sq_tracked(struct mlx5_ib_dev *dev, u32 sqn, u16 uid)
 594{
 595        u32 in[MLX5_ST_SZ_DW(destroy_sq_in)] = {};
 596
 597        MLX5_SET(destroy_sq_in, in, opcode, MLX5_CMD_OP_DESTROY_SQ);
 598        MLX5_SET(destroy_sq_in, in, sqn, sqn);
 599        MLX5_SET(destroy_sq_in, in, uid, uid);
 600        mlx5_cmd_exec_in(dev->mdev, destroy_sq, in);
 601}
 602
 603int mlx5_core_create_sq_tracked(struct mlx5_ib_dev *dev, u32 *in, int inlen,
 604                                struct mlx5_core_qp *sq)
 605{
 606        u32 out[MLX5_ST_SZ_DW(create_sq_out)] = {};
 607        int err;
 608
 609        MLX5_SET(create_sq_in, in, opcode, MLX5_CMD_OP_CREATE_SQ);
 610        err = mlx5_cmd_exec(dev->mdev, in, inlen, out, sizeof(out));
 611        if (err)
 612                return err;
 613
 614        sq->qpn = MLX5_GET(create_sq_out, out, sqn);
 615        sq->uid = MLX5_GET(create_sq_in, in, uid);
 616        err = create_resource_common(dev, sq, MLX5_RES_SQ);
 617        if (err)
 618                goto err_destroy_sq;
 619
 620        return 0;
 621
 622err_destroy_sq:
 623        destroy_sq_tracked(dev, sq->qpn, sq->uid);
 624
 625        return err;
 626}
 627
/*
 * Untrack and destroy an SQ created by mlx5_core_create_sq_tracked().
 * The table entry goes first so no new event references can be taken
 * while the hardware object is destroyed.
 */
void mlx5_core_destroy_sq_tracked(struct mlx5_ib_dev *dev,
                                  struct mlx5_core_qp *sq)
{
        destroy_resource_common(dev, sq);
        destroy_sq_tracked(dev, sq->qpn, sq->uid);
}
 634
 635struct mlx5_core_rsc_common *mlx5_core_res_hold(struct mlx5_ib_dev *dev,
 636                                                int res_num,
 637                                                enum mlx5_res_type res_type)
 638{
 639        u32 rsn = res_num | (res_type << MLX5_USER_INDEX_LEN);
 640        struct mlx5_qp_table *table = &dev->qp_table;
 641
 642        return mlx5_get_rsc(table, rsn);
 643}
 644
/* Release a reference taken by mlx5_core_res_hold(). */
void mlx5_core_res_put(struct mlx5_core_rsc_common *res)
{
        mlx5_core_put_rsc(res);
}
 649