linux/drivers/net/ethernet/mellanox/mlx5/core/qp.c
/*
 * Copyright (c) 2013-2015, Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/gfp.h>
#include <linux/export.h>
#include <linux/mlx5/cmd.h>
#include <linux/mlx5/qp.h>
#include <linux/mlx5/driver.h>
#include <linux/mlx5/transobj.h>

#include "mlx5_core.h"

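/*
 * Resource lookup and reference counting: mlx5_get_rsc() finds the common
 * resource header of a QP/RQ/SQ by its resource number and takes a
 * reference under the table spinlock; mlx5_core_put_rsc() drops a
 * reference and completes ->free once the last one is gone, which is what
 * destroy_qprqsq_common() waits for.
 */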
static struct mlx5_core_rsc_common *mlx5_get_rsc(struct mlx5_core_dev *dev,
                                                 u32 rsn)
{
        struct mlx5_qp_table *table = &dev->priv.qp_table;
        struct mlx5_core_rsc_common *common;

        spin_lock(&table->lock);

        common = radix_tree_lookup(&table->tree, rsn);
        if (common)
                atomic_inc(&common->refcount);

        spin_unlock(&table->lock);

        if (!common) {
                mlx5_core_warn(dev, "Async event for bogus resource 0x%x\n",
                               rsn);
                return NULL;
        }
        return common;
}

void mlx5_core_put_rsc(struct mlx5_core_rsc_common *common)
{
        if (atomic_dec_and_test(&common->refcount))
                complete(&common->free);
}

static u64 qp_allowed_event_types(void)
{
        u64 mask;

        mask = BIT(MLX5_EVENT_TYPE_PATH_MIG) |
               BIT(MLX5_EVENT_TYPE_COMM_EST) |
               BIT(MLX5_EVENT_TYPE_SQ_DRAINED) |
               BIT(MLX5_EVENT_TYPE_SRQ_LAST_WQE) |
               BIT(MLX5_EVENT_TYPE_WQ_CATAS_ERROR) |
               BIT(MLX5_EVENT_TYPE_PATH_MIG_FAILED) |
               BIT(MLX5_EVENT_TYPE_WQ_INVAL_REQ_ERROR) |
               BIT(MLX5_EVENT_TYPE_WQ_ACCESS_ERROR);

        return mask;
}

static u64 rq_allowed_event_types(void)
{
        u64 mask;

        mask = BIT(MLX5_EVENT_TYPE_SRQ_LAST_WQE) |
               BIT(MLX5_EVENT_TYPE_WQ_CATAS_ERROR);

        return mask;
}

static u64 sq_allowed_event_types(void)
{
        return BIT(MLX5_EVENT_TYPE_WQ_CATAS_ERROR);
}

static bool is_event_type_allowed(int rsc_type, int event_type)
{
        switch (rsc_type) {
        case MLX5_EVENT_QUEUE_TYPE_QP:
                return BIT(event_type) & qp_allowed_event_types();
        case MLX5_EVENT_QUEUE_TYPE_RQ:
                return BIT(event_type) & rq_allowed_event_types();
        case MLX5_EVENT_QUEUE_TYPE_SQ:
                return BIT(event_type) & sq_allowed_event_types();
        default:
                WARN(1, "Event arrived for unknown resource type");
                return false;
        }
}

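/*
 * Dispatch an asynchronous hardware event to the QP/RQ/SQ it targets. The
 * resource type is carried in the bits of @rsn above MLX5_USER_INDEX_LEN;
 * events that are not valid for that resource type are dropped with a
 * warning.
 */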
void mlx5_rsc_event(struct mlx5_core_dev *dev, u32 rsn, int event_type)
{
        struct mlx5_core_rsc_common *common = mlx5_get_rsc(dev, rsn);
        struct mlx5_core_qp *qp;

        if (!common)
                return;

        if (!is_event_type_allowed((rsn >> MLX5_USER_INDEX_LEN), event_type)) {
                mlx5_core_warn(dev, "event 0x%.2x is not allowed on resource 0x%.8x\n",
                               event_type, rsn);
                goto out;
        }

        switch (common->res) {
        case MLX5_RES_QP:
        case MLX5_RES_RQ:
        case MLX5_RES_SQ:
                qp = (struct mlx5_core_qp *)common;
                qp->event(qp, event_type);
                break;

        default:
                mlx5_core_warn(dev, "invalid resource type for 0x%x\n", rsn);
        }

out:
        /* Drop the reference taken by mlx5_get_rsc() on every path. */
        mlx5_core_put_rsc(common);
}

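/*
 * QPs, RQs and SQs share one radix tree, keyed by
 * qpn | (rsc_type << MLX5_USER_INDEX_LEN), so mlx5_rsc_event() can resolve
 * any of them from the resource number carried in an event.
 */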
static int create_qprqsq_common(struct mlx5_core_dev *dev,
                                struct mlx5_core_qp *qp,
                                int rsc_type)
{
        struct mlx5_qp_table *table = &dev->priv.qp_table;
        int err;

        qp->common.res = rsc_type;
        spin_lock_irq(&table->lock);
        err = radix_tree_insert(&table->tree,
                                qp->qpn | (rsc_type << MLX5_USER_INDEX_LEN),
                                qp);
        spin_unlock_irq(&table->lock);
        if (err)
                return err;

        atomic_set(&qp->common.refcount, 1);
        init_completion(&qp->common.free);
        qp->pid = current->pid;

        return 0;
}

static void destroy_qprqsq_common(struct mlx5_core_dev *dev,
                                  struct mlx5_core_qp *qp)
{
        struct mlx5_qp_table *table = &dev->priv.qp_table;
        unsigned long flags;

        spin_lock_irqsave(&table->lock, flags);
        radix_tree_delete(&table->tree,
                          qp->qpn | (qp->common.res << MLX5_USER_INDEX_LEN));
        spin_unlock_irqrestore(&table->lock, flags);
        mlx5_core_put_rsc((struct mlx5_core_rsc_common *)qp);
        wait_for_completion(&qp->common.free);
}

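/*
 * Usage note (illustrative sketch only, not taken from a specific caller;
 * local names are placeholders): callers build the create_qp_in mailbox with
 * MLX5_SET()/MLX5_ADDR_OF() before calling mlx5_core_create_qp(), roughly:
 *
 *	inlen = MLX5_ST_SZ_BYTES(create_qp_in);
 *	in = mlx5_vzalloc(inlen);
 *	qpc = MLX5_ADDR_OF(create_qp_in, in, qpc);
 *	MLX5_SET(qpc, qpc, st, MLX5_QP_ST_RC);   // example field only
 *	err = mlx5_core_create_qp(dev, &qp, in, inlen);
 *
 * The function fills qp->qpn from the firmware reply and registers the QP in
 * the resource table; if that registration fails it destroys the QP it just
 * created.
 */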
int mlx5_core_create_qp(struct mlx5_core_dev *dev,
                        struct mlx5_core_qp *qp,
                        u32 *in, int inlen)
{
        u32 out[MLX5_ST_SZ_DW(create_qp_out)] = {0};
        u32 dout[MLX5_ST_SZ_DW(destroy_qp_out)];
        u32 din[MLX5_ST_SZ_DW(destroy_qp_in)];
        int err;

        MLX5_SET(create_qp_in, in, opcode, MLX5_CMD_OP_CREATE_QP);

        err = mlx5_cmd_exec(dev, in, inlen, out, sizeof(out));
        if (err)
                return err;

        qp->qpn = MLX5_GET(create_qp_out, out, qpn);
        mlx5_core_dbg(dev, "qpn = 0x%x\n", qp->qpn);

        err = create_qprqsq_common(dev, qp, MLX5_RES_QP);
        if (err)
                goto err_cmd;

        err = mlx5_debug_qp_add(dev, qp);
        if (err)
                mlx5_core_dbg(dev, "failed adding QP 0x%x to debug file system\n",
                              qp->qpn);

        atomic_inc(&dev->num_qps);

        return 0;

err_cmd:
        /* Build the DESTROY_QP command in its own buffer, not the caller's "in". */
        memset(din, 0, sizeof(din));
        memset(dout, 0, sizeof(dout));
        MLX5_SET(destroy_qp_in, din, opcode, MLX5_CMD_OP_DESTROY_QP);
        MLX5_SET(destroy_qp_in, din, qpn, qp->qpn);
        mlx5_cmd_exec(dev, din, sizeof(din), dout, sizeof(dout));
        return err;
}
EXPORT_SYMBOL_GPL(mlx5_core_create_qp);

int mlx5_core_destroy_qp(struct mlx5_core_dev *dev,
                         struct mlx5_core_qp *qp)
{
        u32 out[MLX5_ST_SZ_DW(destroy_qp_out)] = {0};
        u32 in[MLX5_ST_SZ_DW(destroy_qp_in)]   = {0};
        int err;

        mlx5_debug_qp_remove(dev, qp);

        destroy_qprqsq_common(dev, qp);

        MLX5_SET(destroy_qp_in, in, opcode, MLX5_CMD_OP_DESTROY_QP);
        MLX5_SET(destroy_qp_in, in, qpn, qp->qpn);
        err = mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
        if (err)
                return err;

        atomic_dec(&dev->num_qps);
        return 0;
}
EXPORT_SYMBOL_GPL(mlx5_core_destroy_qp);

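/*
 * Modify-QP commands use a differently sized mailbox per transition, so the
 * command input/output buffers are allocated dynamically through this small
 * helper instead of on the stack.
 */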
struct mbox_info {
        u32 *in;
        u32 *out;
        int inlen;
        int outlen;
};

static int mbox_alloc(struct mbox_info *mbox, int inlen, int outlen)
{
        mbox->inlen  = inlen;
        mbox->outlen = outlen;
        mbox->in = kzalloc(mbox->inlen, GFP_KERNEL);
        mbox->out = kzalloc(mbox->outlen, GFP_KERNEL);
        if (!mbox->in || !mbox->out) {
                kfree(mbox->in);
                kfree(mbox->out);
                return -ENOMEM;
        }

        return 0;
}

static void mbox_free(struct mbox_info *mbox)
{
        kfree(mbox->in);
        kfree(mbox->out);
}

static int modify_qp_mbox_alloc(struct mlx5_core_dev *dev, u16 opcode, int qpn,
                                u32 opt_param_mask, void *qpc,
                                struct mbox_info *mbox)
{
        mbox->out = NULL;
        mbox->in = NULL;

#define MBOX_ALLOC(mbox, typ)  \
        mbox_alloc(mbox, MLX5_ST_SZ_BYTES(typ##_in), MLX5_ST_SZ_BYTES(typ##_out))

#define MOD_QP_IN_SET(typ, in, _opcode, _qpn) \
        MLX5_SET(typ##_in, in, opcode, _opcode); \
        MLX5_SET(typ##_in, in, qpn, _qpn)

#define MOD_QP_IN_SET_QPC(typ, in, _opcode, _qpn, _opt_p, _qpc) \
        MOD_QP_IN_SET(typ, in, _opcode, _qpn); \
        MLX5_SET(typ##_in, in, opt_param_mask, _opt_p); \
        memcpy(MLX5_ADDR_OF(typ##_in, in, qpc), _qpc, MLX5_ST_SZ_BYTES(qpc))

        switch (opcode) {
        /* 2RST & 2ERR */
        case MLX5_CMD_OP_2RST_QP:
                if (MBOX_ALLOC(mbox, qp_2rst))
                        return -ENOMEM;
                MOD_QP_IN_SET(qp_2rst, mbox->in, opcode, qpn);
                break;
        case MLX5_CMD_OP_2ERR_QP:
                if (MBOX_ALLOC(mbox, qp_2err))
                        return -ENOMEM;
                MOD_QP_IN_SET(qp_2err, mbox->in, opcode, qpn);
                break;

        /* MODIFY with QPC */
        case MLX5_CMD_OP_RST2INIT_QP:
                if (MBOX_ALLOC(mbox, rst2init_qp))
                        return -ENOMEM;
                MOD_QP_IN_SET_QPC(rst2init_qp, mbox->in, opcode, qpn,
                                  opt_param_mask, qpc);
                break;
        case MLX5_CMD_OP_INIT2RTR_QP:
                if (MBOX_ALLOC(mbox, init2rtr_qp))
                        return -ENOMEM;
                MOD_QP_IN_SET_QPC(init2rtr_qp, mbox->in, opcode, qpn,
                                  opt_param_mask, qpc);
                break;
        case MLX5_CMD_OP_RTR2RTS_QP:
                if (MBOX_ALLOC(mbox, rtr2rts_qp))
                        return -ENOMEM;
                MOD_QP_IN_SET_QPC(rtr2rts_qp, mbox->in, opcode, qpn,
                                  opt_param_mask, qpc);
                break;
        case MLX5_CMD_OP_RTS2RTS_QP:
                if (MBOX_ALLOC(mbox, rts2rts_qp))
                        return -ENOMEM;
                MOD_QP_IN_SET_QPC(rts2rts_qp, mbox->in, opcode, qpn,
                                  opt_param_mask, qpc);
                break;
        case MLX5_CMD_OP_SQERR2RTS_QP:
                if (MBOX_ALLOC(mbox, sqerr2rts_qp))
                        return -ENOMEM;
                MOD_QP_IN_SET_QPC(sqerr2rts_qp, mbox->in, opcode, qpn,
                                  opt_param_mask, qpc);
                break;
        case MLX5_CMD_OP_INIT2INIT_QP:
                if (MBOX_ALLOC(mbox, init2init_qp))
                        return -ENOMEM;
                MOD_QP_IN_SET_QPC(init2init_qp, mbox->in, opcode, qpn,
                                  opt_param_mask, qpc);
                break;
        default:
                mlx5_core_err(dev, "Unknown transition for modify QP: OP(0x%x) QPN(0x%x)\n",
                              opcode, qpn);
                return -EINVAL;
        }
        return 0;
}

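/*
 * Illustrative sketch (hypothetical caller, not from this file; "pdn" is a
 * placeholder): moving a QP from RESET to INIT with no optional parameters
 * might look like:
 *
 *	void *qpc = mlx5_vzalloc(MLX5_ST_SZ_BYTES(qpc));
 *	MLX5_SET(qpc, qpc, pd, pdn);
 *	err = mlx5_core_qp_modify(dev, MLX5_CMD_OP_RST2INIT_QP, 0, qpc, qp);
 *
 * modify_qp_mbox_alloc() picks the mailbox layout that matches the opcode
 * and copies @qpc into it for transitions that carry a QP context.
 */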
int mlx5_core_qp_modify(struct mlx5_core_dev *dev, u16 opcode,
                        u32 opt_param_mask, void *qpc,
                        struct mlx5_core_qp *qp)
{
        struct mbox_info mbox;
        int err;

        err = modify_qp_mbox_alloc(dev, opcode, qp->qpn,
                                   opt_param_mask, qpc, &mbox);
        if (err)
                return err;

        err = mlx5_cmd_exec(dev, mbox.in, mbox.inlen, mbox.out, mbox.outlen);
        mbox_free(&mbox);
        return err;
}
EXPORT_SYMBOL_GPL(mlx5_core_qp_modify);

void mlx5_init_qp_table(struct mlx5_core_dev *dev)
{
        struct mlx5_qp_table *table = &dev->priv.qp_table;

        memset(table, 0, sizeof(*table));
        spin_lock_init(&table->lock);
        INIT_RADIX_TREE(&table->tree, GFP_ATOMIC);
        mlx5_qp_debugfs_init(dev);
}

void mlx5_cleanup_qp_table(struct mlx5_core_dev *dev)
{
        mlx5_qp_debugfs_cleanup(dev);
}

int mlx5_core_qp_query(struct mlx5_core_dev *dev, struct mlx5_core_qp *qp,
                       u32 *out, int outlen)
{
        u32 in[MLX5_ST_SZ_DW(query_qp_in)] = {0};

        MLX5_SET(query_qp_in, in, opcode, MLX5_CMD_OP_QUERY_QP);
        MLX5_SET(query_qp_in, in, qpn, qp->qpn);
        return mlx5_cmd_exec(dev, in, sizeof(in), out, outlen);
}
EXPORT_SYMBOL_GPL(mlx5_core_qp_query);

int mlx5_core_xrcd_alloc(struct mlx5_core_dev *dev, u32 *xrcdn)
{
        u32 out[MLX5_ST_SZ_DW(alloc_xrcd_out)] = {0};
        u32 in[MLX5_ST_SZ_DW(alloc_xrcd_in)]   = {0};
        int err;

        MLX5_SET(alloc_xrcd_in, in, opcode, MLX5_CMD_OP_ALLOC_XRCD);
        err = mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
        if (!err)
                *xrcdn = MLX5_GET(alloc_xrcd_out, out, xrcd);
        return err;
}
EXPORT_SYMBOL_GPL(mlx5_core_xrcd_alloc);

int mlx5_core_xrcd_dealloc(struct mlx5_core_dev *dev, u32 xrcdn)
{
        u32 out[MLX5_ST_SZ_DW(dealloc_xrcd_out)] = {0};
        u32 in[MLX5_ST_SZ_DW(dealloc_xrcd_in)]   = {0};

        MLX5_SET(dealloc_xrcd_in, in, opcode, MLX5_CMD_OP_DEALLOC_XRCD);
        MLX5_SET(dealloc_xrcd_in, in, xrcd, xrcdn);
        return mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
}
EXPORT_SYMBOL_GPL(mlx5_core_xrcd_dealloc);

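/*
 * RQs and SQs created through these "tracked" wrappers are wrapped in
 * struct mlx5_core_qp and inserted into the same resource table as QPs, so
 * asynchronous events on them are dispatched through mlx5_rsc_event() as
 * well.
 */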
int mlx5_core_create_rq_tracked(struct mlx5_core_dev *dev, u32 *in, int inlen,
                                struct mlx5_core_qp *rq)
{
        int err;
        u32 rqn;

        err = mlx5_core_create_rq(dev, in, inlen, &rqn);
        if (err)
                return err;

        rq->qpn = rqn;
        err = create_qprqsq_common(dev, rq, MLX5_RES_RQ);
        if (err)
                goto err_destroy_rq;

        return 0;

err_destroy_rq:
        mlx5_core_destroy_rq(dev, rq->qpn);

        return err;
}
EXPORT_SYMBOL(mlx5_core_create_rq_tracked);

void mlx5_core_destroy_rq_tracked(struct mlx5_core_dev *dev,
                                  struct mlx5_core_qp *rq)
{
        destroy_qprqsq_common(dev, rq);
        mlx5_core_destroy_rq(dev, rq->qpn);
}
EXPORT_SYMBOL(mlx5_core_destroy_rq_tracked);

int mlx5_core_create_sq_tracked(struct mlx5_core_dev *dev, u32 *in, int inlen,
                                struct mlx5_core_qp *sq)
{
        int err;
        u32 sqn;

        err = mlx5_core_create_sq(dev, in, inlen, &sqn);
        if (err)
                return err;

        sq->qpn = sqn;
        err = create_qprqsq_common(dev, sq, MLX5_RES_SQ);
        if (err)
                goto err_destroy_sq;

        return 0;

err_destroy_sq:
        mlx5_core_destroy_sq(dev, sq->qpn);

        return err;
}
EXPORT_SYMBOL(mlx5_core_create_sq_tracked);

void mlx5_core_destroy_sq_tracked(struct mlx5_core_dev *dev,
                                  struct mlx5_core_qp *sq)
{
        destroy_qprqsq_common(dev, sq);
        mlx5_core_destroy_sq(dev, sq->qpn);
}
EXPORT_SYMBOL(mlx5_core_destroy_sq_tracked);

int mlx5_core_alloc_q_counter(struct mlx5_core_dev *dev, u16 *counter_id)
{
        u32 in[MLX5_ST_SZ_DW(alloc_q_counter_in)]   = {0};
        u32 out[MLX5_ST_SZ_DW(alloc_q_counter_out)] = {0};
        int err;

        MLX5_SET(alloc_q_counter_in, in, opcode, MLX5_CMD_OP_ALLOC_Q_COUNTER);
        err = mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
        if (!err)
                *counter_id = MLX5_GET(alloc_q_counter_out, out,
                                       counter_set_id);
        return err;
}
EXPORT_SYMBOL_GPL(mlx5_core_alloc_q_counter);

int mlx5_core_dealloc_q_counter(struct mlx5_core_dev *dev, u16 counter_id)
{
        u32 in[MLX5_ST_SZ_DW(dealloc_q_counter_in)]   = {0};
        u32 out[MLX5_ST_SZ_DW(dealloc_q_counter_out)] = {0};

        MLX5_SET(dealloc_q_counter_in, in, opcode,
                 MLX5_CMD_OP_DEALLOC_Q_COUNTER);
        MLX5_SET(dealloc_q_counter_in, in, counter_set_id, counter_id);
        return mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
}
EXPORT_SYMBOL_GPL(mlx5_core_dealloc_q_counter);

int mlx5_core_query_q_counter(struct mlx5_core_dev *dev, u16 counter_id,
                              int reset, void *out, int out_size)
{
        u32 in[MLX5_ST_SZ_DW(query_q_counter_in)] = {0};

        MLX5_SET(query_q_counter_in, in, opcode, MLX5_CMD_OP_QUERY_Q_COUNTER);
        MLX5_SET(query_q_counter_in, in, clear, reset);
        MLX5_SET(query_q_counter_in, in, counter_set_id, counter_id);
        return mlx5_cmd_exec(dev, in, sizeof(in), out, out_size);
}
EXPORT_SYMBOL_GPL(mlx5_core_query_q_counter);

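/*
 * Convenience wrapper around mlx5_core_query_q_counter() that reads only the
 * out_of_buffer field from a freshly allocated reply buffer, without
 * clearing the counter.
 */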
int mlx5_core_query_out_of_buffer(struct mlx5_core_dev *dev, u16 counter_id,
                                  u32 *out_of_buffer)
{
        int outlen = MLX5_ST_SZ_BYTES(query_q_counter_out);
        void *out;
        int err;

        out = mlx5_vzalloc(outlen);
        if (!out)
                return -ENOMEM;

        err = mlx5_core_query_q_counter(dev, counter_id, 0, out, outlen);
        if (!err)
                *out_of_buffer = MLX5_GET(query_q_counter_out, out,
                                          out_of_buffer);

        /* mlx5_vzalloc() may fall back to vmalloc, so free with kvfree(). */
        kvfree(out);
        return err;
}