linux/drivers/net/ethernet/mellanox/mlx5/core/qp.c
/*
 * Copyright (c) 2013-2015, Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/gfp.h>
#include <linux/export.h>
#include <linux/mlx5/cmd.h>
#include <linux/mlx5/qp.h>
#include <linux/mlx5/driver.h>
#include <linux/mlx5/transobj.h>

#include "mlx5_core.h"

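/* Look up the resource (QP, RQ or SQ) keyed by rsn in the device-wide
 * radix tree and take a reference on it. Callers must balance the
 * reference with mlx5_core_put_rsc().
 */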
static struct mlx5_core_rsc_common *mlx5_get_rsc(struct mlx5_core_dev *dev,
                                                 u32 rsn)
{
        struct mlx5_qp_table *table = &dev->priv.qp_table;
        struct mlx5_core_rsc_common *common;

        spin_lock(&table->lock);

        common = radix_tree_lookup(&table->tree, rsn);
        if (common)
                atomic_inc(&common->refcount);

        spin_unlock(&table->lock);

        if (!common) {
                mlx5_core_warn(dev, "Async event for bogus resource 0x%x\n",
                               rsn);
                return NULL;
        }
        return common;
}

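/* Drop a reference taken by mlx5_get_rsc(); the final put completes
 * ->free, unblocking destroy_qprqsq_common().
 */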
void mlx5_core_put_rsc(struct mlx5_core_rsc_common *common)
{
        if (atomic_dec_and_test(&common->refcount))
                complete(&common->free);
}

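/* Per-resource-type masks of the async event types that are valid for a
 * QP, RQ or SQ; anything outside the mask is flagged and dropped by
 * mlx5_rsc_event().
 */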
static u64 qp_allowed_event_types(void)
{
        u64 mask;

        mask = BIT(MLX5_EVENT_TYPE_PATH_MIG) |
               BIT(MLX5_EVENT_TYPE_COMM_EST) |
               BIT(MLX5_EVENT_TYPE_SQ_DRAINED) |
               BIT(MLX5_EVENT_TYPE_SRQ_LAST_WQE) |
               BIT(MLX5_EVENT_TYPE_WQ_CATAS_ERROR) |
               BIT(MLX5_EVENT_TYPE_PATH_MIG_FAILED) |
               BIT(MLX5_EVENT_TYPE_WQ_INVAL_REQ_ERROR) |
               BIT(MLX5_EVENT_TYPE_WQ_ACCESS_ERROR);

        return mask;
}

static u64 rq_allowed_event_types(void)
{
        u64 mask;

        mask = BIT(MLX5_EVENT_TYPE_SRQ_LAST_WQE) |
               BIT(MLX5_EVENT_TYPE_WQ_CATAS_ERROR);

        return mask;
}

static u64 sq_allowed_event_types(void)
{
        return BIT(MLX5_EVENT_TYPE_WQ_CATAS_ERROR);
}

static bool is_event_type_allowed(int rsc_type, int event_type)
{
        switch (rsc_type) {
        case MLX5_EVENT_QUEUE_TYPE_QP:
                return BIT(event_type) & qp_allowed_event_types();
        case MLX5_EVENT_QUEUE_TYPE_RQ:
                return BIT(event_type) & rq_allowed_event_types();
        case MLX5_EVENT_QUEUE_TYPE_SQ:
                return BIT(event_type) & sq_allowed_event_types();
        default:
                WARN(1, "Event arrived for unknown resource type");
                return false;
        }
}

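/* Dispatch an async event to the resource it targets. The resource type
 * lives in the rsn bits above MLX5_USER_INDEX_LEN, mirroring the key
 * layout used by create_qprqsq_common().
 */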
void mlx5_rsc_event(struct mlx5_core_dev *dev, u32 rsn, int event_type)
{
        struct mlx5_core_rsc_common *common = mlx5_get_rsc(dev, rsn);
        struct mlx5_core_qp *qp;

        if (!common)
                return;

        if (!is_event_type_allowed((rsn >> MLX5_USER_INDEX_LEN), event_type)) {
                mlx5_core_warn(dev, "event 0x%.2x is not allowed on resource 0x%.8x\n",
                               event_type, rsn);
                /* don't leak the reference taken by mlx5_get_rsc() */
                goto out;
        }

        switch (common->res) {
        case MLX5_RES_QP:
        case MLX5_RES_RQ:
        case MLX5_RES_SQ:
                qp = (struct mlx5_core_qp *)common;
                qp->event(qp, event_type);
                break;

        default:
                mlx5_core_warn(dev, "invalid resource type for 0x%x\n", rsn);
        }

out:
        mlx5_core_put_rsc(common);
}

#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
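/* Decode an ODP page-fault EQE and hand it to the QP's pfault_handler,
 * which is expected to resolve the fault and resume the QP via
 * mlx5_core_page_fault_resume().
 */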
void mlx5_eq_pagefault(struct mlx5_core_dev *dev, struct mlx5_eqe *eqe)
{
        struct mlx5_eqe_page_fault *pf_eqe = &eqe->data.page_fault;
        int qpn = be32_to_cpu(pf_eqe->flags_qpn) & MLX5_QPN_MASK;
        struct mlx5_core_rsc_common *common = mlx5_get_rsc(dev, qpn);
        struct mlx5_core_qp *qp;
        struct mlx5_pagefault pfault;

        if (!common) {
                mlx5_core_warn(dev, "ODP event for non-existent QP %06x\n",
                               qpn);
                return;
        }
        /* Only dereference common once the lookup is known to have
         * succeeded; container_of() on NULL is not guaranteed to be NULL.
         */
        qp = container_of(common, struct mlx5_core_qp, common);

        pfault.event_subtype = eqe->sub_type;
        pfault.flags = (be32_to_cpu(pf_eqe->flags_qpn) >> MLX5_QPN_BITS) &
                (MLX5_PFAULT_REQUESTOR | MLX5_PFAULT_WRITE | MLX5_PFAULT_RDMA);
        pfault.bytes_committed = be32_to_cpu(pf_eqe->bytes_committed);

        mlx5_core_dbg(dev,
                      "PAGE_FAULT: subtype: 0x%02x, flags: 0x%02x\n",
                      eqe->sub_type, pfault.flags);

        switch (eqe->sub_type) {
        case MLX5_PFAULT_SUBTYPE_RDMA:
                /* RDMA based event */
                pfault.rdma.r_key =
                        be32_to_cpu(pf_eqe->rdma.r_key);
                pfault.rdma.packet_size =
                        be16_to_cpu(pf_eqe->rdma.packet_length);
                pfault.rdma.rdma_op_len =
                        be32_to_cpu(pf_eqe->rdma.rdma_op_len);
                pfault.rdma.rdma_va =
                        be64_to_cpu(pf_eqe->rdma.rdma_va);
                mlx5_core_dbg(dev,
                              "PAGE_FAULT: qpn: 0x%06x, r_key: 0x%08x\n",
                              qpn, pfault.rdma.r_key);
                mlx5_core_dbg(dev,
                              "PAGE_FAULT: rdma_op_len: 0x%08x\n",
                              pfault.rdma.rdma_op_len);
                mlx5_core_dbg(dev,
                              "PAGE_FAULT: rdma_va: 0x%016llx\n",
                              pfault.rdma.rdma_va);
                mlx5_core_dbg(dev,
                              "PAGE_FAULT: bytes_committed: 0x%06x\n",
                              pfault.bytes_committed);
                break;

        case MLX5_PFAULT_SUBTYPE_WQE:
                /* WQE based event */
                pfault.wqe.wqe_index =
                        be16_to_cpu(pf_eqe->wqe.wqe_index);
                pfault.wqe.packet_size =
                        be16_to_cpu(pf_eqe->wqe.packet_length);
                mlx5_core_dbg(dev,
                              "PAGE_FAULT: qpn: 0x%06x, wqe_index: 0x%04x\n",
                              qpn, pfault.wqe.wqe_index);
                mlx5_core_dbg(dev,
                              "PAGE_FAULT: bytes_committed: 0x%06x\n",
                              pfault.bytes_committed);
                break;

        default:
                mlx5_core_warn(dev,
                               "Unsupported page fault event sub-type: 0x%02hhx, QP %06x\n",
                               eqe->sub_type, qpn);
                /* Unsupported page faults should still be resolved by the
                 * page fault handler
                 */
        }

        if (qp->pfault_handler) {
                qp->pfault_handler(qp, &pfault);
        } else {
                mlx5_core_err(dev,
                              "ODP event for QP %08x, without a fault handler in QP\n",
                              qpn);
                /* Page fault will remain unresolved. QP will hang until it is
                 * destroyed
                 */
        }

        mlx5_core_put_rsc(common);
}
#endif

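/* Register a freshly created QP/RQ/SQ for async event dispatch. The key
 * is the queue number with the resource type encoded in the bits above
 * MLX5_USER_INDEX_LEN, so all three resource types share one radix tree.
 */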
static int create_qprqsq_common(struct mlx5_core_dev *dev,
                                struct mlx5_core_qp *qp,
                                int rsc_type)
{
        struct mlx5_qp_table *table = &dev->priv.qp_table;
        int err;

        qp->common.res = rsc_type;
        spin_lock_irq(&table->lock);
        err = radix_tree_insert(&table->tree,
                                qp->qpn | (rsc_type << MLX5_USER_INDEX_LEN),
                                qp);
        spin_unlock_irq(&table->lock);
        if (err)
                return err;

        atomic_set(&qp->common.refcount, 1);
        init_completion(&qp->common.free);
        qp->pid = current->pid;

        return 0;
}

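/* Unregister a QP/RQ/SQ and wait until every outstanding reference
 * (e.g. a concurrently running event handler) has been dropped.
 */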
static void destroy_qprqsq_common(struct mlx5_core_dev *dev,
                                  struct mlx5_core_qp *qp)
{
        struct mlx5_qp_table *table = &dev->priv.qp_table;
        unsigned long flags;

        spin_lock_irqsave(&table->lock, flags);
        radix_tree_delete(&table->tree,
                          qp->qpn | (qp->common.res << MLX5_USER_INDEX_LEN));
        spin_unlock_irqrestore(&table->lock, flags);
        mlx5_core_put_rsc((struct mlx5_core_rsc_common *)qp);
        wait_for_completion(&qp->common.free);
}

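/* Create a QP in firmware via CREATE_QP, then register it for event
 * dispatch and debugfs. If registration fails, the QP is destroyed
 * again so driver and firmware state stay in sync.
 */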
int mlx5_core_create_qp(struct mlx5_core_dev *dev,
                        struct mlx5_core_qp *qp,
                        struct mlx5_create_qp_mbox_in *in,
                        int inlen)
{
        struct mlx5_create_qp_mbox_out out;
        struct mlx5_destroy_qp_mbox_in din;
        struct mlx5_destroy_qp_mbox_out dout;
        int err;

        memset(&out, 0, sizeof(out));
        in->hdr.opcode = cpu_to_be16(MLX5_CMD_OP_CREATE_QP);

        err = mlx5_cmd_exec(dev, in, inlen, &out, sizeof(out));
        if (err) {
                mlx5_core_warn(dev, "ret %d\n", err);
                return err;
        }

        if (out.hdr.status) {
                mlx5_core_warn(dev, "current num of QPs 0x%x\n",
                               atomic_read(&dev->num_qps));
                return mlx5_cmd_status_to_err(&out.hdr);
        }

        qp->qpn = be32_to_cpu(out.qpn) & 0xffffff;
        mlx5_core_dbg(dev, "qpn = 0x%x\n", qp->qpn);

        err = create_qprqsq_common(dev, qp, MLX5_RES_QP);
        if (err)
                goto err_cmd;

        err = mlx5_debug_qp_add(dev, qp);
        if (err)
                mlx5_core_dbg(dev, "failed adding QP 0x%x to debug file system\n",
                              qp->qpn);

        atomic_inc(&dev->num_qps);

        return 0;

err_cmd:
        memset(&din, 0, sizeof(din));
        memset(&dout, 0, sizeof(dout));
        din.hdr.opcode = cpu_to_be16(MLX5_CMD_OP_DESTROY_QP);
        din.qpn = cpu_to_be32(qp->qpn);
        /* use the destroy mailbox, not the create one, for the output */
        mlx5_cmd_exec(dev, &din, sizeof(din), &dout, sizeof(dout));

        return err;
}
EXPORT_SYMBOL_GPL(mlx5_core_create_qp);

int mlx5_core_destroy_qp(struct mlx5_core_dev *dev,
                         struct mlx5_core_qp *qp)
{
        struct mlx5_destroy_qp_mbox_in in;
        struct mlx5_destroy_qp_mbox_out out;
        int err;

        mlx5_debug_qp_remove(dev, qp);

        destroy_qprqsq_common(dev, qp);

        memset(&in, 0, sizeof(in));
        memset(&out, 0, sizeof(out));
        in.hdr.opcode = cpu_to_be16(MLX5_CMD_OP_DESTROY_QP);
        in.qpn = cpu_to_be32(qp->qpn);
        err = mlx5_cmd_exec(dev, &in, sizeof(in), &out, sizeof(out));
        if (err)
                return err;

        if (out.hdr.status)
                return mlx5_cmd_status_to_err(&out.hdr);

        atomic_dec(&dev->num_qps);
        return 0;
}
EXPORT_SYMBOL_GPL(mlx5_core_destroy_qp);

int mlx5_core_qp_modify(struct mlx5_core_dev *dev, u16 operation,
                        struct mlx5_modify_qp_mbox_in *in, int sqd_event,
                        struct mlx5_core_qp *qp)
{
        struct mlx5_modify_qp_mbox_out out;
        int err;

        memset(&out, 0, sizeof(out));
        in->hdr.opcode = cpu_to_be16(operation);
        in->qpn = cpu_to_be32(qp->qpn);
        err = mlx5_cmd_exec(dev, in, sizeof(*in), &out, sizeof(out));
        if (err)
                return err;

        return mlx5_cmd_status_to_err(&out.hdr);
}
EXPORT_SYMBOL_GPL(mlx5_core_qp_modify);

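/* One-time setup/teardown of the per-device resource table. The radix
 * tree uses GFP_ATOMIC so nodes can be allocated while the table
 * spinlock is held during insertion.
 */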
void mlx5_init_qp_table(struct mlx5_core_dev *dev)
{
        struct mlx5_qp_table *table = &dev->priv.qp_table;

        memset(table, 0, sizeof(*table));
        spin_lock_init(&table->lock);
        INIT_RADIX_TREE(&table->tree, GFP_ATOMIC);
        mlx5_qp_debugfs_init(dev);
}

void mlx5_cleanup_qp_table(struct mlx5_core_dev *dev)
{
        mlx5_qp_debugfs_cleanup(dev);
}

int mlx5_core_qp_query(struct mlx5_core_dev *dev, struct mlx5_core_qp *qp,
                       struct mlx5_query_qp_mbox_out *out, int outlen)
{
        struct mlx5_query_qp_mbox_in in;
        int err;

        memset(&in, 0, sizeof(in));
        memset(out, 0, outlen);
        in.hdr.opcode = cpu_to_be16(MLX5_CMD_OP_QUERY_QP);
        in.qpn = cpu_to_be32(qp->qpn);
        err = mlx5_cmd_exec(dev, &in, sizeof(in), out, outlen);
        if (err)
                return err;

        if (out->hdr.status)
                return mlx5_cmd_status_to_err(&out->hdr);

        return 0;
}
EXPORT_SYMBOL_GPL(mlx5_core_qp_query);

int mlx5_core_xrcd_alloc(struct mlx5_core_dev *dev, u32 *xrcdn)
{
        struct mlx5_alloc_xrcd_mbox_in in;
        struct mlx5_alloc_xrcd_mbox_out out;
        int err;

        memset(&in, 0, sizeof(in));
        memset(&out, 0, sizeof(out));
        in.hdr.opcode = cpu_to_be16(MLX5_CMD_OP_ALLOC_XRCD);
        err = mlx5_cmd_exec(dev, &in, sizeof(in), &out, sizeof(out));
        if (err)
                return err;

        if (out.hdr.status)
                err = mlx5_cmd_status_to_err(&out.hdr);
        else
                *xrcdn = be32_to_cpu(out.xrcdn);

        return err;
}
EXPORT_SYMBOL_GPL(mlx5_core_xrcd_alloc);

int mlx5_core_xrcd_dealloc(struct mlx5_core_dev *dev, u32 xrcdn)
{
        struct mlx5_dealloc_xrcd_mbox_in in;
        struct mlx5_dealloc_xrcd_mbox_out out;
        int err;

        memset(&in, 0, sizeof(in));
        memset(&out, 0, sizeof(out));
        in.hdr.opcode = cpu_to_be16(MLX5_CMD_OP_DEALLOC_XRCD);
        in.xrcdn = cpu_to_be32(xrcdn);
        err = mlx5_cmd_exec(dev, &in, sizeof(in), &out, sizeof(out));
        if (err)
                return err;

        if (out.hdr.status)
                err = mlx5_cmd_status_to_err(&out.hdr);

        return err;
}
EXPORT_SYMBOL_GPL(mlx5_core_xrcd_dealloc);

#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
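/* Tell firmware that a page fault on the given QP has been handled.
 * Only the REQUESTOR/WRITE/RDMA bits of 'flags' are honoured; 'error'
 * additionally sets MLX5_PAGE_FAULT_RESUME_ERROR to report that the
 * fault could not be resolved.
 */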
int mlx5_core_page_fault_resume(struct mlx5_core_dev *dev, u32 qpn,
                                u8 flags, int error)
{
        struct mlx5_page_fault_resume_mbox_in in;
        struct mlx5_page_fault_resume_mbox_out out;
        int err;

        memset(&in, 0, sizeof(in));
        memset(&out, 0, sizeof(out));
        in.hdr.opcode = cpu_to_be16(MLX5_CMD_OP_PAGE_FAULT_RESUME);
        in.hdr.opmod = 0;
        flags &= (MLX5_PAGE_FAULT_RESUME_REQUESTOR |
                  MLX5_PAGE_FAULT_RESUME_WRITE     |
                  MLX5_PAGE_FAULT_RESUME_RDMA);
        flags |= (error ? MLX5_PAGE_FAULT_RESUME_ERROR : 0);
        in.flags_qpn = cpu_to_be32((qpn & MLX5_QPN_MASK) |
                                   (flags << MLX5_QPN_BITS));
        err = mlx5_cmd_exec(dev, &in, sizeof(in), &out, sizeof(out));
        if (err)
                return err;

        if (out.hdr.status)
                err = mlx5_cmd_status_to_err(&out.hdr);

        return err;
}
EXPORT_SYMBOL_GPL(mlx5_core_page_fault_resume);
#endif

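/* The *_tracked() helpers below pair transport-object creation and
 * teardown with (un)registration in the common resource table, so RQs
 * and SQs get async event dispatch just like QPs.
 */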
int mlx5_core_create_rq_tracked(struct mlx5_core_dev *dev, u32 *in, int inlen,
                                struct mlx5_core_qp *rq)
{
        int err;
        u32 rqn;

        err = mlx5_core_create_rq(dev, in, inlen, &rqn);
        if (err)
                return err;

        rq->qpn = rqn;
        err = create_qprqsq_common(dev, rq, MLX5_RES_RQ);
        if (err)
                goto err_destroy_rq;

        return 0;

err_destroy_rq:
        mlx5_core_destroy_rq(dev, rq->qpn);

        return err;
}
EXPORT_SYMBOL(mlx5_core_create_rq_tracked);

void mlx5_core_destroy_rq_tracked(struct mlx5_core_dev *dev,
                                  struct mlx5_core_qp *rq)
{
        destroy_qprqsq_common(dev, rq);
        mlx5_core_destroy_rq(dev, rq->qpn);
}
EXPORT_SYMBOL(mlx5_core_destroy_rq_tracked);

int mlx5_core_create_sq_tracked(struct mlx5_core_dev *dev, u32 *in, int inlen,
                                struct mlx5_core_qp *sq)
{
        int err;
        u32 sqn;

        err = mlx5_core_create_sq(dev, in, inlen, &sqn);
        if (err)
                return err;

        sq->qpn = sqn;
        err = create_qprqsq_common(dev, sq, MLX5_RES_SQ);
        if (err)
                goto err_destroy_sq;

        return 0;

err_destroy_sq:
        mlx5_core_destroy_sq(dev, sq->qpn);

        return err;
}
EXPORT_SYMBOL(mlx5_core_create_sq_tracked);

void mlx5_core_destroy_sq_tracked(struct mlx5_core_dev *dev,
                                  struct mlx5_core_qp *sq)
{
        destroy_qprqsq_common(dev, sq);
        mlx5_core_destroy_sq(dev, sq->qpn);
}
EXPORT_SYMBOL(mlx5_core_destroy_sq_tracked);