linux/drivers/infiniband/hw/mlx5/cq.c
/*
 * Copyright (c) 2013-2015, Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/kref.h>
#include <rdma/ib_umem.h>
#include <rdma/ib_user_verbs.h>
#include "mlx5_ib.h"
#include "user.h"

static void mlx5_ib_cq_comp(struct mlx5_core_cq *cq)
{
	struct ib_cq *ibcq = &to_mibcq(cq)->ibcq;

	ibcq->comp_handler(ibcq, ibcq->cq_context);
}

static void mlx5_ib_cq_event(struct mlx5_core_cq *mcq, enum mlx5_event type)
{
	struct mlx5_ib_cq *cq = container_of(mcq, struct mlx5_ib_cq, mcq);
	struct mlx5_ib_dev *dev = to_mdev(cq->ibcq.device);
	struct ib_cq *ibcq = &cq->ibcq;
	struct ib_event event;

	if (type != MLX5_EVENT_TYPE_CQ_ERROR) {
		mlx5_ib_warn(dev, "Unexpected event type %d on CQ %06x\n",
			     type, mcq->cqn);
		return;
	}

	if (ibcq->event_handler) {
		event.device     = &dev->ib_dev;
		event.event      = IB_EVENT_CQ_ERR;
		event.element.cq = ibcq;
		ibcq->event_handler(&event, ibcq->cq_context);
	}
}

static void *get_cqe_from_buf(struct mlx5_ib_cq_buf *buf, int n, int size)
{
	return mlx5_buf_offset(&buf->buf, n * size);
}

static void *get_cqe(struct mlx5_ib_cq *cq, int n)
{
	return get_cqe_from_buf(&cq->buf, n, cq->mcq.cqe_sz);
}

static u8 sw_ownership_bit(int n, int nent)
{
	return (n & nent) ? 1 : 0;
}

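/*
 * A CQE is in software ownership when its opcode is not INVALID and its
 * owner bit matches the parity of the consumer index's wrap count
 * (cq->ibcq.cqe is nent - 1, so n & (cqe + 1) isolates the wrap bit).
 * Hardware toggles the owner bit on each pass around the ring, which is
 * what lets software detect new entries without a shared head pointer.
 */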
static void *get_sw_cqe(struct mlx5_ib_cq *cq, int n)
{
	void *cqe = get_cqe(cq, n & cq->ibcq.cqe);
	struct mlx5_cqe64 *cqe64;

	cqe64 = (cq->mcq.cqe_sz == 64) ? cqe : cqe + 64;

	if (likely((cqe64->op_own) >> 4 != MLX5_CQE_INVALID) &&
	    !((cqe64->op_own & MLX5_CQE_OWNER_MASK) ^ !!(n & (cq->ibcq.cqe + 1)))) {
		return cqe;
	} else {
		return NULL;
	}
}

static void *next_cqe_sw(struct mlx5_ib_cq *cq)
{
	return get_sw_cqe(cq, cq->mcq.cons_index);
}

static enum ib_wc_opcode get_umr_comp(struct mlx5_ib_wq *wq, int idx)
{
	switch (wq->wr_data[idx]) {
	case MLX5_IB_WR_UMR:
		return 0;

	case IB_WR_LOCAL_INV:
		return IB_WC_LOCAL_INV;

	case IB_WR_FAST_REG_MR:
		return IB_WC_FAST_REG_MR;

	default:
		pr_warn("unknown completion status\n");
		return 0;
	}
}

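/*
 * Translate a hardware send-completion opcode into an ib_wc opcode.  The
 * *_IMM cases intentionally fall through to their non-IMM counterparts
 * after setting IB_WC_WITH_IMM, so each pair shares the base opcode.
 */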
static void handle_good_req(struct ib_wc *wc, struct mlx5_cqe64 *cqe,
			    struct mlx5_ib_wq *wq, int idx)
{
	wc->wc_flags = 0;
	switch (be32_to_cpu(cqe->sop_drop_qpn) >> 24) {
	case MLX5_OPCODE_RDMA_WRITE_IMM:
		wc->wc_flags |= IB_WC_WITH_IMM;
		/* fall through */
	case MLX5_OPCODE_RDMA_WRITE:
		wc->opcode    = IB_WC_RDMA_WRITE;
		break;
	case MLX5_OPCODE_SEND_IMM:
		wc->wc_flags |= IB_WC_WITH_IMM;
		/* fall through */
	case MLX5_OPCODE_SEND:
	case MLX5_OPCODE_SEND_INVAL:
		wc->opcode    = IB_WC_SEND;
		break;
	case MLX5_OPCODE_RDMA_READ:
		wc->opcode    = IB_WC_RDMA_READ;
		wc->byte_len  = be32_to_cpu(cqe->byte_cnt);
		break;
	case MLX5_OPCODE_ATOMIC_CS:
		wc->opcode    = IB_WC_COMP_SWAP;
		wc->byte_len  = 8;
		break;
	case MLX5_OPCODE_ATOMIC_FA:
		wc->opcode    = IB_WC_FETCH_ADD;
		wc->byte_len  = 8;
		break;
	case MLX5_OPCODE_ATOMIC_MASKED_CS:
		wc->opcode    = IB_WC_MASKED_COMP_SWAP;
		wc->byte_len  = 8;
		break;
	case MLX5_OPCODE_ATOMIC_MASKED_FA:
		wc->opcode    = IB_WC_MASKED_FETCH_ADD;
		wc->byte_len  = 8;
		break;
	case MLX5_OPCODE_BIND_MW:
		wc->opcode    = IB_WC_BIND_MW;
		break;
	case MLX5_OPCODE_UMR:
		wc->opcode = get_umr_comp(wq, idx);
		break;
	}
}

enum {
	MLX5_GRH_IN_BUFFER = 1,
	MLX5_GRH_IN_CQE    = 2,
};

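/*
 * Fill in a work completion for a receive-side CQE.  For SRQ/XRC the WQE
 * index comes from the CQE's wqe_counter and the SRQ WQE is recycled
 * immediately; for an ordinary RQ the entry at the ring tail is consumed.
 * The XRC path looks the SRQ up by the srqn in the CQE, since an XRC
 * target QP has no SRQ of its own; the reference taken on the core SRQ
 * is dropped once the WQE has been freed.
 */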
static void handle_responder(struct ib_wc *wc, struct mlx5_cqe64 *cqe,
			     struct mlx5_ib_qp *qp)
{
	struct mlx5_ib_dev *dev = to_mdev(qp->ibqp.device);
	struct mlx5_ib_srq *srq;
	struct mlx5_ib_wq *wq;
	u16 wqe_ctr;
	u8 g;

	if (qp->ibqp.srq || qp->ibqp.xrcd) {
		struct mlx5_core_srq *msrq = NULL;

		if (qp->ibqp.xrcd) {
			msrq = mlx5_core_get_srq(dev->mdev,
						 be32_to_cpu(cqe->srqn));
			srq = to_mibsrq(msrq);
		} else {
			srq = to_msrq(qp->ibqp.srq);
		}
		if (srq) {
			wqe_ctr = be16_to_cpu(cqe->wqe_counter);
			wc->wr_id = srq->wrid[wqe_ctr];
			mlx5_ib_free_srq_wqe(srq, wqe_ctr);
			if (msrq && atomic_dec_and_test(&msrq->refcount))
				complete(&msrq->free);
		}
	} else {
		wq        = &qp->rq;
		wc->wr_id = wq->wrid[wq->tail & (wq->wqe_cnt - 1)];
		++wq->tail;
	}
	wc->byte_len = be32_to_cpu(cqe->byte_cnt);

	switch (cqe->op_own >> 4) {
	case MLX5_CQE_RESP_WR_IMM:
		wc->opcode      = IB_WC_RECV_RDMA_WITH_IMM;
		wc->wc_flags    = IB_WC_WITH_IMM;
		wc->ex.imm_data = cqe->imm_inval_pkey;
		break;
	case MLX5_CQE_RESP_SEND:
		wc->opcode   = IB_WC_RECV;
		wc->wc_flags = 0;
		break;
	case MLX5_CQE_RESP_SEND_IMM:
		wc->opcode      = IB_WC_RECV;
		wc->wc_flags    = IB_WC_WITH_IMM;
		wc->ex.imm_data = cqe->imm_inval_pkey;
		break;
	case MLX5_CQE_RESP_SEND_INV:
		wc->opcode      = IB_WC_RECV;
		wc->wc_flags    = IB_WC_WITH_INVALIDATE;
		wc->ex.invalidate_rkey = be32_to_cpu(cqe->imm_inval_pkey);
		break;
	}
	wc->slid           = be16_to_cpu(cqe->slid);
	wc->sl             = (be32_to_cpu(cqe->flags_rqpn) >> 24) & 0xf;
	wc->src_qp         = be32_to_cpu(cqe->flags_rqpn) & 0xffffff;
	wc->dlid_path_bits = cqe->ml_path;
	g = (be32_to_cpu(cqe->flags_rqpn) >> 28) & 3;
	wc->wc_flags |= g ? IB_WC_GRH : 0;
	wc->pkey_index     = be32_to_cpu(cqe->imm_inval_pkey) & 0xffff;
}

static void dump_cqe(struct mlx5_ib_dev *dev, struct mlx5_err_cqe *cqe)
{
	__be32 *p = (__be32 *)cqe;
	int i;

	mlx5_ib_warn(dev, "dump error cqe\n");
	for (i = 0; i < sizeof(*cqe) / 16; i++, p += 4)
		pr_info("%08x %08x %08x %08x\n", be32_to_cpu(p[0]),
			be32_to_cpu(p[1]), be32_to_cpu(p[2]),
			be32_to_cpu(p[3]));
}

static void mlx5_handle_error_cqe(struct mlx5_ib_dev *dev,
				  struct mlx5_err_cqe *cqe,
				  struct ib_wc *wc)
{
	int dump = 1;

	switch (cqe->syndrome) {
	case MLX5_CQE_SYNDROME_LOCAL_LENGTH_ERR:
		wc->status = IB_WC_LOC_LEN_ERR;
		break;
	case MLX5_CQE_SYNDROME_LOCAL_QP_OP_ERR:
		wc->status = IB_WC_LOC_QP_OP_ERR;
		break;
	case MLX5_CQE_SYNDROME_LOCAL_PROT_ERR:
		wc->status = IB_WC_LOC_PROT_ERR;
		break;
	case MLX5_CQE_SYNDROME_WR_FLUSH_ERR:
		dump = 0;
		wc->status = IB_WC_WR_FLUSH_ERR;
		break;
	case MLX5_CQE_SYNDROME_MW_BIND_ERR:
		wc->status = IB_WC_MW_BIND_ERR;
		break;
	case MLX5_CQE_SYNDROME_BAD_RESP_ERR:
		wc->status = IB_WC_BAD_RESP_ERR;
		break;
	case MLX5_CQE_SYNDROME_LOCAL_ACCESS_ERR:
		wc->status = IB_WC_LOC_ACCESS_ERR;
		break;
	case MLX5_CQE_SYNDROME_REMOTE_INVAL_REQ_ERR:
		wc->status = IB_WC_REM_INV_REQ_ERR;
		break;
	case MLX5_CQE_SYNDROME_REMOTE_ACCESS_ERR:
		wc->status = IB_WC_REM_ACCESS_ERR;
		break;
	case MLX5_CQE_SYNDROME_REMOTE_OP_ERR:
		wc->status = IB_WC_REM_OP_ERR;
		break;
	case MLX5_CQE_SYNDROME_TRANSPORT_RETRY_EXC_ERR:
		wc->status = IB_WC_RETRY_EXC_ERR;
		dump = 0;
		break;
	case MLX5_CQE_SYNDROME_RNR_RETRY_EXC_ERR:
		wc->status = IB_WC_RNR_RETRY_EXC_ERR;
		dump = 0;
		break;
	case MLX5_CQE_SYNDROME_REMOTE_ABORTED_ERR:
		wc->status = IB_WC_REM_ABORT_ERR;
		break;
	default:
		wc->status = IB_WC_GENERAL_ERR;
		break;
	}

	wc->vendor_err = cqe->vendor_err_synd;
	if (dump)
		dump_cqe(dev, cqe);
}

static int is_atomic_response(struct mlx5_ib_qp *qp, uint16_t idx)
{
	/* TBD: waiting decision */
	return 0;
}

static void *mlx5_get_atomic_laddr(struct mlx5_ib_qp *qp, uint16_t idx)
{
	struct mlx5_wqe_data_seg *dpseg;
	void *addr;

	dpseg = mlx5_get_send_wqe(qp, idx) + sizeof(struct mlx5_wqe_ctrl_seg) +
		sizeof(struct mlx5_wqe_raddr_seg) +
		sizeof(struct mlx5_wqe_atomic_seg);
	addr = (void *)(unsigned long)be64_to_cpu(dpseg->addr);
	return addr;
}

static void handle_atomic(struct mlx5_ib_qp *qp, struct mlx5_cqe64 *cqe64,
			  uint16_t idx)
{
	void *addr;
	int byte_count;
	int i;

	if (!is_atomic_response(qp, idx))
		return;

	byte_count = be32_to_cpu(cqe64->byte_cnt);
	addr = mlx5_get_atomic_laddr(qp, idx);

	if (byte_count == 4) {
		*(uint32_t *)addr = be32_to_cpu(*((__be32 *)addr));
	} else {
		for (i = 0; i < byte_count; i += 8) {
			*(uint64_t *)addr = be64_to_cpu(*((__be64 *)addr));
			addr += 8;
		}
	}

	return;
}

static void handle_atomics(struct mlx5_ib_qp *qp, struct mlx5_cqe64 *cqe64,
			   u16 tail, u16 head)
{
	u16 idx;

	do {
		idx = tail & (qp->sq.wqe_cnt - 1);
		handle_atomic(qp, cqe64, idx);
		if (idx == head)
			break;

		tail = qp->sq.w_list[idx].next;
	} while (1);
	tail = qp->sq.w_list[idx].next;
	qp->sq.last_poll = tail;
}

static void free_cq_buf(struct mlx5_ib_dev *dev, struct mlx5_ib_cq_buf *buf)
{
	mlx5_buf_free(dev->mdev, &buf->buf);
}

static void get_sig_err_item(struct mlx5_sig_err_cqe *cqe,
			     struct ib_sig_err *item)
{
	u16 syndrome = be16_to_cpu(cqe->syndrome);

#define GUARD_ERR   (1 << 13)
#define APPTAG_ERR  (1 << 12)
#define REFTAG_ERR  (1 << 11)

	if (syndrome & GUARD_ERR) {
		item->err_type = IB_SIG_BAD_GUARD;
		item->expected = be32_to_cpu(cqe->expected_trans_sig) >> 16;
		item->actual = be32_to_cpu(cqe->actual_trans_sig) >> 16;
	} else if (syndrome & REFTAG_ERR) {
		item->err_type = IB_SIG_BAD_REFTAG;
		item->expected = be32_to_cpu(cqe->expected_reftag);
		item->actual = be32_to_cpu(cqe->actual_reftag);
	} else if (syndrome & APPTAG_ERR) {
		item->err_type = IB_SIG_BAD_APPTAG;
		item->expected = be32_to_cpu(cqe->expected_trans_sig) & 0xffff;
		item->actual = be32_to_cpu(cqe->actual_trans_sig) & 0xffff;
	} else {
		pr_err("Got signature completion error with bad syndrome %04x\n",
		       syndrome);
	}

	item->sig_err_offset = be64_to_cpu(cqe->err_offset);
	item->key = be32_to_cpu(cqe->mkey);
}

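/*
 * Consume one CQE.  Returns 0 if a work completion was filled in and
 * -EAGAIN when the CQ is empty.  *cur_qp caches the QP of the previous
 * CQE so that back-to-back completions on the same QP skip the QP table
 * lookup.  Resize and signature-error CQEs are handled internally and
 * polling continues ("repoll") without reporting them to the caller.
 */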
static int mlx5_poll_one(struct mlx5_ib_cq *cq,
			 struct mlx5_ib_qp **cur_qp,
			 struct ib_wc *wc)
{
	struct mlx5_ib_dev *dev = to_mdev(cq->ibcq.device);
	struct mlx5_err_cqe *err_cqe;
	struct mlx5_cqe64 *cqe64;
	struct mlx5_core_qp *mqp;
	struct mlx5_ib_wq *wq;
	struct mlx5_sig_err_cqe *sig_err_cqe;
	struct mlx5_core_mr *mmr;
	struct mlx5_ib_mr *mr;
	uint8_t opcode;
	uint32_t qpn;
	u16 wqe_ctr;
	void *cqe;
	int idx;

repoll:
	cqe = next_cqe_sw(cq);
	if (!cqe)
		return -EAGAIN;

	cqe64 = (cq->mcq.cqe_sz == 64) ? cqe : cqe + 64;

	++cq->mcq.cons_index;

	/* Make sure we read CQ entry contents after we've checked the
	 * ownership bit.
	 */
	rmb();

	opcode = cqe64->op_own >> 4;
	if (unlikely(opcode == MLX5_CQE_RESIZE_CQ)) {
		if (likely(cq->resize_buf)) {
			free_cq_buf(dev, &cq->buf);
			cq->buf = *cq->resize_buf;
			kfree(cq->resize_buf);
			cq->resize_buf = NULL;
			goto repoll;
		} else {
			mlx5_ib_warn(dev, "unexpected resize cqe\n");
		}
	}

	qpn = ntohl(cqe64->sop_drop_qpn) & 0xffffff;
	if (!*cur_qp || (qpn != (*cur_qp)->ibqp.qp_num)) {
		/* We do not have to take the QP table lock here,
		 * because CQs will be locked while QPs are removed
		 * from the table.
		 */
		mqp = __mlx5_qp_lookup(dev->mdev, qpn);
		if (unlikely(!mqp)) {
			mlx5_ib_warn(dev, "CQE@CQ %06x for unknown QPN %6x\n",
				     cq->mcq.cqn, qpn);
			return -EINVAL;
		}

		*cur_qp = to_mibqp(mqp);
	}

	wc->qp  = &(*cur_qp)->ibqp;
	switch (opcode) {
	case MLX5_CQE_REQ:
		wq = &(*cur_qp)->sq;
		wqe_ctr = be16_to_cpu(cqe64->wqe_counter);
		idx = wqe_ctr & (wq->wqe_cnt - 1);
		handle_good_req(wc, cqe64, wq, idx);
		handle_atomics(*cur_qp, cqe64, wq->last_poll, idx);
		wc->wr_id = wq->wrid[idx];
		wq->tail = wq->wqe_head[idx] + 1;
		wc->status = IB_WC_SUCCESS;
		break;
	case MLX5_CQE_RESP_WR_IMM:
	case MLX5_CQE_RESP_SEND:
	case MLX5_CQE_RESP_SEND_IMM:
	case MLX5_CQE_RESP_SEND_INV:
		handle_responder(wc, cqe64, *cur_qp);
		wc->status = IB_WC_SUCCESS;
		break;
	case MLX5_CQE_RESIZE_CQ:
		break;
	case MLX5_CQE_REQ_ERR:
	case MLX5_CQE_RESP_ERR:
		err_cqe = (struct mlx5_err_cqe *)cqe64;
		mlx5_handle_error_cqe(dev, err_cqe, wc);
		mlx5_ib_dbg(dev, "%s error cqe on cqn 0x%x:\n",
			    opcode == MLX5_CQE_REQ_ERR ?
			    "Requestor" : "Responder", cq->mcq.cqn);
		mlx5_ib_dbg(dev, "syndrome 0x%x, vendor syndrome 0x%x\n",
			    err_cqe->syndrome, err_cqe->vendor_err_synd);
		if (opcode == MLX5_CQE_REQ_ERR) {
			wq = &(*cur_qp)->sq;
			wqe_ctr = be16_to_cpu(cqe64->wqe_counter);
			idx = wqe_ctr & (wq->wqe_cnt - 1);
			wc->wr_id = wq->wrid[idx];
			wq->tail = wq->wqe_head[idx] + 1;
		} else {
			struct mlx5_ib_srq *srq;

			if ((*cur_qp)->ibqp.srq) {
				srq = to_msrq((*cur_qp)->ibqp.srq);
				wqe_ctr = be16_to_cpu(cqe64->wqe_counter);
				wc->wr_id = srq->wrid[wqe_ctr];
				mlx5_ib_free_srq_wqe(srq, wqe_ctr);
			} else {
				wq = &(*cur_qp)->rq;
				wc->wr_id = wq->wrid[wq->tail & (wq->wqe_cnt - 1)];
				++wq->tail;
			}
		}
		break;
	case MLX5_CQE_SIG_ERR:
		sig_err_cqe = (struct mlx5_sig_err_cqe *)cqe64;

		read_lock(&dev->mdev->priv.mr_table.lock);
		mmr = __mlx5_mr_lookup(dev->mdev,
				       mlx5_base_mkey(be32_to_cpu(sig_err_cqe->mkey)));
		if (unlikely(!mmr)) {
			read_unlock(&dev->mdev->priv.mr_table.lock);
			mlx5_ib_warn(dev, "CQE@CQ %06x for unknown MR %6x\n",
				     cq->mcq.cqn, be32_to_cpu(sig_err_cqe->mkey));
			return -EINVAL;
		}

		mr = to_mibmr(mmr);
		get_sig_err_item(sig_err_cqe, &mr->sig->err_item);
		mr->sig->sig_err_exists = true;
		mr->sig->sigerr_count++;

		mlx5_ib_warn(dev, "CQN: 0x%x Got SIGERR on key: 0x%x err_type %x err_offset %llx expected %x actual %x\n",
			     cq->mcq.cqn, mr->sig->err_item.key,
			     mr->sig->err_item.err_type,
			     mr->sig->err_item.sig_err_offset,
			     mr->sig->err_item.expected,
			     mr->sig->err_item.actual);

		read_unlock(&dev->mdev->priv.mr_table.lock);
		goto repoll;
	}

	return 0;
}

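/*
 * Verbs poll entry point: drain up to num_entries completions while
 * holding cq->lock with interrupts disabled, then publish the new
 * consumer index to hardware in one doorbell update.  -EAGAIN from
 * mlx5_poll_one() just means the CQ ran dry and is not an error.
 */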
int mlx5_ib_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *wc)
{
	struct mlx5_ib_cq *cq = to_mcq(ibcq);
	struct mlx5_ib_qp *cur_qp = NULL;
	unsigned long flags;
	int npolled;
	int err = 0;

	spin_lock_irqsave(&cq->lock, flags);

	for (npolled = 0; npolled < num_entries; npolled++) {
		err = mlx5_poll_one(cq, &cur_qp, wc + npolled);
		if (err)
			break;
	}

	if (npolled)
		mlx5_cq_set_ci(&cq->mcq);

	spin_unlock_irqrestore(&cq->lock, flags);

	if (err == 0 || err == -EAGAIN)
		return npolled;
	else
		return err;
}

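/*
 * Arm the CQ for the next completion event.  Writing the current consumer
 * index together with the solicited/unsolicited request type to the UAR
 * doorbell page tells hardware which CQE should trigger the next event
 * for this CQ.
 */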
int mlx5_ib_arm_cq(struct ib_cq *ibcq, enum ib_cq_notify_flags flags)
{
	struct mlx5_core_dev *mdev = to_mdev(ibcq->device)->mdev;
	void __iomem *uar_page = mdev->priv.uuari.uars[0].map;

	mlx5_cq_arm(&to_mcq(ibcq)->mcq,
		    (flags & IB_CQ_SOLICITED_MASK) == IB_CQ_SOLICITED ?
		    MLX5_CQ_DB_REQ_NOT_SOL : MLX5_CQ_DB_REQ_NOT,
		    uar_page,
		    MLX5_GET_DOORBELL_LOCK(&mdev->priv.cq_uar_lock),
		    to_mcq(ibcq)->mcq.cons_index);

	return 0;
}

static int alloc_cq_buf(struct mlx5_ib_dev *dev, struct mlx5_ib_cq_buf *buf,
			int nent, int cqe_size)
{
	int err;

	err = mlx5_buf_alloc(dev->mdev, nent * cqe_size,
			     PAGE_SIZE * 2, &buf->buf);
	if (err)
		return err;

	buf->cqe_size = cqe_size;
	buf->nent = nent;

	return 0;
}

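/*
 * User-space CQ creation: copy the create command (tolerating the older,
 * shorter ABI without the reserved field), validate the requested CQE
 * size (64 or 128 bytes), pin the user buffer, map the doorbell record,
 * and build the physical address array for the firmware command.
 */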
static int create_cq_user(struct mlx5_ib_dev *dev, struct ib_udata *udata,
			  struct ib_ucontext *context, struct mlx5_ib_cq *cq,
			  int entries, struct mlx5_create_cq_mbox_in **cqb,
			  int *cqe_size, int *index, int *inlen)
{
	struct mlx5_ib_create_cq ucmd;
	size_t ucmdlen;
	int page_shift;
	int npages;
	int ncont;
	int err;

	ucmdlen =
		(udata->inlen - sizeof(struct ib_uverbs_cmd_hdr) <
		 sizeof(ucmd)) ? (sizeof(ucmd) -
				  sizeof(ucmd.reserved)) : sizeof(ucmd);

	if (ib_copy_from_udata(&ucmd, udata, ucmdlen))
		return -EFAULT;

	if (ucmdlen == sizeof(ucmd) &&
	    ucmd.reserved != 0)
		return -EINVAL;

	if (ucmd.cqe_size != 64 && ucmd.cqe_size != 128)
		return -EINVAL;

	*cqe_size = ucmd.cqe_size;

	cq->buf.umem = ib_umem_get(context, ucmd.buf_addr,
				   entries * ucmd.cqe_size,
				   IB_ACCESS_LOCAL_WRITE, 1);
	if (IS_ERR(cq->buf.umem)) {
		err = PTR_ERR(cq->buf.umem);
		return err;
	}

	err = mlx5_ib_db_map_user(to_mucontext(context), ucmd.db_addr,
				  &cq->db);
	if (err)
		goto err_umem;

	mlx5_ib_cont_pages(cq->buf.umem, ucmd.buf_addr, &npages, &page_shift,
			   &ncont, NULL);
	mlx5_ib_dbg(dev, "addr 0x%llx, size %u, npages %d, page_shift %d, ncont %d\n",
		    ucmd.buf_addr, entries * ucmd.cqe_size, npages, page_shift, ncont);

	*inlen = sizeof(**cqb) + sizeof(*(*cqb)->pas) * ncont;
	*cqb = mlx5_vzalloc(*inlen);
	if (!*cqb) {
		err = -ENOMEM;
		goto err_db;
	}
	mlx5_ib_populate_pas(dev, cq->buf.umem, page_shift, (*cqb)->pas, 0);
	(*cqb)->ctx.log_pg_sz = page_shift - MLX5_ADAPTER_PAGE_SHIFT;

	*index = to_mucontext(context)->uuari.uars[0].index;

	return 0;

err_db:
	mlx5_ib_db_unmap_user(to_mucontext(context), &cq->db);

err_umem:
	ib_umem_release(cq->buf.umem);
	return err;
}

static void destroy_cq_user(struct mlx5_ib_cq *cq, struct ib_ucontext *context)
{
	mlx5_ib_db_unmap_user(to_mucontext(context), &cq->db);
	ib_umem_release(cq->buf.umem);
}

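/*
 * Stamp every CQE as INVALID so that, before hardware has written an
 * entry, get_sw_cqe() treats it as hardware-owned.
 */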
static void init_cq_buf(struct mlx5_ib_cq *cq, struct mlx5_ib_cq_buf *buf)
{
	int i;
	void *cqe;
	struct mlx5_cqe64 *cqe64;

	for (i = 0; i < buf->nent; i++) {
		cqe = get_cqe_from_buf(buf, i, buf->cqe_size);
		cqe64 = buf->cqe_size == 64 ? cqe : cqe + 64;
		cqe64->op_own = MLX5_CQE_INVALID << 4;
	}
}

static int create_cq_kernel(struct mlx5_ib_dev *dev, struct mlx5_ib_cq *cq,
			    int entries, int cqe_size,
			    struct mlx5_create_cq_mbox_in **cqb,
			    int *index, int *inlen)
{
	int err;

	err = mlx5_db_alloc(dev->mdev, &cq->db);
	if (err)
		return err;

	cq->mcq.set_ci_db  = cq->db.db;
	cq->mcq.arm_db     = cq->db.db + 1;
	cq->mcq.cqe_sz = cqe_size;

	err = alloc_cq_buf(dev, &cq->buf, entries, cqe_size);
	if (err)
		goto err_db;

	init_cq_buf(cq, &cq->buf);

	*inlen = sizeof(**cqb) + sizeof(*(*cqb)->pas) * cq->buf.buf.npages;
	*cqb = mlx5_vzalloc(*inlen);
	if (!*cqb) {
		err = -ENOMEM;
		goto err_buf;
	}
	mlx5_fill_page_array(&cq->buf.buf, (*cqb)->pas);

	(*cqb)->ctx.log_pg_sz = cq->buf.buf.page_shift - MLX5_ADAPTER_PAGE_SHIFT;
	*index = dev->mdev->priv.uuari.uars[0].index;

	return 0;

err_buf:
	free_cq_buf(dev, &cq->buf);

err_db:
	mlx5_db_free(dev->mdev, &cq->db);
	return err;
}

static void destroy_cq_kernel(struct mlx5_ib_dev *dev, struct mlx5_ib_cq *cq)
{
	free_cq_buf(dev, &cq->buf);
	mlx5_db_free(dev->mdev, &cq->db);
}

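/*
 * Create a CQ.  "entries" is incremented by one and rounded up to a
 * power of two for the hardware ring, while ibcq->cqe reports
 * entries - 1 back to the consumer.  User CQs are backed by pinned
 * user memory and the context's UAR; kernel CQs use driver-allocated
 * buffers, a doorbell record, and (for now) a fixed 64-byte CQE size.
 */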
struct ib_cq *mlx5_ib_create_cq(struct ib_device *ibdev, int entries,
				int vector, struct ib_ucontext *context,
				struct ib_udata *udata)
{
	struct mlx5_create_cq_mbox_in *cqb = NULL;
	struct mlx5_ib_dev *dev = to_mdev(ibdev);
	struct mlx5_ib_cq *cq;
	int uninitialized_var(index);
	int uninitialized_var(inlen);
	int cqe_size;
	int irqn;
	int eqn;
	int err;

	if (entries < 0)
		return ERR_PTR(-EINVAL);

	entries = roundup_pow_of_two(entries + 1);
	if (entries > dev->mdev->caps.gen.max_cqes)
		return ERR_PTR(-EINVAL);

	cq = kzalloc(sizeof(*cq), GFP_KERNEL);
	if (!cq)
		return ERR_PTR(-ENOMEM);

	cq->ibcq.cqe = entries - 1;
	mutex_init(&cq->resize_mutex);
	spin_lock_init(&cq->lock);
	cq->resize_buf = NULL;
	cq->resize_umem = NULL;

	if (context) {
		err = create_cq_user(dev, udata, context, cq, entries,
				     &cqb, &cqe_size, &index, &inlen);
		if (err)
			goto err_create;
	} else {
		/* for now choose 64 bytes till we have a proper interface */
		cqe_size = 64;
		err = create_cq_kernel(dev, cq, entries, cqe_size, &cqb,
				       &index, &inlen);
		if (err)
			goto err_create;
	}

	cq->cqe_size = cqe_size;
	cqb->ctx.cqe_sz_flags = cqe_sz_to_mlx_sz(cqe_size) << 5;
	cqb->ctx.log_sz_usr_page = cpu_to_be32((ilog2(entries) << 24) | index);
	err = mlx5_vector2eqn(dev->mdev, vector, &eqn, &irqn);
	if (err)
		goto err_cqb;

	cqb->ctx.c_eqn = cpu_to_be16(eqn);
	cqb->ctx.db_record_addr = cpu_to_be64(cq->db.dma);

	err = mlx5_core_create_cq(dev->mdev, &cq->mcq, cqb, inlen);
	if (err)
		goto err_cqb;

	mlx5_ib_dbg(dev, "cqn 0x%x\n", cq->mcq.cqn);
	cq->mcq.irqn = irqn;
	cq->mcq.comp  = mlx5_ib_cq_comp;
	cq->mcq.event = mlx5_ib_cq_event;

	if (context)
		if (ib_copy_to_udata(udata, &cq->mcq.cqn, sizeof(__u32))) {
			err = -EFAULT;
			goto err_cmd;
		}

	kvfree(cqb);
	return &cq->ibcq;

err_cmd:
	mlx5_core_destroy_cq(dev->mdev, &cq->mcq);

err_cqb:
	kvfree(cqb);
	if (context)
		destroy_cq_user(cq, context);
	else
		destroy_cq_kernel(dev, cq);

err_create:
	kfree(cq);

	return ERR_PTR(err);
}

int mlx5_ib_destroy_cq(struct ib_cq *cq)
{
	struct mlx5_ib_dev *dev = to_mdev(cq->device);
	struct mlx5_ib_cq *mcq = to_mcq(cq);
	struct ib_ucontext *context = NULL;

	if (cq->uobject)
		context = cq->uobject->context;

	mlx5_core_destroy_cq(dev->mdev, &mcq->mcq);
	if (context)
		destroy_cq_user(mcq, context);
	else
		destroy_cq_kernel(dev, mcq);

	kfree(mcq);

	return 0;
}

static int is_equal_rsn(struct mlx5_cqe64 *cqe64, u32 rsn)
{
	return rsn == (ntohl(cqe64->sop_drop_qpn) & 0xffffff);
}

void __mlx5_ib_cq_clean(struct mlx5_ib_cq *cq, u32 rsn, struct mlx5_ib_srq *srq)
{
	struct mlx5_cqe64 *cqe64, *dest64;
	void *cqe, *dest;
	u32 prod_index;
	int nfreed = 0;
	u8 owner_bit;

	if (!cq)
		return;

	/* First we need to find the current producer index, so we
	 * know where to start cleaning from.  It doesn't matter if HW
	 * adds new entries after this loop -- the QP we're worried
	 * about is already in RESET, so the new entries won't come
	 * from our QP and therefore don't need to be checked.
	 */
	for (prod_index = cq->mcq.cons_index; get_sw_cqe(cq, prod_index); prod_index++)
		if (prod_index == cq->mcq.cons_index + cq->ibcq.cqe)
			break;

	/* Now sweep backwards through the CQ, removing CQ entries
	 * that match our QP by copying older entries on top of them.
	 */
	while ((int) --prod_index - (int) cq->mcq.cons_index >= 0) {
		cqe = get_cqe(cq, prod_index & cq->ibcq.cqe);
		cqe64 = (cq->mcq.cqe_sz == 64) ? cqe : cqe + 64;
		if (is_equal_rsn(cqe64, rsn)) {
			if (srq && (ntohl(cqe64->srqn) & 0xffffff))
				mlx5_ib_free_srq_wqe(srq, be16_to_cpu(cqe64->wqe_counter));
			++nfreed;
		} else if (nfreed) {
			dest = get_cqe(cq, (prod_index + nfreed) & cq->ibcq.cqe);
			dest64 = (cq->mcq.cqe_sz == 64) ? dest : dest + 64;
			owner_bit = dest64->op_own & MLX5_CQE_OWNER_MASK;
			memcpy(dest, cqe, cq->mcq.cqe_sz);
			dest64->op_own = owner_bit |
				(dest64->op_own & ~MLX5_CQE_OWNER_MASK);
		}
	}

	if (nfreed) {
		cq->mcq.cons_index += nfreed;
		/* Make sure update of buffer contents is done before
		 * updating consumer index.
		 */
		wmb();
		mlx5_cq_set_ci(&cq->mcq);
	}
}

void mlx5_ib_cq_clean(struct mlx5_ib_cq *cq, u32 qpn, struct mlx5_ib_srq *srq)
{
	if (!cq)
		return;

	spin_lock_irq(&cq->lock);
	__mlx5_ib_cq_clean(cq, qpn, srq);
	spin_unlock_irq(&cq->lock);
}

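/*
 * Program CQ event moderation: hardware coalesces completion events,
 * raising one after cq_count completions or once cq_period (in device
 * time units) elapses, whichever comes first.  Both fields are selected
 * in one MODIFY_CQ command; requires the CQ_MODER firmware capability.
 */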
int mlx5_ib_modify_cq(struct ib_cq *cq, u16 cq_count, u16 cq_period)
{
	struct mlx5_modify_cq_mbox_in *in;
	struct mlx5_ib_dev *dev = to_mdev(cq->device);
	struct mlx5_ib_cq *mcq = to_mcq(cq);
	int err;
	u32 fsel;

	if (!(dev->mdev->caps.gen.flags & MLX5_DEV_CAP_FLAG_CQ_MODER))
		return -ENOSYS;

	in = kzalloc(sizeof(*in), GFP_KERNEL);
	if (!in)
		return -ENOMEM;

	in->cqn = cpu_to_be32(mcq->mcq.cqn);
	fsel = (MLX5_CQ_MODIFY_PERIOD | MLX5_CQ_MODIFY_COUNT);
	in->ctx.cq_period = cpu_to_be16(cq_period);
	in->ctx.cq_max_count = cpu_to_be16(cq_count);
	in->field_select = cpu_to_be32(fsel);
	err = mlx5_core_modify_cq(dev->mdev, &mcq->mcq, in, sizeof(*in));
	kfree(in);

	if (err)
		mlx5_ib_warn(dev, "modify cq 0x%x failed\n", mcq->mcq.cqn);

	return err;
}

static int resize_user(struct mlx5_ib_dev *dev, struct mlx5_ib_cq *cq,
		       int entries, struct ib_udata *udata, int *npas,
		       int *page_shift, int *cqe_size)
{
	struct mlx5_ib_resize_cq ucmd;
	struct ib_umem *umem;
	int err;
	int npages;
	struct ib_ucontext *context = cq->buf.umem->context;

	err = ib_copy_from_udata(&ucmd, udata, sizeof(ucmd));
	if (err)
		return err;

	if (ucmd.reserved0 || ucmd.reserved1)
		return -EINVAL;

	umem = ib_umem_get(context, ucmd.buf_addr, entries * ucmd.cqe_size,
			   IB_ACCESS_LOCAL_WRITE, 1);
	if (IS_ERR(umem)) {
		err = PTR_ERR(umem);
		return err;
	}

	mlx5_ib_cont_pages(umem, ucmd.buf_addr, &npages, page_shift,
			   npas, NULL);

	cq->resize_umem = umem;
	*cqe_size = ucmd.cqe_size;

	return 0;
}

static void un_resize_user(struct mlx5_ib_cq *cq)
{
	ib_umem_release(cq->resize_umem);
}

static int resize_kernel(struct mlx5_ib_dev *dev, struct mlx5_ib_cq *cq,
			 int entries, int cqe_size)
{
	int err;

	cq->resize_buf = kzalloc(sizeof(*cq->resize_buf), GFP_KERNEL);
	if (!cq->resize_buf)
		return -ENOMEM;

	err = alloc_cq_buf(dev, cq->resize_buf, entries, cqe_size);
	if (err)
		goto ex;

	init_cq_buf(cq, cq->resize_buf);

	return 0;

ex:
	kfree(cq->resize_buf);
	return err;
}

static void un_resize_kernel(struct mlx5_ib_dev *dev, struct mlx5_ib_cq *cq)
{
	free_cq_buf(dev, cq->resize_buf);
	cq->resize_buf = NULL;
}

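/*
 * Kernel-side resize: copy all still-valid CQEs from the old buffer into
 * the resize buffer, fixing up each destination entry's ownership bit
 * for its new ring position, until the MLX5_CQE_RESIZE_CQ marker that
 * hardware writes is found.  Wrapping all the way around without finding
 * the marker means the new buffer cannot hold the outstanding CQEs.
 */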
static int copy_resize_cqes(struct mlx5_ib_cq *cq)
{
	struct mlx5_ib_dev *dev = to_mdev(cq->ibcq.device);
	struct mlx5_cqe64 *scqe64;
	struct mlx5_cqe64 *dcqe64;
	void *start_cqe;
	void *scqe;
	void *dcqe;
	int ssize;
	int dsize;
	int i;
	u8 sw_own;

	ssize = cq->buf.cqe_size;
	dsize = cq->resize_buf->cqe_size;
	if (ssize != dsize) {
		mlx5_ib_warn(dev, "resize from different cqe size is not supported\n");
		return -EINVAL;
	}

	i = cq->mcq.cons_index;
	scqe = get_sw_cqe(cq, i);
	scqe64 = ssize == 64 ? scqe : scqe + 64;
	start_cqe = scqe;
	if (!scqe) {
		mlx5_ib_warn(dev, "expected cqe in sw ownership\n");
		return -EINVAL;
	}

	while ((scqe64->op_own >> 4) != MLX5_CQE_RESIZE_CQ) {
		dcqe = get_cqe_from_buf(cq->resize_buf,
					(i + 1) & (cq->resize_buf->nent),
					dsize);
		dcqe64 = dsize == 64 ? dcqe : dcqe + 64;
		sw_own = sw_ownership_bit(i + 1, cq->resize_buf->nent);
		memcpy(dcqe, scqe, dsize);
		dcqe64->op_own = (dcqe64->op_own & ~MLX5_CQE_OWNER_MASK) | sw_own;

		++i;
		scqe = get_sw_cqe(cq, i);
		scqe64 = ssize == 64 ? scqe : scqe + 64;
		if (!scqe) {
			mlx5_ib_warn(dev, "expected cqe in sw ownership\n");
			return -EINVAL;
		}

		if (scqe == start_cqe) {
			pr_warn("resize CQ failed to get resize CQE, CQN 0x%x\n",
				cq->mcq.cqn);
			return -ENOMEM;
		}
	}
	++cq->mcq.cons_index;
	return 0;
}

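/*
 * Resize flow: for user CQs the new buffer is pinned here and firmware
 * switches over as part of MODIFY_CQ, so the old umem can simply be
 * released.  For kernel CQs the driver must copy the outstanding CQEs
 * into the new buffer itself (copy_resize_cqes()) under cq->lock once
 * firmware has acknowledged the resize.
 */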
int mlx5_ib_resize_cq(struct ib_cq *ibcq, int entries, struct ib_udata *udata)
{
	struct mlx5_ib_dev *dev = to_mdev(ibcq->device);
	struct mlx5_ib_cq *cq = to_mcq(ibcq);
	struct mlx5_modify_cq_mbox_in *in;
	int err;
	int npas;
	int page_shift;
	int inlen;
	int uninitialized_var(cqe_size);
	unsigned long flags;

	if (!(dev->mdev->caps.gen.flags & MLX5_DEV_CAP_FLAG_RESIZE_CQ)) {
		pr_info("Firmware does not support resize CQ\n");
		return -ENOSYS;
	}

	if (entries < 1)
		return -EINVAL;

	entries = roundup_pow_of_two(entries + 1);
	if (entries > dev->mdev->caps.gen.max_cqes + 1)
		return -EINVAL;

	if (entries == ibcq->cqe + 1)
		return 0;

	mutex_lock(&cq->resize_mutex);
	if (udata) {
		err = resize_user(dev, cq, entries, udata, &npas, &page_shift,
				  &cqe_size);
	} else {
		cqe_size = 64;
		err = resize_kernel(dev, cq, entries, cqe_size);
		if (!err) {
			npas = cq->resize_buf->buf.npages;
			page_shift = cq->resize_buf->buf.page_shift;
		}
	}

	if (err)
		goto ex;

	inlen = sizeof(*in) + npas * sizeof(in->pas[0]);
	in = mlx5_vzalloc(inlen);
	if (!in) {
		err = -ENOMEM;
		goto ex_resize;
	}

	if (udata)
		mlx5_ib_populate_pas(dev, cq->resize_umem, page_shift,
				     in->pas, 0);
	else
		mlx5_fill_page_array(&cq->resize_buf->buf, in->pas);

	in->field_select = cpu_to_be32(MLX5_MODIFY_CQ_MASK_LOG_SIZE  |
				       MLX5_MODIFY_CQ_MASK_PG_OFFSET |
				       MLX5_MODIFY_CQ_MASK_PG_SIZE);
	in->ctx.log_pg_sz = page_shift - MLX5_ADAPTER_PAGE_SHIFT;
	in->ctx.cqe_sz_flags = cqe_sz_to_mlx_sz(cqe_size) << 5;
	in->ctx.page_offset = 0;
	in->ctx.log_sz_usr_page = cpu_to_be32(ilog2(entries) << 24);
	in->hdr.opmod = cpu_to_be16(MLX5_CQ_OPMOD_RESIZE);
	in->cqn = cpu_to_be32(cq->mcq.cqn);

	err = mlx5_core_modify_cq(dev->mdev, &cq->mcq, in, inlen);
	if (err)
		goto ex_alloc;

	if (udata) {
		cq->ibcq.cqe = entries - 1;
		ib_umem_release(cq->buf.umem);
		cq->buf.umem = cq->resize_umem;
		cq->resize_umem = NULL;
	} else {
		struct mlx5_ib_cq_buf tbuf;
		int resized = 0;

		spin_lock_irqsave(&cq->lock, flags);
		if (cq->resize_buf) {
			err = copy_resize_cqes(cq);
			if (!err) {
				tbuf = cq->buf;
				cq->buf = *cq->resize_buf;
				kfree(cq->resize_buf);
				cq->resize_buf = NULL;
				resized = 1;
			}
		}
		cq->ibcq.cqe = entries - 1;
		spin_unlock_irqrestore(&cq->lock, flags);
		if (resized)
			free_cq_buf(dev, &tbuf);
	}
	mutex_unlock(&cq->resize_mutex);

	kvfree(in);
	return 0;

ex_alloc:
	kvfree(in);

ex_resize:
	if (udata)
		un_resize_user(cq);
	else
		un_resize_kernel(dev, cq);
ex:
	mutex_unlock(&cq->resize_mutex);
	return err;
}

int mlx5_ib_get_cqe_size(struct mlx5_ib_dev *dev, struct ib_cq *ibcq)
{
	struct mlx5_ib_cq *cq;

	if (!ibcq)
		return 128;

	cq = to_mcq(ibcq);
	return cq->cqe_size;
}