linux/drivers/infiniband/hw/mlx5/cq.c
/*
 * Copyright (c) 2013-2015, Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/kref.h>
#include <rdma/ib_umem.h>
#include <rdma/ib_user_verbs.h>
#include <rdma/ib_cache.h>
#include "mlx5_ib.h"

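/*
 * Forward a hardware completion event on the core CQ to the consumer's
 * comp_handler registered on the ib_cq.
 */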
static void mlx5_ib_cq_comp(struct mlx5_core_cq *cq)
{
        struct ib_cq *ibcq = &to_mibcq(cq)->ibcq;

        ibcq->comp_handler(ibcq, ibcq->cq_context);
}

static void mlx5_ib_cq_event(struct mlx5_core_cq *mcq, enum mlx5_event type)
{
        struct mlx5_ib_cq *cq = container_of(mcq, struct mlx5_ib_cq, mcq);
        struct mlx5_ib_dev *dev = to_mdev(cq->ibcq.device);
        struct ib_cq *ibcq = &cq->ibcq;
        struct ib_event event;

        if (type != MLX5_EVENT_TYPE_CQ_ERROR) {
                mlx5_ib_warn(dev, "Unexpected event type %d on CQ %06x\n",
                             type, mcq->cqn);
                return;
        }

        if (ibcq->event_handler) {
                event.device     = &dev->ib_dev;
                event.event      = IB_EVENT_CQ_ERR;
                event.element.cq = ibcq;
                ibcq->event_handler(&event, ibcq->cq_context);
        }
}

static void *get_cqe_from_buf(struct mlx5_ib_cq_buf *buf, int n, int size)
{
        return mlx5_buf_offset(&buf->buf, n * size);
}

static void *get_cqe(struct mlx5_ib_cq *cq, int n)
{
        return get_cqe_from_buf(&cq->buf, n, cq->mcq.cqe_sz);
}

static u8 sw_ownership_bit(int n, int nent)
{
        return (n & nent) ? 1 : 0;
}

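/*
 * Return the CQE at index n if it is owned by software, i.e. if its
 * ownership bit matches the current pass over the (power-of-two sized)
 * CQ ring; otherwise return NULL.
 */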
static void *get_sw_cqe(struct mlx5_ib_cq *cq, int n)
{
        void *cqe = get_cqe(cq, n & cq->ibcq.cqe);
        struct mlx5_cqe64 *cqe64;

        cqe64 = (cq->mcq.cqe_sz == 64) ? cqe : cqe + 64;

        if (likely((cqe64->op_own) >> 4 != MLX5_CQE_INVALID) &&
            !((cqe64->op_own & MLX5_CQE_OWNER_MASK) ^ !!(n & (cq->ibcq.cqe + 1)))) {
                return cqe;
        } else {
                return NULL;
        }
}

static void *next_cqe_sw(struct mlx5_ib_cq *cq)
{
        return get_sw_cqe(cq, cq->mcq.cons_index);
}

static enum ib_wc_opcode get_umr_comp(struct mlx5_ib_wq *wq, int idx)
{
        switch (wq->wr_data[idx]) {
        case MLX5_IB_WR_UMR:
                return 0;

        case IB_WR_LOCAL_INV:
                return IB_WC_LOCAL_INV;

        case IB_WR_REG_MR:
                return IB_WC_REG_MR;

        default:
                pr_warn("unknown completion status\n");
                return 0;
        }
}

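/*
 * Translate the opcode of a successful requester completion into the
 * matching ib_wc opcode and fill in the opcode-specific fields.
 */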
static void handle_good_req(struct ib_wc *wc, struct mlx5_cqe64 *cqe,
                            struct mlx5_ib_wq *wq, int idx)
{
        wc->wc_flags = 0;
        switch (be32_to_cpu(cqe->sop_drop_qpn) >> 24) {
        case MLX5_OPCODE_RDMA_WRITE_IMM:
                wc->wc_flags |= IB_WC_WITH_IMM;
                /* fall through */
        case MLX5_OPCODE_RDMA_WRITE:
                wc->opcode    = IB_WC_RDMA_WRITE;
                break;
        case MLX5_OPCODE_SEND_IMM:
                wc->wc_flags |= IB_WC_WITH_IMM;
                /* fall through */
        case MLX5_OPCODE_SEND:
        case MLX5_OPCODE_SEND_INVAL:
                wc->opcode    = IB_WC_SEND;
                break;
        case MLX5_OPCODE_RDMA_READ:
                wc->opcode    = IB_WC_RDMA_READ;
                wc->byte_len  = be32_to_cpu(cqe->byte_cnt);
                break;
        case MLX5_OPCODE_ATOMIC_CS:
                wc->opcode    = IB_WC_COMP_SWAP;
                wc->byte_len  = 8;
                break;
        case MLX5_OPCODE_ATOMIC_FA:
                wc->opcode    = IB_WC_FETCH_ADD;
                wc->byte_len  = 8;
                break;
        case MLX5_OPCODE_ATOMIC_MASKED_CS:
                wc->opcode    = IB_WC_MASKED_COMP_SWAP;
                wc->byte_len  = 8;
                break;
        case MLX5_OPCODE_ATOMIC_MASKED_FA:
                wc->opcode    = IB_WC_MASKED_FETCH_ADD;
                wc->byte_len  = 8;
                break;
        case MLX5_OPCODE_UMR:
                wc->opcode = get_umr_comp(wq, idx);
                break;
        }
}

enum {
        MLX5_GRH_IN_BUFFER = 1,
        MLX5_GRH_IN_CQE    = 2,
};

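/*
 * Fill in a work completion for a receive-side CQE: recover the WR id
 * from the QP's receive queue or from the (X)SRQ, then decode the
 * opcode, immediate data and pkey index, and, on RoCE ports, the VLAN
 * and network header information.
 */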
static void handle_responder(struct ib_wc *wc, struct mlx5_cqe64 *cqe,
                             struct mlx5_ib_qp *qp)
{
        enum rdma_link_layer ll = rdma_port_get_link_layer(qp->ibqp.device, 1);
        struct mlx5_ib_dev *dev = to_mdev(qp->ibqp.device);
        struct mlx5_ib_srq *srq;
        struct mlx5_ib_wq *wq;
        u16 wqe_ctr;
        u8  roce_packet_type;
        bool vlan_present;
        u8 g;

        if (qp->ibqp.srq || qp->ibqp.xrcd) {
                struct mlx5_core_srq *msrq = NULL;

                if (qp->ibqp.xrcd) {
                        msrq = mlx5_core_get_srq(dev->mdev,
                                                 be32_to_cpu(cqe->srqn));
                        srq = to_mibsrq(msrq);
                } else {
                        srq = to_msrq(qp->ibqp.srq);
                }
                if (srq) {
                        wqe_ctr = be16_to_cpu(cqe->wqe_counter);
                        wc->wr_id = srq->wrid[wqe_ctr];
                        mlx5_ib_free_srq_wqe(srq, wqe_ctr);
                        if (msrq && atomic_dec_and_test(&msrq->refcount))
                                complete(&msrq->free);
                }
        } else {
                wq        = &qp->rq;
                wc->wr_id = wq->wrid[wq->tail & (wq->wqe_cnt - 1)];
                ++wq->tail;
        }
        wc->byte_len = be32_to_cpu(cqe->byte_cnt);

        switch (cqe->op_own >> 4) {
        case MLX5_CQE_RESP_WR_IMM:
                wc->opcode      = IB_WC_RECV_RDMA_WITH_IMM;
                wc->wc_flags    = IB_WC_WITH_IMM;
                wc->ex.imm_data = cqe->imm_inval_pkey;
                break;
        case MLX5_CQE_RESP_SEND:
                wc->opcode   = IB_WC_RECV;
                wc->wc_flags = IB_WC_IP_CSUM_OK;
                if (unlikely(!((cqe->hds_ip_ext & CQE_L3_OK) &&
                               (cqe->hds_ip_ext & CQE_L4_OK))))
                        wc->wc_flags = 0;
                break;
        case MLX5_CQE_RESP_SEND_IMM:
                wc->opcode      = IB_WC_RECV;
                wc->wc_flags    = IB_WC_WITH_IMM;
                wc->ex.imm_data = cqe->imm_inval_pkey;
                break;
        case MLX5_CQE_RESP_SEND_INV:
                wc->opcode      = IB_WC_RECV;
                wc->wc_flags    = IB_WC_WITH_INVALIDATE;
                wc->ex.invalidate_rkey = be32_to_cpu(cqe->imm_inval_pkey);
                break;
        }
        wc->slid           = be16_to_cpu(cqe->slid);
        wc->src_qp         = be32_to_cpu(cqe->flags_rqpn) & 0xffffff;
        wc->dlid_path_bits = cqe->ml_path;
        g = (be32_to_cpu(cqe->flags_rqpn) >> 28) & 3;
        wc->wc_flags |= g ? IB_WC_GRH : 0;
        if (unlikely(is_qp1(qp->ibqp.qp_type))) {
                u16 pkey = be32_to_cpu(cqe->imm_inval_pkey) & 0xffff;

                ib_find_cached_pkey(&dev->ib_dev, qp->port, pkey,
                                    &wc->pkey_index);
        } else {
                wc->pkey_index = 0;
        }

        if (ll != IB_LINK_LAYER_ETHERNET) {
                wc->sl = (be32_to_cpu(cqe->flags_rqpn) >> 24) & 0xf;
                return;
        }

        vlan_present = cqe->l4_l3_hdr_type & 0x1;
        roce_packet_type = (be32_to_cpu(cqe->flags_rqpn) >> 24) & 0x3;
        if (vlan_present) {
                wc->vlan_id = (be16_to_cpu(cqe->vlan_info)) & 0xfff;
                wc->sl = (be16_to_cpu(cqe->vlan_info) >> 13) & 0x7;
                wc->wc_flags |= IB_WC_WITH_VLAN;
        } else {
                wc->sl = 0;
        }

        switch (roce_packet_type) {
        case MLX5_CQE_ROCE_L3_HEADER_TYPE_GRH:
                wc->network_hdr_type = RDMA_NETWORK_IB;
                break;
        case MLX5_CQE_ROCE_L3_HEADER_TYPE_IPV6:
                wc->network_hdr_type = RDMA_NETWORK_IPV6;
                break;
        case MLX5_CQE_ROCE_L3_HEADER_TYPE_IPV4:
                wc->network_hdr_type = RDMA_NETWORK_IPV4;
                break;
        }
        wc->wc_flags |= IB_WC_WITH_NETWORK_HDR_TYPE;
}

static void dump_cqe(struct mlx5_ib_dev *dev, struct mlx5_err_cqe *cqe)
{
        __be32 *p = (__be32 *)cqe;
        int i;

        mlx5_ib_warn(dev, "dump error cqe\n");
        for (i = 0; i < sizeof(*cqe) / 16; i++, p += 4)
                pr_info("%08x %08x %08x %08x\n", be32_to_cpu(p[0]),
                        be32_to_cpu(p[1]), be32_to_cpu(p[2]),
                        be32_to_cpu(p[3]));
}

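/*
 * Map a hardware error syndrome to an ib_wc status.  Expected error
 * flows (WR flush, transport/RNR retry exceeded) do not dump the CQE;
 * anything else is dumped to aid debugging.
 */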
static void mlx5_handle_error_cqe(struct mlx5_ib_dev *dev,
                                  struct mlx5_err_cqe *cqe,
                                  struct ib_wc *wc)
{
        int dump = 1;

        switch (cqe->syndrome) {
        case MLX5_CQE_SYNDROME_LOCAL_LENGTH_ERR:
                wc->status = IB_WC_LOC_LEN_ERR;
                break;
        case MLX5_CQE_SYNDROME_LOCAL_QP_OP_ERR:
                wc->status = IB_WC_LOC_QP_OP_ERR;
                break;
        case MLX5_CQE_SYNDROME_LOCAL_PROT_ERR:
                wc->status = IB_WC_LOC_PROT_ERR;
                break;
        case MLX5_CQE_SYNDROME_WR_FLUSH_ERR:
                dump = 0;
                wc->status = IB_WC_WR_FLUSH_ERR;
                break;
        case MLX5_CQE_SYNDROME_MW_BIND_ERR:
                wc->status = IB_WC_MW_BIND_ERR;
                break;
        case MLX5_CQE_SYNDROME_BAD_RESP_ERR:
                wc->status = IB_WC_BAD_RESP_ERR;
                break;
        case MLX5_CQE_SYNDROME_LOCAL_ACCESS_ERR:
                wc->status = IB_WC_LOC_ACCESS_ERR;
                break;
        case MLX5_CQE_SYNDROME_REMOTE_INVAL_REQ_ERR:
                wc->status = IB_WC_REM_INV_REQ_ERR;
                break;
        case MLX5_CQE_SYNDROME_REMOTE_ACCESS_ERR:
                wc->status = IB_WC_REM_ACCESS_ERR;
                break;
        case MLX5_CQE_SYNDROME_REMOTE_OP_ERR:
                wc->status = IB_WC_REM_OP_ERR;
                break;
        case MLX5_CQE_SYNDROME_TRANSPORT_RETRY_EXC_ERR:
                wc->status = IB_WC_RETRY_EXC_ERR;
                dump = 0;
                break;
        case MLX5_CQE_SYNDROME_RNR_RETRY_EXC_ERR:
                wc->status = IB_WC_RNR_RETRY_EXC_ERR;
                dump = 0;
                break;
        case MLX5_CQE_SYNDROME_REMOTE_ABORTED_ERR:
                wc->status = IB_WC_REM_ABORT_ERR;
                break;
        default:
                wc->status = IB_WC_GENERAL_ERR;
                break;
        }

        wc->vendor_err = cqe->vendor_err_synd;
        if (dump)
                dump_cqe(dev, cqe);
}

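/*
 * Atomic response handling: whether the payload of an atomic response
 * needs byte swapping is still TBD.  is_atomic_response() currently
 * always returns 0, so handle_atomic() below is effectively a no-op.
 */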
static int is_atomic_response(struct mlx5_ib_qp *qp, uint16_t idx)
{
        /* TBD: waiting decision */
        return 0;
}

static void *mlx5_get_atomic_laddr(struct mlx5_ib_qp *qp, uint16_t idx)
{
        struct mlx5_wqe_data_seg *dpseg;
        void *addr;

        dpseg = mlx5_get_send_wqe(qp, idx) + sizeof(struct mlx5_wqe_ctrl_seg) +
                sizeof(struct mlx5_wqe_raddr_seg) +
                sizeof(struct mlx5_wqe_atomic_seg);
        addr = (void *)(unsigned long)be64_to_cpu(dpseg->addr);
        return addr;
}

static void handle_atomic(struct mlx5_ib_qp *qp, struct mlx5_cqe64 *cqe64,
                          uint16_t idx)
{
        void *addr;
        int byte_count;
        int i;

        if (!is_atomic_response(qp, idx))
                return;

        byte_count = be32_to_cpu(cqe64->byte_cnt);
        addr = mlx5_get_atomic_laddr(qp, idx);

        if (byte_count == 4) {
                *(uint32_t *)addr = be32_to_cpu(*((__be32 *)addr));
        } else {
                for (i = 0; i < byte_count; i += 8) {
                        *(uint64_t *)addr = be64_to_cpu(*((__be64 *)addr));
                        addr += 8;
                }
        }
}

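/*
 * Walk the send queue's work-request list from tail towards head and
 * apply handle_atomic() to every WQE index up to and including head,
 * then record the new last_poll position.
 */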
static void handle_atomics(struct mlx5_ib_qp *qp, struct mlx5_cqe64 *cqe64,
                           u16 tail, u16 head)
{
        u16 idx;

        do {
                idx = tail & (qp->sq.wqe_cnt - 1);
                handle_atomic(qp, cqe64, idx);
                if (idx == head)
                        break;

                tail = qp->sq.w_list[idx].next;
        } while (1);
        tail = qp->sq.w_list[idx].next;
        qp->sq.last_poll = tail;
}

static void free_cq_buf(struct mlx5_ib_dev *dev, struct mlx5_ib_cq_buf *buf)
{
        mlx5_buf_free(dev->mdev, &buf->buf);
}

static void get_sig_err_item(struct mlx5_sig_err_cqe *cqe,
                             struct ib_sig_err *item)
{
        u16 syndrome = be16_to_cpu(cqe->syndrome);

#define GUARD_ERR   (1 << 13)
#define APPTAG_ERR  (1 << 12)
#define REFTAG_ERR  (1 << 11)

        if (syndrome & GUARD_ERR) {
                item->err_type = IB_SIG_BAD_GUARD;
                item->expected = be32_to_cpu(cqe->expected_trans_sig) >> 16;
                item->actual = be32_to_cpu(cqe->actual_trans_sig) >> 16;
        } else if (syndrome & REFTAG_ERR) {
                item->err_type = IB_SIG_BAD_REFTAG;
                item->expected = be32_to_cpu(cqe->expected_reftag);
                item->actual = be32_to_cpu(cqe->actual_reftag);
        } else if (syndrome & APPTAG_ERR) {
                item->err_type = IB_SIG_BAD_APPTAG;
                item->expected = be32_to_cpu(cqe->expected_trans_sig) & 0xffff;
                item->actual = be32_to_cpu(cqe->actual_trans_sig) & 0xffff;
        } else {
                pr_err("Got signature completion error with bad syndrome %04x\n",
                       syndrome);
        }

        item->sig_err_offset = be64_to_cpu(cqe->err_offset);
        item->key = be32_to_cpu(cqe->mkey);
}

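/*
 * When the device is in internal error state the hardware no longer
 * generates completions; the two helpers below synthesize flush-error
 * completions in software for the outstanding send and receive WQEs.
 */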
static void sw_send_comp(struct mlx5_ib_qp *qp, int num_entries,
                         struct ib_wc *wc, int *npolled)
{
        struct mlx5_ib_wq *wq;
        unsigned int cur;
        unsigned int idx;
        int np;
        int i;

        wq = &qp->sq;
        cur = wq->head - wq->tail;
        np = *npolled;

        if (cur == 0)
                return;

        for (i = 0; i < cur && np < num_entries; i++) {
                idx = wq->last_poll & (wq->wqe_cnt - 1);
                wc->wr_id = wq->wrid[idx];
                wc->status = IB_WC_WR_FLUSH_ERR;
                wc->vendor_err = MLX5_CQE_SYNDROME_WR_FLUSH_ERR;
                wq->tail++;
                np++;
                wc->qp = &qp->ibqp;
                wc++;
                wq->last_poll = wq->w_list[idx].next;
        }
        *npolled = np;
}

static void sw_recv_comp(struct mlx5_ib_qp *qp, int num_entries,
                         struct ib_wc *wc, int *npolled)
{
        struct mlx5_ib_wq *wq;
        unsigned int cur;
        int np;
        int i;

        wq = &qp->rq;
        cur = wq->head - wq->tail;
        np = *npolled;

        if (cur == 0)
                return;

        for (i = 0; i < cur && np < num_entries; i++) {
                wc->wr_id = wq->wrid[wq->tail & (wq->wqe_cnt - 1)];
                wc->status = IB_WC_WR_FLUSH_ERR;
                wc->vendor_err = MLX5_CQE_SYNDROME_WR_FLUSH_ERR;
                wq->tail++;
                np++;
                wc->qp = &qp->ibqp;
                wc++;
        }
        *npolled = np;
}

static void mlx5_ib_poll_sw_comp(struct mlx5_ib_cq *cq, int num_entries,
                                 struct ib_wc *wc, int *npolled)
{
        struct mlx5_ib_qp *qp;

        *npolled = 0;
        /* Find uncompleted WQEs belonging to that cq and return
         * mimicked (flush error) completions for them.
         */
        list_for_each_entry(qp, &cq->list_send_qp, cq_send_list) {
                sw_send_comp(qp, num_entries, wc + *npolled, npolled);
                if (*npolled >= num_entries)
                        return;
        }

        list_for_each_entry(qp, &cq->list_recv_qp, cq_recv_list) {
                sw_recv_comp(qp, num_entries, wc + *npolled, npolled);
                if (*npolled >= num_entries)
                        return;
        }
}

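/*
 * Poll a single CQE: advance the consumer index, resolve the QP the
 * CQE belongs to (caching it in *cur_qp across calls), and dispatch on
 * the CQE opcode.  Returns 0 if a work completion was filled in, or
 * -EAGAIN if the CQ is empty.
 */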
static int mlx5_poll_one(struct mlx5_ib_cq *cq,
                         struct mlx5_ib_qp **cur_qp,
                         struct ib_wc *wc)
{
        struct mlx5_ib_dev *dev = to_mdev(cq->ibcq.device);
        struct mlx5_err_cqe *err_cqe;
        struct mlx5_cqe64 *cqe64;
        struct mlx5_core_qp *mqp;
        struct mlx5_ib_wq *wq;
        struct mlx5_sig_err_cqe *sig_err_cqe;
        struct mlx5_core_mkey *mmkey;
        struct mlx5_ib_mr *mr;
        uint8_t opcode;
        uint32_t qpn;
        u16 wqe_ctr;
        void *cqe;
        int idx;

repoll:
        cqe = next_cqe_sw(cq);
        if (!cqe)
                return -EAGAIN;

        cqe64 = (cq->mcq.cqe_sz == 64) ? cqe : cqe + 64;

        ++cq->mcq.cons_index;

        /* Make sure we read CQ entry contents after we've checked the
         * ownership bit.
         */
        rmb();

        opcode = cqe64->op_own >> 4;
        if (unlikely(opcode == MLX5_CQE_RESIZE_CQ)) {
                if (likely(cq->resize_buf)) {
                        free_cq_buf(dev, &cq->buf);
                        cq->buf = *cq->resize_buf;
                        kfree(cq->resize_buf);
                        cq->resize_buf = NULL;
                        goto repoll;
                } else {
                        mlx5_ib_warn(dev, "unexpected resize cqe\n");
                }
        }

        qpn = ntohl(cqe64->sop_drop_qpn) & 0xffffff;
        if (!*cur_qp || (qpn != (*cur_qp)->ibqp.qp_num)) {
                /* We do not have to take the QP table lock here,
                 * because CQs will be locked while QPs are removed
                 * from the table.
                 */
                mqp = __mlx5_qp_lookup(dev->mdev, qpn);
                *cur_qp = to_mibqp(mqp);
        }

        wc->qp = &(*cur_qp)->ibqp;
        switch (opcode) {
        case MLX5_CQE_REQ:
                wq = &(*cur_qp)->sq;
                wqe_ctr = be16_to_cpu(cqe64->wqe_counter);
                idx = wqe_ctr & (wq->wqe_cnt - 1);
                handle_good_req(wc, cqe64, wq, idx);
                handle_atomics(*cur_qp, cqe64, wq->last_poll, idx);
                wc->wr_id = wq->wrid[idx];
                wq->tail = wq->wqe_head[idx] + 1;
                wc->status = IB_WC_SUCCESS;
                break;
        case MLX5_CQE_RESP_WR_IMM:
        case MLX5_CQE_RESP_SEND:
        case MLX5_CQE_RESP_SEND_IMM:
        case MLX5_CQE_RESP_SEND_INV:
                handle_responder(wc, cqe64, *cur_qp);
                wc->status = IB_WC_SUCCESS;
                break;
        case MLX5_CQE_RESIZE_CQ:
                break;
        case MLX5_CQE_REQ_ERR:
        case MLX5_CQE_RESP_ERR:
                err_cqe = (struct mlx5_err_cqe *)cqe64;
                mlx5_handle_error_cqe(dev, err_cqe, wc);
                mlx5_ib_dbg(dev, "%s error cqe on cqn 0x%x:\n",
                            opcode == MLX5_CQE_REQ_ERR ?
                            "Requestor" : "Responder", cq->mcq.cqn);
                mlx5_ib_dbg(dev, "syndrome 0x%x, vendor syndrome 0x%x\n",
                            err_cqe->syndrome, err_cqe->vendor_err_synd);
                if (opcode == MLX5_CQE_REQ_ERR) {
                        wq = &(*cur_qp)->sq;
                        wqe_ctr = be16_to_cpu(cqe64->wqe_counter);
                        idx = wqe_ctr & (wq->wqe_cnt - 1);
                        wc->wr_id = wq->wrid[idx];
                        wq->tail = wq->wqe_head[idx] + 1;
                } else {
                        struct mlx5_ib_srq *srq;

                        if ((*cur_qp)->ibqp.srq) {
                                srq = to_msrq((*cur_qp)->ibqp.srq);
                                wqe_ctr = be16_to_cpu(cqe64->wqe_counter);
                                wc->wr_id = srq->wrid[wqe_ctr];
                                mlx5_ib_free_srq_wqe(srq, wqe_ctr);
                        } else {
                                wq = &(*cur_qp)->rq;
                                wc->wr_id = wq->wrid[wq->tail & (wq->wqe_cnt - 1)];
                                ++wq->tail;
                        }
                }
                break;
        case MLX5_CQE_SIG_ERR:
                sig_err_cqe = (struct mlx5_sig_err_cqe *)cqe64;

                read_lock(&dev->mdev->priv.mkey_table.lock);
                mmkey = __mlx5_mr_lookup(dev->mdev,
                                         mlx5_base_mkey(be32_to_cpu(sig_err_cqe->mkey)));
                mr = to_mibmr(mmkey);
                get_sig_err_item(sig_err_cqe, &mr->sig->err_item);
                mr->sig->sig_err_exists = true;
                mr->sig->sigerr_count++;

                mlx5_ib_warn(dev, "CQN: 0x%x Got SIGERR on key: 0x%x err_type %x err_offset %llx expected %x actual %x\n",
                             cq->mcq.cqn, mr->sig->err_item.key,
                             mr->sig->err_item.err_type,
                             mr->sig->err_item.sig_err_offset,
                             mr->sig->err_item.expected,
                             mr->sig->err_item.actual);

                read_unlock(&dev->mdev->priv.mkey_table.lock);
                goto repoll;
        }

        return 0;
}

static int poll_soft_wc(struct mlx5_ib_cq *cq, int num_entries,
                        struct ib_wc *wc)
{
        struct mlx5_ib_dev *dev = to_mdev(cq->ibcq.device);
        struct mlx5_ib_wc *soft_wc, *next;
        int npolled = 0;

        list_for_each_entry_safe(soft_wc, next, &cq->wc_list, list) {
                if (npolled >= num_entries)
                        break;

                mlx5_ib_dbg(dev, "polled software generated completion on CQ 0x%x\n",
                            cq->mcq.cqn);

                wc[npolled++] = soft_wc->wc;
                list_del(&soft_wc->list);
                kfree(soft_wc);
        }

        return npolled;
}

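/*
 * poll_cq entry point: drain software-generated completions first,
 * then hardware CQEs.  If the device is in internal error state, only
 * software flush completions are returned.
 */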
int mlx5_ib_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *wc)
{
        struct mlx5_ib_cq *cq = to_mcq(ibcq);
        struct mlx5_ib_qp *cur_qp = NULL;
        struct mlx5_ib_dev *dev = to_mdev(cq->ibcq.device);
        struct mlx5_core_dev *mdev = dev->mdev;
        unsigned long flags;
        int soft_polled = 0;
        int npolled;

        spin_lock_irqsave(&cq->lock, flags);
        if (mdev->state == MLX5_DEVICE_STATE_INTERNAL_ERROR) {
                mlx5_ib_poll_sw_comp(cq, num_entries, wc, &npolled);
                goto out;
        }

        if (unlikely(!list_empty(&cq->wc_list)))
                soft_polled = poll_soft_wc(cq, num_entries, wc);

        for (npolled = 0; npolled < num_entries - soft_polled; npolled++) {
                if (mlx5_poll_one(cq, &cur_qp, wc + soft_polled + npolled))
                        break;
        }

        if (npolled)
                mlx5_cq_set_ci(&cq->mcq);
out:
        spin_unlock_irqrestore(&cq->lock, flags);

        return soft_polled + npolled;
}

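/*
 * Arm the CQ for the next solicited or unsolicited completion event by
 * ringing the arm doorbell.  With IB_CQ_REPORT_MISSED_EVENTS, returns 1
 * if soft completions are already pending on the CQ.
 */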
int mlx5_ib_arm_cq(struct ib_cq *ibcq, enum ib_cq_notify_flags flags)
{
        struct mlx5_core_dev *mdev = to_mdev(ibcq->device)->mdev;
        struct mlx5_ib_cq *cq = to_mcq(ibcq);
        void __iomem *uar_page = mdev->priv.uar->map;
        unsigned long irq_flags;
        int ret = 0;

        spin_lock_irqsave(&cq->lock, irq_flags);
        if (cq->notify_flags != IB_CQ_NEXT_COMP)
                cq->notify_flags = flags & IB_CQ_SOLICITED_MASK;

        if ((flags & IB_CQ_REPORT_MISSED_EVENTS) && !list_empty(&cq->wc_list))
                ret = 1;
        spin_unlock_irqrestore(&cq->lock, irq_flags);

        mlx5_cq_arm(&cq->mcq,
                    (flags & IB_CQ_SOLICITED_MASK) == IB_CQ_SOLICITED ?
                    MLX5_CQ_DB_REQ_NOT_SOL : MLX5_CQ_DB_REQ_NOT,
                    uar_page, to_mcq(ibcq)->mcq.cons_index);

        return ret;
}

static int alloc_cq_buf(struct mlx5_ib_dev *dev, struct mlx5_ib_cq_buf *buf,
                        int nent, int cqe_size)
{
        int err;

        err = mlx5_buf_alloc(dev->mdev, nent * cqe_size, &buf->buf);
        if (err)
                return err;

        buf->cqe_size = cqe_size;
        buf->nent = nent;

        return 0;
}

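/*
 * Create the user-space flavor of a CQ: pin the consumer's buffer and
 * doorbell record, build the PAS list for the firmware command, and
 * validate the CQE compression parameters.
 */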
static int create_cq_user(struct mlx5_ib_dev *dev, struct ib_udata *udata,
                          struct ib_ucontext *context, struct mlx5_ib_cq *cq,
                          int entries, u32 **cqb,
                          int *cqe_size, int *index, int *inlen)
{
        struct mlx5_ib_create_cq ucmd = {};
        size_t ucmdlen;
        int page_shift;
        __be64 *pas;
        int npages;
        int ncont;
        void *cqc;
        int err;

        ucmdlen = udata->inlen < sizeof(ucmd) ?
                  (sizeof(ucmd) - sizeof(ucmd.reserved)) : sizeof(ucmd);

        if (ib_copy_from_udata(&ucmd, udata, ucmdlen))
                return -EFAULT;

        if (ucmdlen == sizeof(ucmd) &&
            ucmd.reserved != 0)
                return -EINVAL;

        if (ucmd.cqe_size != 64 && ucmd.cqe_size != 128)
                return -EINVAL;

        *cqe_size = ucmd.cqe_size;

        cq->buf.umem = ib_umem_get(context, ucmd.buf_addr,
                                   entries * ucmd.cqe_size,
                                   IB_ACCESS_LOCAL_WRITE, 1);
        if (IS_ERR(cq->buf.umem)) {
                err = PTR_ERR(cq->buf.umem);
                return err;
        }

        err = mlx5_ib_db_map_user(to_mucontext(context), ucmd.db_addr,
                                  &cq->db);
        if (err)
                goto err_umem;

        mlx5_ib_cont_pages(cq->buf.umem, ucmd.buf_addr, 0, &npages, &page_shift,
                           &ncont, NULL);
        mlx5_ib_dbg(dev, "addr 0x%llx, size %u, npages %d, page_shift %d, ncont %d\n",
                    ucmd.buf_addr, entries * ucmd.cqe_size, npages, page_shift, ncont);

        *inlen = MLX5_ST_SZ_BYTES(create_cq_in) +
                 MLX5_FLD_SZ_BYTES(create_cq_in, pas[0]) * ncont;
        *cqb = kvzalloc(*inlen, GFP_KERNEL);
        if (!*cqb) {
                err = -ENOMEM;
                goto err_db;
        }

        pas = (__be64 *)MLX5_ADDR_OF(create_cq_in, *cqb, pas);
        mlx5_ib_populate_pas(dev, cq->buf.umem, page_shift, pas, 0);

        cqc = MLX5_ADDR_OF(create_cq_in, *cqb, cq_context);
        MLX5_SET(cqc, cqc, log_page_size,
                 page_shift - MLX5_ADAPTER_PAGE_SHIFT);

        *index = to_mucontext(context)->bfregi.sys_pages[0];

        if (ucmd.cqe_comp_en == 1) {
                if (unlikely((*cqe_size != 64) ||
                             !MLX5_CAP_GEN(dev->mdev, cqe_compression))) {
                        err = -EOPNOTSUPP;
                        mlx5_ib_warn(dev, "CQE compression is not supported for size %d!\n",
                                     *cqe_size);
                        goto err_cqb;
                }

                if (unlikely(!ucmd.cqe_comp_res_format ||
                             !(ucmd.cqe_comp_res_format <
                               MLX5_IB_CQE_RES_RESERVED) ||
                             (ucmd.cqe_comp_res_format &
                              (ucmd.cqe_comp_res_format - 1)))) {
                        err = -EOPNOTSUPP;
                        mlx5_ib_warn(dev, "CQE compression res format %d is not supported!\n",
                                     ucmd.cqe_comp_res_format);
                        goto err_cqb;
                }

                MLX5_SET(cqc, cqc, cqe_comp_en, 1);
                MLX5_SET(cqc, cqc, mini_cqe_res_format,
                         ilog2(ucmd.cqe_comp_res_format));
        }

        return 0;

err_cqb:
        kvfree(*cqb);

err_db:
        mlx5_ib_db_unmap_user(to_mucontext(context), &cq->db);

err_umem:
        ib_umem_release(cq->buf.umem);
        return err;
}

static void destroy_cq_user(struct mlx5_ib_cq *cq, struct ib_ucontext *context)
{
        mlx5_ib_db_unmap_user(to_mucontext(context), &cq->db);
        ib_umem_release(cq->buf.umem);
}

static void init_cq_buf(struct mlx5_ib_cq *cq, struct mlx5_ib_cq_buf *buf)
{
        int i;
        void *cqe;
        struct mlx5_cqe64 *cqe64;

        for (i = 0; i < buf->nent; i++) {
                cqe = get_cqe_from_buf(buf, i, buf->cqe_size);
                cqe64 = buf->cqe_size == 64 ? cqe : cqe + 64;
                cqe64->op_own = MLX5_CQE_INVALID << 4;
        }
}

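/*
 * Create the kernel flavor of a CQ: allocate the doorbell record and
 * CQ buffer in the kernel, mark every CQE invalid (HW-owned), and
 * build the firmware command.
 */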
static int create_cq_kernel(struct mlx5_ib_dev *dev, struct mlx5_ib_cq *cq,
                            int entries, int cqe_size,
                            u32 **cqb, int *index, int *inlen)
{
        __be64 *pas;
        void *cqc;
        int err;

        err = mlx5_db_alloc(dev->mdev, &cq->db);
        if (err)
                return err;

        cq->mcq.set_ci_db  = cq->db.db;
        cq->mcq.arm_db     = cq->db.db + 1;
        cq->mcq.cqe_sz = cqe_size;

        err = alloc_cq_buf(dev, &cq->buf, entries, cqe_size);
        if (err)
                goto err_db;

        init_cq_buf(cq, &cq->buf);

        *inlen = MLX5_ST_SZ_BYTES(create_cq_in) +
                 MLX5_FLD_SZ_BYTES(create_cq_in, pas[0]) * cq->buf.buf.npages;
        *cqb = kvzalloc(*inlen, GFP_KERNEL);
        if (!*cqb) {
                err = -ENOMEM;
                goto err_buf;
        }

        pas = (__be64 *)MLX5_ADDR_OF(create_cq_in, *cqb, pas);
        mlx5_fill_page_array(&cq->buf.buf, pas);

        cqc = MLX5_ADDR_OF(create_cq_in, *cqb, cq_context);
        MLX5_SET(cqc, cqc, log_page_size,
                 cq->buf.buf.page_shift - MLX5_ADAPTER_PAGE_SHIFT);

        *index = dev->mdev->priv.uar->index;

        return 0;

err_buf:
        free_cq_buf(dev, &cq->buf);

err_db:
        mlx5_db_free(dev->mdev, &cq->db);
        return err;
}

static void destroy_cq_kernel(struct mlx5_ib_dev *dev, struct mlx5_ib_cq *cq)
{
        free_cq_buf(dev, &cq->buf);
        mlx5_db_free(dev->mdev, &cq->db);
}

static void notify_soft_wc_handler(struct work_struct *work)
{
        struct mlx5_ib_cq *cq = container_of(work, struct mlx5_ib_cq,
                                             notify_work);

        cq->ibcq.comp_handler(&cq->ibcq, cq->ibcq.cq_context);
}

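/*
 * Verbs create_cq entry point: validate sizes and flags, round the
 * requested depth up to a power of two (one slot stays reserved, so
 * ibcq.cqe reports entries - 1), create the user or kernel buffer, and
 * issue the CREATE_CQ command to firmware.
 */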
struct ib_cq *mlx5_ib_create_cq(struct ib_device *ibdev,
                                const struct ib_cq_init_attr *attr,
                                struct ib_ucontext *context,
                                struct ib_udata *udata)
{
        int entries = attr->cqe;
        int vector = attr->comp_vector;
        struct mlx5_ib_dev *dev = to_mdev(ibdev);
        struct mlx5_ib_cq *cq;
        int uninitialized_var(index);
        int uninitialized_var(inlen);
        u32 *cqb = NULL;
        void *cqc;
        int cqe_size;
        unsigned int irqn;
        int eqn;
        int err;

        if (entries < 0 ||
            (entries > (1 << MLX5_CAP_GEN(dev->mdev, log_max_cq_sz))))
                return ERR_PTR(-EINVAL);

        if (check_cq_create_flags(attr->flags))
                return ERR_PTR(-EOPNOTSUPP);

        entries = roundup_pow_of_two(entries + 1);
        if (entries > (1 << MLX5_CAP_GEN(dev->mdev, log_max_cq_sz)))
                return ERR_PTR(-EINVAL);

        cq = kzalloc(sizeof(*cq), GFP_KERNEL);
        if (!cq)
                return ERR_PTR(-ENOMEM);

        cq->ibcq.cqe = entries - 1;
        mutex_init(&cq->resize_mutex);
        spin_lock_init(&cq->lock);
        cq->resize_buf = NULL;
        cq->resize_umem = NULL;
        cq->create_flags = attr->flags;
        INIT_LIST_HEAD(&cq->list_send_qp);
        INIT_LIST_HEAD(&cq->list_recv_qp);

        if (context) {
                err = create_cq_user(dev, udata, context, cq, entries,
                                     &cqb, &cqe_size, &index, &inlen);
                if (err)
                        goto err_create;
        } else {
                cqe_size = cache_line_size() == 128 ? 128 : 64;
                err = create_cq_kernel(dev, cq, entries, cqe_size, &cqb,
                                       &index, &inlen);
                if (err)
                        goto err_create;

                INIT_WORK(&cq->notify_work, notify_soft_wc_handler);
        }

        err = mlx5_vector2eqn(dev->mdev, vector, &eqn, &irqn);
        if (err)
                goto err_cqb;

        cq->cqe_size = cqe_size;

        cqc = MLX5_ADDR_OF(create_cq_in, cqb, cq_context);
        MLX5_SET(cqc, cqc, cqe_sz, cqe_sz_to_mlx_sz(cqe_size));
        MLX5_SET(cqc, cqc, log_cq_size, ilog2(entries));
        MLX5_SET(cqc, cqc, uar_page, index);
        MLX5_SET(cqc, cqc, c_eqn, eqn);
        MLX5_SET64(cqc, cqc, dbr_addr, cq->db.dma);
        if (cq->create_flags & IB_CQ_FLAGS_IGNORE_OVERRUN)
                MLX5_SET(cqc, cqc, oi, 1);

        err = mlx5_core_create_cq(dev->mdev, &cq->mcq, cqb, inlen);
        if (err)
                goto err_cqb;

        mlx5_ib_dbg(dev, "cqn 0x%x\n", cq->mcq.cqn);
        cq->mcq.irqn = irqn;
        if (context)
                cq->mcq.tasklet_ctx.comp = mlx5_ib_cq_comp;
        else
                cq->mcq.comp  = mlx5_ib_cq_comp;
        cq->mcq.event = mlx5_ib_cq_event;

        INIT_LIST_HEAD(&cq->wc_list);

        if (context)
                if (ib_copy_to_udata(udata, &cq->mcq.cqn, sizeof(__u32))) {
                        err = -EFAULT;
                        goto err_cmd;
                }

        kvfree(cqb);
        return &cq->ibcq;

err_cmd:
        mlx5_core_destroy_cq(dev->mdev, &cq->mcq);

err_cqb:
        kvfree(cqb);
        if (context)
                destroy_cq_user(cq, context);
        else
                destroy_cq_kernel(dev, cq);

err_create:
        kfree(cq);

        return ERR_PTR(err);
}

int mlx5_ib_destroy_cq(struct ib_cq *cq)
{
        struct mlx5_ib_dev *dev = to_mdev(cq->device);
        struct mlx5_ib_cq *mcq = to_mcq(cq);
        struct ib_ucontext *context = NULL;

        if (cq->uobject)
                context = cq->uobject->context;

        mlx5_core_destroy_cq(dev->mdev, &mcq->mcq);
        if (context)
                destroy_cq_user(mcq, context);
        else
                destroy_cq_kernel(dev, mcq);

        kfree(mcq);

        return 0;
}

static int is_equal_rsn(struct mlx5_cqe64 *cqe64, u32 rsn)
{
        return rsn == (ntohl(cqe64->sop_drop_qpn) & 0xffffff);
}

void __mlx5_ib_cq_clean(struct mlx5_ib_cq *cq, u32 rsn, struct mlx5_ib_srq *srq)
{
        struct mlx5_cqe64 *cqe64, *dest64;
        void *cqe, *dest;
        u32 prod_index;
        int nfreed = 0;
        u8 owner_bit;

        if (!cq)
                return;

        /* First we need to find the current producer index, so we
         * know where to start cleaning from.  It doesn't matter if HW
         * adds new entries after this loop -- the QP we're worried
         * about is already in RESET, so the new entries won't come
         * from our QP and therefore don't need to be checked.
         */
        for (prod_index = cq->mcq.cons_index; get_sw_cqe(cq, prod_index); prod_index++)
                if (prod_index == cq->mcq.cons_index + cq->ibcq.cqe)
                        break;

        /* Now sweep backwards through the CQ, removing CQ entries
         * that match our QP by copying older entries on top of them.
         */
        while ((int) --prod_index - (int) cq->mcq.cons_index >= 0) {
                cqe = get_cqe(cq, prod_index & cq->ibcq.cqe);
                cqe64 = (cq->mcq.cqe_sz == 64) ? cqe : cqe + 64;
                if (is_equal_rsn(cqe64, rsn)) {
                        if (srq && (ntohl(cqe64->srqn) & 0xffffff))
                                mlx5_ib_free_srq_wqe(srq, be16_to_cpu(cqe64->wqe_counter));
                        ++nfreed;
                } else if (nfreed) {
                        dest = get_cqe(cq, (prod_index + nfreed) & cq->ibcq.cqe);
                        dest64 = (cq->mcq.cqe_sz == 64) ? dest : dest + 64;
                        owner_bit = dest64->op_own & MLX5_CQE_OWNER_MASK;
                        memcpy(dest, cqe, cq->mcq.cqe_sz);
                        dest64->op_own = owner_bit |
                                (dest64->op_own & ~MLX5_CQE_OWNER_MASK);
                }
        }

        if (nfreed) {
                cq->mcq.cons_index += nfreed;
                /* Make sure update of buffer contents is done before
                 * updating consumer index.
                 */
                wmb();
                mlx5_cq_set_ci(&cq->mcq);
        }
}

void mlx5_ib_cq_clean(struct mlx5_ib_cq *cq, u32 qpn, struct mlx5_ib_srq *srq)
{
        if (!cq)
                return;

        spin_lock_irq(&cq->lock);
        __mlx5_ib_cq_clean(cq, qpn, srq);
        spin_unlock_irq(&cq->lock);
}

int mlx5_ib_modify_cq(struct ib_cq *cq, u16 cq_count, u16 cq_period)
{
        struct mlx5_ib_dev *dev = to_mdev(cq->device);
        struct mlx5_ib_cq *mcq = to_mcq(cq);
        int err;

        if (!MLX5_CAP_GEN(dev->mdev, cq_moderation))
                return -ENOSYS;

        err = mlx5_core_modify_cq_moderation(dev->mdev, &mcq->mcq,
                                             cq_period, cq_count);
        if (err)
                mlx5_ib_warn(dev, "modify cq 0x%x failed\n", mcq->mcq.cqn);

        return err;
}

static int resize_user(struct mlx5_ib_dev *dev, struct mlx5_ib_cq *cq,
                       int entries, struct ib_udata *udata, int *npas,
                       int *page_shift, int *cqe_size)
{
        struct mlx5_ib_resize_cq ucmd;
        struct ib_umem *umem;
        int err;
        int npages;
        struct ib_ucontext *context = cq->buf.umem->context;

        err = ib_copy_from_udata(&ucmd, udata, sizeof(ucmd));
        if (err)
                return err;

        if (ucmd.reserved0 || ucmd.reserved1)
                return -EINVAL;

        umem = ib_umem_get(context, ucmd.buf_addr, entries * ucmd.cqe_size,
                           IB_ACCESS_LOCAL_WRITE, 1);
        if (IS_ERR(umem)) {
                err = PTR_ERR(umem);
                return err;
        }

        mlx5_ib_cont_pages(umem, ucmd.buf_addr, 0, &npages, page_shift,
                           npas, NULL);

        cq->resize_umem = umem;
        *cqe_size = ucmd.cqe_size;

        return 0;
}

static void un_resize_user(struct mlx5_ib_cq *cq)
{
        ib_umem_release(cq->resize_umem);
}

static int resize_kernel(struct mlx5_ib_dev *dev, struct mlx5_ib_cq *cq,
                         int entries, int cqe_size)
{
        int err;

        cq->resize_buf = kzalloc(sizeof(*cq->resize_buf), GFP_KERNEL);
        if (!cq->resize_buf)
                return -ENOMEM;

        err = alloc_cq_buf(dev, cq->resize_buf, entries, cqe_size);
        if (err)
                goto ex;

        init_cq_buf(cq, cq->resize_buf);

        return 0;

ex:
        kfree(cq->resize_buf);
        return err;
}

static void un_resize_kernel(struct mlx5_ib_dev *dev, struct mlx5_ib_cq *cq)
{
        free_cq_buf(dev, cq->resize_buf);
        cq->resize_buf = NULL;
}

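/*
 * After firmware has switched to the new buffer, copy the CQEs still
 * pending in the old buffer into the resize buffer, fixing up the
 * ownership bit of each copied entry, until the special
 * MLX5_CQE_RESIZE_CQ entry is reached.
 */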
static int copy_resize_cqes(struct mlx5_ib_cq *cq)
{
        struct mlx5_ib_dev *dev = to_mdev(cq->ibcq.device);
        struct mlx5_cqe64 *scqe64;
        struct mlx5_cqe64 *dcqe64;
        void *start_cqe;
        void *scqe;
        void *dcqe;
        int ssize;
        int dsize;
        int i;
        u8 sw_own;

        ssize = cq->buf.cqe_size;
        dsize = cq->resize_buf->cqe_size;
        if (ssize != dsize) {
                mlx5_ib_warn(dev, "resize from different cqe size is not supported\n");
                return -EINVAL;
        }

        i = cq->mcq.cons_index;
        scqe = get_sw_cqe(cq, i);
        scqe64 = ssize == 64 ? scqe : scqe + 64;
        start_cqe = scqe;
        if (!scqe) {
                mlx5_ib_warn(dev, "expected cqe in sw ownership\n");
                return -EINVAL;
        }

        while ((scqe64->op_own >> 4) != MLX5_CQE_RESIZE_CQ) {
                dcqe = get_cqe_from_buf(cq->resize_buf,
                                        (i + 1) & (cq->resize_buf->nent),
                                        dsize);
                dcqe64 = dsize == 64 ? dcqe : dcqe + 64;
                sw_own = sw_ownership_bit(i + 1, cq->resize_buf->nent);
                memcpy(dcqe, scqe, dsize);
                dcqe64->op_own = (dcqe64->op_own & ~MLX5_CQE_OWNER_MASK) | sw_own;

                ++i;
                scqe = get_sw_cqe(cq, i);
                scqe64 = ssize == 64 ? scqe : scqe + 64;
                if (!scqe) {
                        mlx5_ib_warn(dev, "expected cqe in sw ownership\n");
                        return -EINVAL;
                }

                if (scqe == start_cqe) {
                        pr_warn("resize CQ failed to get resize CQE, CQN 0x%x\n",
                                cq->mcq.cqn);
                        return -ENOMEM;
                }
        }
        ++cq->mcq.cons_index;
        return 0;
}

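/*
 * Verbs resize_cq entry point.  User CQs are resized by swapping in a
 * newly pinned umem; kernel CQs allocate a resize buffer, let firmware
 * switch over, then migrate pending CQEs under the CQ lock.
 */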
int mlx5_ib_resize_cq(struct ib_cq *ibcq, int entries, struct ib_udata *udata)
{
        struct mlx5_ib_dev *dev = to_mdev(ibcq->device);
        struct mlx5_ib_cq *cq = to_mcq(ibcq);
        void *cqc;
        u32 *in;
        int err;
        int npas;
        __be64 *pas;
        int page_shift;
        int inlen;
        int uninitialized_var(cqe_size);
        unsigned long flags;

        if (!MLX5_CAP_GEN(dev->mdev, cq_resize)) {
                pr_info("Firmware does not support resize CQ\n");
                return -ENOSYS;
        }

        if (entries < 1 ||
            entries > (1 << MLX5_CAP_GEN(dev->mdev, log_max_cq_sz))) {
                mlx5_ib_warn(dev, "wrong entries number %d, max %d\n",
                             entries,
                             1 << MLX5_CAP_GEN(dev->mdev, log_max_cq_sz));
                return -EINVAL;
        }

        entries = roundup_pow_of_two(entries + 1);
        if (entries > (1 << MLX5_CAP_GEN(dev->mdev, log_max_cq_sz)) + 1)
                return -EINVAL;

        if (entries == ibcq->cqe + 1)
                return 0;

        mutex_lock(&cq->resize_mutex);
        if (udata) {
                err = resize_user(dev, cq, entries, udata, &npas, &page_shift,
                                  &cqe_size);
        } else {
                cqe_size = 64;
                err = resize_kernel(dev, cq, entries, cqe_size);
                if (!err) {
                        npas = cq->resize_buf->buf.npages;
                        page_shift = cq->resize_buf->buf.page_shift;
                }
        }

        if (err)
                goto ex;

        inlen = MLX5_ST_SZ_BYTES(modify_cq_in) +
                MLX5_FLD_SZ_BYTES(modify_cq_in, pas[0]) * npas;

        in = kvzalloc(inlen, GFP_KERNEL);
        if (!in) {
                err = -ENOMEM;
                goto ex_resize;
        }

        pas = (__be64 *)MLX5_ADDR_OF(modify_cq_in, in, pas);
        if (udata)
                mlx5_ib_populate_pas(dev, cq->resize_umem, page_shift,
                                     pas, 0);
        else
                mlx5_fill_page_array(&cq->resize_buf->buf, pas);

        MLX5_SET(modify_cq_in, in,
                 modify_field_select_resize_field_select.resize_field_select.resize_field_select,
                 MLX5_MODIFY_CQ_MASK_LOG_SIZE  |
                 MLX5_MODIFY_CQ_MASK_PG_OFFSET |
                 MLX5_MODIFY_CQ_MASK_PG_SIZE);

        cqc = MLX5_ADDR_OF(modify_cq_in, in, cq_context);

        MLX5_SET(cqc, cqc, log_page_size,
                 page_shift - MLX5_ADAPTER_PAGE_SHIFT);
        MLX5_SET(cqc, cqc, cqe_sz, cqe_sz_to_mlx_sz(cqe_size));
        MLX5_SET(cqc, cqc, log_cq_size, ilog2(entries));

        MLX5_SET(modify_cq_in, in, op_mod, MLX5_CQ_OPMOD_RESIZE);
        MLX5_SET(modify_cq_in, in, cqn, cq->mcq.cqn);

        err = mlx5_core_modify_cq(dev->mdev, &cq->mcq, in, inlen);
        if (err)
                goto ex_alloc;

        if (udata) {
                cq->ibcq.cqe = entries - 1;
                ib_umem_release(cq->buf.umem);
                cq->buf.umem = cq->resize_umem;
                cq->resize_umem = NULL;
        } else {
                struct mlx5_ib_cq_buf tbuf;
                int resized = 0;

                spin_lock_irqsave(&cq->lock, flags);
                if (cq->resize_buf) {
                        err = copy_resize_cqes(cq);
                        if (!err) {
                                tbuf = cq->buf;
                                cq->buf = *cq->resize_buf;
                                kfree(cq->resize_buf);
                                cq->resize_buf = NULL;
                                resized = 1;
                        }
                }
                cq->ibcq.cqe = entries - 1;
                spin_unlock_irqrestore(&cq->lock, flags);
                if (resized)
                        free_cq_buf(dev, &tbuf);
        }
        mutex_unlock(&cq->resize_mutex);

        kvfree(in);
        return 0;

ex_alloc:
        kvfree(in);

ex_resize:
        if (udata)
                un_resize_user(cq);
        else
                un_resize_kernel(dev, cq);
ex:
        mutex_unlock(&cq->resize_mutex);
        return err;
}

int mlx5_ib_get_cqe_size(struct mlx5_ib_dev *dev, struct ib_cq *ibcq)
{
        struct mlx5_ib_cq *cq;

        if (!ibcq)
                return 128;

        cq = to_mcq(ibcq);
        return cq->cqe_size;
}

/* Called from atomic context */
int mlx5_ib_generate_wc(struct ib_cq *ibcq, struct ib_wc *wc)
{
        struct mlx5_ib_wc *soft_wc;
        struct mlx5_ib_cq *cq = to_mcq(ibcq);
        unsigned long flags;

        soft_wc = kmalloc(sizeof(*soft_wc), GFP_ATOMIC);
        if (!soft_wc)
                return -ENOMEM;

        soft_wc->wc = *wc;
        spin_lock_irqsave(&cq->lock, flags);
        list_add_tail(&soft_wc->list, &cq->wc_list);
        if (cq->notify_flags == IB_CQ_NEXT_COMP ||
            wc->status != IB_WC_SUCCESS) {
                cq->notify_flags = 0;
                schedule_work(&cq->notify_work);
        }
        spin_unlock_irqrestore(&cq->lock, flags);

        return 0;
}