linux/drivers/infiniband/hw/cxgb4/cq.c
/*
 * Copyright (c) 2009-2010 Chelsio, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include "iw_cxgb4.h"

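/*
 * Tear down a hardware CQ: post a FW_RI_RES_WR with op RESET, wait for
 * the firmware reply, then free the software queue, the DMA queue
 * memory, and the CQID.
 */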
static int destroy_cq(struct c4iw_rdev *rdev, struct t4_cq *cq,
                      struct c4iw_dev_ucontext *uctx)
{
        struct fw_ri_res_wr *res_wr;
        struct fw_ri_res *res;
        int wr_len;
        struct c4iw_wr_wait wr_wait;
        struct sk_buff *skb;
        int ret;

        wr_len = sizeof *res_wr + sizeof *res;
        skb = alloc_skb(wr_len, GFP_KERNEL);
        if (!skb)
                return -ENOMEM;
        set_wr_txq(skb, CPL_PRIORITY_CONTROL, 0);

        res_wr = (struct fw_ri_res_wr *)__skb_put(skb, wr_len);
        memset(res_wr, 0, wr_len);
        res_wr->op_nres = cpu_to_be32(
                        FW_WR_OP(FW_RI_RES_WR) |
                        V_FW_RI_RES_WR_NRES(1) |
                        FW_WR_COMPL(1));
        res_wr->len16_pkd = cpu_to_be32(DIV_ROUND_UP(wr_len, 16));
        res_wr->cookie = (unsigned long) &wr_wait;
        res = res_wr->res;
        res->u.cq.restype = FW_RI_RES_TYPE_CQ;
        res->u.cq.op = FW_RI_RES_OP_RESET;
        res->u.cq.iqid = cpu_to_be32(cq->cqid);

        c4iw_init_wr_wait(&wr_wait);
        ret = c4iw_ofld_send(rdev, skb);
        if (!ret)
                ret = c4iw_wait_for_reply(rdev, &wr_wait, 0, 0, __func__);

        kfree(cq->sw_queue);
        dma_free_coherent(&(rdev->lldi.pdev->dev),
                          cq->memsize, cq->queue,
                          dma_unmap_addr(cq, mapping));
        c4iw_put_cqid(rdev, cq->cqid, uctx);
        return ret;
}

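/*
 * Set up a hardware CQ: allocate a CQID and the DMA queue memory (plus
 * a software shadow queue for kernel CQs), then post a FW_RI_RES_WR
 * with op WRITE to create the ingress queue in firmware.
 */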
static int create_cq(struct c4iw_rdev *rdev, struct t4_cq *cq,
                     struct c4iw_dev_ucontext *uctx)
{
        struct fw_ri_res_wr *res_wr;
        struct fw_ri_res *res;
        int wr_len;
        int user = (uctx != &rdev->uctx);
        struct c4iw_wr_wait wr_wait;
        int ret;
        struct sk_buff *skb;

        cq->cqid = c4iw_get_cqid(rdev, uctx);
        if (!cq->cqid) {
                ret = -ENOMEM;
                goto err1;
        }

        if (!user) {
                cq->sw_queue = kzalloc(cq->memsize, GFP_KERNEL);
                if (!cq->sw_queue) {
                        ret = -ENOMEM;
                        goto err2;
                }
        }
        cq->queue = dma_alloc_coherent(&rdev->lldi.pdev->dev, cq->memsize,
                                       &cq->dma_addr, GFP_KERNEL);
        if (!cq->queue) {
                ret = -ENOMEM;
                goto err3;
        }
        dma_unmap_addr_set(cq, mapping, cq->dma_addr);
        memset(cq->queue, 0, cq->memsize);

        /* build fw_ri_res_wr */
        wr_len = sizeof *res_wr + sizeof *res;

        skb = alloc_skb(wr_len, GFP_KERNEL);
        if (!skb) {
                ret = -ENOMEM;
                goto err4;
        }
        set_wr_txq(skb, CPL_PRIORITY_CONTROL, 0);

        res_wr = (struct fw_ri_res_wr *)__skb_put(skb, wr_len);
        memset(res_wr, 0, wr_len);
        res_wr->op_nres = cpu_to_be32(
                        FW_WR_OP(FW_RI_RES_WR) |
                        V_FW_RI_RES_WR_NRES(1) |
                        FW_WR_COMPL(1));
        res_wr->len16_pkd = cpu_to_be32(DIV_ROUND_UP(wr_len, 16));
        res_wr->cookie = (unsigned long) &wr_wait;
        res = res_wr->res;
        res->u.cq.restype = FW_RI_RES_TYPE_CQ;
        res->u.cq.op = FW_RI_RES_OP_WRITE;
        res->u.cq.iqid = cpu_to_be32(cq->cqid);
        res->u.cq.iqandst_to_iqandstindex = cpu_to_be32(
                        V_FW_RI_RES_WR_IQANUS(0) |
                        V_FW_RI_RES_WR_IQANUD(1) |
                        F_FW_RI_RES_WR_IQANDST |
                        V_FW_RI_RES_WR_IQANDSTINDEX(*rdev->lldi.rxq_ids));
        res->u.cq.iqdroprss_to_iqesize = cpu_to_be16(
                        F_FW_RI_RES_WR_IQDROPRSS |
                        V_FW_RI_RES_WR_IQPCIECH(2) |
                        V_FW_RI_RES_WR_IQINTCNTTHRESH(0) |
                        F_FW_RI_RES_WR_IQO |
                        V_FW_RI_RES_WR_IQESIZE(1));
        res->u.cq.iqsize = cpu_to_be16(cq->size);
        res->u.cq.iqaddr = cpu_to_be64(cq->dma_addr);

        c4iw_init_wr_wait(&wr_wait);

        ret = c4iw_ofld_send(rdev, skb);
        if (ret)
                goto err4;
        PDBG("%s wait_event wr_wait %p\n", __func__, &wr_wait);
        ret = c4iw_wait_for_reply(rdev, &wr_wait, 0, 0, __func__);
        if (ret)
                goto err4;

        cq->gen = 1;
        cq->gts = rdev->lldi.gts_reg;
        cq->rdev = rdev;
        if (user) {
                cq->ugts = (u64)pci_resource_start(rdev->lldi.pdev, 2) +
                                        (cq->cqid << rdev->cqshift);
                cq->ugts &= PAGE_MASK;
        }
        return 0;
err4:
        dma_free_coherent(&rdev->lldi.pdev->dev, cq->memsize, cq->queue,
                          dma_unmap_addr(cq, mapping));
err3:
        kfree(cq->sw_queue);
err2:
        c4iw_put_cqid(rdev, cq->cqid, uctx);
err1:
        return ret;
}

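/*
 * Synthesize a T4_ERR_SWFLUSH completion for one RQ WR and add it to
 * the software CQ.
 */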
static void insert_recv_cqe(struct t4_wq *wq, struct t4_cq *cq)
{
        struct t4_cqe cqe;

        PDBG("%s wq %p cq %p sw_cidx %u sw_pidx %u\n", __func__,
             wq, cq, cq->sw_cidx, cq->sw_pidx);
        memset(&cqe, 0, sizeof(cqe));
        cqe.header = cpu_to_be32(V_CQE_STATUS(T4_ERR_SWFLUSH) |
                                 V_CQE_OPCODE(FW_RI_SEND) |
                                 V_CQE_TYPE(0) |
                                 V_CQE_SWCQE(1) |
                                 V_CQE_QPID(wq->sq.qid));
        cqe.bits_type_ts = cpu_to_be64(V_CQE_GENBIT((u64)cq->gen));
        cq->sw_queue[cq->sw_pidx] = cqe;
        t4_swcq_produce(cq);
}

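/*
 * Flush RQ WRs that will never complete in hardware.  The first @count
 * in-use entries already have CQEs pending and are skipped; a software
 * flush CQE is inserted for each remaining entry.  Returns the number
 * of WRs flushed.
 */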
int c4iw_flush_rq(struct t4_wq *wq, struct t4_cq *cq, int count)
{
        int flushed = 0;
        int in_use = wq->rq.in_use - count;

        BUG_ON(in_use < 0);
        PDBG("%s wq %p cq %p rq.in_use %u skip count %u\n", __func__,
             wq, cq, wq->rq.in_use, count);
        while (in_use--) {
                insert_recv_cqe(wq, cq);
                flushed++;
        }
        return flushed;
}

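/*
 * Synthesize a T4_ERR_SWFLUSH completion for one SQ WR and add it to
 * the software CQ.
 */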
static void insert_sq_cqe(struct t4_wq *wq, struct t4_cq *cq,
                          struct t4_swsqe *swcqe)
{
        struct t4_cqe cqe;

        PDBG("%s wq %p cq %p sw_cidx %u sw_pidx %u\n", __func__,
             wq, cq, cq->sw_cidx, cq->sw_pidx);
        memset(&cqe, 0, sizeof(cqe));
        cqe.header = cpu_to_be32(V_CQE_STATUS(T4_ERR_SWFLUSH) |
                                 V_CQE_OPCODE(swcqe->opcode) |
                                 V_CQE_TYPE(1) |
                                 V_CQE_SWCQE(1) |
                                 V_CQE_QPID(wq->sq.qid));
        CQE_WRID_SQ_IDX(&cqe) = swcqe->idx;
        cqe.bits_type_ts = cpu_to_be64(V_CQE_GENBIT((u64)cq->gen));
        cq->sw_queue[cq->sw_pidx] = cqe;
        t4_swcq_produce(cq);
}

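/*
 * Flush SQ WRs that will never complete in hardware, skipping the
 * first @count entries that already have CQEs pending.  Returns the
 * number of WRs flushed.
 */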
int c4iw_flush_sq(struct t4_wq *wq, struct t4_cq *cq, int count)
{
        int flushed = 0;
        struct t4_swsqe *swsqe = &wq->sq.sw_sq[(wq->sq.cidx + count) %
                                               wq->sq.size];
        int in_use = wq->sq.in_use - count;

        BUG_ON(in_use < 0);
        while (in_use--) {
                swsqe->signaled = 0;
                insert_sq_cqe(wq, cq, swsqe);
                swsqe++;
                if (swsqe == (wq->sq.sw_sq + wq->sq.size))
                        swsqe = wq->sq.sw_sq;
                flushed++;
        }
        return flushed;
}

/*
 * Move all CQEs from the HWCQ into the SWCQ.
 */
void c4iw_flush_hw_cq(struct t4_cq *cq)
{
        struct t4_cqe *cqe = NULL, *swcqe;
        int ret;

        PDBG("%s cq %p cqid 0x%x\n", __func__, cq, cq->cqid);
        ret = t4_next_hw_cqe(cq, &cqe);
        while (!ret) {
                PDBG("%s flushing hwcq cidx 0x%x swcq pidx 0x%x\n",
                     __func__, cq->cidx, cq->sw_pidx);
                swcqe = &cq->sw_queue[cq->sw_pidx];
                *swcqe = *cqe;
                swcqe->header |= cpu_to_be32(V_CQE_SWCQE(1));
                t4_swcq_produce(cq);
                t4_hwcq_consume(cq);
                ret = t4_next_hw_cqe(cq, &cqe);
        }
}

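/*
 * Return 1 if this CQE completes an RQ or SQ WR, 0 for CQEs that
 * consume no WR: terminates, peer-2-peer RDMA writes, read responses
 * on the SQ, and sends that target an empty RQ.
 */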
static int cqe_completes_wr(struct t4_cqe *cqe, struct t4_wq *wq)
{
        if (CQE_OPCODE(cqe) == FW_RI_TERMINATE)
                return 0;

        if ((CQE_OPCODE(cqe) == FW_RI_RDMA_WRITE) && RQ_TYPE(cqe))
                return 0;

        if ((CQE_OPCODE(cqe) == FW_RI_READ_RESP) && SQ_TYPE(cqe))
                return 0;

        if (CQE_SEND_OPCODE(cqe) && RQ_TYPE(cqe) && t4_rq_empty(wq))
                return 0;
        return 1;
}

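/*
 * Count the CQEs in the software CQ that complete SQ WRs of the given
 * WQ, including read responses while a read request is outstanding.
 */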
void c4iw_count_scqes(struct t4_cq *cq, struct t4_wq *wq, int *count)
{
        struct t4_cqe *cqe;
        u32 ptr;

        *count = 0;
        ptr = cq->sw_cidx;
        while (ptr != cq->sw_pidx) {
                cqe = &cq->sw_queue[ptr];
                if ((SQ_TYPE(cqe) || ((CQE_OPCODE(cqe) == FW_RI_READ_RESP) &&
                                      wq->sq.oldest_read)) &&
                    (CQE_QPID(cqe) == wq->sq.qid))
                        (*count)++;
                if (++ptr == cq->size)
                        ptr = 0;
        }
        PDBG("%s cq %p count %d\n", __func__, cq, *count);
}

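/*
 * Count the CQEs in the software CQ that complete RQ WRs of the given
 * WQ.
 */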
void c4iw_count_rcqes(struct t4_cq *cq, struct t4_wq *wq, int *count)
{
        struct t4_cqe *cqe;
        u32 ptr;

        *count = 0;
        PDBG("%s count zero %d\n", __func__, *count);
        ptr = cq->sw_cidx;
        while (ptr != cq->sw_pidx) {
                cqe = &cq->sw_queue[ptr];
                if (RQ_TYPE(cqe) && (CQE_OPCODE(cqe) != FW_RI_READ_RESP) &&
                    (CQE_QPID(cqe) == wq->sq.qid) && cqe_completes_wr(cqe, wq))
                        (*count)++;
                if (++ptr == cq->size)
                        ptr = 0;
        }
        PDBG("%s cq %p count %d\n", __func__, cq, *count);
}

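/*
 * Walk the SW SQ from cidx, skipping over unsignaled WRs.  When a
 * signaled WR that has already completed out of order is found, move
 * its CQE into the software CQ and reap the unsignaled WRs ahead of
 * it.  Stop at the first signaled WR that has not yet completed.
 */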
static void flush_completed_wrs(struct t4_wq *wq, struct t4_cq *cq)
{
        struct t4_swsqe *swsqe;
        u16 ptr = wq->sq.cidx;
        int count = wq->sq.in_use;
        int unsignaled = 0;

        swsqe = &wq->sq.sw_sq[ptr];
        while (count--) {
                if (!swsqe->signaled) {
                        if (++ptr == wq->sq.size)
                                ptr = 0;
                        swsqe = &wq->sq.sw_sq[ptr];
                        unsignaled++;
                } else if (swsqe->complete) {
                        /*
                         * Insert this completed cqe into the swcq.
                         */
                        PDBG("%s moving cqe into swcq sq idx %u cq idx %u\n",
                             __func__, ptr, cq->sw_pidx);
                        swsqe->cqe.header |= htonl(V_CQE_SWCQE(1));
                        cq->sw_queue[cq->sw_pidx] = swsqe->cqe;
                        t4_swcq_produce(cq);
                        swsqe->signaled = 0;
                        wq->sq.in_use -= unsignaled;
                        break;
                } else {
                        break;
                }
        }
}

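/*
 * Build, in local memory, an SQ-type READ_REQ CQE for the oldest
 * outstanding read WR, using the QPID and timestamp bits of the
 * hardware read-response CQE.
 */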
static void create_read_req_cqe(struct t4_wq *wq, struct t4_cqe *hw_cqe,
                                struct t4_cqe *read_cqe)
{
        read_cqe->u.scqe.cidx = wq->sq.oldest_read->idx;
        read_cqe->len = cpu_to_be32(wq->sq.oldest_read->read_len);
        read_cqe->header = htonl(V_CQE_QPID(CQE_QPID(hw_cqe)) |
                                 V_CQE_SWCQE(SW_CQE(hw_cqe)) |
                                 V_CQE_OPCODE(FW_RI_READ_REQ) |
                                 V_CQE_TYPE(1));
        read_cqe->bits_type_ts = hw_cqe->bits_type_ts;
}

/*
 * Advance wq->sq.oldest_read to the next read wr in the SWSQ, or set
 * it to NULL if there are no more outstanding reads.
 */
static void advance_oldest_read(struct t4_wq *wq)
{
        u32 rptr = wq->sq.oldest_read - wq->sq.sw_sq + 1;

        if (rptr == wq->sq.size)
                rptr = 0;
        while (rptr != wq->sq.pidx) {
                wq->sq.oldest_read = &wq->sq.sw_sq[rptr];
                if (wq->sq.oldest_read->opcode == FW_RI_READ_REQ)
                        return;
                if (++rptr == wq->sq.size)
                        rptr = 0;
        }
        wq->sq.oldest_read = NULL;
}

/*
 * poll_cq
 *
 * Caller must:
 *     check the validity of the first CQE,
 *     supply the wq associated with the qpid.
 *
 * credit: cq credit to return to sge.
 * cqe_flushed: 1 iff the CQE is flushed.
 * cqe: copy of the polled CQE.
 *
 * return value:
 *    0             CQE returned ok.
 *    -EAGAIN       CQE skipped, try again.
 *    -EOVERFLOW    CQ overflow detected.
 */
static int poll_cq(struct t4_wq *wq, struct t4_cq *cq, struct t4_cqe *cqe,
                   u8 *cqe_flushed, u64 *cookie, u32 *credit)
{
        int ret = 0;
        struct t4_cqe *hw_cqe, read_cqe;

        *cqe_flushed = 0;
        *credit = 0;
        ret = t4_next_cqe(cq, &hw_cqe);
        if (ret)
                return ret;

        PDBG("%s CQE OVF %u qpid 0x%0x genbit %u type %u status 0x%0x"
             " opcode 0x%0x len 0x%0x wrid_hi_stag 0x%x wrid_low_msn 0x%x\n",
             __func__, CQE_OVFBIT(hw_cqe), CQE_QPID(hw_cqe),
             CQE_GENBIT(hw_cqe), CQE_TYPE(hw_cqe), CQE_STATUS(hw_cqe),
             CQE_OPCODE(hw_cqe), CQE_LEN(hw_cqe), CQE_WRID_HI(hw_cqe),
             CQE_WRID_LOW(hw_cqe));

        /*
         * Skip CQEs not affiliated with a QP.
         */
        if (wq == NULL) {
                ret = -EAGAIN;
                goto skip_cqe;
        }

        /*
         * Gotta tweak READ completions:
         *      1) the cqe doesn't contain the sq_wptr from the wr.
         *      2) opcode not reflected from the wr.
         *      3) read_len not reflected from the wr.
         *      4) cq_type is RQ_TYPE not SQ_TYPE.
         */
        if (RQ_TYPE(hw_cqe) && (CQE_OPCODE(hw_cqe) == FW_RI_READ_RESP)) {
                /*
                 * If this is an unsolicited read response, then the read
                 * was generated by the kernel driver as part of peer-2-peer
                 * connection setup.  So ignore the completion.
                 */
                if (!wq->sq.oldest_read) {
                        if (CQE_STATUS(hw_cqe))
                                t4_set_wq_in_error(wq);
                        ret = -EAGAIN;
                        goto skip_cqe;
                }

                /*
                 * Don't write to the HWCQ, so create a new read req CQE
                 * in local memory.
                 */
                create_read_req_cqe(wq, hw_cqe, &read_cqe);
                hw_cqe = &read_cqe;
                advance_oldest_read(wq);
        }

        if (CQE_STATUS(hw_cqe) || t4_wq_in_error(wq)) {
                *cqe_flushed = t4_wq_in_error(wq);
                t4_set_wq_in_error(wq);
                goto proc_cqe;
        }

        if (CQE_OPCODE(hw_cqe) == FW_RI_TERMINATE) {
                ret = -EAGAIN;
                goto skip_cqe;
        }

        /*
         * RECV completion.
         */
        if (RQ_TYPE(hw_cqe)) {
                /*
                 * HW only validates 4 bits of MSN.  So we must validate that
                 * the MSN in the SEND is the next expected MSN.  If it's not,
                 * then we complete this with T4_ERR_MSN and mark the wq in
                 * error.
                 */

                if (t4_rq_empty(wq)) {
                        t4_set_wq_in_error(wq);
                        ret = -EAGAIN;
                        goto skip_cqe;
                }
                if (unlikely((CQE_WRID_MSN(hw_cqe) != (wq->rq.msn)))) {
                        t4_set_wq_in_error(wq);
                        hw_cqe->header |= htonl(V_CQE_STATUS(T4_ERR_MSN));
                        goto proc_cqe;
                }
                goto proc_cqe;
        }

        /*
         * If we get here it's a send completion.
         *
         * Handle out of order completion. These get stuffed
         * in the SW SQ. Then the SW SQ is walked to move any
         * now in-order completions into the SW CQ.  This handles
         * 2 cases:
         *      1) reaping unsignaled WRs when the first subsequent
         *         signaled WR is completed.
         *      2) out of order read completions.
         */
        if (!SW_CQE(hw_cqe) && (CQE_WRID_SQ_IDX(hw_cqe) != wq->sq.cidx)) {
                struct t4_swsqe *swsqe;

                PDBG("%s out of order completion going in sw_sq at idx %u\n",
                     __func__, CQE_WRID_SQ_IDX(hw_cqe));
                swsqe = &wq->sq.sw_sq[CQE_WRID_SQ_IDX(hw_cqe)];
                swsqe->cqe = *hw_cqe;
                swsqe->complete = 1;
                ret = -EAGAIN;
                goto flush_wq;
        }

proc_cqe:
        *cqe = *hw_cqe;

        /*
         * Reap the associated WR(s) that are freed up with this
         * completion.
         */
        if (SQ_TYPE(hw_cqe)) {
                wq->sq.cidx = CQE_WRID_SQ_IDX(hw_cqe);
                PDBG("%s completing sq idx %u\n", __func__, wq->sq.cidx);
                *cookie = wq->sq.sw_sq[wq->sq.cidx].wr_id;
                t4_sq_consume(wq);
        } else {
                PDBG("%s completing rq idx %u\n", __func__, wq->rq.cidx);
                *cookie = wq->rq.sw_rq[wq->rq.cidx].wr_id;
                BUG_ON(t4_rq_empty(wq));
                t4_rq_consume(wq);
        }

flush_wq:
        /*
         * Flush any completed cqes that are now in-order.
         */
        flush_completed_wrs(wq, cq);

skip_cqe:
        if (SW_CQE(hw_cqe)) {
                PDBG("%s cq %p cqid 0x%x skip sw cqe cidx %u\n",
                     __func__, cq, cq->cqid, cq->sw_cidx);
                t4_swcq_consume(cq);
        } else {
                PDBG("%s cq %p cqid 0x%x skip hw cqe cidx %u\n",
                     __func__, cq, cq->cqid, cq->cidx);
                t4_hwcq_consume(cq);
        }
        return ret;
}

/*
 * Get one cq entry from c4iw and map it to openib.
 *
 * Returns:
 *      0                       cqe returned
 *      -ENODATA                CQ empty
 *      -EAGAIN                 caller must try again
 *      any other -errno        fatal error
 */
static int c4iw_poll_cq_one(struct c4iw_cq *chp, struct ib_wc *wc)
{
        struct c4iw_qp *qhp = NULL;
        struct t4_cqe cqe = {0, 0}, *rd_cqe;
        struct t4_wq *wq;
        u32 credit = 0;
        u8 cqe_flushed;
        u64 cookie = 0;
        int ret;

        ret = t4_next_cqe(&chp->cq, &rd_cqe);
        if (ret)
                return ret;

        qhp = get_qhp(chp->rhp, CQE_QPID(rd_cqe));
        if (!qhp) {
                wq = NULL;
        } else {
                spin_lock(&qhp->lock);
                wq = &(qhp->wq);
        }
        ret = poll_cq(wq, &(chp->cq), &cqe, &cqe_flushed, &cookie, &credit);
        if (ret)
                goto out;

        wc->wr_id = cookie;
        wc->qp = &qhp->ibqp;
        wc->vendor_err = CQE_STATUS(&cqe);
        wc->wc_flags = 0;

        PDBG("%s qpid 0x%x type %d opcode %d status 0x%x len %u wrid hi 0x%x "
             "lo 0x%x cookie 0x%llx\n", __func__, CQE_QPID(&cqe),
             CQE_TYPE(&cqe), CQE_OPCODE(&cqe), CQE_STATUS(&cqe), CQE_LEN(&cqe),
             CQE_WRID_HI(&cqe), CQE_WRID_LOW(&cqe), (unsigned long long)cookie);

        if (CQE_TYPE(&cqe) == 0) {
                if (!CQE_STATUS(&cqe))
                        wc->byte_len = CQE_LEN(&cqe);
                else
                        wc->byte_len = 0;
                wc->opcode = IB_WC_RECV;
                if (CQE_OPCODE(&cqe) == FW_RI_SEND_WITH_INV ||
                    CQE_OPCODE(&cqe) == FW_RI_SEND_WITH_SE_INV) {
                        wc->ex.invalidate_rkey = CQE_WRID_STAG(&cqe);
                        wc->wc_flags |= IB_WC_WITH_INVALIDATE;
                }
        } else {
                switch (CQE_OPCODE(&cqe)) {
                case FW_RI_RDMA_WRITE:
                        wc->opcode = IB_WC_RDMA_WRITE;
                        break;
                case FW_RI_READ_REQ:
                        wc->opcode = IB_WC_RDMA_READ;
                        wc->byte_len = CQE_LEN(&cqe);
                        break;
                case FW_RI_SEND_WITH_INV:
                case FW_RI_SEND_WITH_SE_INV:
                        wc->opcode = IB_WC_SEND;
                        wc->wc_flags |= IB_WC_WITH_INVALIDATE;
                        break;
                case FW_RI_SEND:
                case FW_RI_SEND_WITH_SE:
                        wc->opcode = IB_WC_SEND;
                        break;
                case FW_RI_BIND_MW:
                        wc->opcode = IB_WC_BIND_MW;
                        break;
                case FW_RI_LOCAL_INV:
                        wc->opcode = IB_WC_LOCAL_INV;
                        break;
                case FW_RI_FAST_REGISTER:
                        wc->opcode = IB_WC_FAST_REG_MR;
                        break;
                default:
                        printk(KERN_ERR MOD "Unexpected opcode %d "
                               "in the CQE received for QPID=0x%0x\n",
                               CQE_OPCODE(&cqe), CQE_QPID(&cqe));
                        ret = -EINVAL;
                        goto out;
                }
        }

        if (cqe_flushed) {
                wc->status = IB_WC_WR_FLUSH_ERR;
        } else {
                switch (CQE_STATUS(&cqe)) {
                case T4_ERR_SUCCESS:
                        wc->status = IB_WC_SUCCESS;
                        break;
                case T4_ERR_STAG:
                        wc->status = IB_WC_LOC_ACCESS_ERR;
                        break;
                case T4_ERR_PDID:
                        wc->status = IB_WC_LOC_PROT_ERR;
                        break;
                case T4_ERR_QPID:
                case T4_ERR_ACCESS:
                        wc->status = IB_WC_LOC_ACCESS_ERR;
                        break;
                case T4_ERR_WRAP:
                        wc->status = IB_WC_GENERAL_ERR;
                        break;
                case T4_ERR_BOUND:
                        wc->status = IB_WC_LOC_LEN_ERR;
                        break;
                case T4_ERR_INVALIDATE_SHARED_MR:
                case T4_ERR_INVALIDATE_MR_WITH_MW_BOUND:
                        wc->status = IB_WC_MW_BIND_ERR;
                        break;
                case T4_ERR_CRC:
                case T4_ERR_MARKER:
                case T4_ERR_PDU_LEN_ERR:
                case T4_ERR_OUT_OF_RQE:
                case T4_ERR_DDP_VERSION:
                case T4_ERR_RDMA_VERSION:
                case T4_ERR_DDP_QUEUE_NUM:
                case T4_ERR_MSN:
                case T4_ERR_TBIT:
                case T4_ERR_MO:
                case T4_ERR_MSN_RANGE:
                case T4_ERR_IRD_OVERFLOW:
                case T4_ERR_OPCODE:
                case T4_ERR_INTERNAL_ERR:
                        wc->status = IB_WC_FATAL_ERR;
                        break;
                case T4_ERR_SWFLUSH:
                        wc->status = IB_WC_WR_FLUSH_ERR;
                        break;
                default:
                        printk(KERN_ERR MOD
                               "Unexpected cqe_status 0x%x for QPID=0x%0x\n",
                               CQE_STATUS(&cqe), CQE_QPID(&cqe));
                        ret = -EINVAL;
                }
        }
out:
        if (wq)
                spin_unlock(&qhp->lock);
        return ret;
}

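/*
 * ib_poll_cq handler: poll up to @num_entries completions into @wc,
 * retrying internally while poll_cq asks to skip a CQE.  Returns the
 * number of entries polled, or a negative errno on a fatal error.
 */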
int c4iw_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *wc)
{
        struct c4iw_cq *chp;
        unsigned long flags;
        int npolled;
        int err = 0;

        chp = to_c4iw_cq(ibcq);

        spin_lock_irqsave(&chp->lock, flags);
        for (npolled = 0; npolled < num_entries; ++npolled) {
                do {
                        err = c4iw_poll_cq_one(chp, wc + npolled);
                } while (err == -EAGAIN);
                if (err)
                        break;
        }
        spin_unlock_irqrestore(&chp->lock, flags);
        return !err || err == -ENODATA ? npolled : err;
}

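/*
 * ib_destroy_cq handler: remove the CQID from the idr, wait for the
 * reference count to drop to zero, then destroy the hardware CQ.
 */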
int c4iw_destroy_cq(struct ib_cq *ib_cq)
{
        struct c4iw_cq *chp;
        struct c4iw_ucontext *ucontext;

        PDBG("%s ib_cq %p\n", __func__, ib_cq);
        chp = to_c4iw_cq(ib_cq);

        remove_handle(chp->rhp, &chp->rhp->cqidr, chp->cq.cqid);
        atomic_dec(&chp->refcnt);
        wait_event(chp->wait, !atomic_read(&chp->refcnt));

        ucontext = ib_cq->uobject ? to_c4iw_ucontext(ib_cq->uobject->context)
                                  : NULL;
        destroy_cq(&chp->rhp->rdev, &chp->cq,
                   ucontext ? &ucontext->uctx : &chp->cq.rdev->uctx);
        kfree(chp);
        return 0;
}

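/*
 * ib_create_cq handler: size the hardware IQ (status page, full/empty
 * marker, 2x slack, 64-entry minimum), create the CQ in firmware, and
 * for user CQs pass the queue and GTS mmap keys back through @udata.
 */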
struct ib_cq *c4iw_create_cq(struct ib_device *ibdev, int entries,
                             int vector, struct ib_ucontext *ib_context,
                             struct ib_udata *udata)
{
        struct c4iw_dev *rhp;
        struct c4iw_cq *chp;
        struct c4iw_create_cq_resp uresp;
        struct c4iw_ucontext *ucontext = NULL;
        int ret;
        size_t memsize, hwentries;
        struct c4iw_mm_entry *mm, *mm2;

        PDBG("%s ib_dev %p entries %d\n", __func__, ibdev, entries);

        rhp = to_c4iw_dev(ibdev);

        chp = kzalloc(sizeof(*chp), GFP_KERNEL);
        if (!chp)
                return ERR_PTR(-ENOMEM);

        if (ib_context)
                ucontext = to_c4iw_ucontext(ib_context);

        /* account for the status page. */
        entries++;

        /* IQ needs one extra entry to differentiate full vs empty. */
        entries++;

        /*
         * entries must be multiple of 16 for HW.
         */
        entries = roundup(entries, 16);

        /*
         * Make actual HW queue 2x to avoid cidx_inc overflows.
         */
        hwentries = entries * 2;

        /*
         * Make HW queue at least 64 entries so GTS updates aren't too
         * frequent.
         */
        if (hwentries < 64)
                hwentries = 64;

        memsize = hwentries * sizeof *chp->cq.queue;

        /*
         * memsize must be a multiple of the page size if it's a user CQ.
         */
        if (ucontext) {
                memsize = roundup(memsize, PAGE_SIZE);
                hwentries = memsize / sizeof *chp->cq.queue;
                while (hwentries > T4_MAX_IQ_SIZE) {
                        memsize -= PAGE_SIZE;
                        hwentries = memsize / sizeof *chp->cq.queue;
                }
        }
        chp->cq.size = hwentries;
        chp->cq.memsize = memsize;

        ret = create_cq(&rhp->rdev, &chp->cq,
                        ucontext ? &ucontext->uctx : &rhp->rdev.uctx);
        if (ret)
                goto err1;

        chp->rhp = rhp;
        chp->cq.size--;                         /* status page */
        chp->ibcq.cqe = entries - 2;
        spin_lock_init(&chp->lock);
        spin_lock_init(&chp->comp_handler_lock);
        atomic_set(&chp->refcnt, 1);
        init_waitqueue_head(&chp->wait);
        ret = insert_handle(rhp, &rhp->cqidr, chp, chp->cq.cqid);
        if (ret)
                goto err2;

        if (ucontext) {
                mm = kmalloc(sizeof *mm, GFP_KERNEL);
                if (!mm) {
                        ret = -ENOMEM;
                        goto err3;
                }
                mm2 = kmalloc(sizeof *mm2, GFP_KERNEL);
                if (!mm2) {
                        ret = -ENOMEM;
                        goto err4;
                }

                uresp.qid_mask = rhp->rdev.cqmask;
                uresp.cqid = chp->cq.cqid;
                uresp.size = chp->cq.size;
                uresp.memsize = chp->cq.memsize;
                spin_lock(&ucontext->mmap_lock);
                uresp.key = ucontext->key;
                ucontext->key += PAGE_SIZE;
                uresp.gts_key = ucontext->key;
                ucontext->key += PAGE_SIZE;
                spin_unlock(&ucontext->mmap_lock);
                ret = ib_copy_to_udata(udata, &uresp, sizeof uresp);
                if (ret)
                        goto err5;

                mm->key = uresp.key;
                mm->addr = virt_to_phys(chp->cq.queue);
                mm->len = chp->cq.memsize;
                insert_mmap(ucontext, mm);

                mm2->key = uresp.gts_key;
                mm2->addr = chp->cq.ugts;
                mm2->len = PAGE_SIZE;
                insert_mmap(ucontext, mm2);
        }
        PDBG("%s cqid 0x%0x chp %p size %u memsize %zu, dma_addr 0x%0llx\n",
             __func__, chp->cq.cqid, chp, chp->cq.size,
             chp->cq.memsize,
             (unsigned long long) chp->cq.dma_addr);
        return &chp->ibcq;
err5:
        kfree(mm2);
err4:
        kfree(mm);
err3:
        remove_handle(rhp, &rhp->cqidr, chp->cq.cqid);
err2:
        destroy_cq(&chp->rhp->rdev, &chp->cq,
                   ucontext ? &ucontext->uctx : &rhp->rdev.uctx);
err1:
        kfree(chp);
        return ERR_PTR(ret);
}

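/* CQ resize is not supported. */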
int c4iw_resize_cq(struct ib_cq *cq, int cqe, struct ib_udata *udata)
{
        return -ENOSYS;
}

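/*
 * ib_req_notify_cq handler: arm the CQ for the requested notification
 * type.  A missed event is reported only if the consumer asked for
 * IB_CQ_REPORT_MISSED_EVENTS.
 */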
int c4iw_arm_cq(struct ib_cq *ibcq, enum ib_cq_notify_flags flags)
{
        struct c4iw_cq *chp;
        int ret;
        unsigned long flag;

        chp = to_c4iw_cq(ibcq);
        spin_lock_irqsave(&chp->lock, flag);
        ret = t4_arm_cq(&chp->cq,
                        (flags & IB_CQ_SOLICITED_MASK) == IB_CQ_SOLICITED);
        spin_unlock_irqrestore(&chp->lock, flag);
        if (ret && !(flags & IB_CQ_REPORT_MISSED_EVENTS))
                ret = 0;
        return ret;
}