linux/drivers/infiniband/hw/i40iw/i40iw_puda.c
   1/*******************************************************************************
   2*
   3* Copyright (c) 2015-2016 Intel Corporation.  All rights reserved.
   4*
   5* This software is available to you under a choice of one of two
   6* licenses.  You may choose to be licensed under the terms of the GNU
   7* General Public License (GPL) Version 2, available from the file
   8* COPYING in the main directory of this source tree, or the
   9* OpenFabrics.org BSD license below:
  10*
  11*   Redistribution and use in source and binary forms, with or
  12*   without modification, are permitted provided that the following
  13*   conditions are met:
  14*
  15*    - Redistributions of source code must retain the above
  16*       copyright notice, this list of conditions and the following
  17*       disclaimer.
  18*
  19*    - Redistributions in binary form must reproduce the above
  20*       copyright notice, this list of conditions and the following
  21*       disclaimer in the documentation and/or other materials
  22*       provided with the distribution.
  23*
  24* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
  25* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
  26* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
  27* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
  28* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
  29* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
  30* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
  31* SOFTWARE.
  32*
  33*******************************************************************************/
  34
  35#include "i40iw_osdep.h"
  36#include "i40iw_register.h"
  37#include "i40iw_status.h"
  38#include "i40iw_hmc.h"
  39
  40#include "i40iw_d.h"
  41#include "i40iw_type.h"
  42#include "i40iw_p.h"
  43#include "i40iw_puda.h"
  44
  45static void i40iw_ieq_receive(struct i40iw_sc_dev *dev,
  46                              struct i40iw_puda_buf *buf);
  47static void i40iw_ieq_tx_compl(struct i40iw_sc_dev *dev, void *sqwrid);
  48static void i40iw_ilq_putback_rcvbuf(struct i40iw_sc_qp *qp, u32 wqe_idx);
  49static enum i40iw_status_code i40iw_puda_replenish_rq(struct i40iw_puda_rsrc
  50                                                      *rsrc, bool initial);
  51/**
  52 * i40iw_puda_get_listbuf - get buffer from puda list
  53 * @list: list to use for buffers (ILQ or IEQ)
  54 */
  55static struct i40iw_puda_buf *i40iw_puda_get_listbuf(struct list_head *list)
  56{
  57        struct i40iw_puda_buf *buf = NULL;
  58
  59        if (!list_empty(list)) {
  60                buf = (struct i40iw_puda_buf *)list->next;
  61                list_del((struct list_head *)&buf->list);
  62        }
  63        return buf;
  64}
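
/*
 * Note on the cast above: treating list->next directly as a
 * struct i40iw_puda_buf * assumes the buffer's list_head is the first
 * member of struct i40iw_puda_buf.  An equivalent, more explicit form
 * would be list_first_entry(list, struct i40iw_puda_buf, list).
 */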
  65
  66/**
  67 * i40iw_puda_get_bufpool - return buffer from resource
  68 * @rsrc: resource to use for buffer
  69 */
  70struct i40iw_puda_buf *i40iw_puda_get_bufpool(struct i40iw_puda_rsrc *rsrc)
  71{
  72        struct i40iw_puda_buf *buf = NULL;
  73        struct list_head *list = &rsrc->bufpool;
  74        unsigned long   flags;
  75
  76        spin_lock_irqsave(&rsrc->bufpool_lock, flags);
  77        buf = i40iw_puda_get_listbuf(list);
  78        if (buf)
  79                rsrc->avail_buf_count--;
  80        else
  81                rsrc->stats_buf_alloc_fail++;
  82        spin_unlock_irqrestore(&rsrc->bufpool_lock, flags);
  83        return buf;
  84}
  85
  86/**
  87 * i40iw_puda_ret_bufpool - return buffer to rsrc list
  88 * @rsrc: resource to use for buffer
   89 * @buf: buffer to return to resource
  90 */
  91void i40iw_puda_ret_bufpool(struct i40iw_puda_rsrc *rsrc,
  92                            struct i40iw_puda_buf *buf)
  93{
  94        unsigned long   flags;
  95
  96        spin_lock_irqsave(&rsrc->bufpool_lock, flags);
  97        list_add(&buf->list, &rsrc->bufpool);
  98        spin_unlock_irqrestore(&rsrc->bufpool_lock, flags);
  99        rsrc->avail_buf_count++;
 100}
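
/*
 * Minimal usage sketch for the two pool helpers above (illustrative
 * only, not a call path taken by the driver as written): a caller
 * borrows a pre-allocated buffer, uses its DMA memory, and returns it
 * to the pool when done.
 *
 *	struct i40iw_puda_buf *buf = i40iw_puda_get_bufpool(rsrc);
 *
 *	if (buf) {
 *		... use buf->mem.va / buf->mem.pa ...
 *		i40iw_puda_ret_bufpool(rsrc, buf);
 *	}
 */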
 101
 102/**
 103 * i40iw_puda_post_recvbuf - set wqe for rcv buffer
 104 * @rsrc: resource ptr
 105 * @wqe_idx: wqe index to use
 106 * @buf: puda buffer for rcv q
 107 * @initial: flag if during init time
 108 */
 109static void i40iw_puda_post_recvbuf(struct i40iw_puda_rsrc *rsrc, u32 wqe_idx,
 110                                    struct i40iw_puda_buf *buf, bool initial)
 111{
 112        u64 *wqe;
 113        struct i40iw_sc_qp *qp = &rsrc->qp;
 114        u64 offset24 = 0;
 115
 116        qp->qp_uk.rq_wrid_array[wqe_idx] = (uintptr_t)buf;
 117        wqe = qp->qp_uk.rq_base[wqe_idx].elem;
 118        i40iw_debug(rsrc->dev, I40IW_DEBUG_PUDA,
 119                    "%s: wqe_idx= %d buf = %p wqe = %p\n", __func__,
 120                    wqe_idx, buf, wqe);
 121        if (!initial)
 122                get_64bit_val(wqe, 24, &offset24);
 123
 124        offset24 = (offset24) ? 0 : LS_64(1, I40IWQPSQ_VALID);
 125        set_64bit_val(wqe, 24, offset24);
 126
 127        set_64bit_val(wqe, 0, buf->mem.pa);
 128        set_64bit_val(wqe, 8,
 129                      LS_64(buf->mem.size, I40IWQPSQ_FRAG_LEN));
 130        set_64bit_val(wqe, 24, offset24);
 131}
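
/*
 * The quadword written at offset 24 above carries the RQ WQE valid bit.
 * On the initial post offset24 starts at zero, so the valid bit is set;
 * on a re-post the current value is read back first and the bit is
 * toggled, keeping the slot's valid bit in step with the ring polarity
 * the hardware expects as the RQ wraps.
 */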
 132
 133/**
 134 * i40iw_puda_replenish_rq - post rcv buffers
 135 * @rsrc: resource to use for buffer
 136 * @initial: flag if during init time
 137 */
 138static enum i40iw_status_code i40iw_puda_replenish_rq(struct i40iw_puda_rsrc *rsrc,
 139                                                      bool initial)
 140{
 141        u32 i;
 142        u32 invalid_cnt = rsrc->rxq_invalid_cnt;
 143        struct i40iw_puda_buf *buf = NULL;
 144
 145        for (i = 0; i < invalid_cnt; i++) {
 146                buf = i40iw_puda_get_bufpool(rsrc);
 147                if (!buf)
 148                        return I40IW_ERR_list_empty;
 149                i40iw_puda_post_recvbuf(rsrc, rsrc->rx_wqe_idx, buf,
 150                                        initial);
 151                rsrc->rx_wqe_idx =
 152                    ((rsrc->rx_wqe_idx + 1) % rsrc->rq_size);
 153                rsrc->rxq_invalid_cnt--;
 154        }
 155        return 0;
 156}
 157
 158/**
 159 * i40iw_puda_alloc_buf - allocate mem for buffer
 160 * @dev: iwarp device
 161 * @length: length of buffer
 162 */
 163static struct i40iw_puda_buf *i40iw_puda_alloc_buf(struct i40iw_sc_dev *dev,
 164                                                   u32 length)
 165{
 166        struct i40iw_puda_buf *buf = NULL;
 167        struct i40iw_virt_mem buf_mem;
 168        enum i40iw_status_code ret;
 169
 170        ret = i40iw_allocate_virt_mem(dev->hw, &buf_mem,
 171                                      sizeof(struct i40iw_puda_buf));
 172        if (ret) {
 173                i40iw_debug(dev, I40IW_DEBUG_PUDA,
 174                            "%s: error mem for buf\n", __func__);
 175                return NULL;
 176        }
 177        buf = (struct i40iw_puda_buf *)buf_mem.va;
 178        ret = i40iw_allocate_dma_mem(dev->hw, &buf->mem, length, 1);
 179        if (ret) {
 180                i40iw_debug(dev, I40IW_DEBUG_PUDA,
 181                            "%s: error dma mem for buf\n", __func__);
 182                i40iw_free_virt_mem(dev->hw, &buf_mem);
 183                return NULL;
 184        }
 185        buf->buf_mem.va = buf_mem.va;
 186        buf->buf_mem.size = buf_mem.size;
 187        return buf;
 188}
 189
 190/**
 191 * i40iw_puda_dele_buf - free buffer back to system
 192 * @dev: iwarp device
 193 * @buf: buffer to free
 194 */
 195static void i40iw_puda_dele_buf(struct i40iw_sc_dev *dev,
 196                                struct i40iw_puda_buf *buf)
 197{
 198        i40iw_free_dma_mem(dev->hw, &buf->mem);
 199        i40iw_free_virt_mem(dev->hw, &buf->buf_mem);
 200}
 201
 202/**
 203 * i40iw_puda_get_next_send_wqe - return next wqe for processing
 204 * @qp: puda qp for wqe
 205 * @wqe_idx: wqe index for caller
 206 */
 207static u64 *i40iw_puda_get_next_send_wqe(struct i40iw_qp_uk *qp, u32 *wqe_idx)
 208{
 209        u64 *wqe = NULL;
 210        enum i40iw_status_code ret_code = 0;
 211
 212        *wqe_idx = I40IW_RING_GETCURRENT_HEAD(qp->sq_ring);
 213        if (!*wqe_idx)
 214                qp->swqe_polarity = !qp->swqe_polarity;
 215        I40IW_RING_MOVE_HEAD(qp->sq_ring, ret_code);
 216        if (ret_code)
 217                return wqe;
 218        wqe = qp->sq_base[*wqe_idx].elem;
 219
 220        return wqe;
 221}
 222
 223/**
 224 * i40iw_puda_poll_info - poll cq for completion
 225 * @cq: cq for poll
 226 * @info: info return for successful completion
 227 */
 228static enum i40iw_status_code i40iw_puda_poll_info(struct i40iw_sc_cq *cq,
 229                                                   struct i40iw_puda_completion_info *info)
 230{
 231        u64 qword0, qword2, qword3;
 232        u64 *cqe;
 233        u64 comp_ctx;
 234        bool valid_bit;
 235        u32 major_err, minor_err;
 236        bool error;
 237
 238        cqe = (u64 *)I40IW_GET_CURRENT_CQ_ELEMENT(&cq->cq_uk);
 239        get_64bit_val(cqe, 24, &qword3);
 240        valid_bit = (bool)RS_64(qword3, I40IW_CQ_VALID);
 241
 242        if (valid_bit != cq->cq_uk.polarity)
 243                return I40IW_ERR_QUEUE_EMPTY;
 244
 245        i40iw_debug_buf(cq->dev, I40IW_DEBUG_PUDA, "PUDA CQE", cqe, 32);
 246        error = (bool)RS_64(qword3, I40IW_CQ_ERROR);
 247        if (error) {
 248                i40iw_debug(cq->dev, I40IW_DEBUG_PUDA, "%s receive error\n", __func__);
 249                major_err = (u32)(RS_64(qword3, I40IW_CQ_MAJERR));
 250                minor_err = (u32)(RS_64(qword3, I40IW_CQ_MINERR));
 251                info->compl_error = major_err << 16 | minor_err;
 252                return I40IW_ERR_CQ_COMPL_ERROR;
 253        }
 254
 255        get_64bit_val(cqe, 0, &qword0);
 256        get_64bit_val(cqe, 16, &qword2);
 257
 258        info->q_type = (u8)RS_64(qword3, I40IW_CQ_SQ);
 259        info->qp_id = (u32)RS_64(qword2, I40IWCQ_QPID);
 260
 261        get_64bit_val(cqe, 8, &comp_ctx);
 262        info->qp = (struct i40iw_qp_uk *)(unsigned long)comp_ctx;
 263        info->wqe_idx = (u32)RS_64(qword3, I40IW_CQ_WQEIDX);
 264
 265        if (info->q_type == I40IW_CQE_QTYPE_RQ) {
 266                info->vlan_valid = (bool)RS_64(qword3, I40IW_VLAN_TAG_VALID);
 267                info->l4proto = (u8)RS_64(qword2, I40IW_UDA_L4PROTO);
 268                info->l3proto = (u8)RS_64(qword2, I40IW_UDA_L3PROTO);
 269                info->payload_len = (u16)RS_64(qword0, I40IW_UDA_PAYLOADLEN);
 270        }
 271
 272        return 0;
 273}
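
/*
 * CQE layout as decoded above: qword3 (offset 24) holds the valid and
 * error bits, the major/minor error codes, the SQ/RQ queue type and the
 * WQE index; qword2 (offset 16) holds the QP id and, for RQ
 * completions, the L3/L4 protocol; qword0 holds the UDA payload length;
 * and offset 8 holds the completion context pointing back at the
 * i40iw_qp_uk.
 */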
 274
 275/**
 276 * i40iw_puda_poll_completion - processes completion for cq
 277 * @dev: iwarp device
 278 * @cq: cq getting interrupt
 279 * @compl_err: return any completion err
 280 */
 281enum i40iw_status_code i40iw_puda_poll_completion(struct i40iw_sc_dev *dev,
 282                                                  struct i40iw_sc_cq *cq, u32 *compl_err)
 283{
 284        struct i40iw_qp_uk *qp;
 285        struct i40iw_cq_uk *cq_uk = &cq->cq_uk;
 286        struct i40iw_puda_completion_info info;
 287        enum i40iw_status_code ret = 0;
 288        struct i40iw_puda_buf *buf;
 289        struct i40iw_puda_rsrc *rsrc;
 290        void *sqwrid;
 291        u8 cq_type = cq->cq_type;
 292        unsigned long   flags;
 293
 294        if ((cq_type == I40IW_CQ_TYPE_ILQ) || (cq_type == I40IW_CQ_TYPE_IEQ)) {
 295                rsrc = (cq_type == I40IW_CQ_TYPE_ILQ) ? dev->ilq : dev->ieq;
 296        } else {
 297                i40iw_debug(dev, I40IW_DEBUG_PUDA, "%s qp_type error\n", __func__);
 298                return I40IW_ERR_BAD_PTR;
 299        }
 300        memset(&info, 0, sizeof(info));
 301        ret = i40iw_puda_poll_info(cq, &info);
 302        *compl_err = info.compl_error;
 303        if (ret == I40IW_ERR_QUEUE_EMPTY)
 304                return ret;
 305        if (ret)
 306                goto done;
 307
 308        qp = info.qp;
 309        if (!qp || !rsrc) {
 310                ret = I40IW_ERR_BAD_PTR;
 311                goto done;
 312        }
 313
 314        if (qp->qp_id != rsrc->qp_id) {
 315                ret = I40IW_ERR_BAD_PTR;
 316                goto done;
 317        }
 318
 319        if (info.q_type == I40IW_CQE_QTYPE_RQ) {
 320                buf = (struct i40iw_puda_buf *)(uintptr_t)qp->rq_wrid_array[info.wqe_idx];
 321                /* Get all the tcpip information in the buf header */
 322                ret = i40iw_puda_get_tcpip_info(&info, buf);
 323                if (ret) {
 324                        rsrc->stats_rcvd_pkt_err++;
 325                        if (cq_type == I40IW_CQ_TYPE_ILQ) {
 326                                i40iw_ilq_putback_rcvbuf(&rsrc->qp,
 327                                                         info.wqe_idx);
 328                        } else {
 329                                i40iw_puda_ret_bufpool(rsrc, buf);
 330                                i40iw_puda_replenish_rq(rsrc, false);
 331                        }
 332                        goto done;
 333                }
 334
 335                rsrc->stats_pkt_rcvd++;
 336                rsrc->compl_rxwqe_idx = info.wqe_idx;
 337                i40iw_debug(dev, I40IW_DEBUG_PUDA, "%s RQ completion\n", __func__);
 338                rsrc->receive(rsrc->dev, buf);
 339                if (cq_type == I40IW_CQ_TYPE_ILQ)
 340                        i40iw_ilq_putback_rcvbuf(&rsrc->qp, info.wqe_idx);
 341                else
 342                        i40iw_puda_replenish_rq(rsrc, false);
 343
 344        } else {
 345                i40iw_debug(dev, I40IW_DEBUG_PUDA, "%s SQ completion\n", __func__);
 346                sqwrid = (void *)(uintptr_t)qp->sq_wrtrk_array[info.wqe_idx].wrid;
 347                I40IW_RING_SET_TAIL(qp->sq_ring, info.wqe_idx);
 348                rsrc->xmit_complete(rsrc->dev, sqwrid);
 349                spin_lock_irqsave(&rsrc->bufpool_lock, flags);
 350                rsrc->tx_wqe_avail_cnt++;
 351                spin_unlock_irqrestore(&rsrc->bufpool_lock, flags);
 352                if (!list_empty(&dev->ilq->txpend))
 353                        i40iw_puda_send_buf(dev->ilq, NULL);
 354        }
 355
 356done:
 357        I40IW_RING_MOVE_HEAD(cq_uk->cq_ring, ret);
 358        if (I40IW_RING_GETCURRENT_HEAD(cq_uk->cq_ring) == 0)
 359                cq_uk->polarity = !cq_uk->polarity;
 360        /* update cq tail in cq shadow memory also */
 361        I40IW_RING_MOVE_TAIL(cq_uk->cq_ring);
 362        set_64bit_val(cq_uk->shadow_area, 0,
 363                      I40IW_RING_GETCURRENT_HEAD(cq_uk->cq_ring));
 364        return 0;
 365}
 366
 367/**
 368 * i40iw_puda_send - complete send wqe for transmit
 369 * @qp: puda qp for send
 370 * @info: buffer information for transmit
 371 */
 372enum i40iw_status_code i40iw_puda_send(struct i40iw_sc_qp *qp,
 373                                       struct i40iw_puda_send_info *info)
 374{
 375        u64 *wqe;
 376        u32 iplen, l4len;
 377        u64 header[2];
 378        u32 wqe_idx;
 379        u8 iipt;
 380
 381        /* number of 32-bit DWORDs in the header */
 382        l4len = info->tcplen >> 2;
 383        if (info->ipv4) {
 384                iipt = 3;
 385                iplen = 5;
 386        } else {
 387                iipt = 1;
 388                iplen = 10;
 389        }
 390
 391        wqe = i40iw_puda_get_next_send_wqe(&qp->qp_uk, &wqe_idx);
 392        if (!wqe)
 393                return I40IW_ERR_QP_TOOMANY_WRS_POSTED;
 394        qp->qp_uk.sq_wrtrk_array[wqe_idx].wrid = (uintptr_t)info->scratch;
 395        /* Third line of WQE descriptor */
 396        /* maclen is in words */
 397        header[0] = LS_64((info->maclen >> 1), I40IW_UDA_QPSQ_MACLEN) |
 398                    LS_64(iplen, I40IW_UDA_QPSQ_IPLEN) | LS_64(1, I40IW_UDA_QPSQ_L4T) |
 399                    LS_64(iipt, I40IW_UDA_QPSQ_IIPT) |
 400                    LS_64(l4len, I40IW_UDA_QPSQ_L4LEN);
 401        /* Fourth line of WQE descriptor */
 402        header[1] = LS_64(I40IW_OP_TYPE_SEND, I40IW_UDA_QPSQ_OPCODE) |
 403                    LS_64(1, I40IW_UDA_QPSQ_SIGCOMPL) |
 404                    LS_64(info->doloopback, I40IW_UDA_QPSQ_DOLOOPBACK) |
 405                    LS_64(qp->qp_uk.swqe_polarity, I40IW_UDA_QPSQ_VALID);
 406
 407        set_64bit_val(wqe, 0, info->paddr);
 408        set_64bit_val(wqe, 8, LS_64(info->len, I40IWQPSQ_FRAG_LEN));
 409        set_64bit_val(wqe, 16, header[0]);
 410        set_64bit_val(wqe, 24, header[1]);
 411
 412        i40iw_debug_buf(qp->dev, I40IW_DEBUG_PUDA, "PUDA SEND WQE", wqe, 32);
 413        i40iw_qp_post_wr(&qp->qp_uk);
 414        return 0;
 415}
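
/*
 * The header fields above are encoded in hardware units rather than
 * bytes: l4len is the TCP header length in 32-bit words (tcplen >> 2),
 * maclen is halved into 16-bit words, and iplen is 5 words for a
 * 20-byte IPv4 header or 10 words for a 40-byte IPv6 header.  For
 * example, a 20-byte TCP header with no options yields l4len = 5 and a
 * 14-byte Ethernet header yields a MACLEN field of 7.
 */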
 416
 417/**
 418 * i40iw_puda_send_buf - transmit puda buffer
 419 * @rsrc: resource to use for buffer
 420 * @buf: puda buffer to transmit
 421 */
 422void i40iw_puda_send_buf(struct i40iw_puda_rsrc *rsrc, struct i40iw_puda_buf *buf)
 423{
 424        struct i40iw_puda_send_info info;
 425        enum i40iw_status_code ret = 0;
 426        unsigned long   flags;
 427
 428        spin_lock_irqsave(&rsrc->bufpool_lock, flags);
 429        /* if no wqe available or not from a completion and we have
 430         * pending buffers, we must queue new buffer
 431         */
 432        if (!rsrc->tx_wqe_avail_cnt || (buf && !list_empty(&rsrc->txpend))) {
 433                list_add_tail(&buf->list, &rsrc->txpend);
 434                spin_unlock_irqrestore(&rsrc->bufpool_lock, flags);
 435                rsrc->stats_sent_pkt_q++;
 436                if (rsrc->type == I40IW_PUDA_RSRC_TYPE_ILQ)
 437                        i40iw_debug(rsrc->dev, I40IW_DEBUG_PUDA,
 438                                    "%s: adding to txpend\n", __func__);
 439                return;
 440        }
 441        rsrc->tx_wqe_avail_cnt--;
 442        /* if we are coming from a completion and have pending buffers
 443         * then get one from the pending list
 444         */
 445        if (!buf) {
 446                buf = i40iw_puda_get_listbuf(&rsrc->txpend);
 447                if (!buf)
 448                        goto done;
 449        }
 450
 451        info.scratch = (void *)buf;
 452        info.paddr = buf->mem.pa;
 453        info.len = buf->totallen;
 454        info.tcplen = buf->tcphlen;
 455        info.maclen = buf->maclen;
 456        info.ipv4 = buf->ipv4;
 457        info.doloopback = (rsrc->type == I40IW_PUDA_RSRC_TYPE_IEQ);
 458
 459        ret = i40iw_puda_send(&rsrc->qp, &info);
 460        if (ret) {
 461                rsrc->tx_wqe_avail_cnt++;
 462                rsrc->stats_sent_pkt_q++;
 463                list_add(&buf->list, &rsrc->txpend);
 464                if (rsrc->type == I40IW_PUDA_RSRC_TYPE_ILQ)
 465                        i40iw_debug(rsrc->dev, I40IW_DEBUG_PUDA,
 466                                    "%s: adding to puda_send\n", __func__);
 467        } else {
 468                rsrc->stats_pkt_sent++;
 469        }
 470done:
 471        spin_unlock_irqrestore(&rsrc->bufpool_lock, flags);
 472}
 473
 474/**
 475 * i40iw_puda_qp_setctx - during init, set qp's context
 476 * @rsrc: qp's resource
 477 */
 478static void i40iw_puda_qp_setctx(struct i40iw_puda_rsrc *rsrc)
 479{
 480        struct i40iw_sc_qp *qp = &rsrc->qp;
 481        u64 *qp_ctx = qp->hw_host_ctx;
 482
 483        set_64bit_val(qp_ctx, 8, qp->sq_pa);
 484        set_64bit_val(qp_ctx, 16, qp->rq_pa);
 485
 486        set_64bit_val(qp_ctx, 24,
 487                      LS_64(qp->hw_rq_size, I40IWQPC_RQSIZE) |
 488                      LS_64(qp->hw_sq_size, I40IWQPC_SQSIZE));
 489
 490        set_64bit_val(qp_ctx, 48, LS_64(1514, I40IWQPC_SNDMSS));
 491        set_64bit_val(qp_ctx, 56, 0);
 492        set_64bit_val(qp_ctx, 64, 1);
 493
 494        set_64bit_val(qp_ctx, 136,
 495                      LS_64(rsrc->cq_id, I40IWQPC_TXCQNUM) |
 496                      LS_64(rsrc->cq_id, I40IWQPC_RXCQNUM));
 497
 498        set_64bit_val(qp_ctx, 160, LS_64(1, I40IWQPC_PRIVEN));
 499
 500        set_64bit_val(qp_ctx, 168,
 501                      LS_64((uintptr_t)qp, I40IWQPC_QPCOMPCTX));
 502
 503        set_64bit_val(qp_ctx, 176,
 504                      LS_64(qp->sq_tph_val, I40IWQPC_SQTPHVAL) |
 505                      LS_64(qp->rq_tph_val, I40IWQPC_RQTPHVAL) |
 506                      LS_64(qp->qs_handle, I40IWQPC_QSHANDLE));
 507
 508        i40iw_debug_buf(rsrc->dev, I40IW_DEBUG_PUDA, "PUDA QP CONTEXT",
 509                        qp_ctx, I40IW_QP_CTX_SIZE);
 510}
 511
 512/**
 513 * i40iw_puda_qp_wqe - setup wqe for qp create
 514 * @rsrc: resource for qp
 515 */
 516static enum i40iw_status_code i40iw_puda_qp_wqe(struct i40iw_puda_rsrc *rsrc)
 517{
 518        struct i40iw_sc_qp *qp = &rsrc->qp;
 519        struct i40iw_sc_dev *dev = rsrc->dev;
 520        struct i40iw_sc_cqp *cqp;
 521        u64 *wqe;
 522        u64 header;
 523        struct i40iw_ccq_cqe_info compl_info;
 524        enum i40iw_status_code status = 0;
 525
 526        cqp = dev->cqp;
 527        wqe = i40iw_sc_cqp_get_next_send_wqe(cqp, 0);
 528        if (!wqe)
 529                return I40IW_ERR_RING_FULL;
 530
 531        set_64bit_val(wqe, 16, qp->hw_host_ctx_pa);
 532        set_64bit_val(wqe, 40, qp->shadow_area_pa);
 533        header = qp->qp_uk.qp_id |
 534                 LS_64(I40IW_CQP_OP_CREATE_QP, I40IW_CQPSQ_OPCODE) |
 535                 LS_64(I40IW_QP_TYPE_UDA, I40IW_CQPSQ_QP_QPTYPE) |
 536                 LS_64(1, I40IW_CQPSQ_QP_CQNUMVALID) |
 537                 LS_64(2, I40IW_CQPSQ_QP_NEXTIWSTATE) |
 538                 LS_64(cqp->polarity, I40IW_CQPSQ_WQEVALID);
 539
 540        set_64bit_val(wqe, 24, header);
 541
 542        i40iw_debug_buf(cqp->dev, I40IW_DEBUG_PUDA, "PUDA CQE", wqe, 32);
 543        i40iw_sc_cqp_post_sq(cqp);
 544        status = dev->cqp_ops->poll_for_cqp_op_done(dev->cqp,
 545                                                    I40IW_CQP_OP_CREATE_QP,
 546                                                    &compl_info);
 547        return status;
 548}
 549
 550/**
 551 * i40iw_puda_qp_create - create qp for resource
 552 * @rsrc: resource to use for buffer
 553 */
 554static enum i40iw_status_code i40iw_puda_qp_create(struct i40iw_puda_rsrc *rsrc)
 555{
 556        struct i40iw_sc_qp *qp = &rsrc->qp;
 557        struct i40iw_qp_uk *ukqp = &qp->qp_uk;
 558        enum i40iw_status_code ret = 0;
 559        u32 sq_size, rq_size, t_size;
 560        struct i40iw_dma_mem *mem;
 561
 562        sq_size = rsrc->sq_size * I40IW_QP_WQE_MIN_SIZE;
 563        rq_size = rsrc->rq_size * I40IW_QP_WQE_MIN_SIZE;
 564        t_size = (sq_size + rq_size + (I40IW_SHADOW_AREA_SIZE << 3) +
 565                  I40IW_QP_CTX_SIZE);
 566        /* Get page aligned memory */
 567        ret =
 568            i40iw_allocate_dma_mem(rsrc->dev->hw, &rsrc->qpmem, t_size,
 569                                   I40IW_HW_PAGE_SIZE);
 570        if (ret) {
 571                i40iw_debug(rsrc->dev, I40IW_DEBUG_PUDA, "%s: error dma mem\n", __func__);
 572                return ret;
 573        }
 574
 575        mem = &rsrc->qpmem;
 576        memset(mem->va, 0, t_size);
 577        qp->hw_sq_size = i40iw_get_encoded_wqe_size(rsrc->sq_size, false);
 578        qp->hw_rq_size = i40iw_get_encoded_wqe_size(rsrc->rq_size, false);
 579        qp->pd = &rsrc->sc_pd;
 580        qp->qp_type = I40IW_QP_TYPE_UDA;
 581        qp->dev = rsrc->dev;
 582        qp->back_qp = (void *)rsrc;
 583        qp->sq_pa = mem->pa;
 584        qp->rq_pa = qp->sq_pa + sq_size;
 585        ukqp->sq_base = mem->va;
 586        ukqp->rq_base = &ukqp->sq_base[rsrc->sq_size];
 587        ukqp->shadow_area = ukqp->rq_base[rsrc->rq_size].elem;
 588        qp->shadow_area_pa = qp->rq_pa + rq_size;
 589        qp->hw_host_ctx = ukqp->shadow_area + I40IW_SHADOW_AREA_SIZE;
 590        qp->hw_host_ctx_pa =
 591                qp->shadow_area_pa + (I40IW_SHADOW_AREA_SIZE << 3);
 592        ukqp->qp_id = rsrc->qp_id;
 593        ukqp->sq_wrtrk_array = rsrc->sq_wrtrk_array;
 594        ukqp->rq_wrid_array = rsrc->rq_wrid_array;
 595
 596        ukqp->qp_id = rsrc->qp_id;
 597        ukqp->sq_size = rsrc->sq_size;
 598        ukqp->rq_size = rsrc->rq_size;
 599
 600        I40IW_RING_INIT(ukqp->sq_ring, ukqp->sq_size);
 601        I40IW_RING_INIT(ukqp->initial_ring, ukqp->sq_size);
 602        I40IW_RING_INIT(ukqp->rq_ring, ukqp->rq_size);
 603
 604        if (qp->pd->dev->is_pf)
 605                ukqp->wqe_alloc_reg = (u32 __iomem *)(i40iw_get_hw_addr(qp->pd->dev) +
 606                                                    I40E_PFPE_WQEALLOC);
 607        else
 608                ukqp->wqe_alloc_reg = (u32 __iomem *)(i40iw_get_hw_addr(qp->pd->dev) +
 609                                                    I40E_VFPE_WQEALLOC1);
 610
 611        qp->qs_handle = qp->dev->qs_handle;
 612        i40iw_puda_qp_setctx(rsrc);
 613        ret = i40iw_puda_qp_wqe(rsrc);
 614        if (ret)
 615                i40iw_free_dma_mem(rsrc->dev->hw, &rsrc->qpmem);
 616        return ret;
 617}
 618
 619/**
 620 * i40iw_puda_cq_create - create cq for resource
 621 * @rsrc: resource for which cq to create
 622 */
 623static enum i40iw_status_code i40iw_puda_cq_create(struct i40iw_puda_rsrc *rsrc)
 624{
 625        struct i40iw_sc_dev *dev = rsrc->dev;
 626        struct i40iw_sc_cq *cq = &rsrc->cq;
 627        u64 *wqe;
 628        struct i40iw_sc_cqp *cqp;
 629        u64 header;
 630        enum i40iw_status_code ret = 0;
 631        u32 tsize, cqsize;
 632        u32 shadow_read_threshold = 128;
 633        struct i40iw_dma_mem *mem;
 634        struct i40iw_ccq_cqe_info compl_info;
 635        struct i40iw_cq_init_info info;
 636        struct i40iw_cq_uk_init_info *init_info = &info.cq_uk_init_info;
 637
 638        cq->back_cq = (void *)rsrc;
 639        cqsize = rsrc->cq_size * (sizeof(struct i40iw_cqe));
 640        tsize = cqsize + sizeof(struct i40iw_cq_shadow_area);
 641        ret = i40iw_allocate_dma_mem(dev->hw, &rsrc->cqmem, tsize,
 642                                     I40IW_CQ0_ALIGNMENT_MASK);
 643        if (ret)
 644                return ret;
 645
 646        mem = &rsrc->cqmem;
 647        memset(&info, 0, sizeof(info));
 648        info.dev = dev;
 649        info.type = (rsrc->type == I40IW_PUDA_RSRC_TYPE_ILQ) ?
 650                         I40IW_CQ_TYPE_ILQ : I40IW_CQ_TYPE_IEQ;
 651        info.shadow_read_threshold = rsrc->cq_size >> 2;
 652        info.ceq_id_valid = true;
 653        info.cq_base_pa = mem->pa;
 654        info.shadow_area_pa = mem->pa + cqsize;
 655        init_info->cq_base = mem->va;
 656        init_info->shadow_area = (u64 *)((u8 *)mem->va + cqsize);
 657        init_info->cq_size = rsrc->cq_size;
 658        init_info->cq_id = rsrc->cq_id;
 659        ret = dev->iw_priv_cq_ops->cq_init(cq, &info);
 660        if (ret)
 661                goto error;
 662        cqp = dev->cqp;
 663        wqe = i40iw_sc_cqp_get_next_send_wqe(cqp, 0);
 664        if (!wqe) {
 665                ret = I40IW_ERR_RING_FULL;
 666                goto error;
 667        }
 668
 669        set_64bit_val(wqe, 0, rsrc->cq_size);
 670        set_64bit_val(wqe, 8, RS_64_1(cq, 1));
 671        set_64bit_val(wqe, 16, LS_64(shadow_read_threshold, I40IW_CQPSQ_CQ_SHADOW_READ_THRESHOLD));
 672        set_64bit_val(wqe, 32, cq->cq_pa);
 673
 674        set_64bit_val(wqe, 40, cq->shadow_area_pa);
 675
 676        header = rsrc->cq_id |
 677            LS_64(I40IW_CQP_OP_CREATE_CQ, I40IW_CQPSQ_OPCODE) |
 678            LS_64(1, I40IW_CQPSQ_CQ_CHKOVERFLOW) |
 679            LS_64(1, I40IW_CQPSQ_CQ_ENCEQEMASK) |
 680            LS_64(1, I40IW_CQPSQ_CQ_CEQIDVALID) |
 681            LS_64(cqp->polarity, I40IW_CQPSQ_WQEVALID);
 682        set_64bit_val(wqe, 24, header);
 683
 684        i40iw_debug_buf(dev, I40IW_DEBUG_PUDA, "PUDA CQE",
 685                        wqe, I40IW_CQP_WQE_SIZE * 8);
 686
 687        i40iw_sc_cqp_post_sq(dev->cqp);
 688        ret = dev->cqp_ops->poll_for_cqp_op_done(dev->cqp,
 689                                                 I40IW_CQP_OP_CREATE_CQ,
 690                                                 &compl_info);
 691
 692error:
 693        if (ret)
 694                i40iw_free_dma_mem(dev->hw, &rsrc->cqmem);
 695        return ret;
 696}
 697
 698/**
 699 * i40iw_puda_dele_resources - delete all resources during close
 700 * @dev: iwarp device
 701 * @type: type of resource to delete
 702 * @reset: true if reset chip
 703 */
 704void i40iw_puda_dele_resources(struct i40iw_sc_dev *dev,
 705                               enum puda_resource_type type,
 706                               bool reset)
 707{
 708        struct i40iw_ccq_cqe_info compl_info;
 709        struct i40iw_puda_rsrc *rsrc;
 710        struct i40iw_puda_buf *buf = NULL;
 711        struct i40iw_puda_buf *nextbuf = NULL;
 712        struct i40iw_virt_mem *vmem;
 713        enum i40iw_status_code ret;
 714
 715        switch (type) {
 716        case I40IW_PUDA_RSRC_TYPE_ILQ:
 717                rsrc = dev->ilq;
 718                vmem = &dev->ilq_mem;
 719                break;
 720        case I40IW_PUDA_RSRC_TYPE_IEQ:
 721                rsrc = dev->ieq;
 722                vmem = &dev->ieq_mem;
 723                break;
 724        default:
 725                i40iw_debug(dev, I40IW_DEBUG_PUDA, "%s: error resource type = 0x%x\n",
 726                            __func__, type);
 727                return;
 728        }
 729
 730        switch (rsrc->completion) {
 731        case PUDA_HASH_CRC_COMPLETE:
 732                i40iw_free_hash_desc(rsrc->hash_desc);
 733        case PUDA_QP_CREATED:
 734                do {
 735                        if (reset)
 736                                break;
 737                        ret = dev->iw_priv_qp_ops->qp_destroy(&rsrc->qp,
 738                                                              0, false, true, true);
 739                        if (ret)
 740                                i40iw_debug(rsrc->dev, I40IW_DEBUG_PUDA,
 741                                            "%s error ieq qp destroy\n",
 742                                            __func__);
 743
 744                        ret = dev->cqp_ops->poll_for_cqp_op_done(dev->cqp,
 745                                                                 I40IW_CQP_OP_DESTROY_QP,
 746                                                                 &compl_info);
 747                        if (ret)
 748                                i40iw_debug(rsrc->dev, I40IW_DEBUG_PUDA,
 749                                            "%s error ieq qp destroy done\n",
 750                                            __func__);
 751                } while (0);
 752
 753                i40iw_free_dma_mem(dev->hw, &rsrc->qpmem);
 754                /* fallthrough */
 755        case PUDA_CQ_CREATED:
 756                do {
 757                        if (reset)
 758                                break;
 759                        ret = dev->iw_priv_cq_ops->cq_destroy(&rsrc->cq, 0, true);
 760                        if (ret)
 761                                i40iw_debug(rsrc->dev, I40IW_DEBUG_PUDA,
 762                                            "%s error ieq cq destroy\n",
 763                                            __func__);
 764
 765                        ret = dev->cqp_ops->poll_for_cqp_op_done(dev->cqp,
 766                                                                 I40IW_CQP_OP_DESTROY_CQ,
 767                                                                 &compl_info);
 768                        if (ret)
 769                                i40iw_debug(rsrc->dev, I40IW_DEBUG_PUDA,
 770                                            "%s error ieq qp destroy done\n",
 771                                            __func__);
 772                } while (0);
 773
 774                i40iw_free_dma_mem(dev->hw, &rsrc->cqmem);
 775                break;
 776        default:
 777                i40iw_debug(rsrc->dev, I40IW_DEBUG_PUDA, "%s error no resources\n", __func__);
 778                break;
 779        }
 780        /* Free all allocated puda buffers for both tx and rx */
 781        buf = rsrc->alloclist;
 782        while (buf) {
 783                nextbuf = buf->next;
 784                i40iw_puda_dele_buf(dev, buf);
 785                buf = nextbuf;
 786                rsrc->alloc_buf_count--;
 787        }
 788        i40iw_free_virt_mem(dev->hw, vmem);
 789}
 790
 791/**
 792 * i40iw_puda_allocbufs - allocate buffers for resource
 793 * @rsrc: resource for buffer allocation
 794 * @count: number of buffers to create
 795 */
 796static enum i40iw_status_code i40iw_puda_allocbufs(struct i40iw_puda_rsrc *rsrc,
 797                                                   u32 count)
 798{
 799        u32 i;
 800        struct i40iw_puda_buf *buf;
 801        struct i40iw_puda_buf *nextbuf;
 802
 803        for (i = 0; i < count; i++) {
 804                buf = i40iw_puda_alloc_buf(rsrc->dev, rsrc->buf_size);
 805                if (!buf) {
 806                        rsrc->stats_buf_alloc_fail++;
 807                        return I40IW_ERR_NO_MEMORY;
 808                }
 809                i40iw_puda_ret_bufpool(rsrc, buf);
 810                rsrc->alloc_buf_count++;
 811                if (!rsrc->alloclist) {
 812                        rsrc->alloclist = buf;
 813                } else {
 814                        nextbuf = rsrc->alloclist;
 815                        rsrc->alloclist = buf;
 816                        buf->next = nextbuf;
 817                }
 818        }
 819        rsrc->avail_buf_count = rsrc->alloc_buf_count;
 820        return 0;
 821}
 822
 823/**
 824 * i40iw_puda_create_rsrc - create resource (ilq or ieq)
 825 * @dev: iwarp device
 826 * @info: resource information
 827 */
 828enum i40iw_status_code i40iw_puda_create_rsrc(struct i40iw_sc_dev *dev,
 829                                              struct i40iw_puda_rsrc_info *info)
 830{
 831        enum i40iw_status_code ret = 0;
 832        struct i40iw_puda_rsrc *rsrc;
 833        u32 pudasize;
 834        u32 sqwridsize, rqwridsize;
 835        struct i40iw_virt_mem *vmem;
 836
 837        info->count = 1;
 838        pudasize = sizeof(struct i40iw_puda_rsrc);
 839        sqwridsize = info->sq_size * sizeof(struct i40iw_sq_uk_wr_trk_info);
 840        rqwridsize = info->rq_size * 8;
 841        switch (info->type) {
 842        case I40IW_PUDA_RSRC_TYPE_ILQ:
 843                vmem = &dev->ilq_mem;
 844                break;
 845        case I40IW_PUDA_RSRC_TYPE_IEQ:
 846                vmem = &dev->ieq_mem;
 847                break;
 848        default:
 849                return I40IW_NOT_SUPPORTED;
 850        }
 851        ret =
 852            i40iw_allocate_virt_mem(dev->hw, vmem,
 853                                    pudasize + sqwridsize + rqwridsize);
 854        if (ret)
 855                return ret;
 856        rsrc = (struct i40iw_puda_rsrc *)vmem->va;
 857        spin_lock_init(&rsrc->bufpool_lock);
 858        if (info->type == I40IW_PUDA_RSRC_TYPE_ILQ) {
 859                dev->ilq = (struct i40iw_puda_rsrc *)vmem->va;
 860                dev->ilq_count = info->count;
 861                rsrc->receive = info->receive;
 862                rsrc->xmit_complete = info->xmit_complete;
 863        } else {
 864                vmem = &dev->ieq_mem;
 865                dev->ieq_count = info->count;
 866                dev->ieq = (struct i40iw_puda_rsrc *)vmem->va;
 867                rsrc->receive = i40iw_ieq_receive;
 868                rsrc->xmit_complete = i40iw_ieq_tx_compl;
 869        }
 870
 871        rsrc->type = info->type;
 872        rsrc->sq_wrtrk_array = (struct i40iw_sq_uk_wr_trk_info *)((u8 *)vmem->va + pudasize);
 873        rsrc->rq_wrid_array = (u64 *)((u8 *)vmem->va + pudasize + sqwridsize);
 874        rsrc->mss = info->mss;
 875        /* Initialize all ieq lists */
 876        INIT_LIST_HEAD(&rsrc->bufpool);
 877        INIT_LIST_HEAD(&rsrc->txpend);
 878
 879        rsrc->tx_wqe_avail_cnt = info->sq_size - 1;
 880        dev->iw_pd_ops->pd_init(dev, &rsrc->sc_pd, info->pd_id);
 881        rsrc->qp_id = info->qp_id;
 882        rsrc->cq_id = info->cq_id;
 883        rsrc->sq_size = info->sq_size;
 884        rsrc->rq_size = info->rq_size;
 885        rsrc->cq_size = info->rq_size + info->sq_size;
 886        rsrc->buf_size = info->buf_size;
 887        rsrc->dev = dev;
 888
 889        ret = i40iw_puda_cq_create(rsrc);
 890        if (!ret) {
 891                rsrc->completion = PUDA_CQ_CREATED;
 892                ret = i40iw_puda_qp_create(rsrc);
 893        }
 894        if (ret) {
 895                i40iw_debug(dev, I40IW_DEBUG_PUDA, "[%s] error qp_create\n", __func__);
 896                goto error;
 897        }
 898        rsrc->completion = PUDA_QP_CREATED;
 899
 900        ret = i40iw_puda_allocbufs(rsrc, info->tx_buf_cnt + info->rq_size);
 901        if (ret) {
 902                i40iw_debug(dev, I40IW_DEBUG_PUDA, "[%s] error alloc_buf\n", __func__);
 903                goto error;
 904        }
 905
 906        rsrc->rxq_invalid_cnt = info->rq_size;
 907        ret = i40iw_puda_replenish_rq(rsrc, true);
 908        if (ret)
 909                goto error;
 910
 911        if (info->type == I40IW_PUDA_RSRC_TYPE_IEQ) {
 912                if (!i40iw_init_hash_desc(&rsrc->hash_desc)) {
 913                        rsrc->check_crc = true;
 914                        rsrc->completion = PUDA_HASH_CRC_COMPLETE;
 915                        ret = 0;
 916                }
 917        }
 918
 919        dev->ccq_ops->ccq_arm(&rsrc->cq);
 920        return ret;
 921 error:
 922        i40iw_puda_dele_resources(dev, info->type, false);
 923
 924        return ret;
 925}
 926
 927/**
 928 * i40iw_ilq_putback_rcvbuf - ilq buffer to put back on rq
 929 * @qp: ilq's qp resource
 930 * @wqe_idx:  wqe index of completed rcvbuf
 931 */
 932static void i40iw_ilq_putback_rcvbuf(struct i40iw_sc_qp *qp, u32 wqe_idx)
 933{
 934        u64 *wqe;
 935        u64 offset24;
 936
 937        wqe = qp->qp_uk.rq_base[wqe_idx].elem;
 938        get_64bit_val(wqe, 24, &offset24);
 939        offset24 = (offset24) ? 0 : LS_64(1, I40IWQPSQ_VALID);
 940        set_64bit_val(wqe, 24, offset24);
 941}
 942
 943/**
 944 * i40iw_ieq_get_fpdu_length - given length return fpdu length
 945 * @length: length of fpdu
 946 */
 947static u16 i40iw_ieq_get_fpdu_length(u16 length)
 948{
 949        u16 fpdu_len;
 950
 951        fpdu_len = length + I40IW_IEQ_MPA_FRAMING;
 952        fpdu_len = (fpdu_len + 3) & 0xfffffffc;
 953        return fpdu_len;
 954}
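
/*
 * Worked example for the helper above, assuming I40IW_IEQ_MPA_FRAMING
 * accounts for the 2-byte MPA length field plus the 4-byte CRC (6
 * bytes): an MPA payload length of 7 gives 7 + 6 = 13, which the mask
 * rounds up to the next 4-byte boundary, so the FPDU occupies 16 bytes
 * on the wire.
 */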
 955
 956/**
 957 * i40iw_ieq_copy_to_txbuf - copy data from rcv buf to tx buf
 958 * @buf: rcv buffer with partial
 959 * @txbuf: tx buffer for sending back
 960 * @buf_offset: rcv buffer offset to copy from
 961 * @txbuf_offset: offset in tx buf to copy to
 962 * @length: length of data to copy
 963 */
 964static void i40iw_ieq_copy_to_txbuf(struct i40iw_puda_buf *buf,
 965                                    struct i40iw_puda_buf *txbuf,
 966                                    u16 buf_offset, u32 txbuf_offset,
 967                                    u32 length)
 968{
 969        void *mem1 = (u8 *)buf->mem.va + buf_offset;
 970        void *mem2 = (u8 *)txbuf->mem.va + txbuf_offset;
 971
 972        memcpy(mem2, mem1, length);
 973}
 974
 975/**
 976 * i40iw_ieq_setup_tx_buf - setup tx buffer for partial handling
 977 * @buf: receive buffer with partial
 978 * @txbuf: buffer to prepare
 979 */
 980static void i40iw_ieq_setup_tx_buf(struct i40iw_puda_buf *buf,
 981                                   struct i40iw_puda_buf *txbuf)
 982{
 983        txbuf->maclen = buf->maclen;
 984        txbuf->tcphlen = buf->tcphlen;
 985        txbuf->ipv4 = buf->ipv4;
 986        txbuf->hdrlen = buf->hdrlen;
 987        i40iw_ieq_copy_to_txbuf(buf, txbuf, 0, 0, buf->hdrlen);
 988}
 989
 990/**
 991 * i40iw_ieq_check_first_buf - check if rcv buffer's seq is in range
 992 * @buf: receive exception buffer
 993 * @fps: first partial sequence number
 994 */
 995static void i40iw_ieq_check_first_buf(struct i40iw_puda_buf *buf, u32 fps)
 996{
 997        u32 offset;
 998
 999        if (buf->seqnum < fps) {
1000                offset = fps - buf->seqnum;
1001                if (offset > buf->datalen)
1002                        return;
1003                buf->data += offset;
1004                buf->datalen -= (u16)offset;
1005                buf->seqnum = fps;
1006        }
1007}
1008
1009/**
1010 * i40iw_ieq_compl_pfpdu - write txbuf with full fpdu
1011 * @ieq: ieq resource
1012 * @rxlist: ieq's received buffer list
1013 * @pbufl: temporary list for buffers for fpdu
1014 * @txbuf: tx buffer for fpdu
1015 * @fpdu_len: total length of fpdu
1016 */
1017static void  i40iw_ieq_compl_pfpdu(struct i40iw_puda_rsrc *ieq,
1018                                   struct list_head *rxlist,
1019                                   struct list_head *pbufl,
1020                                   struct i40iw_puda_buf *txbuf,
1021                                   u16 fpdu_len)
1022{
1023        struct i40iw_puda_buf *buf;
1024        u32 nextseqnum;
1025        u16 txoffset, bufoffset;
1026
1027        buf = i40iw_puda_get_listbuf(pbufl);
1028        if (!buf)
1029                return;
1030        nextseqnum = buf->seqnum + fpdu_len;
1031        txbuf->totallen = buf->hdrlen + fpdu_len;
1032        txbuf->data = (u8 *)txbuf->mem.va + buf->hdrlen;
1033        i40iw_ieq_setup_tx_buf(buf, txbuf);
1034
1035        txoffset = buf->hdrlen;
1036        bufoffset = (u16)(buf->data - (u8 *)buf->mem.va);
1037
1038        do {
1039                if (buf->datalen >= fpdu_len) {
1040                        /* copied full fpdu */
1041                        i40iw_ieq_copy_to_txbuf(buf, txbuf, bufoffset, txoffset, fpdu_len);
1042                        buf->datalen -= fpdu_len;
1043                        buf->data += fpdu_len;
1044                        buf->seqnum = nextseqnum;
1045                        break;
1046                }
1047                /* copy partial fpdu */
1048                i40iw_ieq_copy_to_txbuf(buf, txbuf, bufoffset, txoffset, buf->datalen);
1049                txoffset += buf->datalen;
1050                fpdu_len -= buf->datalen;
1051                i40iw_puda_ret_bufpool(ieq, buf);
1052                buf = i40iw_puda_get_listbuf(pbufl);
1053                if (!buf)
1054                        return;
1055                bufoffset = (u16)(buf->data - (u8 *)buf->mem.va);
1056        } while (1);
1057
1058        /* last buffer on the list*/
1059        if (buf->datalen)
1060                list_add(&buf->list, rxlist);
1061        else
1062                i40iw_puda_ret_bufpool(ieq, buf);
1063}
1064
1065/**
1066 * i40iw_ieq_create_pbufl - create buffer list for single fpdu
1067 * @rxlist: resource list for receive ieq buffers
1068 * @pbufl: temp. list for buffers for fpdu
1069 * @buf: first receive buffer
1070 * @fpdu_len: total length of fpdu
1071 */
1072static enum i40iw_status_code i40iw_ieq_create_pbufl(
1073                                                     struct i40iw_pfpdu *pfpdu,
1074                                                     struct list_head *rxlist,
1075                                                     struct list_head *pbufl,
1076                                                     struct i40iw_puda_buf *buf,
1077                                                     u16 fpdu_len)
1078{
1079        enum i40iw_status_code status = 0;
1080        struct i40iw_puda_buf *nextbuf;
1081        u32     nextseqnum;
1082        u16 plen = fpdu_len - buf->datalen;
1083        bool done = false;
1084
1085        nextseqnum = buf->seqnum + buf->datalen;
1086        do {
1087                nextbuf = i40iw_puda_get_listbuf(rxlist);
1088                if (!nextbuf) {
1089                        status = I40IW_ERR_list_empty;
1090                        break;
1091                }
1092                list_add_tail(&nextbuf->list, pbufl);
1093                if (nextbuf->seqnum != nextseqnum) {
1094                        pfpdu->bad_seq_num++;
1095                        status = I40IW_ERR_SEQ_NUM;
1096                        break;
1097                }
1098                if (nextbuf->datalen >= plen) {
1099                        done = true;
1100                } else {
1101                        plen -= nextbuf->datalen;
1102                        nextseqnum = nextbuf->seqnum + nextbuf->datalen;
1103                }
1104
1105        } while (!done);
1106
1107        return status;
1108}
1109
1110/**
1111 * i40iw_ieq_handle_partial - process partial fpdu buffer
1112 * @ieq: ieq resource
1113 * @pfpdu: partial management per user qp
1114 * @buf: receive buffer
1115 * @fpdu_len: fpdu len in the buffer
1116 */
1117static enum i40iw_status_code i40iw_ieq_handle_partial(struct i40iw_puda_rsrc *ieq,
1118                                                       struct i40iw_pfpdu *pfpdu,
1119                                                       struct i40iw_puda_buf *buf,
1120                                                       u16 fpdu_len)
1121{
1122        enum i40iw_status_code status = 0;
1123        u8 *crcptr;
1124        u32 mpacrc;
1125        u32 seqnum = buf->seqnum;
1126        struct list_head pbufl; /* partial buffer list */
1127        struct i40iw_puda_buf *txbuf = NULL;
1128        struct list_head *rxlist = &pfpdu->rxlist;
1129
1130        INIT_LIST_HEAD(&pbufl);
1131        list_add(&buf->list, &pbufl);
1132
1133        status = i40iw_ieq_create_pbufl(pfpdu, rxlist, &pbufl, buf, fpdu_len);
1134        if (status)
1135                goto error;
1136
1137        txbuf = i40iw_puda_get_bufpool(ieq);
1138        if (!txbuf) {
1139                pfpdu->no_tx_bufs++;
1140                status = I40IW_ERR_NO_TXBUFS;
1141                goto error;
1142        }
1143
1144        i40iw_ieq_compl_pfpdu(ieq, rxlist, &pbufl, txbuf, fpdu_len);
1145        i40iw_ieq_update_tcpip_info(txbuf, fpdu_len, seqnum);
1146        crcptr = txbuf->data + fpdu_len - 4;
1147        mpacrc = *(u32 *)crcptr;
1148        if (ieq->check_crc) {
1149                status = i40iw_ieq_check_mpacrc(ieq->hash_desc, txbuf->data,
1150                                                (fpdu_len - 4), mpacrc);
1151                if (status) {
1152                        i40iw_debug(ieq->dev, I40IW_DEBUG_IEQ,
1153                                    "%s: error bad crc\n", __func__);
1154                        goto error;
1155                }
1156        }
1157
1158        i40iw_debug_buf(ieq->dev, I40IW_DEBUG_IEQ, "IEQ TX BUFFER",
1159                        txbuf->mem.va, txbuf->totallen);
1160        i40iw_puda_send_buf(ieq, txbuf);
1161        pfpdu->rcv_nxt = seqnum + fpdu_len;
1162        return status;
1163 error:
1164        while (!list_empty(&pbufl)) {
1165                buf = (struct i40iw_puda_buf *)(pbufl.prev);
1166                list_del(&buf->list);
1167                list_add(&buf->list, rxlist);
1168        }
1169        if (txbuf)
1170                i40iw_puda_ret_bufpool(ieq, txbuf);
1171        return status;
1172}
1173
1174/**
1175 * i40iw_ieq_process_buf - process buffer rcvd for ieq
1176 * @ieq: ieq resource
1177 * @pfpdu: partial management per user qp
1178 * @buf: receive buffer
1179 */
1180static enum i40iw_status_code i40iw_ieq_process_buf(struct i40iw_puda_rsrc *ieq,
1181                                                    struct i40iw_pfpdu *pfpdu,
1182                                                    struct i40iw_puda_buf *buf)
1183{
1184        u16 fpdu_len = 0;
1185        u16 datalen = buf->datalen;
1186        u8 *datap = buf->data;
1187        u8 *crcptr;
1188        u16 ioffset = 0;
1189        u32 mpacrc;
1190        u32 seqnum = buf->seqnum;
1191        u16 length = 0;
1192        u16 full = 0;
1193        bool partial = false;
1194        struct i40iw_puda_buf *txbuf;
1195        struct list_head *rxlist = &pfpdu->rxlist;
1196        enum i40iw_status_code ret = 0;
1197        enum i40iw_status_code status = 0;
1198
1199        ioffset = (u16)(buf->data - (u8 *)buf->mem.va);
1200        while (datalen) {
1201                fpdu_len = i40iw_ieq_get_fpdu_length(ntohs(*(__be16 *)datap));
1202                if (fpdu_len > pfpdu->max_fpdu_data) {
1203                        i40iw_debug(ieq->dev, I40IW_DEBUG_IEQ,
1204                                    "%s: error bad fpdu_len\n", __func__);
1205                        status = I40IW_ERR_MPA_CRC;
1206                        list_add(&buf->list, rxlist);
1207                        return status;
1208                }
1209
1210                if (datalen < fpdu_len) {
1211                        partial = true;
1212                        break;
1213                }
1214                crcptr = datap + fpdu_len - 4;
1215                mpacrc = *(u32 *)crcptr;
1216                if (ieq->check_crc)
1217                        ret = i40iw_ieq_check_mpacrc(ieq->hash_desc,
1218                                                     datap, fpdu_len - 4, mpacrc);
1219                if (ret) {
1220                        status = I40IW_ERR_MPA_CRC;
1221                        list_add(&buf->list, rxlist);
1222                        return status;
1223                }
1224                full++;
1225                pfpdu->fpdu_processed++;
1226                datap += fpdu_len;
1227                length += fpdu_len;
1228                datalen -= fpdu_len;
1229        }
1230        if (full) {
1231                /* copy full pdu's in the txbuf and send them out */
1232                txbuf = i40iw_puda_get_bufpool(ieq);
1233                if (!txbuf) {
1234                        pfpdu->no_tx_bufs++;
1235                        status = I40IW_ERR_NO_TXBUFS;
1236                        list_add(&buf->list, rxlist);
1237                        return status;
1238                }
1239                /* modify txbuf's buffer header */
1240                i40iw_ieq_setup_tx_buf(buf, txbuf);
1241                /* copy full fpdu's to new buffer */
1242                i40iw_ieq_copy_to_txbuf(buf, txbuf, ioffset, buf->hdrlen,
1243                                        length);
1244                txbuf->totallen = buf->hdrlen + length;
1245
1246                i40iw_ieq_update_tcpip_info(txbuf, length, buf->seqnum);
1247                i40iw_puda_send_buf(ieq, txbuf);
1248
1249                if (!datalen) {
1250                        pfpdu->rcv_nxt = buf->seqnum + length;
1251                        i40iw_puda_ret_bufpool(ieq, buf);
1252                        return status;
1253                }
1254                buf->data = datap;
1255                buf->seqnum = seqnum + length;
1256                buf->datalen = datalen;
1257                pfpdu->rcv_nxt = buf->seqnum;
1258        }
1259        if (partial)
1260                status = i40iw_ieq_handle_partial(ieq, pfpdu, buf, fpdu_len);
1261
1262        return status;
1263}
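
/*
 * In short, the routine above walks complete FPDUs out of the receive
 * buffer (verifying each MPA CRC when check_crc is set), coalesces them
 * into a single tx buffer handed to i40iw_puda_send_buf(), and leaves
 * any trailing partial FPDU to i40iw_ieq_handle_partial(), which
 * stitches it together from later buffers on the rxlist.
 */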
1264
1265/**
1266 * i40iw_ieq_process_fpdus - process fpdu's buffers on its list
1267 * @qp: qp for which partial fpdus
1268 * @ieq: ieq resource
1269 */
1270static void i40iw_ieq_process_fpdus(struct i40iw_sc_qp *qp,
1271                                    struct i40iw_puda_rsrc *ieq)
1272{
1273        struct i40iw_pfpdu *pfpdu = &qp->pfpdu;
1274        struct list_head *rxlist = &pfpdu->rxlist;
1275        struct i40iw_puda_buf *buf;
1276        enum i40iw_status_code status;
1277
1278        do {
1279                if (list_empty(rxlist))
1280                        break;
1281                buf = i40iw_puda_get_listbuf(rxlist);
1282                if (!buf) {
1283                        i40iw_debug(ieq->dev, I40IW_DEBUG_IEQ,
1284                                    "%s: error no buf\n", __func__);
1285                        break;
1286                }
1287                if (buf->seqnum != pfpdu->rcv_nxt) {
1288                        /* This could be out of order or missing packet */
1289                        pfpdu->out_of_order++;
1290                        list_add(&buf->list, rxlist);
1291                        break;
1292                }
1293                /* keep processing buffers from the head of the list */
1294                status = i40iw_ieq_process_buf(ieq, pfpdu, buf);
1295                if (status == I40IW_ERR_MPA_CRC) {
1296                        pfpdu->mpa_crc_err = true;
1297                        while (!list_empty(rxlist)) {
1298                                buf = i40iw_puda_get_listbuf(rxlist);
1299                                i40iw_puda_ret_bufpool(ieq, buf);
1300                                pfpdu->crc_err++;
1301                        }
1302                        /* create CQP for AE */
1303                        i40iw_ieq_mpa_crc_ae(ieq->dev, qp);
1304                }
1305        } while (!status);
1306}
1307
1308/**
1309 * i40iw_ieq_handle_exception - handle qp's exception
1310 * @ieq: ieq resource
1311 * @qp: qp receiving exception
1312 * @buf: receive buffer
1313 */
1314static void i40iw_ieq_handle_exception(struct i40iw_puda_rsrc *ieq,
1315                                       struct i40iw_sc_qp *qp,
1316                                       struct i40iw_puda_buf *buf)
1317{
1318        struct i40iw_puda_buf *tmpbuf = NULL;
1319        struct i40iw_pfpdu *pfpdu = &qp->pfpdu;
1320        u32 *hw_host_ctx = (u32 *)qp->hw_host_ctx;
1321        u32 rcv_wnd = hw_host_ctx[23];
1322        /* first partial seq # in q2 */
1323        u32 fps = qp->q2_buf[16];
1324        struct list_head *rxlist = &pfpdu->rxlist;
1325        struct list_head *plist;
1326
1327        pfpdu->total_ieq_bufs++;
1328
1329        if (pfpdu->mpa_crc_err) {
1330                pfpdu->crc_err++;
1331                goto error;
1332        }
1333        if (pfpdu->mode && (fps != pfpdu->fps)) {
1334                /* clean up qp as it is new partial sequence */
1335                i40iw_ieq_cleanup_qp(ieq->dev, qp);
1336                i40iw_debug(ieq->dev, I40IW_DEBUG_IEQ,
1337                            "%s: restarting new partial\n", __func__);
1338                pfpdu->mode = false;
1339        }
1340
1341        if (!pfpdu->mode) {
1342                i40iw_debug_buf(ieq->dev, I40IW_DEBUG_IEQ, "Q2 BUFFER", (u64 *)qp->q2_buf, 128);
1343                /* First_Partial_Sequence_Number check */
1344                pfpdu->rcv_nxt = fps;
1345                pfpdu->fps = fps;
1346                pfpdu->mode = true;
1347                pfpdu->max_fpdu_data = ieq->mss;
1348                pfpdu->pmode_count++;
1349                INIT_LIST_HEAD(rxlist);
1350                i40iw_ieq_check_first_buf(buf, fps);
1351        }
1352
1353        if (!(rcv_wnd >= (buf->seqnum - pfpdu->rcv_nxt))) {
1354                pfpdu->bad_seq_num++;
1355                goto error;
1356        }
1357
1358        if (!list_empty(rxlist)) {
1359                tmpbuf = (struct i40iw_puda_buf *)rxlist->next;
1360                plist = &tmpbuf->list;
1361                while ((struct list_head *)tmpbuf != rxlist) {
1362                        if ((int)(buf->seqnum - tmpbuf->seqnum) < 0)
1363                                break;
1364                        tmpbuf = (struct i40iw_puda_buf *)plist->next;
1365                }
1366                /* Insert buf before tmpbuf */
1367                list_add_tail(&buf->list, &tmpbuf->list);
1368        } else {
1369                list_add_tail(&buf->list, rxlist);
1370        }
1371        i40iw_ieq_process_fpdus(qp, ieq);
1372        return;
1373 error:
1374        i40iw_puda_ret_bufpool(ieq, buf);
1375}
1376
1377/**
1378 * i40iw_ieq_receive - received exception buffer
1379 * @dev: iwarp device
1380 * @buf: exception buffer received
1381 */
1382static void i40iw_ieq_receive(struct i40iw_sc_dev *dev,
1383                              struct i40iw_puda_buf *buf)
1384{
1385        struct i40iw_puda_rsrc *ieq = dev->ieq;
1386        struct i40iw_sc_qp *qp = NULL;
1387        u32 wqe_idx = ieq->compl_rxwqe_idx;
1388
1389        qp = i40iw_ieq_get_qp(dev, buf);
1390        if (!qp) {
1391                ieq->stats_bad_qp_id++;
1392                i40iw_puda_ret_bufpool(ieq, buf);
1393        } else {
1394                i40iw_ieq_handle_exception(ieq, qp, buf);
1395        }
1396        /*
1397         * ieq->rx_wqe_idx is used by i40iw_puda_replenish_rq()
1398         * on which wqe_idx to start replenish rq
1399         */
1400        if (!ieq->rxq_invalid_cnt)
1401                ieq->rx_wqe_idx = wqe_idx;
1402        ieq->rxq_invalid_cnt++;
1403}
1404
1405/**
1406 * i40iw_ieq_tx_compl - put back after sending completed exception buffer
1407 * @dev: iwarp device
1408 * @sqwrid: pointer to puda buffer
1409 */
1410static void i40iw_ieq_tx_compl(struct i40iw_sc_dev *dev, void *sqwrid)
1411{
1412        struct i40iw_puda_rsrc *ieq = dev->ieq;
1413        struct i40iw_puda_buf *buf = (struct i40iw_puda_buf *)sqwrid;
1414
1415        i40iw_puda_ret_bufpool(ieq, buf);
1416        if (!list_empty(&ieq->txpend)) {
1417                buf = i40iw_puda_get_listbuf(&ieq->txpend);
1418                i40iw_puda_send_buf(ieq, buf);
1419        }
1420}
1421
1422/**
1423 * i40iw_ieq_cleanup_qp - qp is being destroyed
1424 * @dev: iwarp device
1425 * @qp: qp whose pending fpdu buffers are freed
1426 */
1427void i40iw_ieq_cleanup_qp(struct i40iw_sc_dev *dev, struct i40iw_sc_qp *qp)
1428{
1429        struct i40iw_puda_buf *buf;
1430        struct i40iw_pfpdu *pfpdu = &qp->pfpdu;
1431        struct list_head *rxlist = &pfpdu->rxlist;
1432        struct i40iw_puda_rsrc *ieq = dev->ieq;
1433
1434        if (!pfpdu->mode)
1435                return;
1436        while (!list_empty(rxlist)) {
1437                buf = i40iw_puda_get_listbuf(rxlist);
1438                i40iw_puda_ret_bufpool(ieq, buf);
1439        }
1440}
1441