linux/drivers/scsi/csiostor/csio_scsi.c
   1/*
   2 * This file is part of the Chelsio FCoE driver for Linux.
   3 *
   4 * Copyright (c) 2008-2012 Chelsio Communications, Inc. All rights reserved.
   5 *
   6 * This software is available to you under a choice of one of two
   7 * licenses.  You may choose to be licensed under the terms of the GNU
   8 * General Public License (GPL) Version 2, available from the file
   9 * COPYING in the main directory of this source tree, or the
  10 * OpenIB.org BSD license below:
  11 *
  12 *     Redistribution and use in source and binary forms, with or
  13 *     without modification, are permitted provided that the following
  14 *     conditions are met:
  15 *
  16 *      - Redistributions of source code must retain the above
  17 *        copyright notice, this list of conditions and the following
  18 *        disclaimer.
  19 *
  20 *      - Redistributions in binary form must reproduce the above
  21 *        copyright notice, this list of conditions and the following
  22 *        disclaimer in the documentation and/or other materials
  23 *        provided with the distribution.
  24 *
  25 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
  26 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
  27 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
  28 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
  29 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
  30 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
  31 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
  32 * SOFTWARE.
  33 */
  34
  35#include <linux/device.h>
  36#include <linux/delay.h>
  37#include <linux/ctype.h>
  38#include <linux/kernel.h>
  39#include <linux/slab.h>
  40#include <linux/string.h>
  41#include <linux/compiler.h>
  42#include <linux/export.h>
  43#include <linux/module.h>
  44#include <asm/unaligned.h>
  45#include <asm/page.h>
  46#include <scsi/scsi.h>
  47#include <scsi/scsi_device.h>
  48#include <scsi/scsi_transport_fc.h>
  49
  50#include "csio_hw.h"
  51#include "csio_lnode.h"
  52#include "csio_rnode.h"
  53#include "csio_scsi.h"
  54#include "csio_init.h"
  55
  56int csio_scsi_eqsize = 65536;
  57int csio_scsi_iqlen = 128;
  58int csio_scsi_ioreqs = 2048;
  59uint32_t csio_max_scan_tmo;
  60uint32_t csio_delta_scan_tmo = 5;
  61int csio_lun_qdepth = 32;
  62
  63static int csio_ddp_descs = 128;
  64
  65static int csio_do_abrt_cls(struct csio_hw *,
  66                                      struct csio_ioreq *, bool);
  67
  68static void csio_scsis_uninit(struct csio_ioreq *, enum csio_scsi_ev);
  69static void csio_scsis_io_active(struct csio_ioreq *, enum csio_scsi_ev);
  70static void csio_scsis_tm_active(struct csio_ioreq *, enum csio_scsi_ev);
  71static void csio_scsis_aborting(struct csio_ioreq *, enum csio_scsi_ev);
  72static void csio_scsis_closing(struct csio_ioreq *, enum csio_scsi_ev);
  73static void csio_scsis_shost_cmpl_await(struct csio_ioreq *, enum csio_scsi_ev);
  74
  75/*
  76 * csio_scsi_match_io - Match an ioreq with the given SCSI level data.
  77 * @ioreq: The I/O request
  78 * @sld: Level information
  79 *
  80 * Should be called with lock held.
  81 *
  82 */
  83static bool
  84csio_scsi_match_io(struct csio_ioreq *ioreq, struct csio_scsi_level_data *sld)
  85{
  86        struct scsi_cmnd *scmnd = csio_scsi_cmnd(ioreq);
  87
  88        switch (sld->level) {
  89        case CSIO_LEV_LUN:
  90                if (scmnd == NULL)
  91                        return false;
  92
  93                return ((ioreq->lnode == sld->lnode) &&
  94                        (ioreq->rnode == sld->rnode) &&
  95                        ((uint64_t)scmnd->device->lun == sld->oslun));
  96
  97        case CSIO_LEV_RNODE:
  98                return ((ioreq->lnode == sld->lnode) &&
  99                                (ioreq->rnode == sld->rnode));
 100        case CSIO_LEV_LNODE:
 101                return (ioreq->lnode == sld->lnode);
 102        case CSIO_LEV_ALL:
 103                return true;
 104        default:
 105                return false;
 106        }
 107}
 108
 109/*
 110 * csio_scsi_gather_active_ios - Gather active I/Os based on level
 111 * @scm: SCSI module
 112 * @sld: Level information
 113 * @dest: The queue where these I/Os have to be gathered.
 114 *
 115 * Should be called with lock held.
 116 */
 117static void
 118csio_scsi_gather_active_ios(struct csio_scsim *scm,
 119                            struct csio_scsi_level_data *sld,
 120                            struct list_head *dest)
 121{
 122        struct list_head *tmp, *next;
 123
 124        if (list_empty(&scm->active_q))
 125                return;
 126
 127        /* Just splice the entire active_q into dest */
 128        if (sld->level == CSIO_LEV_ALL) {
 129                list_splice_tail_init(&scm->active_q, dest);
 130                return;
 131        }
 132
 133        list_for_each_safe(tmp, next, &scm->active_q) {
 134                if (csio_scsi_match_io((struct csio_ioreq *)tmp, sld)) {
 135                        list_del_init(tmp);
 136                        list_add_tail(tmp, dest);
 137                }
 138        }
 139}
 140
 141static inline bool
 142csio_scsi_itnexus_loss_error(uint16_t error)
 143{
 144        switch (error) {
 145        case FW_ERR_LINK_DOWN:
 146        case FW_RDEV_NOT_READY:
 147        case FW_ERR_RDEV_LOST:
 148        case FW_ERR_RDEV_LOGO:
 149        case FW_ERR_RDEV_IMPL_LOGO:
 150                return true;
 151        }
 152        return false;
 153}
 154
 155/*
 156 * csio_scsi_fcp_cmnd - Frame the SCSI FCP command payload.
 157 * @req: IO req structure.
 158 * @addr: DMA location to place the payload.
 159 *
 160 * This routine is shared between FCP_WRITE, FCP_READ and FCP_CMD requests.
 161 */
 162static inline void
 163csio_scsi_fcp_cmnd(struct csio_ioreq *req, void *addr)
 164{
 165        struct fcp_cmnd *fcp_cmnd = (struct fcp_cmnd *)addr;
 166        struct scsi_cmnd *scmnd = csio_scsi_cmnd(req);
 167
 168        /* Check for Task Management */
 169        if (likely(scmnd->SCp.Message == 0)) {
 170                int_to_scsilun(scmnd->device->lun, &fcp_cmnd->fc_lun);
 171                fcp_cmnd->fc_tm_flags = 0;
 172                fcp_cmnd->fc_cmdref = 0;
 173
 174                memcpy(fcp_cmnd->fc_cdb, scmnd->cmnd, 16);
 175                fcp_cmnd->fc_pri_ta = FCP_PTA_SIMPLE;
 176                fcp_cmnd->fc_dl = cpu_to_be32(scsi_bufflen(scmnd));
 177
 178                if (req->nsge)
 179                        if (req->datadir == DMA_TO_DEVICE)
 180                                fcp_cmnd->fc_flags = FCP_CFL_WRDATA;
 181                        else
 182                                fcp_cmnd->fc_flags = FCP_CFL_RDDATA;
 183                else
 184                        fcp_cmnd->fc_flags = 0;
 185        } else {
 186                memset(fcp_cmnd, 0, sizeof(*fcp_cmnd));
 187                int_to_scsilun(scmnd->device->lun, &fcp_cmnd->fc_lun);
 188                fcp_cmnd->fc_tm_flags = (uint8_t)scmnd->SCp.Message;
 189        }
 190}
 191
 192/*
 193 * csio_scsi_init_cmd_wr - Initialize the SCSI CMD WR.
 194 * @req: IO req structure.
 195 * @addr: DMA location to place the payload.
 196 * @size: Size of WR (including FW WR + immed data + rsp SG entry)
 197 *
 198 * Wrapper for populating fw_scsi_cmd_wr.
 199 */
 200static inline void
 201csio_scsi_init_cmd_wr(struct csio_ioreq *req, void *addr, uint32_t size)
 202{
 203        struct csio_hw *hw = req->lnode->hwp;
 204        struct csio_rnode *rn = req->rnode;
 205        struct fw_scsi_cmd_wr *wr = (struct fw_scsi_cmd_wr *)addr;
 206        struct csio_dma_buf *dma_buf;
 207        uint8_t imm = csio_hw_to_scsim(hw)->proto_cmd_len;
 208
 209        wr->op_immdlen = cpu_to_be32(FW_WR_OP_V(FW_SCSI_CMD_WR) |
 210                                          FW_SCSI_CMD_WR_IMMDLEN(imm));
 211        wr->flowid_len16 = cpu_to_be32(FW_WR_FLOWID_V(rn->flowid) |
 212                                            FW_WR_LEN16_V(
 213                                                DIV_ROUND_UP(size, 16)));
 214
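            /*
             * The ioreq pointer is stashed in the WR cookie; the completion
             * handler csio_scsi_cmpl_handler() reads it back from the
             * completed WR to locate this request.
             */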
 215        wr->cookie = (uintptr_t) req;
 216        wr->iqid = cpu_to_be16(csio_q_physiqid(hw, req->iq_idx));
 217        wr->tmo_val = (uint8_t) req->tmo;
 218        wr->r3 = 0;
 219        memset(&wr->r5, 0, 8);
 220
 221        /* Get RSP DMA buffer */
 222        dma_buf = &req->dma_buf;
 223
 224        /* Prepare RSP SGL */
 225        wr->rsp_dmalen = cpu_to_be32(dma_buf->len);
 226        wr->rsp_dmaaddr = cpu_to_be64(dma_buf->paddr);
 227
 228        wr->r6 = 0;
 229
 230        wr->u.fcoe.ctl_pri = 0;
 231        wr->u.fcoe.cp_en_class = 0;
 232        wr->u.fcoe.r4_lo[0] = 0;
 233        wr->u.fcoe.r4_lo[1] = 0;
 234
 235        /* Frame a FCP command */
 236        csio_scsi_fcp_cmnd(req, (void *)((uintptr_t)addr +
 237                                    sizeof(struct fw_scsi_cmd_wr)));
 238}
 239
 240#define CSIO_SCSI_CMD_WR_SZ(_imm)                                       \
 241        (sizeof(struct fw_scsi_cmd_wr) +                /* WR size */   \
 242         ALIGN((_imm), 16))                             /* Immed data */
 243
 244#define CSIO_SCSI_CMD_WR_SZ_16(_imm)                                    \
 245                        (ALIGN(CSIO_SCSI_CMD_WR_SZ((_imm)), 16))
 246
 247/*
 248 * csio_scsi_cmd - Create a SCSI CMD WR.
 249 * @req: IO req structure.
 250 *
 251 * Gets a WR slot in the egress queue and initializes it with a SCSI CMD WR.
 252 *
 253 */
 254static inline void
 255csio_scsi_cmd(struct csio_ioreq *req)
 256{
 257        struct csio_wr_pair wrp;
 258        struct csio_hw *hw = req->lnode->hwp;
 259        struct csio_scsim *scsim = csio_hw_to_scsim(hw);
 260        uint32_t size = CSIO_SCSI_CMD_WR_SZ_16(scsim->proto_cmd_len);
 261
 262        req->drv_status = csio_wr_get(hw, req->eq_idx, size, &wrp);
 263        if (unlikely(req->drv_status != 0))
 264                return;
 265
 266        if (wrp.size1 >= size) {
 267                /* Initialize WR in one shot */
 268                csio_scsi_init_cmd_wr(req, wrp.addr1, size);
 269        } else {
 270                uint8_t *tmpwr = csio_q_eq_wrap(hw, req->eq_idx);
 271
 272                /*
 273                 * Make a temporary copy of the WR and write back
 274                 * the copy into the WR pair.
 275                 */
 276                csio_scsi_init_cmd_wr(req, (void *)tmpwr, size);
 277                memcpy(wrp.addr1, tmpwr, wrp.size1);
 278                memcpy(wrp.addr2, tmpwr + wrp.size1, size - wrp.size1);
 279        }
 280}
 281
 282/*
 283 * csio_scsi_init_ultptx_dsgl - Fill in a ULP_TX_SC_DSGL
 284 * @hw: HW module
 285 * @req: IO request
 286 * @sgl: ULP TX SGL pointer.
 287 *
 288 */
 289static inline void
 290csio_scsi_init_ultptx_dsgl(struct csio_hw *hw, struct csio_ioreq *req,
 291                           struct ulptx_sgl *sgl)
 292{
 293        struct ulptx_sge_pair *sge_pair = NULL;
 294        struct scatterlist *sgel;
 295        uint32_t i = 0;
 296        uint32_t xfer_len;
 297        struct list_head *tmp;
 298        struct csio_dma_buf *dma_buf;
 299        struct scsi_cmnd *scmnd = csio_scsi_cmnd(req);
 300
 301        sgl->cmd_nsge = htonl(ULPTX_CMD_V(ULP_TX_SC_DSGL) | ULPTX_MORE_F |
 302                                     ULPTX_NSGE_V(req->nsge));
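            /*
             * DSGL layout: the first data SGE is carried in addr0/len0 of
             * the ulptx_sgl header itself; each subsequent SGE is packed
             * two at a time into the ulptx_sge_pair array that follows,
             * hence the (i - 1) & 0x1 alternation in the loops below.
             */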
 303        /* Now add the data SGLs */
 304        if (likely(!req->dcopy)) {
 305                scsi_for_each_sg(scmnd, sgel, req->nsge, i) {
 306                        if (i == 0) {
 307                                sgl->addr0 = cpu_to_be64(sg_dma_address(sgel));
 308                                sgl->len0 = cpu_to_be32(sg_dma_len(sgel));
 309                                sge_pair = (struct ulptx_sge_pair *)(sgl + 1);
 310                                continue;
 311                        }
 312                        if ((i - 1) & 0x1) {
 313                                sge_pair->addr[1] = cpu_to_be64(
 314                                                        sg_dma_address(sgel));
 315                                sge_pair->len[1] = cpu_to_be32(
 316                                                        sg_dma_len(sgel));
 317                                sge_pair++;
 318                        } else {
 319                                sge_pair->addr[0] = cpu_to_be64(
 320                                                        sg_dma_address(sgel));
 321                                sge_pair->len[0] = cpu_to_be32(
 322                                                        sg_dma_len(sgel));
 323                        }
 324                }
 325        } else {
 326                /* Program sg elements with driver's DDP buffer */
 327                xfer_len = scsi_bufflen(scmnd);
 328                list_for_each(tmp, &req->gen_list) {
 329                        dma_buf = (struct csio_dma_buf *)tmp;
 330                        if (i == 0) {
 331                                sgl->addr0 = cpu_to_be64(dma_buf->paddr);
 332                                sgl->len0 = cpu_to_be32(
 333                                                min(xfer_len, dma_buf->len));
 334                                sge_pair = (struct ulptx_sge_pair *)(sgl + 1);
 335                        } else if ((i - 1) & 0x1) {
 336                                sge_pair->addr[1] = cpu_to_be64(dma_buf->paddr);
 337                                sge_pair->len[1] = cpu_to_be32(
 338                                                min(xfer_len, dma_buf->len));
 339                                sge_pair++;
 340                        } else {
 341                                sge_pair->addr[0] = cpu_to_be64(dma_buf->paddr);
 342                                sge_pair->len[0] = cpu_to_be32(
 343                                                min(xfer_len, dma_buf->len));
 344                        }
 345                        xfer_len -= min(xfer_len, dma_buf->len);
 346                        i++;
 347                }
 348        }
 349}
 350
 351/*
 352 * csio_scsi_init_read_wr - Initialize the READ SCSI WR.
 353 * @req: IO req structure.
 354 * @wrp: DMA location to place the payload.
 355 * @size: Size of WR (including FW WR + immed data + rsp SG entry + data SGL)
 356 *
 357 * Wrapper for populating fw_scsi_read_wr.
 358 */
 359static inline void
 360csio_scsi_init_read_wr(struct csio_ioreq *req, void *wrp, uint32_t size)
 361{
 362        struct csio_hw *hw = req->lnode->hwp;
 363        struct csio_rnode *rn = req->rnode;
 364        struct fw_scsi_read_wr *wr = (struct fw_scsi_read_wr *)wrp;
 365        struct ulptx_sgl *sgl;
 366        struct csio_dma_buf *dma_buf;
 367        uint8_t imm = csio_hw_to_scsim(hw)->proto_cmd_len;
 368        struct scsi_cmnd *scmnd = csio_scsi_cmnd(req);
 369
 370        wr->op_immdlen = cpu_to_be32(FW_WR_OP_V(FW_SCSI_READ_WR) |
 371                                     FW_SCSI_READ_WR_IMMDLEN(imm));
 372        wr->flowid_len16 = cpu_to_be32(FW_WR_FLOWID_V(rn->flowid) |
 373                                       FW_WR_LEN16_V(DIV_ROUND_UP(size, 16)));
 374        wr->cookie = (uintptr_t)req;
 375        wr->iqid = cpu_to_be16(csio_q_physiqid(hw, req->iq_idx));
 376        wr->tmo_val = (uint8_t)(req->tmo);
 377        wr->use_xfer_cnt = 1;
 378        wr->xfer_cnt = cpu_to_be32(scsi_bufflen(scmnd));
 379        wr->ini_xfer_cnt = cpu_to_be32(scsi_bufflen(scmnd));
 380        /* Get RSP DMA buffer */
 381        dma_buf = &req->dma_buf;
 382
 383        /* Prepare RSP SGL */
 384        wr->rsp_dmalen = cpu_to_be32(dma_buf->len);
 385        wr->rsp_dmaaddr = cpu_to_be64(dma_buf->paddr);
 386
 387        wr->r4 = 0;
 388
 389        wr->u.fcoe.ctl_pri = 0;
 390        wr->u.fcoe.cp_en_class = 0;
 391        wr->u.fcoe.r3_lo[0] = 0;
 392        wr->u.fcoe.r3_lo[1] = 0;
 393        csio_scsi_fcp_cmnd(req, (void *)((uintptr_t)wrp +
 394                                        sizeof(struct fw_scsi_read_wr)));
 395
 396        /* Move WR pointer past command and immediate data */
 397        sgl = (struct ulptx_sgl *)((uintptr_t)wrp +
 398                              sizeof(struct fw_scsi_read_wr) + ALIGN(imm, 16));
 399
 400        /* Fill in the DSGL */
 401        csio_scsi_init_ultptx_dsgl(hw, req, sgl);
 402}
 403
 404/*
 405 * csio_scsi_init_write_wr - Initialize the WRITE SCSI WR.
 406 * @req: IO req structure.
 407 * @wrp: DMA location to place the payload.
 408 * @size: Size of WR (including FW WR + immed data + rsp SG entry + data SGL)
 409 *
 410 * Wrapper for populating fw_scsi_write_wr.
 411 */
 412static inline void
 413csio_scsi_init_write_wr(struct csio_ioreq *req, void *wrp, uint32_t size)
 414{
 415        struct csio_hw *hw = req->lnode->hwp;
 416        struct csio_rnode *rn = req->rnode;
 417        struct fw_scsi_write_wr *wr = (struct fw_scsi_write_wr *)wrp;
 418        struct ulptx_sgl *sgl;
 419        struct csio_dma_buf *dma_buf;
 420        uint8_t imm = csio_hw_to_scsim(hw)->proto_cmd_len;
 421        struct scsi_cmnd *scmnd = csio_scsi_cmnd(req);
 422
 423        wr->op_immdlen = cpu_to_be32(FW_WR_OP_V(FW_SCSI_WRITE_WR) |
 424                                     FW_SCSI_WRITE_WR_IMMDLEN(imm));
 425        wr->flowid_len16 = cpu_to_be32(FW_WR_FLOWID_V(rn->flowid) |
 426                                       FW_WR_LEN16_V(DIV_ROUND_UP(size, 16)));
 427        wr->cookie = (uintptr_t)req;
 428        wr->iqid = cpu_to_be16(csio_q_physiqid(hw, req->iq_idx));
 429        wr->tmo_val = (uint8_t)(req->tmo);
 430        wr->use_xfer_cnt = 1;
 431        wr->xfer_cnt = cpu_to_be32(scsi_bufflen(scmnd));
 432        wr->ini_xfer_cnt = cpu_to_be32(scsi_bufflen(scmnd));
 433        /* Get RSP DMA buffer */
 434        dma_buf = &req->dma_buf;
 435
 436        /* Prepare RSP SGL */
 437        wr->rsp_dmalen = cpu_to_be32(dma_buf->len);
 438        wr->rsp_dmaaddr = cpu_to_be64(dma_buf->paddr);
 439
 440        wr->r4 = 0;
 441
 442        wr->u.fcoe.ctl_pri = 0;
 443        wr->u.fcoe.cp_en_class = 0;
 444        wr->u.fcoe.r3_lo[0] = 0;
 445        wr->u.fcoe.r3_lo[1] = 0;
 446        csio_scsi_fcp_cmnd(req, (void *)((uintptr_t)wrp +
 447                                        sizeof(struct fw_scsi_write_wr)));
 448
 449        /* Move WR pointer past command and immediate data */
 450        sgl = (struct ulptx_sgl *)((uintptr_t)wrp +
 451                              sizeof(struct fw_scsi_write_wr) + ALIGN(imm, 16));
 452
 453        /* Fill in the DSGL */
 454        csio_scsi_init_ultptx_dsgl(hw, req, sgl);
 455}
 456
 457/* Calculate WR size needed for fw_scsi_read_wr/fw_scsi_write_wr */
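    /*
     * The WR header, the immediate FCP_CMND and the ulptx_sgl header
     * (which carries the first data SGE) are always present; every
     * further pair of SGEs needs one ulptx_sge_pair, so nsge - 1 is
     * rounded up to an even count and halved.
     */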
 458#define CSIO_SCSI_DATA_WRSZ(req, oper, sz, imm)                                \
 459do {                                                                           \
 460        (sz) = sizeof(struct fw_scsi_##oper##_wr) +     /* WR size */          \
 461               ALIGN((imm), 16) +                       /* Immed data */       \
 462               sizeof(struct ulptx_sgl);                /* ulptx_sgl */        \
 463                                                                               \
 464        if (unlikely((req)->nsge > 1))                                         \
 465                (sz) += (sizeof(struct ulptx_sge_pair) *                       \
 466                                (ALIGN(((req)->nsge - 1), 2) / 2));            \
 467                                                        /* Data SGE */         \
 468} while (0)
 469
 470/*
 471 * csio_scsi_read - Create a SCSI READ WR.
 472 * @req: IO req structure.
 473 *
 474 * Gets a WR slot in the egress queue and initializes it with a
 475 * SCSI READ WR.
 476 *
 477 */
 478static inline void
 479csio_scsi_read(struct csio_ioreq *req)
 480{
 481        struct csio_wr_pair wrp;
 482        uint32_t size;
 483        struct csio_hw *hw = req->lnode->hwp;
 484        struct csio_scsim *scsim = csio_hw_to_scsim(hw);
 485
 486        CSIO_SCSI_DATA_WRSZ(req, read, size, scsim->proto_cmd_len);
 487        size = ALIGN(size, 16);
 488
 489        req->drv_status = csio_wr_get(hw, req->eq_idx, size, &wrp);
 490        if (likely(req->drv_status == 0)) {
 491                if (likely(wrp.size1 >= size)) {
 492                        /* Initialize WR in one shot */
 493                        csio_scsi_init_read_wr(req, wrp.addr1, size);
 494                } else {
 495                        uint8_t *tmpwr = csio_q_eq_wrap(hw, req->eq_idx);
 496                        /*
 497                         * Make a temporary copy of the WR and write back
 498                         * the copy into the WR pair.
 499                         */
 500                        csio_scsi_init_read_wr(req, (void *)tmpwr, size);
 501                        memcpy(wrp.addr1, tmpwr, wrp.size1);
 502                        memcpy(wrp.addr2, tmpwr + wrp.size1, size - wrp.size1);
 503                }
 504        }
 505}
 506
 507/*
 508 * csio_scsi_write - Create a SCSI WRITE WR.
 509 * @req: IO req structure.
 510 *
 511 * Gets a WR slot in the egress queue and initializes it with a
 512 * SCSI WRITE WR.
 513 *
 514 */
 515static inline void
 516csio_scsi_write(struct csio_ioreq *req)
 517{
 518        struct csio_wr_pair wrp;
 519        uint32_t size;
 520        struct csio_hw *hw = req->lnode->hwp;
 521        struct csio_scsim *scsim = csio_hw_to_scsim(hw);
 522
 523        CSIO_SCSI_DATA_WRSZ(req, write, size, scsim->proto_cmd_len);
 524        size = ALIGN(size, 16);
 525
 526        req->drv_status = csio_wr_get(hw, req->eq_idx, size, &wrp);
 527        if (likely(req->drv_status == 0)) {
 528                if (likely(wrp.size1 >= size)) {
 529                        /* Initialize WR in one shot */
 530                        csio_scsi_init_write_wr(req, wrp.addr1, size);
 531                } else {
 532                        uint8_t *tmpwr = csio_q_eq_wrap(hw, req->eq_idx);
 533                        /*
 534                         * Make a temporary copy of the WR and write back
 535                         * the copy into the WR pair.
 536                         */
 537                        csio_scsi_init_write_wr(req, (void *)tmpwr, size);
 538                        memcpy(wrp.addr1, tmpwr, wrp.size1);
 539                        memcpy(wrp.addr2, tmpwr + wrp.size1, size - wrp.size1);
 540                }
 541        }
 542}
 543
 544/*
 545 * csio_setup_ddp - Setup DDP buffers for Read request.
 546 * @req: IO req structure.
 547 *
 548 * Checks whether the SGLs/data buffers are virtually contiguous, as
 549 * required for DDP. If they are, the driver posts the SGLs in the WR;
 550 * otherwise it posts internal DDP buffers for the request.
 551 */
 552static inline void
 553csio_setup_ddp(struct csio_scsim *scsim, struct csio_ioreq *req)
 554{
 555#ifdef __CSIO_DEBUG__
 556        struct csio_hw *hw = req->lnode->hwp;
 557#endif
 558        struct scatterlist *sgel = NULL;
 559        struct scsi_cmnd *scmnd = csio_scsi_cmnd(req);
 560        uint64_t sg_addr = 0;
 561        uint32_t ddp_pagesz = 4096;
 562        uint32_t buf_off;
 563        struct csio_dma_buf *dma_buf = NULL;
 564        uint32_t alloc_len = 0;
 565        uint32_t xfer_len = 0;
 566        uint32_t sg_len = 0;
 567        uint32_t i;
 568
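            /*
             * DDP needs the buffer list to look like one virtually
             * contiguous region: only the first SGE may start at a non
             * page-aligned offset, and only the last may end short of a
             * page boundary.
             */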
 569        scsi_for_each_sg(scmnd, sgel, req->nsge, i) {
 570                sg_addr = sg_dma_address(sgel);
 571                sg_len  = sg_dma_len(sgel);
 572
 573                buf_off = sg_addr & (ddp_pagesz - 1);
 574
 575                /* Except 1st buffer, all buffer addrs must be page aligned */
 576                if (i != 0 && buf_off) {
 577                        csio_dbg(hw, "SGL addr not DDP aligned (%llx:%d)\n",
 578                                 sg_addr, sg_len);
 579                        goto unaligned;
 580                }
 581
 582                /* Except last buffer, all buffers must end on page boundary */
 583                if ((i != (req->nsge - 1)) &&
 584                        ((buf_off + sg_len) & (ddp_pagesz - 1))) {
 585                        csio_dbg(hw,
 586                                 "SGL addr not ending on page boundary"
 587                                 "(%llx:%d)\n", sg_addr, sg_len);
 588                        goto unaligned;
 589                }
 590        }
 591
 592        /* SGLs are virtually contiguous. HW will DDP to SGLs */
 593        req->dcopy = 0;
 594        csio_scsi_read(req);
 595
 596        return;
 597
 598unaligned:
 599        CSIO_INC_STATS(scsim, n_unaligned);
 600        /*
 601         * For unaligned SGLs, the driver allocates internal DDP buffers;
 602         * once the command completes, data is copied from them to the SGLs.
 603         */
 604        req->dcopy = 1;
 605
 606        /* Use gen_list to store the DDP buffers */
 607        INIT_LIST_HEAD(&req->gen_list);
 608        xfer_len = scsi_bufflen(scmnd);
 609
 610        i = 0;
 611        /* Allocate ddp buffers for this request */
 612        while (alloc_len < xfer_len) {
 613                dma_buf = csio_get_scsi_ddp(scsim);
 614                if (dma_buf == NULL || i > scsim->max_sge) {
 615                        req->drv_status = -EBUSY;
 616                        break;
 617                }
 618                alloc_len += dma_buf->len;
 619                /* Added to IO req */
 620                list_add_tail(&dma_buf->list, &req->gen_list);
 621                i++;
 622        }
 623
 624        if (!req->drv_status) {
 625                /* set number of ddp bufs used */
 626                req->nsge = i;
 627                csio_scsi_read(req);
 628                return;
 629        }
 630
 631        /* release dma descs */
 632        if (i > 0)
 633                csio_put_scsi_ddp_list(scsim, &req->gen_list, i);
 634}
 635
 636/*
 637 * csio_scsi_init_abrt_cls_wr - Initialize an ABORT/CLOSE WR.
 638 * @req: IO req structure.
 639 * @addr: DMA location to place the payload.
 640 * @size: Size of WR
 641 * @abort: abort OR close
 642 *
 643 * Wrapper for populating fw_scsi_cmd_wr.
 644 */
 645static inline void
 646csio_scsi_init_abrt_cls_wr(struct csio_ioreq *req, void *addr, uint32_t size,
 647                           bool abort)
 648{
 649        struct csio_hw *hw = req->lnode->hwp;
 650        struct csio_rnode *rn = req->rnode;
 651        struct fw_scsi_abrt_cls_wr *wr = (struct fw_scsi_abrt_cls_wr *)addr;
 652
 653        wr->op_immdlen = cpu_to_be32(FW_WR_OP_V(FW_SCSI_ABRT_CLS_WR));
 654        wr->flowid_len16 = cpu_to_be32(FW_WR_FLOWID_V(rn->flowid) |
 655                                            FW_WR_LEN16_V(
 656                                                DIV_ROUND_UP(size, 16)));
 657
 658        wr->cookie = (uintptr_t) req;
 659        wr->iqid = cpu_to_be16(csio_q_physiqid(hw, req->iq_idx));
 660        wr->tmo_val = (uint8_t) req->tmo;
 661        /* 0 for CHK_ALL_IO tells FW to look up t_cookie */
 662        wr->sub_opcode_to_chk_all_io =
 663                                (FW_SCSI_ABRT_CLS_WR_SUB_OPCODE(abort) |
 664                                 FW_SCSI_ABRT_CLS_WR_CHK_ALL_IO(0));
 665        wr->r3[0] = 0;
 666        wr->r3[1] = 0;
 667        wr->r3[2] = 0;
 668        wr->r3[3] = 0;
 669        /* Since we re-use the same ioreq for abort as well */
 670        wr->t_cookie = (uintptr_t) req;
 671}
 672
 673static inline void
 674csio_scsi_abrt_cls(struct csio_ioreq *req, bool abort)
 675{
 676        struct csio_wr_pair wrp;
 677        struct csio_hw *hw = req->lnode->hwp;
 678        uint32_t size = ALIGN(sizeof(struct fw_scsi_abrt_cls_wr), 16);
 679
 680        req->drv_status = csio_wr_get(hw, req->eq_idx, size, &wrp);
 681        if (req->drv_status != 0)
 682                return;
 683
 684        if (wrp.size1 >= size) {
 685                /* Initialize WR in one shot */
 686                csio_scsi_init_abrt_cls_wr(req, wrp.addr1, size, abort);
 687        } else {
 688                uint8_t *tmpwr = csio_q_eq_wrap(hw, req->eq_idx);
 689                /*
 690                 * Make a temporary copy of the WR and write back
 691                 * the copy into the WR pair.
 692                 */
 693                csio_scsi_init_abrt_cls_wr(req, (void *)tmpwr, size, abort);
 694                memcpy(wrp.addr1, tmpwr, wrp.size1);
 695                memcpy(wrp.addr2, tmpwr + wrp.size1, size - wrp.size1);
 696        }
 697}
 698
 699/*****************************************************************************/
 700/* START: SCSI SM                                                            */
 701/*****************************************************************************/
 702static void
 703csio_scsis_uninit(struct csio_ioreq *req, enum csio_scsi_ev evt)
 704{
 705        struct csio_hw *hw = req->lnode->hwp;
 706        struct csio_scsim *scsim = csio_hw_to_scsim(hw);
 707
 708        switch (evt) {
 709        case CSIO_SCSIE_START_IO:
 710
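                    /*
                     * Data-out requests go straight to a WRITE WR; data-in
                     * requests go through csio_setup_ddp(), which chooses
                     * between DDP directly into the SGL and internal DDP
                     * buffers; requests without data use a plain CMD WR.
                     */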
 711                if (req->nsge) {
 712                        if (req->datadir == DMA_TO_DEVICE) {
 713                                req->dcopy = 0;
 714                                csio_scsi_write(req);
 715                        } else
 716                                csio_setup_ddp(scsim, req);
 717                } else {
 718                        csio_scsi_cmd(req);
 719                }
 720
 721                if (likely(req->drv_status == 0)) {
 722                        /* change state and enqueue on active_q */
 723                        csio_set_state(&req->sm, csio_scsis_io_active);
 724                        list_add_tail(&req->sm.sm_list, &scsim->active_q);
 725                        csio_wr_issue(hw, req->eq_idx, false);
 726                        CSIO_INC_STATS(scsim, n_active);
 727
 728                        return;
 729                }
 730                break;
 731
 732        case CSIO_SCSIE_START_TM:
 733                csio_scsi_cmd(req);
 734                if (req->drv_status == 0) {
 735                        /*
 736                         * NOTE: We collect the affected I/Os prior to issuing
 737                         * LUN reset, and not after it. This is to prevent
 738                         * aborting I/Os that get issued after the LUN reset,
 739                         * but prior to LUN reset completion (in the event that
 740                         * the host stack has not blocked I/Os to a LUN that is
 741                         * being reset).
 742                         */
 743                        csio_set_state(&req->sm, csio_scsis_tm_active);
 744                        list_add_tail(&req->sm.sm_list, &scsim->active_q);
 745                        csio_wr_issue(hw, req->eq_idx, false);
 746                        CSIO_INC_STATS(scsim, n_tm_active);
 747                }
 748                return;
 749
 750        case CSIO_SCSIE_ABORT:
 751        case CSIO_SCSIE_CLOSE:
 752                /*
 753                 * NOTE:
 754                 * We could get here due to:
 755                 * - a window in the cleanup path of the SCSI module
 756                 *   (csio_scsi_abort_io()). Please see NOTE in this function.
 757                 * - a window between the time we tried to issue an abort/close
 758                 *   of a request to FW and the time the FW completed the
 759                 *   request itself.
 760                 *   Print a message for now, and return -EINVAL either way.
 761                 */
 762                req->drv_status = -EINVAL;
 763                csio_warn(hw, "Trying to abort/close completed IO:%p!\n", req);
 764                break;
 765
 766        default:
 767                csio_dbg(hw, "Unhandled event:%d sent to req:%p\n", evt, req);
 768                CSIO_DB_ASSERT(0);
 769        }
 770}
 771
 772static void
 773csio_scsis_io_active(struct csio_ioreq *req, enum csio_scsi_ev evt)
 774{
 775        struct csio_hw *hw = req->lnode->hwp;
 776        struct csio_scsim *scm = csio_hw_to_scsim(hw);
 777        struct csio_rnode *rn;
 778
 779        switch (evt) {
 780        case CSIO_SCSIE_COMPLETED:
 781                CSIO_DEC_STATS(scm, n_active);
 782                list_del_init(&req->sm.sm_list);
 783                csio_set_state(&req->sm, csio_scsis_uninit);
 784                /*
 785                 * In MSIX mode, with multiple queues, the SCSI completions
 786                 * could reach us sooner than the FW events sent to indicate
 787                 * I-T nexus loss (link down, remote device logo etc). We
 788                 * don't want to be returning such I/Os to the upper layer
 789                 * immediately, since we wouldn't have reported the I-T nexus
 790                 * loss itself. This forces us to serialize such completions
 791                 * with the reporting of the I-T nexus loss. Therefore, we
 792                 * internally queue up such completions in the rnode.
 793                 * The reporting of I-T nexus loss to the upper layer is then
 794                 * followed by the returning of I/Os in this internal queue.
 795                 * Having another state along with another queue helps us take
 796                 * actions for events such as ABORT received while we are
 797                 * in this rnode queue.
 798                 */
 799                if (unlikely(req->wr_status != FW_SUCCESS)) {
 800                        rn = req->rnode;
 801                        /*
 802                         * FW says remote device is lost, but rnode
 803                         * doesn't reflect it.
 804                         */
 805                        if (csio_scsi_itnexus_loss_error(req->wr_status) &&
 806                                                csio_is_rnode_ready(rn)) {
 807                                csio_set_state(&req->sm,
 808                                                csio_scsis_shost_cmpl_await);
 809                                list_add_tail(&req->sm.sm_list,
 810                                              &rn->host_cmpl_q);
 811                        }
 812                }
 813
 814                break;
 815
 816        case CSIO_SCSIE_ABORT:
 817                csio_scsi_abrt_cls(req, SCSI_ABORT);
 818                if (req->drv_status == 0) {
 819                        csio_wr_issue(hw, req->eq_idx, false);
 820                        csio_set_state(&req->sm, csio_scsis_aborting);
 821                }
 822                break;
 823
 824        case CSIO_SCSIE_CLOSE:
 825                csio_scsi_abrt_cls(req, SCSI_CLOSE);
 826                if (req->drv_status == 0) {
 827                        csio_wr_issue(hw, req->eq_idx, false);
 828                        csio_set_state(&req->sm, csio_scsis_closing);
 829                }
 830                break;
 831
 832        case CSIO_SCSIE_DRVCLEANUP:
 833                req->wr_status = FW_HOSTERROR;
 834                CSIO_DEC_STATS(scm, n_active);
 835                csio_set_state(&req->sm, csio_scsis_uninit);
 836                break;
 837
 838        default:
 839                csio_dbg(hw, "Unhandled event:%d sent to req:%p\n", evt, req);
 840                CSIO_DB_ASSERT(0);
 841        }
 842}
 843
 844static void
 845csio_scsis_tm_active(struct csio_ioreq *req, enum csio_scsi_ev evt)
 846{
 847        struct csio_hw *hw = req->lnode->hwp;
 848        struct csio_scsim *scm = csio_hw_to_scsim(hw);
 849
 850        switch (evt) {
 851        case CSIO_SCSIE_COMPLETED:
 852                CSIO_DEC_STATS(scm, n_tm_active);
 853                list_del_init(&req->sm.sm_list);
 854                csio_set_state(&req->sm, csio_scsis_uninit);
 855
 856                break;
 857
 858        case CSIO_SCSIE_ABORT:
 859                csio_scsi_abrt_cls(req, SCSI_ABORT);
 860                if (req->drv_status == 0) {
 861                        csio_wr_issue(hw, req->eq_idx, false);
 862                        csio_set_state(&req->sm, csio_scsis_aborting);
 863                }
 864                break;
 865
 866
 867        case CSIO_SCSIE_CLOSE:
 868                csio_scsi_abrt_cls(req, SCSI_CLOSE);
 869                if (req->drv_status == 0) {
 870                        csio_wr_issue(hw, req->eq_idx, false);
 871                        csio_set_state(&req->sm, csio_scsis_closing);
 872                }
 873                break;
 874
 875        case CSIO_SCSIE_DRVCLEANUP:
 876                req->wr_status = FW_HOSTERROR;
 877                CSIO_DEC_STATS(scm, n_tm_active);
 878                csio_set_state(&req->sm, csio_scsis_uninit);
 879                break;
 880
 881        default:
 882                csio_dbg(hw, "Unhandled event:%d sent to req:%p\n", evt, req);
 883                CSIO_DB_ASSERT(0);
 884        }
 885}
 886
 887static void
 888csio_scsis_aborting(struct csio_ioreq *req, enum csio_scsi_ev evt)
 889{
 890        struct csio_hw *hw = req->lnode->hwp;
 891        struct csio_scsim *scm = csio_hw_to_scsim(hw);
 892
 893        switch (evt) {
 894        case CSIO_SCSIE_COMPLETED:
 895                csio_dbg(hw,
 896                         "ioreq %p recvd cmpltd (wr_status:%d) "
 897                         "in aborting st\n", req, req->wr_status);
 898                /*
 899                 * Use -ECANCELED to explicitly tell the ABORTED event that
 900                 * the original I/O was returned to driver by FW.
 901                 * We don't really care if the I/O was returned with success by
 902                 * FW (because the ABORT and completion of the I/O crossed each
 903                 * other), or any other return value. Once we are in aborting
 904                 * state, the success or failure of the I/O is unimportant to
 905                 * us.
 906                 */
 907                req->drv_status = -ECANCELED;
 908                break;
 909
 910        case CSIO_SCSIE_ABORT:
 911                CSIO_INC_STATS(scm, n_abrt_dups);
 912                break;
 913
 914        case CSIO_SCSIE_ABORTED:
 915
 916                csio_dbg(hw, "abort of %p return status:0x%x drv_status:%x\n",
 917                         req, req->wr_status, req->drv_status);
 918                /*
 919                 * Check if original I/O WR completed before the Abort
 920                 * completion.
 921                 */
 922                if (req->drv_status != -ECANCELED) {
 923                        csio_warn(hw,
 924                                  "Abort completed before original I/O,"
 925                                   " req:%p\n", req);
 926                        CSIO_DB_ASSERT(0);
 927                }
 928
 929                /*
 930                 * There are the following possible scenarios:
 931                 * 1. The abort completed successfully, FW returned FW_SUCCESS.
 932                 * 2. The completion of an I/O and the receipt of
 933                 *    abort for that I/O by the FW crossed each other.
 934                 *    The FW returned FW_EINVAL. The original I/O would have
 935                 *    returned with FW_SUCCESS or any other SCSI error.
 936                 * 3. The FW couldn't send the abort out on the wire, as there
 937                 *    was an I-T nexus loss (link down, remote device logged
 938                 *    out etc). FW sent back an appropriate IT nexus loss status
 939                 *    for the abort.
 940                 * 4. FW sent an abort, but abort timed out (remote device
 941                 *    didn't respond). FW replied back with
 942                 *    FW_SCSI_ABORT_TIMEDOUT.
 943                 * 5. FW couldn't genuinely abort the request for some reason,
 944                 *    and sent us an error.
 945                 *
 946                 * The first 3 scenarios are treated as successful abort
 947                 * operations by the host, while the last 2 are failed attempts
 948                 * to abort. Manipulate the return value of the request
 949                 * appropriately, so that host can convey these results
 950                 * back to the upper layer.
 951                 */
 952                if ((req->wr_status == FW_SUCCESS) ||
 953                    (req->wr_status == FW_EINVAL) ||
 954                    csio_scsi_itnexus_loss_error(req->wr_status))
 955                        req->wr_status = FW_SCSI_ABORT_REQUESTED;
 956
 957                CSIO_DEC_STATS(scm, n_active);
 958                list_del_init(&req->sm.sm_list);
 959                csio_set_state(&req->sm, csio_scsis_uninit);
 960                break;
 961
 962        case CSIO_SCSIE_DRVCLEANUP:
 963                req->wr_status = FW_HOSTERROR;
 964                CSIO_DEC_STATS(scm, n_active);
 965                csio_set_state(&req->sm, csio_scsis_uninit);
 966                break;
 967
 968        case CSIO_SCSIE_CLOSE:
 969                /*
 970                 * We can receive this event from the module
 971                 * cleanup paths, if the FW forgot to reply to the ABORT WR
 972                 * and left this ioreq in this state. For now, just ignore
 973                 * the event. The CLOSE event is sent to this state, as
 974                 * the LINK may have already gone down.
 975                 */
 976                break;
 977
 978        default:
 979                csio_dbg(hw, "Unhandled event:%d sent to req:%p\n", evt, req);
 980                CSIO_DB_ASSERT(0);
 981        }
 982}
 983
 984static void
 985csio_scsis_closing(struct csio_ioreq *req, enum csio_scsi_ev evt)
 986{
 987        struct csio_hw *hw = req->lnode->hwp;
 988        struct csio_scsim *scm = csio_hw_to_scsim(hw);
 989
 990        switch (evt) {
 991        case CSIO_SCSIE_COMPLETED:
 992                csio_dbg(hw,
 993                         "ioreq %p recvd cmpltd (wr_status:%d) "
 994                         "in closing st\n", req, req->wr_status);
 995                /*
 996                 * Use -ECANCELED to explicitly tell the CLOSED event that
 997                 * the original I/O was returned to driver by FW.
 998                 * We don't really care if the I/O was returned with success by
 999                 * FW (because the CLOSE and completion of the I/O crossed each
1000                 * other), or any other return value. Once we are in the closing
1001                 * state, the success or failure of the I/O is unimportant to
1002                 * us.
1003                 */
1004                req->drv_status = -ECANCELED;
1005                break;
1006
1007        case CSIO_SCSIE_CLOSED:
1008                /*
1009                 * Check if original I/O WR completed before the Close
1010                 * completion.
1011                 */
1012                if (req->drv_status != -ECANCELED) {
1013                        csio_fatal(hw,
1014                                   "Close completed before original I/O,"
1015                                   " req:%p\n", req);
1016                        CSIO_DB_ASSERT(0);
1017                }
1018
1019                /*
1020                 * Either close succeeded, or we issued close to FW at the
1021                 * same time the FW completed it to us. Either way, the I/O
1022                 * is closed.
1023                 */
1024                CSIO_DB_ASSERT((req->wr_status == FW_SUCCESS) ||
1025                                        (req->wr_status == FW_EINVAL));
1026                req->wr_status = FW_SCSI_CLOSE_REQUESTED;
1027
1028                CSIO_DEC_STATS(scm, n_active);
1029                list_del_init(&req->sm.sm_list);
1030                csio_set_state(&req->sm, csio_scsis_uninit);
1031                break;
1032
1033        case CSIO_SCSIE_CLOSE:
1034                break;
1035
1036        case CSIO_SCSIE_DRVCLEANUP:
1037                req->wr_status = FW_HOSTERROR;
1038                CSIO_DEC_STATS(scm, n_active);
1039                csio_set_state(&req->sm, csio_scsis_uninit);
1040                break;
1041
1042        default:
1043                csio_dbg(hw, "Unhandled event:%d sent to req:%p\n", evt, req);
1044                CSIO_DB_ASSERT(0);
1045        }
1046}
1047
1048static void
1049csio_scsis_shost_cmpl_await(struct csio_ioreq *req, enum csio_scsi_ev evt)
1050{
1051        switch (evt) {
1052        case CSIO_SCSIE_ABORT:
1053        case CSIO_SCSIE_CLOSE:
1054                /*
1055                 * Just succeed the abort request, and hope that
1056                 * the remote device unregister path will clean up
1057                 * this I/O to the upper layer within a sane
1058                 * amount of time.
1059                 */
1060                /*
1061                 * A close can come in during a LINK DOWN. The FW would have
1062                 * returned us the I/O back, but not the remote device lost
1063                 * FW event. In this interval, if the I/O times out at the upper
1064                 * layer, a close can come in. Take the same action as abort:
1065                 * return success, and hope that the remote device unregister
1066                 * path will clean up this I/O. If the FW still doesn't send
1067                 * the msg, the close times out, and the upper layer resorts
1068                 * to the next level of error recovery.
1069                 */
1070                req->drv_status = 0;
1071                break;
1072        case CSIO_SCSIE_DRVCLEANUP:
1073                csio_set_state(&req->sm, csio_scsis_uninit);
1074                break;
1075        default:
1076                csio_dbg(req->lnode->hwp, "Unhandled event:%d sent to req:%p\n",
1077                         evt, req);
1078                CSIO_DB_ASSERT(0);
1079        }
1080}
1081
1082/*
1083 * csio_scsi_cmpl_handler - WR completion handler for SCSI.
1084 * @hw: HW module.
1085 * @wr: The completed WR from the ingress queue.
1086 * @len: Length of the WR.
1087 * @flb: Freelist buffer array.
1088 * @priv: Private object
1089 * @scsiwr: Pointer to SCSI WR.
1090 *
1091 * This is the WR completion handler called per completion from the
1092 * ISR. It is called with lock held. It walks past the RSS and CPL message
1093 * header where the actual WR is present.
1094 * It then gets the status, WR handle (ioreq pointer) and the len of
1095 * the WR, based on WR opcode. Only on a non-good status is the entire
1096 * WR copied into the WR cache (ioreq->fw_wr).
1097 * The ioreq corresponding to the WR is returned to the caller.
1098 * NOTE: The SCSI queue doesn't allocate a freelist today, hence
1099 * no freelist buffer is expected.
1100 */
1101struct csio_ioreq *
1102csio_scsi_cmpl_handler(struct csio_hw *hw, void *wr, uint32_t len,
1103                     struct csio_fl_dma_buf *flb, void *priv, uint8_t **scsiwr)
1104{
1105        struct csio_ioreq *ioreq = NULL;
1106        struct cpl_fw6_msg *cpl;
1107        uint8_t *tempwr;
1108        uint8_t status;
1109        struct csio_scsim *scm = csio_hw_to_scsim(hw);
1110
1111        /* skip RSS header */
1112        cpl = (struct cpl_fw6_msg *)((uintptr_t)wr + sizeof(__be64));
1113
1114        if (unlikely(cpl->opcode != CPL_FW6_MSG)) {
1115                csio_warn(hw, "Error: Invalid CPL msg %x recvd on SCSI q\n",
1116                          cpl->opcode);
1117                CSIO_INC_STATS(scm, n_inval_cplop);
1118                return NULL;
1119        }
1120
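            /*
             * The completed WR is echoed back in the CPL_FW6_MSG payload;
             * its first byte is the WR opcode, used below to tell data WRs
             * apart from abort/close WRs.
             */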
1121        tempwr = (uint8_t *)(cpl->data);
1122        status = csio_wr_status(tempwr);
1123        *scsiwr = tempwr;
1124
1125        if (likely((*tempwr == FW_SCSI_READ_WR) ||
1126                        (*tempwr == FW_SCSI_WRITE_WR) ||
1127                        (*tempwr == FW_SCSI_CMD_WR))) {
1128                ioreq = (struct csio_ioreq *)((uintptr_t)
1129                                 (((struct fw_scsi_read_wr *)tempwr)->cookie));
1130                CSIO_DB_ASSERT(virt_addr_valid(ioreq));
1131
1132                ioreq->wr_status = status;
1133
1134                return ioreq;
1135        }
1136
1137        if (*tempwr == FW_SCSI_ABRT_CLS_WR) {
1138                ioreq = (struct csio_ioreq *)((uintptr_t)
1139                         (((struct fw_scsi_abrt_cls_wr *)tempwr)->cookie));
1140                CSIO_DB_ASSERT(virt_addr_valid(ioreq));
1141
1142                ioreq->wr_status = status;
1143                return ioreq;
1144        }
1145
1146        csio_warn(hw, "WR with invalid opcode in SCSI IQ: %x\n", *tempwr);
1147        CSIO_INC_STATS(scm, n_inval_scsiop);
1148        return NULL;
1149}
1150
1151/*
1152 * csio_scsi_cleanup_io_q - Cleanup the given queue.
1153 * @scm: SCSI module.
1154 * @q: Queue to be cleaned up.
1155 *
1156 * Called with lock held. Has to exit with lock held.
1157 */
1158void
1159csio_scsi_cleanup_io_q(struct csio_scsim *scm, struct list_head *q)
1160{
1161        struct csio_hw *hw = scm->hw;
1162        struct csio_ioreq *ioreq;
1163        struct list_head *tmp, *next;
1164        struct scsi_cmnd *scmnd;
1165
1166        /* Call back the completion routines of the active_q */
1167        list_for_each_safe(tmp, next, q) {
1168                ioreq = (struct csio_ioreq *)tmp;
1169                csio_scsi_drvcleanup(ioreq);
1170                list_del_init(&ioreq->sm.sm_list);
1171                scmnd = csio_scsi_cmnd(ioreq);
1172                spin_unlock_irq(&hw->lock);
1173
1174                /*
1175                 * Upper layers may have cleared this command, hence this
1176                 * check to avoid accessing stale references.
1177                 */
1178                if (scmnd != NULL)
1179                        ioreq->io_cbfn(hw, ioreq);
1180
1181                spin_lock_irq(&scm->freelist_lock);
1182                csio_put_scsi_ioreq(scm, ioreq);
1183                spin_unlock_irq(&scm->freelist_lock);
1184
1185                spin_lock_irq(&hw->lock);
1186        }
1187}
1188
1189#define CSIO_SCSI_ABORT_Q_POLL_MS               2000
1190
1191static void
1192csio_abrt_cls(struct csio_ioreq *ioreq, struct scsi_cmnd *scmnd)
1193{
1194        struct csio_lnode *ln = ioreq->lnode;
1195        struct csio_hw *hw = ln->hwp;
1196        int ready = 0;
1197        struct csio_scsim *scsim = csio_hw_to_scsim(hw);
1198        int rv;
1199
1200        if (csio_scsi_cmnd(ioreq) != scmnd) {
1201                CSIO_INC_STATS(scsim, n_abrt_race_comp);
1202                return;
1203        }
1204
1205        ready = csio_is_lnode_ready(ln);
1206
1207        rv = csio_do_abrt_cls(hw, ioreq, (ready ? SCSI_ABORT : SCSI_CLOSE));
1208        if (rv != 0) {
1209                if (ready)
1210                        CSIO_INC_STATS(scsim, n_abrt_busy_error);
1211                else
1212                        CSIO_INC_STATS(scsim, n_cls_busy_error);
1213        }
1214}
1215
1216/*
1217 * csio_scsi_abort_io_q - Abort all I/Os on given queue
1218 * @scm: SCSI module.
1219 * @q: Queue to abort.
1220 * @tmo: Timeout in ms
1221 *
1222 * Attempt to abort all I/Os on given queue, and wait for a max
1223 * of tmo milliseconds for them to complete. Returns success
1224 * if all I/Os are aborted. Else returns -ETIMEDOUT.
1225 * Should be entered with lock held. Exits with lock held.
1226 * NOTE:
1227 * Lock has to be held across the loop that aborts I/Os, since dropping the lock
1228 * in between can cause the list to be corrupted. As a result, the caller
1229 * of this function has to ensure that the number of I/Os to be aborted
1230 * is small enough not to cause lock-held-for-too-long issues.
1231 */
1232static int
1233csio_scsi_abort_io_q(struct csio_scsim *scm, struct list_head *q, uint32_t tmo)
1234{
1235        struct csio_hw *hw = scm->hw;
1236        struct list_head *tmp, *next;
1237        int count = DIV_ROUND_UP(tmo, CSIO_SCSI_ABORT_Q_POLL_MS);
1238        struct scsi_cmnd *scmnd;
1239
1240        if (list_empty(q))
1241                return 0;
1242
1243        csio_dbg(hw, "Aborting SCSI I/Os\n");
1244
1245        /* Now abort/close I/Os in the queue passed */
1246        list_for_each_safe(tmp, next, q) {
1247                scmnd = csio_scsi_cmnd((struct csio_ioreq *)tmp);
1248                csio_abrt_cls((struct csio_ioreq *)tmp, scmnd);
1249        }
1250
1251        /* Wait till all active I/Os are completed/aborted/closed */
1252        while (!list_empty(q) && count--) {
1253                spin_unlock_irq(&hw->lock);
1254                msleep(CSIO_SCSI_ABORT_Q_POLL_MS);
1255                spin_lock_irq(&hw->lock);
1256        }
1257
1258        /* all aborts completed */
1259        if (list_empty(q))
1260                return 0;
1261
1262        return -ETIMEDOUT;
1263}
1264
1265/*
1266 * csio_scsim_cleanup_io - Cleanup all I/Os in SCSI module.
1267 * @scm: SCSI module.
1268 * @abort: abort required.
1269 * Called with lock held, should exit with lock held.
1270 * Can sleep when waiting for I/Os to complete.
1271 */
1272int
1273csio_scsim_cleanup_io(struct csio_scsim *scm, bool abort)
1274{
1275        struct csio_hw *hw = scm->hw;
1276        int rv = 0;
1277        int count = DIV_ROUND_UP(60 * 1000, CSIO_SCSI_ABORT_Q_POLL_MS);
1278
1279        /* No I/Os pending */
1280        if (list_empty(&scm->active_q))
1281                return 0;
1282
1283        /* Wait until all active I/Os are completed */
1284        while (!list_empty(&scm->active_q) && count--) {
1285                spin_unlock_irq(&hw->lock);
1286                msleep(CSIO_SCSI_ABORT_Q_POLL_MS);
1287                spin_lock_irq(&hw->lock);
1288        }
1289
1290        /* all I/Os completed */
1291        if (list_empty(&scm->active_q))
1292                return 0;
1293
1294        /* Else abort */
1295        if (abort) {
1296                rv = csio_scsi_abort_io_q(scm, &scm->active_q, 30000);
1297                if (rv == 0)
1298                        return rv;
1299                csio_dbg(hw, "Some I/O aborts timed out, cleaning up..\n");
1300        }
1301
1302        csio_scsi_cleanup_io_q(scm, &scm->active_q);
1303
1304        CSIO_DB_ASSERT(list_empty(&scm->active_q));
1305
1306        return rv;
1307}
1308
1309/*
1310 * csio_scsim_cleanup_io_lnode - Cleanup all I/Os of given lnode.
1311 * @scm: SCSI module.
1312 * @lnode: lnode
1313 *
1314 * Called with lock held, should exit with lock held.
1315 * Can sleep (with dropped lock) when waiting for I/Os to complete.
1316 */
1317int
1318csio_scsim_cleanup_io_lnode(struct csio_scsim *scm, struct csio_lnode *ln)
1319{
1320        struct csio_hw *hw = scm->hw;
1321        struct csio_scsi_level_data sld;
1322        int rv;
1323        int count = DIV_ROUND_UP(60 * 1000, CSIO_SCSI_ABORT_Q_POLL_MS);
1324
1325        csio_dbg(hw, "Gathering all SCSI I/Os on lnode %p\n", ln);
1326
1327        sld.level = CSIO_LEV_LNODE;
1328        sld.lnode = ln;
1329        INIT_LIST_HEAD(&ln->cmpl_q);
1330        csio_scsi_gather_active_ios(scm, &sld, &ln->cmpl_q);
1331
1332        /* No I/Os pending on this lnode  */
1333        if (list_empty(&ln->cmpl_q))
1334                return 0;
1335
1336        /* Wait until all active I/Os on this lnode are completed */
1337        while (!list_empty(&ln->cmpl_q) && count--) {
1338                spin_unlock_irq(&hw->lock);
1339                msleep(CSIO_SCSI_ABORT_Q_POLL_MS);
1340                spin_lock_irq(&hw->lock);
1341        }
1342
1343        /* all I/Os completed */
1344        if (list_empty(&ln->cmpl_q))
1345                return 0;
1346
1347        csio_dbg(hw, "Some I/Os pending on ln:%p, aborting them..\n", ln);
1348
1349        /* I/Os are pending, abort them */
1350        rv = csio_scsi_abort_io_q(scm, &ln->cmpl_q, 30000);
1351        if (rv != 0) {
1352                csio_dbg(hw, "Some I/O aborts timed out, cleaning up..\n");
1353                csio_scsi_cleanup_io_q(scm, &ln->cmpl_q);
1354        }
1355
1356        CSIO_DB_ASSERT(list_empty(&ln->cmpl_q));
1357
1358        return rv;
1359}
1360
1361static ssize_t
1362csio_show_hw_state(struct device *dev,
1363                   struct device_attribute *attr, char *buf)
1364{
1365        struct csio_lnode *ln = shost_priv(class_to_shost(dev));
1366        struct csio_hw *hw = csio_lnode_to_hw(ln);
1367
1368        if (csio_is_hw_ready(hw))
1369                return snprintf(buf, PAGE_SIZE, "ready\n");
1370        else
1371                return snprintf(buf, PAGE_SIZE, "not ready\n");
1372}
1373
1374/* Device reset */
1375static ssize_t
1376csio_device_reset(struct device *dev,
1377                   struct device_attribute *attr, const char *buf, size_t count)
1378{
1379        struct csio_lnode *ln = shost_priv(class_to_shost(dev));
1380        struct csio_hw *hw = csio_lnode_to_hw(ln);
1381
1382        if (*buf != '1')
1383                return -EINVAL;
1384
1385        /* Delete NPIV lnodes */
1386        csio_lnodes_exit(hw, 1);
1387
1388        /* Block upper IOs */
1389        csio_lnodes_block_request(hw);
1390
1391        spin_lock_irq(&hw->lock);
1392        csio_hw_reset(hw);
1393        spin_unlock_irq(&hw->lock);
1394
1395        /* Unblock upper IOs */
1396        csio_lnodes_unblock_request(hw);
1397        return count;
1398}
1399
1400/* disable port */
1401static ssize_t
1402csio_disable_port(struct device *dev,
1403                   struct device_attribute *attr, const char *buf, size_t count)
1404{
1405        struct csio_lnode *ln = shost_priv(class_to_shost(dev));
1406        struct csio_hw *hw = csio_lnode_to_hw(ln);
1407        bool disable;
1408
1409        if (*buf == '1' || *buf == '0')
1410                disable = (*buf == '1');
1411        else
1412                return -EINVAL;
1413
1414        /* Block upper IOs */
1415        csio_lnodes_block_by_port(hw, ln->portid);
1416
1417        spin_lock_irq(&hw->lock);
1418        csio_disable_lnodes(hw, ln->portid, disable);
1419        spin_unlock_irq(&hw->lock);
1420
1421        /* Unblock upper IOs */
1422        csio_lnodes_unblock_by_port(hw, ln->portid);
1423        return count;
1424}
1425
1426/* Show debug level */
1427static ssize_t
1428csio_show_dbg_level(struct device *dev,
1429                   struct device_attribute *attr, char *buf)
1430{
1431        struct csio_lnode *ln = shost_priv(class_to_shost(dev));
1432
1433        return snprintf(buf, PAGE_SIZE, "%x\n", ln->params.log_level);
1434}
1435
1436/* Store debug level */
1437static ssize_t
1438csio_store_dbg_level(struct device *dev,
1439                   struct device_attribute *attr, const char *buf, size_t count)
1440{
1441        struct csio_lnode *ln = shost_priv(class_to_shost(dev));
1442        struct csio_hw *hw = csio_lnode_to_hw(ln);
1443        uint32_t dbg_level = 0;
1444
1445        if (!isdigit(buf[0]))
1446                return -EINVAL;
1447
1448        if (sscanf(buf, "%i", &dbg_level) != 1)
1449                return -EINVAL;
1450
1451        ln->params.log_level = dbg_level;
1452        hw->params.log_level = dbg_level;
1453
1454        return count;
1455}
1456
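/*
 * sysfs attributes exported on the lport's scsi_host class device
 * (typically /sys/class/scsi_host/host<N>/):
 *   hw_state     - read-only HW readiness state
 *   device_reset - write '1' to reset the adapter
 *   disable_port - write '1'/'0' to disable/enable the port
 *   dbg_level    - read/write driver logging level
 */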
1457static DEVICE_ATTR(hw_state, S_IRUGO, csio_show_hw_state, NULL);
1458static DEVICE_ATTR(device_reset, S_IWUSR, NULL, csio_device_reset);
1459static DEVICE_ATTR(disable_port, S_IWUSR, NULL, csio_disable_port);
1460static DEVICE_ATTR(dbg_level, S_IRUGO | S_IWUSR, csio_show_dbg_level,
1461                  csio_store_dbg_level);
1462
1463static struct device_attribute *csio_fcoe_lport_attrs[] = {
1464        &dev_attr_hw_state,
1465        &dev_attr_device_reset,
1466        &dev_attr_disable_port,
1467        &dev_attr_dbg_level,
1468        NULL,
1469};
1470
1471static ssize_t
1472csio_show_num_reg_rnodes(struct device *dev,
1473                     struct device_attribute *attr, char *buf)
1474{
1475        struct csio_lnode *ln = shost_priv(class_to_shost(dev));
1476
1477        return snprintf(buf, PAGE_SIZE, "%d\n", ln->num_reg_rnodes);
1478}
1479
1480static DEVICE_ATTR(num_reg_rnodes, S_IRUGO, csio_show_num_reg_rnodes, NULL);
1481
1482static struct device_attribute *csio_fcoe_vport_attrs[] = {
1483        &dev_attr_num_reg_rnodes,
1484        &dev_attr_dbg_level,
1485        NULL,
1486};
1487
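/*
 * csio_scsi_copy_to_sgl - Copy data from driver DMA buffers to the SGL.
 * @hw: HW module.
 * @req: IO request.
 *
 * Walks the driver DMA buffers queued on req->gen_list and copies their
 * contents into the scatter-gather list of the SCSI command, mapping each
 * SG page with kmap_atomic(). Returns DID_OK if the entire buffer length
 * was copied, DID_ERROR otherwise.
 */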
1488static inline uint32_t
1489csio_scsi_copy_to_sgl(struct csio_hw *hw, struct csio_ioreq *req)
1490{
1491        struct scsi_cmnd *scmnd  = (struct scsi_cmnd *)csio_scsi_cmnd(req);
1492        struct scatterlist *sg;
1493        uint32_t bytes_left;
1494        uint32_t bytes_copy;
1495        uint32_t buf_off = 0;
1496        uint32_t start_off = 0;
1497        uint32_t sg_off = 0;
1498        void *sg_addr;
1499        void *buf_addr;
1500        struct csio_dma_buf *dma_buf;
1501
1502        bytes_left = scsi_bufflen(scmnd);
1503        sg = scsi_sglist(scmnd);
1504        dma_buf = (struct csio_dma_buf *)csio_list_next(&req->gen_list);
1505
1506        /* Copy data from driver buffer to SGs of SCSI CMD */
1507        while (bytes_left > 0 && sg && dma_buf) {
1508                if (buf_off >= dma_buf->len) {
1509                        buf_off = 0;
1510                        dma_buf = (struct csio_dma_buf *)
1511                                        csio_list_next(dma_buf);
1512                        continue;
1513                }
1514
1515                if (start_off >= sg->length) {
1516                        start_off -= sg->length;
1517                        sg = sg_next(sg);
1518                        continue;
1519                }
1520
1521                buf_addr = dma_buf->vaddr + buf_off;
1522                sg_off = sg->offset + start_off;
1523                bytes_copy = min((dma_buf->len - buf_off),
1524                                sg->length - start_off);
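                /* kmap_atomic() maps a single page, so do not let this
                 * copy cross the page boundary that contains sg_off.
                 */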
1525                bytes_copy = min((uint32_t)(PAGE_SIZE - (sg_off & ~PAGE_MASK)),
1526                                 bytes_copy);
1527
1528                sg_addr = kmap_atomic(sg_page(sg) + (sg_off >> PAGE_SHIFT));
1529                if (!sg_addr) {
1530                        csio_err(hw, "failed to kmap sg:%p of ioreq:%p\n",
1531                                sg, req);
1532                        break;
1533                }
1534
1535                csio_dbg(hw, "copy_to_sgl:sg_addr %p sg_off %d buf %p len %d\n",
1536                                sg_addr, sg_off, buf_addr, bytes_copy);
1537                memcpy(sg_addr + (sg_off & ~PAGE_MASK), buf_addr, bytes_copy);
1538                kunmap_atomic(sg_addr);
1539
1540                start_off +=  bytes_copy;
1541                buf_off += bytes_copy;
1542                bytes_left -= bytes_copy;
1543        }
1544
1545        if (bytes_left > 0)
1546                return DID_ERROR;
1547        else
1548                return DID_OK;
1549}
1550
1551/*
1552 * csio_scsi_err_handler - SCSI error handler.
1553 * @hw: HW module.
1554 * @req: IO request.
1555 *
1556 */
1557static inline void
1558csio_scsi_err_handler(struct csio_hw *hw, struct csio_ioreq *req)
1559{
1560        struct scsi_cmnd *cmnd  = (struct scsi_cmnd *)csio_scsi_cmnd(req);
1561        struct csio_scsim *scm = csio_hw_to_scsim(hw);
1562        struct fcp_resp_with_ext *fcp_resp;
1563        struct fcp_resp_rsp_info *rsp_info;
1564        struct csio_dma_buf *dma_buf;
1565        uint8_t flags, scsi_status = 0;
1566        uint32_t host_status = DID_OK;
1567        uint32_t rsp_len = 0, sns_len = 0;
1568        struct csio_rnode *rn = (struct csio_rnode *)(cmnd->device->hostdata);
1569
1570
1571        switch (req->wr_status) {
1572        case FW_HOSTERROR:
1573                if (unlikely(!csio_is_hw_ready(hw)))
1574                        return;
1575
1576                host_status = DID_ERROR;
1577                CSIO_INC_STATS(scm, n_hosterror);
1578
1579                break;
1580        case FW_SCSI_RSP_ERR:
1581                dma_buf = &req->dma_buf;
1582                fcp_resp = (struct fcp_resp_with_ext *)dma_buf->vaddr;
1583                rsp_info = (struct fcp_resp_rsp_info *)(fcp_resp + 1);
1584                flags = fcp_resp->resp.fr_flags;
1585                scsi_status = fcp_resp->resp.fr_status;
1586
1587                if (flags & FCP_RSP_LEN_VAL) {
1588                        rsp_len = be32_to_cpu(fcp_resp->ext.fr_rsp_len);
1589                        if ((rsp_len != 0 && rsp_len != 4 && rsp_len != 8) ||
1590                                (rsp_info->rsp_code != FCP_TMF_CMPL)) {
1591                                host_status = DID_ERROR;
1592                                goto out;
1593                        }
1594                }
1595
1596                if ((flags & FCP_SNS_LEN_VAL) && fcp_resp->ext.fr_sns_len) {
1597                        sns_len = be32_to_cpu(fcp_resp->ext.fr_sns_len);
1598                        if (sns_len > SCSI_SENSE_BUFFERSIZE)
1599                                sns_len = SCSI_SENSE_BUFFERSIZE;
1600
1601                        memcpy(cmnd->sense_buffer,
1602                               &rsp_info->_fr_resvd[0] + rsp_len, sns_len);
1603                        CSIO_INC_STATS(scm, n_autosense);
1604                }
1605
1606                scsi_set_resid(cmnd, 0);
1607
1608                /* Underrun */
1609                if (flags & FCP_RESID_UNDER) {
1610                        scsi_set_resid(cmnd,
1611                                       be32_to_cpu(fcp_resp->ext.fr_resid));
1612
1613                        if (!(flags & FCP_SNS_LEN_VAL) &&
1614                            (scsi_status == SAM_STAT_GOOD) &&
1615                            ((scsi_bufflen(cmnd) - scsi_get_resid(cmnd))
1616                                                        < cmnd->underflow))
1617                                host_status = DID_ERROR;
1618                } else if (flags & FCP_RESID_OVER)
1619                        host_status = DID_ERROR;
1620
1621                CSIO_INC_STATS(scm, n_rsperror);
1622                break;
1623
1624        case FW_SCSI_OVER_FLOW_ERR:
1625                csio_warn(hw,
1626                          "Over-flow error, cmnd:0x%x expected len:0x%x"
1627                          " resid:0x%x\n", cmnd->cmnd[0],
1628                          scsi_bufflen(cmnd), scsi_get_resid(cmnd));
1629                host_status = DID_ERROR;
1630                CSIO_INC_STATS(scm, n_ovflerror);
1631                break;
1632
1633        case FW_SCSI_UNDER_FLOW_ERR:
1634                          "Under-flow error, cmnd:0x%x expected"
1635                          "Under-flow error,cmnd:0x%x expected"
1636                          " len:0x%x resid:0x%x lun:0x%llx ssn:0x%x\n",
1637                          cmnd->cmnd[0], scsi_bufflen(cmnd),
1638                          scsi_get_resid(cmnd), cmnd->device->lun,
1639                          rn->flowid);
1640                host_status = DID_ERROR;
1641                CSIO_INC_STATS(scm, n_unflerror);
1642                break;
1643
1644        case FW_SCSI_ABORT_REQUESTED:
1645        case FW_SCSI_ABORTED:
1646        case FW_SCSI_CLOSE_REQUESTED:
1647                csio_dbg(hw, "Req %p cmd:%p op:%x %s\n", req, cmnd,
1648                             cmnd->cmnd[0],
1649                            (req->wr_status == FW_SCSI_CLOSE_REQUESTED) ?
1650                            "closed" : "aborted");
1651                /*
1652                 * csio_eh_abort_handler checks this value to
1653                 * succeed or fail the abort request.
1654                 */
1655                host_status = DID_REQUEUE;
1656                if (req->wr_status == FW_SCSI_CLOSE_REQUESTED)
1657                        CSIO_INC_STATS(scm, n_closed);
1658                else
1659                        CSIO_INC_STATS(scm, n_aborted);
1660                break;
1661
1662        case FW_SCSI_ABORT_TIMEDOUT:
1663                /* FW timed out the abort itself */
1664                csio_dbg(hw, "FW timed out abort req:%p cmnd:%p status:%x\n",
1665                         req, cmnd, req->wr_status);
1666                host_status = DID_ERROR;
1667                CSIO_INC_STATS(scm, n_abrt_timedout);
1668                break;
1669
1670        case FW_RDEV_NOT_READY:
1671                /*
1672                 * In firmware, an RDEV can get into this state
1673                 * temporarily, before moving into the disappeared/lost
1674                 * state. So the driver should complete the request as if
1675                 * the device had disappeared.
1676                 */
1677                CSIO_INC_STATS(scm, n_rdev_nr_error);
1678                host_status = DID_ERROR;
1679                break;
1680
1681        case FW_ERR_RDEV_LOST:
1682                CSIO_INC_STATS(scm, n_rdev_lost_error);
1683                host_status = DID_ERROR;
1684                break;
1685
1686        case FW_ERR_RDEV_LOGO:
1687                CSIO_INC_STATS(scm, n_rdev_logo_error);
1688                host_status = DID_ERROR;
1689                break;
1690
1691        case FW_ERR_RDEV_IMPL_LOGO:
1692                host_status = DID_ERROR;
1693                break;
1694
1695        case FW_ERR_LINK_DOWN:
1696                CSIO_INC_STATS(scm, n_link_down_error);
1697                host_status = DID_ERROR;
1698                break;
1699
1700        case FW_FCOE_NO_XCHG:
1701                CSIO_INC_STATS(scm, n_no_xchg_error);
1702                host_status = DID_ERROR;
1703                break;
1704
1705        default:
1706                csio_err(hw, "Unknown SCSI FW WR status:%d req:%p cmnd:%p\n",
1707                            req->wr_status, req, cmnd);
1708                CSIO_DB_ASSERT(0);
1709
1710                CSIO_INC_STATS(scm, n_unknown_error);
1711                host_status = DID_ERROR;
1712                break;
1713        }
1714
1715out:
1716        if (req->nsge > 0) {
1717                scsi_dma_unmap(cmnd);
1718                if (req->dcopy && (host_status == DID_OK))
1719                        host_status = csio_scsi_copy_to_sgl(hw, req);
1720        }
1721
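        /* Pack the host byte and the SCSI status byte into the result */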
1722        cmnd->result = (((host_status) << 16) | scsi_status);
1723        cmnd->scsi_done(cmnd);
1724
1725        /* Wake up waiting threads */
1726        csio_scsi_cmnd(req) = NULL;
1727        complete(&req->cmplobj);
1728}
1729
1730/*
1731 * csio_scsi_cbfn - SCSI callback function.
1732 * @hw: HW module.
1733 * @req: IO request.
1734 *
1735 */
1736static void
1737csio_scsi_cbfn(struct csio_hw *hw, struct csio_ioreq *req)
1738{
1739        struct scsi_cmnd *cmnd  = (struct scsi_cmnd *)csio_scsi_cmnd(req);
1740        uint8_t scsi_status = SAM_STAT_GOOD;
1741        uint32_t host_status = DID_OK;
1742
1743        if (likely(req->wr_status == FW_SUCCESS)) {
1744                if (req->nsge > 0) {
1745                        scsi_dma_unmap(cmnd);
1746                        if (req->dcopy)
1747                                host_status = csio_scsi_copy_to_sgl(hw, req);
1748                }
1749
1750                cmnd->result = (((host_status) << 16) | scsi_status);
1751                cmnd->scsi_done(cmnd);
1752                csio_scsi_cmnd(req) = NULL;
1753                CSIO_INC_STATS(csio_hw_to_scsim(hw), n_tot_success);
1754        } else {
1755                /* Error handling */
1756                csio_scsi_err_handler(hw, req);
1757        }
1758}
1759
1760/**
1761 * csio_queuecommand - Entry point to kickstart an I/O request.
1762 * @host:       The scsi_host pointer.
1763 * @cmnd:       The I/O request from ML.
1764 *
1765 * This routine does the following:
1766 *      - Checks for HW and Rnode module readiness.
1767 *      - Gets a free ioreq structure (which is already initialized
1768 *        to uninit during its allocation).
1769 *      - Maps SG elements.
1770 *      - Initializes ioreq members.
1771 *      - Kicks off the SCSI state machine for this IO.
1772 *      - Returns busy status on error.
1773 */
1774static int
1775csio_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *cmnd)
1776{
1777        struct csio_lnode *ln = shost_priv(host);
1778        struct csio_hw *hw = csio_lnode_to_hw(ln);
1779        struct csio_scsim *scsim = csio_hw_to_scsim(hw);
1780        struct csio_rnode *rn = (struct csio_rnode *)(cmnd->device->hostdata);
1781        struct csio_ioreq *ioreq = NULL;
1782        unsigned long flags;
1783        int nsge = 0;
1784        int rv = SCSI_MLQUEUE_HOST_BUSY, nr;
1785        int retval;
1786        struct csio_scsi_qset *sqset;
1787        struct fc_rport *rport = starget_to_rport(scsi_target(cmnd->device));
1788
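        /* Pick the per-port, per-CPU queue set based on the CPU that
         * submitted this request.
         */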
1789        sqset = &hw->sqset[ln->portid][blk_mq_rq_cpu(scsi_cmd_to_rq(cmnd))];
1790
1791        nr = fc_remote_port_chkready(rport);
1792        if (nr) {
1793                cmnd->result = nr;
1794                CSIO_INC_STATS(scsim, n_rn_nr_error);
1795                goto err_done;
1796        }
1797
1798        if (unlikely(!csio_is_hw_ready(hw))) {
1799                cmnd->result = (DID_REQUEUE << 16);
1800                CSIO_INC_STATS(scsim, n_hw_nr_error);
1801                goto err_done;
1802        }
1803
1804        /* Get req->nsge, if there are SG elements to be mapped  */
1805        nsge = scsi_dma_map(cmnd);
1806        if (unlikely(nsge < 0)) {
1807                CSIO_INC_STATS(scsim, n_dmamap_error);
1808                goto err;
1809        }
1810
1811        /* Do we support so many mappings? */
1812        if (unlikely(nsge > scsim->max_sge)) {
1813                csio_warn(hw,
1814                          "More SGEs than can be supported."
1815                          " SGEs: %d, Max SGEs: %d\n", nsge, scsim->max_sge);
1816                CSIO_INC_STATS(scsim, n_unsupp_sge_error);
1817                goto err_dma_unmap;
1818        }
1819
1820        /* Get a free ioreq structure - SM is already set to uninit */
1821        ioreq = csio_get_scsi_ioreq_lock(hw, scsim);
1822        if (!ioreq) {
1823                csio_err(hw, "Out of I/O request elements. Active #:%d\n",
1824                         scsim->stats.n_active);
1825                CSIO_INC_STATS(scsim, n_no_req_error);
1826                goto err_dma_unmap;
1827        }
1828
1829        ioreq->nsge             = nsge;
1830        ioreq->lnode            = ln;
1831        ioreq->rnode            = rn;
1832        ioreq->iq_idx           = sqset->iq_idx;
1833        ioreq->eq_idx           = sqset->eq_idx;
1834        ioreq->wr_status        = 0;
1835        ioreq->drv_status       = 0;
1836        csio_scsi_cmnd(ioreq)   = (void *)cmnd;
1837        ioreq->tmo              = 0;
1838        ioreq->datadir          = cmnd->sc_data_direction;
1839
1840        if (cmnd->sc_data_direction == DMA_TO_DEVICE) {
1841                CSIO_INC_STATS(ln, n_output_requests);
1842                ln->stats.n_output_bytes += scsi_bufflen(cmnd);
1843        } else if (cmnd->sc_data_direction == DMA_FROM_DEVICE) {
1844                CSIO_INC_STATS(ln, n_input_requests);
1845                ln->stats.n_input_bytes += scsi_bufflen(cmnd);
1846        } else
1847                CSIO_INC_STATS(ln, n_control_requests);
1848
1849        /* Set cbfn */
1850        ioreq->io_cbfn = csio_scsi_cbfn;
1851
1852        /* Needed during abort */
1853        cmnd->host_scribble = (unsigned char *)ioreq;
1854        cmnd->SCp.Message = 0;
1855
1856        /* Kick off SCSI IO SM on the ioreq */
1857        spin_lock_irqsave(&hw->lock, flags);
1858        retval = csio_scsi_start_io(ioreq);
1859        spin_unlock_irqrestore(&hw->lock, flags);
1860
1861        if (retval != 0) {
1862                csio_err(hw, "ioreq: %p couldn't be started, status:%d\n",
1863                         ioreq, retval);
1864                CSIO_INC_STATS(scsim, n_busy_error);
1865                goto err_put_req;
1866        }
1867
1868        return 0;
1869
1870err_put_req:
1871        csio_put_scsi_ioreq_lock(hw, scsim, ioreq);
1872err_dma_unmap:
1873        if (nsge > 0)
1874                scsi_dma_unmap(cmnd);
1875err:
1876        return rv;
1877
1878err_done:
1879        cmnd->scsi_done(cmnd);
1880        return 0;
1881}
1882
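/*
 * csio_do_abrt_cls - Issue an abort or a close for an outstanding I/O.
 * @hw: HW module.
 * @ioreq: I/O request to be aborted or closed.
 * @abort: SCSI_ABORT to abort the I/O, SCSI_CLOSE to close it.
 *
 * Called with the HW lock held (see csio_eh_abort_handler).
 */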
1883static int
1884csio_do_abrt_cls(struct csio_hw *hw, struct csio_ioreq *ioreq, bool abort)
1885{
1886        int rv;
1887        int cpu = smp_processor_id();
1888        struct csio_lnode *ln = ioreq->lnode;
1889        struct csio_scsi_qset *sqset = &hw->sqset[ln->portid][cpu];
1890
1891        ioreq->tmo = CSIO_SCSI_ABRT_TMO_MS;
1892        /*
1893         * Use current processor queue for posting the abort/close, but retain
1894         * the ingress queue ID of the original I/O being aborted/closed - we
1895         * need the abort/close completion to be received on the same queue
1896         * as the original I/O.
1897         */
1898        ioreq->eq_idx = sqset->eq_idx;
1899
1900        if (abort == SCSI_ABORT)
1901                rv = csio_scsi_abort(ioreq);
1902        else
1903                rv = csio_scsi_close(ioreq);
1904
1905        return rv;
1906}
1907
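/*
 * csio_eh_abort_handler - SCSI mid-layer abort handler.
 * @cmnd: SCSI command to be aborted.
 *
 * Issues an abort (or a close, if the lnode is not ready) for the ioreq
 * backing @cmnd and waits for the firmware to respond. Returns SUCCESS
 * if the command was aborted/closed (DID_REQUEUE), FAILED otherwise.
 */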
1908static int
1909csio_eh_abort_handler(struct scsi_cmnd *cmnd)
1910{
1911        struct csio_ioreq *ioreq;
1912        struct csio_lnode *ln = shost_priv(cmnd->device->host);
1913        struct csio_hw *hw = csio_lnode_to_hw(ln);
1914        struct csio_scsim *scsim = csio_hw_to_scsim(hw);
1915        int ready = 0, ret;
1916        unsigned long tmo = 0;
1917        int rv;
1918        struct csio_rnode *rn = (struct csio_rnode *)(cmnd->device->hostdata);
1919
1920        ret = fc_block_scsi_eh(cmnd);
1921        if (ret)
1922                return ret;
1923
1924        ioreq = (struct csio_ioreq *)cmnd->host_scribble;
1925        if (!ioreq)
1926                return SUCCESS;
1927
1928        if (!rn)
1929                return FAILED;
1930
1931        csio_dbg(hw,
1932                 "Request to abort ioreq:%p cmd:%p cdb:%08llx"
1933                 " ssni:0x%x lun:%llu iq:0x%x\n",
1934                ioreq, cmnd, *((uint64_t *)cmnd->cmnd), rn->flowid,
1935                cmnd->device->lun, csio_q_physiqid(hw, ioreq->iq_idx));
1936
1937        if (((struct scsi_cmnd *)csio_scsi_cmnd(ioreq)) != cmnd) {
1938                CSIO_INC_STATS(scsim, n_abrt_race_comp);
1939                return SUCCESS;
1940        }
1941
1942        ready = csio_is_lnode_ready(ln);
1943        tmo = CSIO_SCSI_ABRT_TMO_MS;
1944
1945        reinit_completion(&ioreq->cmplobj);
1946        spin_lock_irq(&hw->lock);
1947        rv = csio_do_abrt_cls(hw, ioreq, (ready ? SCSI_ABORT : SCSI_CLOSE));
1948        spin_unlock_irq(&hw->lock);
1949
1950        if (rv != 0) {
1951                if (rv == -EINVAL) {
1952                        /* Return success if the abort/close request was
1953                         * issued on an already completed IO
1954                         */
1955                        return SUCCESS;
1956                }
1957                if (ready)
1958                        CSIO_INC_STATS(scsim, n_abrt_busy_error);
1959                else
1960                        CSIO_INC_STATS(scsim, n_cls_busy_error);
1961
1962                goto inval_scmnd;
1963        }
1964
1965        wait_for_completion_timeout(&ioreq->cmplobj, msecs_to_jiffies(tmo));
1966
1967        /* FW didn't respond to the abort within our timeout */
1968        if (((struct scsi_cmnd *)csio_scsi_cmnd(ioreq)) == cmnd) {
1969
1970                csio_err(hw, "Abort timed out -- req: %p\n", ioreq);
1971                CSIO_INC_STATS(scsim, n_abrt_timedout);
1972
1973inval_scmnd:
1974                if (ioreq->nsge > 0)
1975                        scsi_dma_unmap(cmnd);
1976
1977                spin_lock_irq(&hw->lock);
1978                csio_scsi_cmnd(ioreq) = NULL;
1979                spin_unlock_irq(&hw->lock);
1980
1981                cmnd->result = (DID_ERROR << 16);
1982                cmnd->scsi_done(cmnd);
1983
1984                return FAILED;
1985        }
1986
1987        /* FW successfully aborted the request */
1988        if (host_byte(cmnd->result) == DID_REQUEUE) {
1989                csio_info(hw,
1990                        "Aborted SCSI command to (%d:%llu) tag %u\n",
1991                        cmnd->device->id, cmnd->device->lun,
1992                        scsi_cmd_to_rq(cmnd)->tag);
1993                return SUCCESS;
1994        } else {
1995                csio_info(hw,
1996                        "Failed to abort SCSI command, (%d:%llu) tag %u\n",
1997                        cmnd->device->id, cmnd->device->lun,
1998                        scsi_cmd_to_rq(cmnd)->tag);
1999                return FAILED;
2000        }
2001}
2002
2003/*
2004 * csio_tm_cbfn - TM callback function.
2005 * @hw: HW module.
2006 * @req: IO request.
2007 *
2008 * Cache the result in 'cmnd', since ioreq will be freed soon
2009 * after we return from here, and the waiting thread shouldn't trust
2010 * the ioreq contents.
2011 */
2012static void
2013csio_tm_cbfn(struct csio_hw *hw, struct csio_ioreq *req)
2014{
2015        struct scsi_cmnd *cmnd  = (struct scsi_cmnd *)csio_scsi_cmnd(req);
2016        struct csio_dma_buf *dma_buf;
2017        uint8_t flags = 0;
2018        struct fcp_resp_with_ext *fcp_resp;
2019        struct fcp_resp_rsp_info *rsp_info;
2020
2021        csio_dbg(hw, "req: %p in csio_tm_cbfn status: %d\n",
2022                      req, req->wr_status);
2023
2024        /* Cache FW return status */
2025        cmnd->SCp.Status = req->wr_status;
2026
2027        /* Special handling based on FCP response */
2028
2029        /*
2030         * FW returns us this error, if flags were set. FCP4 says
2031         * FCP_RSP_LEN_VAL in flags shall be set for TM completions.
2032         * So if a target were to set this bit, we expect that the
2033         * rsp_code is set to FCP_TMF_CMPL for a successful TM
2034         * completion. Any other rsp_code means TM operation failed.
2035         * If a target were to just ignore setting flags, we treat
2036         * the TM operation as success, and FW returns FW_SUCCESS.
2037         */
2038        if (req->wr_status == FW_SCSI_RSP_ERR) {
2039                dma_buf = &req->dma_buf;
2040                fcp_resp = (struct fcp_resp_with_ext *)dma_buf->vaddr;
2041                rsp_info = (struct fcp_resp_rsp_info *)(fcp_resp + 1);
2042
2043                flags = fcp_resp->resp.fr_flags;
2044
2045                /* Modify return status if flags indicate success */
2046                if (flags & FCP_RSP_LEN_VAL)
2047                        if (rsp_info->rsp_code == FCP_TMF_CMPL)
2048                                cmnd->SCp.Status = FW_SUCCESS;
2049
2050                csio_dbg(hw, "TM FCP rsp code: %d\n", rsp_info->rsp_code);
2051        }
2052
2053        /* Wake up the TM handler thread */
2054        csio_scsi_cmnd(req) = NULL;
2055}
2056
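/*
 * csio_eh_lun_reset_handler - SCSI mid-layer LUN reset handler.
 * @cmnd: SCSI command that prompted the LUN reset.
 *
 * Sends an FCP LUN reset TMF to the remote node, polls for its completion,
 * and then gathers and aborts any I/Os still active on that LUN. Returns
 * SUCCESS on a clean reset, FAILED otherwise.
 */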
2057static int
2058csio_eh_lun_reset_handler(struct scsi_cmnd *cmnd)
2059{
2060        struct csio_lnode *ln = shost_priv(cmnd->device->host);
2061        struct csio_hw *hw = csio_lnode_to_hw(ln);
2062        struct csio_scsim *scsim = csio_hw_to_scsim(hw);
2063        struct csio_rnode *rn = (struct csio_rnode *)(cmnd->device->hostdata);
2064        struct csio_ioreq *ioreq = NULL;
2065        struct csio_scsi_qset *sqset;
2066        unsigned long flags;
2067        int retval;
2068        int count, ret;
2069        LIST_HEAD(local_q);
2070        struct csio_scsi_level_data sld;
2071
2072        if (!rn)
2073                goto fail;
2074
2075        csio_dbg(hw, "Request to reset LUN:%llu (ssni:0x%x tgtid:%d)\n",
2076                      cmnd->device->lun, rn->flowid, rn->scsi_id);
2077
2078        if (!csio_is_lnode_ready(ln)) {
2079                csio_err(hw,
2080                         "LUN reset cannot be issued on non-ready"
2081                         " local node vnpi:0x%x (LUN:%llu)\n",
2082                         ln->vnp_flowid, cmnd->device->lun);
2083                goto fail;
2084        }
2085
2086        /* Lnode is ready, now wait on rport node readiness */
2087        ret = fc_block_scsi_eh(cmnd);
2088        if (ret)
2089                return ret;
2090
2091        /*
2092         * If we have blocked in the previous call, at this point, either the
2093         * remote node has come back online, or device loss timer has fired
2094         * and the remote node is destroyed. Allow the LUN reset only for
2095         * the former case, since LUN reset is a TMF I/O on the wire, and we
2096         * need a valid session to issue it.
2097         */
2098        if (fc_remote_port_chkready(rn->rport)) {
2099                csio_err(hw,
2100                         "LUN reset cannot be issued on non-ready"
2101                         " remote node ssni:0x%x (LUN:%llu)\n",
2102                         rn->flowid, cmnd->device->lun);
2103                goto fail;
2104        }
2105
2106        /* Get a free ioreq structure - SM is already set to uninit */
2107        ioreq = csio_get_scsi_ioreq_lock(hw, scsim);
2108
2109        if (!ioreq) {
2110                csio_err(hw, "Out of IO request elements. Active # :%d\n",
2111                         scsim->stats.n_active);
2112                goto fail;
2113        }
2114
2115        sqset                   = &hw->sqset[ln->portid][smp_processor_id()];
2116        ioreq->nsge             = 0;
2117        ioreq->lnode            = ln;
2118        ioreq->rnode            = rn;
2119        ioreq->iq_idx           = sqset->iq_idx;
2120        ioreq->eq_idx           = sqset->eq_idx;
2121
2122        csio_scsi_cmnd(ioreq)   = cmnd;
2123        cmnd->host_scribble     = (unsigned char *)ioreq;
2124        cmnd->SCp.Status        = 0;
2125
2126        cmnd->SCp.Message       = FCP_TMF_LUN_RESET;
2127        ioreq->tmo              = CSIO_SCSI_LUNRST_TMO_MS / 1000;
2128
2129        /*
2130         * FW times the LUN reset for ioreq->tmo, so we have to wait a little
2131         * longer (10s for now) than that to allow FW to return the timed
2132         * out command.
2133         */
2134        count = DIV_ROUND_UP((ioreq->tmo + 10) * 1000, CSIO_SCSI_TM_POLL_MS);
2135
2136        /* Set cbfn */
2137        ioreq->io_cbfn = csio_tm_cbfn;
2138
2139        /* Save off the ioreq info for later use */
2140        sld.level = CSIO_LEV_LUN;
2141        sld.lnode = ioreq->lnode;
2142        sld.rnode = ioreq->rnode;
2143        sld.oslun = cmnd->device->lun;
2144
2145        spin_lock_irqsave(&hw->lock, flags);
2146        /* Kick off TM SM on the ioreq */
2147        retval = csio_scsi_start_tm(ioreq);
2148        spin_unlock_irqrestore(&hw->lock, flags);
2149
2150        if (retval != 0) {
2151                csio_err(hw, "Failed to issue LUN reset, req:%p, status:%d\n",
2152                            ioreq, retval);
2153                goto fail_ret_ioreq;
2154        }
2155
2156        csio_dbg(hw, "Waiting max %d secs for LUN reset completion\n",
2157                    count * (CSIO_SCSI_TM_POLL_MS / 1000));
2158        /* Wait for completion */
2159        while ((((struct scsi_cmnd *)csio_scsi_cmnd(ioreq)) == cmnd)
2160                                                                && count--)
2161                msleep(CSIO_SCSI_TM_POLL_MS);
2162
2163        /* LUN reset timed-out */
2164        if (((struct scsi_cmnd *)csio_scsi_cmnd(ioreq)) == cmnd) {
2165                csio_err(hw, "LUN reset (%d:%llu) timed out\n",
2166                         cmnd->device->id, cmnd->device->lun);
2167
2168                spin_lock_irq(&hw->lock);
2169                csio_scsi_drvcleanup(ioreq);
2170                list_del_init(&ioreq->sm.sm_list);
2171                spin_unlock_irq(&hw->lock);
2172
2173                goto fail_ret_ioreq;
2174        }
2175
2176        /* LUN reset returned, check cached status */
2177        if (cmnd->SCp.Status != FW_SUCCESS) {
2178                csio_err(hw, "LUN reset failed (%d:%llu), status: %d\n",
2179                         cmnd->device->id, cmnd->device->lun, cmnd->SCp.Status);
2180                goto fail;
2181        }
2182
2183        /* LUN reset succeeded, start aborting affected I/Os */
2184        /*
2185         * Since the host guarantees during LUN reset that there
2186         * will not be any more I/Os to that LUN, until the LUN reset
2187         * completes, we gather pending I/Os after the LUN reset.
2188         */
2189        spin_lock_irq(&hw->lock);
2190        csio_scsi_gather_active_ios(scsim, &sld, &local_q);
2191
2192        retval = csio_scsi_abort_io_q(scsim, &local_q, 30000);
2193        spin_unlock_irq(&hw->lock);
2194
2195        /* Aborts may have timed out */
2196        if (retval != 0) {
2197                csio_err(hw,
2198                         "Attempt to abort I/Os during LUN reset of %llu"
2199                         " returned %d\n", cmnd->device->lun, retval);
2200                /* Return I/Os back to active_q */
2201                spin_lock_irq(&hw->lock);
2202                list_splice_tail_init(&local_q, &scsim->active_q);
2203                spin_unlock_irq(&hw->lock);
2204                goto fail;
2205        }
2206
2207        CSIO_INC_STATS(rn, n_lun_rst);
2208
2209        csio_info(hw, "LUN reset occurred (%d:%llu)\n",
2210                  cmnd->device->id, cmnd->device->lun);
2211
2212        return SUCCESS;
2213
2214fail_ret_ioreq:
2215        csio_put_scsi_ioreq_lock(hw, scsim, ioreq);
2216fail:
2217        CSIO_INC_STATS(rn, n_lun_rst_fail);
2218        return FAILED;
2219}
2220
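/*
 * csio_slave_alloc - Per-device setup.
 *
 * Cache the driver data kept in the rport's dd_data in sdev->hostdata,
 * so the I/O and error-handling paths can read it back from
 * cmnd->device->hostdata.
 */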
2221static int
2222csio_slave_alloc(struct scsi_device *sdev)
2223{
2224        struct fc_rport *rport = starget_to_rport(scsi_target(sdev));
2225
2226        if (!rport || fc_remote_port_chkready(rport))
2227                return -ENXIO;
2228
2229        sdev->hostdata = *((struct csio_lnode **)(rport->dd_data));
2230
2231        return 0;
2232}
2233
2234static int
2235csio_slave_configure(struct scsi_device *sdev)
2236{
2237        scsi_change_queue_depth(sdev, csio_lun_qdepth);
2238        return 0;
2239}
2240
2241static void
2242csio_slave_destroy(struct scsi_device *sdev)
2243{
2244        sdev->hostdata = NULL;
2245}
2246
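/*
 * csio_scan_finished - Tell the midlayer whether the initial scan is done.
 * @shost: SCSI host.
 * @time: Elapsed scan time, in jiffies.
 *
 * Returns 1 if the lnode has been removed; otherwise defers to
 * csio_scan_done() using the configured scan timeouts.
 */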
2247static int
2248csio_scan_finished(struct Scsi_Host *shost, unsigned long time)
2249{
2250        struct csio_lnode *ln = shost_priv(shost);
2251        int rv = 1;
2252
2253        spin_lock_irq(shost->host_lock);
2254        if (!ln->hwp || csio_list_deleted(&ln->sm.sm_list))
2255                goto out;
2256
2257        rv = csio_scan_done(ln, jiffies, time, csio_max_scan_tmo * HZ,
2258                            csio_delta_scan_tmo * HZ);
2259out:
2260        spin_unlock_irq(shost->host_lock);
2261
2262        return rv;
2263}
2264
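/* SCSI host template for the physical port (lport) hosts */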
2265struct scsi_host_template csio_fcoe_shost_template = {
2266        .module                 = THIS_MODULE,
2267        .name                   = CSIO_DRV_DESC,
2268        .proc_name              = KBUILD_MODNAME,
2269        .queuecommand           = csio_queuecommand,
2270        .eh_timed_out           = fc_eh_timed_out,
2271        .eh_abort_handler       = csio_eh_abort_handler,
2272        .eh_device_reset_handler = csio_eh_lun_reset_handler,
2273        .slave_alloc            = csio_slave_alloc,
2274        .slave_configure        = csio_slave_configure,
2275        .slave_destroy          = csio_slave_destroy,
2276        .scan_finished          = csio_scan_finished,
2277        .this_id                = -1,
2278        .sg_tablesize           = CSIO_SCSI_MAX_SGE,
2279        .cmd_per_lun            = CSIO_MAX_CMD_PER_LUN,
2280        .shost_attrs            = csio_fcoe_lport_attrs,
2281        .max_sectors            = CSIO_MAX_SECTOR_SIZE,
2282};
2283
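/* SCSI host template for NPIV virtual port (vport) hosts */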
2284struct scsi_host_template csio_fcoe_shost_vport_template = {
2285        .module                 = THIS_MODULE,
2286        .name                   = CSIO_DRV_DESC,
2287        .proc_name              = KBUILD_MODNAME,
2288        .queuecommand           = csio_queuecommand,
2289        .eh_timed_out           = fc_eh_timed_out,
2290        .eh_abort_handler       = csio_eh_abort_handler,
2291        .eh_device_reset_handler = csio_eh_lun_reset_handler,
2292        .slave_alloc            = csio_slave_alloc,
2293        .slave_configure        = csio_slave_configure,
2294        .slave_destroy          = csio_slave_destroy,
2295        .scan_finished          = csio_scan_finished,
2296        .this_id                = -1,
2297        .sg_tablesize           = CSIO_SCSI_MAX_SGE,
2298        .cmd_per_lun            = CSIO_MAX_CMD_PER_LUN,
2299        .shost_attrs            = csio_fcoe_vport_attrs,
2300        .max_sectors            = CSIO_MAX_SECTOR_SIZE,
2301};
2302
2303/*
2304 * csio_scsi_alloc_ddp_bufs - Allocate buffers for DDP of unaligned SGLs.
2305 * @scm: SCSI Module
2306 * @hw: HW device.
2307 * @buf_size: buffer size
2308 * @num_buf: Number of buffers.
2309 *
2310 * This routine allocates the DMA buffers required for SCSI data transfer
2311 * when the SGL buffers of a SCSI read request posted by the SCSI midlayer
2312 * are not virtually contiguous.
2313 */
2314static int
2315csio_scsi_alloc_ddp_bufs(struct csio_scsim *scm, struct csio_hw *hw,
2316                         int buf_size, int num_buf)
2317{
2318        int n = 0;
2319        struct list_head *tmp;
2320        struct csio_dma_buf *ddp_desc = NULL;
2321        uint32_t unit_size = 0;
2322
2323        if (!num_buf)
2324                return 0;
2325
2326        if (!buf_size)
2327                return -EINVAL;
2328
2329        INIT_LIST_HEAD(&scm->ddp_freelist);
2330
2331        /* Align buf size to page size */
2332        buf_size = (buf_size + PAGE_SIZE - 1) & PAGE_MASK;
2333        /* Initialize dma descriptors */
2334        for (n = 0; n < num_buf; n++) {
2335                /* Set unit size to request size */
2336                unit_size = buf_size;
2337                ddp_desc = kzalloc(sizeof(struct csio_dma_buf), GFP_KERNEL);
2338                if (!ddp_desc) {
2339                        csio_err(hw,
2340                                 "Failed to allocate ddp descriptors,"
2341                                 " Num allocated = %d.\n",
2342                                 scm->stats.n_free_ddp);
2343                        goto no_mem;
2344                }
2345
2346                /* Allocate Dma buffers for DDP */
2347                ddp_desc->vaddr = dma_alloc_coherent(&hw->pdev->dev, unit_size,
2348                                &ddp_desc->paddr, GFP_KERNEL);
2349                if (!ddp_desc->vaddr) {
2350                        csio_err(hw,
2351                                 "SCSI response DMA buffer (ddp) allocation"
2352                                 " failed!\n");
2353                        kfree(ddp_desc);
2354                        goto no_mem;
2355                }
2356
2357                ddp_desc->len = unit_size;
2358
2359                /* Add it to the SCSI DDP freelist */
2360                list_add_tail(&ddp_desc->list, &scm->ddp_freelist);
2361                CSIO_INC_STATS(scm, n_free_ddp);
2362        }
2363
2364        return 0;
2365no_mem:
2366        /* release dma descs back to freelist and free dma memory */
2367        list_for_each(tmp, &scm->ddp_freelist) {
2368                ddp_desc = (struct csio_dma_buf *) tmp;
2369                tmp = csio_list_prev(tmp);
2370                dma_free_coherent(&hw->pdev->dev, ddp_desc->len,
2371                                  ddp_desc->vaddr, ddp_desc->paddr);
2372                list_del_init(&ddp_desc->list);
2373                kfree(ddp_desc);
2374        }
2375        scm->stats.n_free_ddp = 0;
2376
2377        return -ENOMEM;
2378}
2379
2380/*
2381 * csio_scsi_free_ddp_bufs - free DDP buffers of unaligned SGLs.
2382 * @scm: SCSI Module
2383 * @hw: HW device.
2384 *
2385 * This routine frees ddp buffers.
2386 */
2387static void
2388csio_scsi_free_ddp_bufs(struct csio_scsim *scm, struct csio_hw *hw)
2389{
2390        struct list_head *tmp;
2391        struct csio_dma_buf *ddp_desc;
2392
2393        /* release dma descs back to freelist and free dma memory */
2394        list_for_each(tmp, &scm->ddp_freelist) {
2395                ddp_desc = (struct csio_dma_buf *) tmp;
2396                tmp = csio_list_prev(tmp);
2397                dma_free_coherent(&hw->pdev->dev, ddp_desc->len,
2398                                  ddp_desc->vaddr, ddp_desc->paddr);
2399                list_del_init(&ddp_desc->list);
2400                kfree(ddp_desc);
2401        }
2402        scm->stats.n_free_ddp = 0;
2403}
2404
2405/**
2406 * csio_scsim_init - Initialize SCSI Module
2407 * @scm:        SCSI Module
2408 * @hw:         HW module
2409 *
2410 */
2411int
2412csio_scsim_init(struct csio_scsim *scm, struct csio_hw *hw)
2413{
2414        int i;
2415        struct csio_ioreq *ioreq;
2416        struct csio_dma_buf *dma_buf;
2417
2418        INIT_LIST_HEAD(&scm->active_q);
2419        scm->hw = hw;
2420
2421        scm->proto_cmd_len = sizeof(struct fcp_cmnd);
2422        scm->proto_rsp_len = CSIO_SCSI_RSP_LEN;
2423        scm->max_sge = CSIO_SCSI_MAX_SGE;
2424
2425        spin_lock_init(&scm->freelist_lock);
2426
2427        /* Pre-allocate ioreqs and initialize them */
2428        INIT_LIST_HEAD(&scm->ioreq_freelist);
2429        for (i = 0; i < csio_scsi_ioreqs; i++) {
2430
2431                ioreq = kzalloc(sizeof(struct csio_ioreq), GFP_KERNEL);
2432                if (!ioreq) {
2433                        csio_err(hw,
2434                                 "I/O request element allocation failed, "
2435                                 " Num allocated = %d.\n",
2436                                 scm->stats.n_free_ioreq);
2437
2438                        goto free_ioreq;
2439                }
2440
2441                /* Allocate Dma buffers for Response Payload */
2442                dma_buf = &ioreq->dma_buf;
2443                dma_buf->vaddr = dma_pool_alloc(hw->scsi_dma_pool, GFP_KERNEL,
2444                                                &dma_buf->paddr);
2445                if (!dma_buf->vaddr) {
2446                        csio_err(hw,
2447                                 "SCSI response DMA buffer allocation"
2448                                 " failed!\n");
2449                        kfree(ioreq);
2450                        goto free_ioreq;
2451                }
2452
2453                dma_buf->len = scm->proto_rsp_len;
2454
2455                /* Set state to uninit */
2456                csio_init_state(&ioreq->sm, csio_scsis_uninit);
2457                INIT_LIST_HEAD(&ioreq->gen_list);
2458                init_completion(&ioreq->cmplobj);
2459
2460                list_add_tail(&ioreq->sm.sm_list, &scm->ioreq_freelist);
2461                CSIO_INC_STATS(scm, n_free_ioreq);
2462        }
2463
2464        if (csio_scsi_alloc_ddp_bufs(scm, hw, PAGE_SIZE, csio_ddp_descs))
2465                goto free_ioreq;
2466
2467        return 0;
2468
2469free_ioreq:
2470        /*
2471         * Free up existing allocations, since an error
2472         * from here means we are returning for good
2473         */
2474        while (!list_empty(&scm->ioreq_freelist)) {
2475                struct csio_sm *tmp;
2476
2477                tmp = list_first_entry(&scm->ioreq_freelist,
2478                                       struct csio_sm, sm_list);
2479                list_del_init(&tmp->sm_list);
2480                ioreq = (struct csio_ioreq *)tmp;
2481
2482                dma_buf = &ioreq->dma_buf;
2483                dma_pool_free(hw->scsi_dma_pool, dma_buf->vaddr,
2484                              dma_buf->paddr);
2485
2486                kfree(ioreq);
2487        }
2488
2489        scm->stats.n_free_ioreq = 0;
2490
2491        return -ENOMEM;
2492}
2493
2494/**
2495 * csio_scsim_exit - Uninitialize SCSI Module
2496 * @scm: SCSI Module
2497 *
2498 */
2499void
2500csio_scsim_exit(struct csio_scsim *scm)
2501{
2502        struct csio_ioreq *ioreq;
2503        struct csio_dma_buf *dma_buf;
2504
2505        while (!list_empty(&scm->ioreq_freelist)) {
2506                struct csio_sm *tmp;
2507
2508                tmp = list_first_entry(&scm->ioreq_freelist,
2509                                       struct csio_sm, sm_list);
2510                list_del_init(&tmp->sm_list);
2511                ioreq = (struct csio_ioreq *)tmp;
2512
2513                dma_buf = &ioreq->dma_buf;
2514                dma_pool_free(scm->hw->scsi_dma_pool, dma_buf->vaddr,
2515                              dma_buf->paddr);
2516
2517                kfree(ioreq);
2518        }
2519
2520        scm->stats.n_free_ioreq = 0;
2521
2522        csio_scsi_free_ddp_bufs(scm, scm->hw);
2523}
2524