linux/drivers/scsi/fnic/fnic_scsi.c
/*
 * Copyright 2008 Cisco Systems, Inc.  All rights reserved.
 * Copyright 2007 Nuova Systems, Inc.  All rights reserved.
 *
 * This program is free software; you may redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; version 2 of the License.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include <linux/mempool.h>
#include <linux/errno.h>
#include <linux/init.h>
#include <linux/workqueue.h>
#include <linux/pci.h>
#include <linux/scatterlist.h>
#include <linux/skbuff.h>
#include <linux/spinlock.h>
#include <linux/etherdevice.h>
#include <linux/if_ether.h>
#include <linux/if_vlan.h>
#include <linux/delay.h>
#include <linux/gfp.h>
#include <scsi/scsi.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_tcq.h>
#include <scsi/fc/fc_els.h>
#include <scsi/fc/fc_fcoe.h>
#include <scsi/libfc.h>
#include <scsi/fc_frame.h>
#include "fnic_io.h"
#include "fnic.h"

const char *fnic_state_str[] = {
        [FNIC_IN_FC_MODE] =           "FNIC_IN_FC_MODE",
        [FNIC_IN_FC_TRANS_ETH_MODE] = "FNIC_IN_FC_TRANS_ETH_MODE",
        [FNIC_IN_ETH_MODE] =          "FNIC_IN_ETH_MODE",
        [FNIC_IN_ETH_TRANS_FC_MODE] = "FNIC_IN_ETH_TRANS_FC_MODE",
};

static const char *fnic_ioreq_state_str[] = {
        [FNIC_IOREQ_NOT_INITED] = "FNIC_IOREQ_NOT_INITED",
        [FNIC_IOREQ_CMD_PENDING] = "FNIC_IOREQ_CMD_PENDING",
        [FNIC_IOREQ_ABTS_PENDING] = "FNIC_IOREQ_ABTS_PENDING",
        [FNIC_IOREQ_ABTS_COMPLETE] = "FNIC_IOREQ_ABTS_COMPLETE",
        [FNIC_IOREQ_CMD_COMPLETE] = "FNIC_IOREQ_CMD_COMPLETE",
};
static const char *fcpio_status_str[] = {
        [FCPIO_SUCCESS] = "FCPIO_SUCCESS", /*0x0*/
        [FCPIO_INVALID_HEADER] = "FCPIO_INVALID_HEADER",
        [FCPIO_OUT_OF_RESOURCE] = "FCPIO_OUT_OF_RESOURCE",
        [FCPIO_INVALID_PARAM] = "FCPIO_INVALID_PARAM",
        [FCPIO_REQ_NOT_SUPPORTED] = "FCPIO_REQ_NOT_SUPPORTED",
        [FCPIO_IO_NOT_FOUND] = "FCPIO_IO_NOT_FOUND",
        [FCPIO_ABORTED] = "FCPIO_ABORTED", /*0x41*/
        [FCPIO_TIMEOUT] = "FCPIO_TIMEOUT",
        [FCPIO_SGL_INVALID] = "FCPIO_SGL_INVALID",
        [FCPIO_MSS_INVALID] = "FCPIO_MSS_INVALID",
        [FCPIO_DATA_CNT_MISMATCH] = "FCPIO_DATA_CNT_MISMATCH",
        [FCPIO_FW_ERR] = "FCPIO_FW_ERR",
        [FCPIO_ITMF_REJECTED] = "FCPIO_ITMF_REJECTED",
        [FCPIO_ITMF_FAILED] = "FCPIO_ITMF_FAILED",
        [FCPIO_ITMF_INCORRECT_LUN] = "FCPIO_ITMF_INCORRECT_LUN",
        [FCPIO_CMND_REJECTED] = "FCPIO_CMND_REJECTED",
        [FCPIO_NO_PATH_AVAIL] = "FCPIO_NO_PATH_AVAIL",
        [FCPIO_PATH_FAILED] = "FCPIO_PATH_FAILED",
        [FCPIO_LUNMAP_CHNG_PEND] = "FCPIO_LUNMAP_CHNG_PEND",
};

const char *fnic_state_to_str(unsigned int state)
{
        if (state >= ARRAY_SIZE(fnic_state_str) || !fnic_state_str[state])
                return "unknown";

        return fnic_state_str[state];
}

static const char *fnic_ioreq_state_to_str(unsigned int state)
{
        if (state >= ARRAY_SIZE(fnic_ioreq_state_str) ||
            !fnic_ioreq_state_str[state])
                return "unknown";

        return fnic_ioreq_state_str[state];
}

static const char *fnic_fcpio_status_to_str(unsigned int status)
{
        if (status >= ARRAY_SIZE(fcpio_status_str) || !fcpio_status_str[status])
                return "unknown";

        return fcpio_status_str[status];
}

static void fnic_cleanup_io(struct fnic *fnic, int exclude_id);

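/*
 * Per-IO state is protected by a small set of hashed locks: the block-layer
 * tag is masked with (FNIC_IO_LOCKS - 1), which assumes FNIC_IO_LOCKS is a
 * power of two, to pick one of the io_req_lock spinlocks. Commands whose
 * tags hash to the same bucket share a lock.
 */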
static inline spinlock_t *fnic_io_lock_hash(struct fnic *fnic,
                                            struct scsi_cmnd *sc)
{
        u32 hash = sc->request->tag & (FNIC_IO_LOCKS - 1);

        return &fnic->io_req_lock[hash];
}

static inline spinlock_t *fnic_io_lock_tag(struct fnic *fnic,
                                           int tag)
{
        return &fnic->io_req_lock[tag & (FNIC_IO_LOCKS - 1)];
}

/*
 * Unmap the data buffer and sense buffer for an io_req,
 * also unmap and free the device-private scatter/gather list.
 */
static void fnic_release_ioreq_buf(struct fnic *fnic,
                                   struct fnic_io_req *io_req,
                                   struct scsi_cmnd *sc)
{
        if (io_req->sgl_list_pa)
                dma_unmap_single(&fnic->pdev->dev, io_req->sgl_list_pa,
                                 sizeof(io_req->sgl_list[0]) * io_req->sgl_cnt,
                                 DMA_TO_DEVICE);
        scsi_dma_unmap(sc);

        if (io_req->sgl_cnt)
                mempool_free(io_req->sgl_list_alloc,
                             fnic->io_sgl_pool[io_req->sgl_type]);
        if (io_req->sense_buf_pa)
                dma_unmap_single(&fnic->pdev->dev, io_req->sense_buf_pa,
                                 SCSI_SENSE_BUFFERSIZE, DMA_FROM_DEVICE);
}

/* Free up Copy Wq descriptors. Called with copy_wq lock held */
static int free_wq_copy_descs(struct fnic *fnic, struct vnic_wq_copy *wq)
{
        /* if no Ack received from firmware, then nothing to clean */
        if (!fnic->fw_ack_recd[0])
                return 1;

        /*
         * Update desc_available count based on number of freed descriptors
         * Account for wraparound
         */
        if (wq->to_clean_index <= fnic->fw_ack_index[0])
                wq->ring.desc_avail += (fnic->fw_ack_index[0]
                                        - wq->to_clean_index + 1);
        else
                wq->ring.desc_avail += (wq->ring.desc_count
                                        - wq->to_clean_index
                                        + fnic->fw_ack_index[0] + 1);
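        /*
         * Worked example with hypothetical numbers: desc_count = 8,
         * to_clean_index = 6, fw_ack_index = 1. The wraparound branch above
         * frees 8 - 6 + 1 + 1 = 4 descriptors (ring slots 6, 7, 0 and 1).
         */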

        /*
         * just bump clean index to ack_index+1 accounting for wraparound
         * this will essentially free up all descriptors between
         * to_clean_index and fw_ack_index, both inclusive
         */
        wq->to_clean_index =
                (fnic->fw_ack_index[0] + 1) % wq->ring.desc_count;

        /* we have processed the acks received so far */
        fnic->fw_ack_recd[0] = 0;
        return 0;
}


/**
 * __fnic_set_state_flags
 * Sets/Clears bits in fnic's state_flags
 */
void
__fnic_set_state_flags(struct fnic *fnic, unsigned long st_flags,
                       unsigned long clearbits)
{
        unsigned long flags = 0;
        unsigned long host_lock_flags = 0;

        spin_lock_irqsave(&fnic->fnic_lock, flags);
        spin_lock_irqsave(fnic->lport->host->host_lock, host_lock_flags);

        if (clearbits)
                fnic->state_flags &= ~st_flags;
        else
                fnic->state_flags |= st_flags;

        spin_unlock_irqrestore(fnic->lport->host->host_lock, host_lock_flags);
        spin_unlock_irqrestore(&fnic->fnic_lock, flags);
}


/*
 * fnic_fw_reset_handler
 * Routine to send reset msg to fw
 */
int fnic_fw_reset_handler(struct fnic *fnic)
{
        struct vnic_wq_copy *wq = &fnic->wq_copy[0];
        int ret = 0;
        unsigned long flags;

        /* indicate fwreset to io path */
        fnic_set_state_flags(fnic, FNIC_FLAGS_FWRESET);

        skb_queue_purge(&fnic->frame_queue);
        skb_queue_purge(&fnic->tx_queue);

        /* wait for io cmpl */
        while (atomic_read(&fnic->in_flight))
                schedule_timeout(msecs_to_jiffies(1));

        spin_lock_irqsave(&fnic->wq_copy_lock[0], flags);

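        /*
         * Lazily reclaim descriptors the firmware has already acked once
         * the free count drops to the low watermark; the same
         * cleanup-before-post pattern precedes every copy WQ enqueue in
         * this file.
         */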
        if (vnic_wq_copy_desc_avail(wq) <= fnic->wq_copy_desc_low[0])
                free_wq_copy_descs(fnic, wq);

        if (!vnic_wq_copy_desc_avail(wq)) {
                ret = -EAGAIN;
        } else {
                fnic_queue_wq_copy_desc_fw_reset(wq, SCSI_NO_TAG);
                atomic64_inc(&fnic->fnic_stats.fw_stats.active_fw_reqs);
                if (atomic64_read(&fnic->fnic_stats.fw_stats.active_fw_reqs) >
                          atomic64_read(&fnic->fnic_stats.fw_stats.max_fw_reqs))
                        atomic64_set(&fnic->fnic_stats.fw_stats.max_fw_reqs,
                                atomic64_read(
                                  &fnic->fnic_stats.fw_stats.active_fw_reqs));
        }

        spin_unlock_irqrestore(&fnic->wq_copy_lock[0], flags);

        if (!ret) {
                atomic64_inc(&fnic->fnic_stats.reset_stats.fw_resets);
                FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
                              "Issued fw reset\n");
        } else {
                fnic_clear_state_flags(fnic, FNIC_FLAGS_FWRESET);
                FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
                              "Failed to issue fw reset\n");
        }

        return ret;
}


/*
 * fnic_flogi_reg_handler
 * Routine to send flogi register msg to fw
 */
int fnic_flogi_reg_handler(struct fnic *fnic, u32 fc_id)
{
        struct vnic_wq_copy *wq = &fnic->wq_copy[0];
        enum fcpio_flogi_reg_format_type format;
        struct fc_lport *lp = fnic->lport;
        u8 gw_mac[ETH_ALEN];
        int ret = 0;
        unsigned long flags;

        spin_lock_irqsave(&fnic->wq_copy_lock[0], flags);

        if (vnic_wq_copy_desc_avail(wq) <= fnic->wq_copy_desc_low[0])
                free_wq_copy_descs(fnic, wq);

        if (!vnic_wq_copy_desc_avail(wq)) {
                ret = -EAGAIN;
                goto flogi_reg_ioreq_end;
        }

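        /*
         * map_dest set typically means no gateway MAC has been discovered,
         * so registration falls back to the default (FC-MAP style)
         * destination with a broadcast gateway MAC; otherwise the learned
         * FCF/gateway address is registered.
         */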
        if (fnic->ctlr.map_dest) {
                eth_broadcast_addr(gw_mac);
                format = FCPIO_FLOGI_REG_DEF_DEST;
        } else {
                memcpy(gw_mac, fnic->ctlr.dest_addr, ETH_ALEN);
                format = FCPIO_FLOGI_REG_GW_DEST;
        }

        if ((fnic->config.flags & VFCF_FIP_CAPABLE) && !fnic->ctlr.map_dest) {
                fnic_queue_wq_copy_desc_fip_reg(wq, SCSI_NO_TAG,
                                                fc_id, gw_mac,
                                                fnic->data_src_addr,
                                                lp->r_a_tov, lp->e_d_tov);
                FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
                              "FLOGI FIP reg issued fcid %x src %pM dest %pM\n",
                              fc_id, fnic->data_src_addr, gw_mac);
        } else {
                fnic_queue_wq_copy_desc_flogi_reg(wq, SCSI_NO_TAG,
                                                  format, fc_id, gw_mac);
                FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
                              "FLOGI reg issued fcid %x map %d dest %pM\n",
                              fc_id, fnic->ctlr.map_dest, gw_mac);
        }

        atomic64_inc(&fnic->fnic_stats.fw_stats.active_fw_reqs);
        if (atomic64_read(&fnic->fnic_stats.fw_stats.active_fw_reqs) >
                  atomic64_read(&fnic->fnic_stats.fw_stats.max_fw_reqs))
                atomic64_set(&fnic->fnic_stats.fw_stats.max_fw_reqs,
                  atomic64_read(&fnic->fnic_stats.fw_stats.active_fw_reqs));

flogi_reg_ioreq_end:
        spin_unlock_irqrestore(&fnic->wq_copy_lock[0], flags);
        return ret;
}

/*
 * fnic_queue_wq_copy_desc
 * Routine to enqueue a wq copy desc
 */
static inline int fnic_queue_wq_copy_desc(struct fnic *fnic,
                                          struct vnic_wq_copy *wq,
                                          struct fnic_io_req *io_req,
                                          struct scsi_cmnd *sc,
                                          int sg_count)
{
        struct scatterlist *sg;
        struct fc_rport *rport = starget_to_rport(scsi_target(sc->device));
        struct fc_rport_libfc_priv *rp = rport->dd_data;
        struct host_sg_desc *desc;
        struct misc_stats *misc_stats = &fnic->fnic_stats.misc_stats;
        unsigned int i;
        unsigned long intr_flags;
        int flags;
        u8 exch_flags;
        struct scsi_lun fc_lun;

        if (sg_count) {
                /* For each SGE, create a device desc entry */
                desc = io_req->sgl_list;
                for_each_sg(scsi_sglist(sc), sg, sg_count, i) {
                        desc->addr = cpu_to_le64(sg_dma_address(sg));
                        desc->len = cpu_to_le32(sg_dma_len(sg));
                        desc->_resvd = 0;
                        desc++;
                }

                io_req->sgl_list_pa = dma_map_single(&fnic->pdev->dev,
                                io_req->sgl_list,
                                sizeof(io_req->sgl_list[0]) * sg_count,
                                DMA_TO_DEVICE);
                if (dma_mapping_error(&fnic->pdev->dev, io_req->sgl_list_pa)) {
                        printk(KERN_ERR "DMA mapping failed\n");
                        return SCSI_MLQUEUE_HOST_BUSY;
                }
        }

        io_req->sense_buf_pa = dma_map_single(&fnic->pdev->dev,
                                              sc->sense_buffer,
                                              SCSI_SENSE_BUFFERSIZE,
                                              DMA_FROM_DEVICE);
        if (dma_mapping_error(&fnic->pdev->dev, io_req->sense_buf_pa)) {
                dma_unmap_single(&fnic->pdev->dev, io_req->sgl_list_pa,
                                sizeof(io_req->sgl_list[0]) * sg_count,
                                DMA_TO_DEVICE);
                printk(KERN_ERR "DMA mapping failed\n");
                return SCSI_MLQUEUE_HOST_BUSY;
        }

        int_to_scsilun(sc->device->lun, &fc_lun);

        /* Enqueue the descriptor in the Copy WQ */
        spin_lock_irqsave(&fnic->wq_copy_lock[0], intr_flags);

        if (vnic_wq_copy_desc_avail(wq) <= fnic->wq_copy_desc_low[0])
                free_wq_copy_descs(fnic, wq);

        if (unlikely(!vnic_wq_copy_desc_avail(wq))) {
                spin_unlock_irqrestore(&fnic->wq_copy_lock[0], intr_flags);
                FNIC_SCSI_DBG(KERN_INFO, fnic->lport->host,
                          "fnic_queue_wq_copy_desc failure - no descriptors\n");
                atomic64_inc(&misc_stats->io_cpwq_alloc_failures);
                return SCSI_MLQUEUE_HOST_BUSY;
        }

        flags = 0;
        if (sc->sc_data_direction == DMA_FROM_DEVICE)
                flags = FCPIO_ICMND_RDDATA;
        else if (sc->sc_data_direction == DMA_TO_DEVICE)
                flags = FCPIO_ICMND_WRDATA;

        exch_flags = 0;
        if ((fnic->config.flags & VFCF_FCP_SEQ_LVL_ERR) &&
            (rp->flags & FC_RP_FLAGS_RETRY))
                exch_flags |= FCPIO_ICMND_SRFLAG_RETRY;

        fnic_queue_wq_copy_desc_icmnd_16(wq, sc->request->tag,
                                         0, exch_flags, io_req->sgl_cnt,
                                         SCSI_SENSE_BUFFERSIZE,
                                         io_req->sgl_list_pa,
                                         io_req->sense_buf_pa,
                                         0, /* scsi cmd ref, always 0 */
                                         FCPIO_ICMND_PTA_SIMPLE,
                                                /* scsi pri and tag */
                                         flags, /* command flags */
                                         sc->cmnd, sc->cmd_len,
                                         scsi_bufflen(sc),
                                         fc_lun.scsi_lun, io_req->port_id,
                                         rport->maxframe_size, rp->r_a_tov,
                                         rp->e_d_tov);

        atomic64_inc(&fnic->fnic_stats.fw_stats.active_fw_reqs);
        if (atomic64_read(&fnic->fnic_stats.fw_stats.active_fw_reqs) >
                  atomic64_read(&fnic->fnic_stats.fw_stats.max_fw_reqs))
                atomic64_set(&fnic->fnic_stats.fw_stats.max_fw_reqs,
                  atomic64_read(&fnic->fnic_stats.fw_stats.active_fw_reqs));

        spin_unlock_irqrestore(&fnic->wq_copy_lock[0], intr_flags);
        return 0;
}

/*
 * fnic_queuecommand
 * Routine to send a scsi cdb
 * Called with host_lock held and interrupts disabled.
 */
static int fnic_queuecommand_lck(struct scsi_cmnd *sc, void (*done)(struct scsi_cmnd *))
{
        struct fc_lport *lp = shost_priv(sc->device->host);
        struct fc_rport *rport;
        struct fnic_io_req *io_req = NULL;
        struct fnic *fnic = lport_priv(lp);
        struct fnic_stats *fnic_stats = &fnic->fnic_stats;
        struct vnic_wq_copy *wq;
        int ret;
        u64 cmd_trace;
        int sg_count = 0;
        unsigned long flags = 0;
        unsigned long ptr;
        spinlock_t *io_lock = NULL;
        int io_lock_acquired = 0;
        struct fc_rport_libfc_priv *rp;

        if (unlikely(fnic_chk_state_flags_locked(fnic, FNIC_FLAGS_IO_BLOCKED)))
                return SCSI_MLQUEUE_HOST_BUSY;

        if (unlikely(fnic_chk_state_flags_locked(fnic, FNIC_FLAGS_FWRESET)))
                return SCSI_MLQUEUE_HOST_BUSY;

        rport = starget_to_rport(scsi_target(sc->device));
        if (!rport) {
                FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
                                "returning DID_NO_CONNECT for IO as rport is NULL\n");
                sc->result = DID_NO_CONNECT << 16;
                done(sc);
                return 0;
        }

        ret = fc_remote_port_chkready(rport);
        if (ret) {
                FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
                                "rport is not ready\n");
                atomic64_inc(&fnic_stats->misc_stats.rport_not_ready);
                sc->result = ret;
                done(sc);
                return 0;
        }

        rp = rport->dd_data;
        if (!rp || rp->rp_state == RPORT_ST_DELETE) {
                FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
                        "rport 0x%x removed, returning DID_NO_CONNECT\n",
                        rport->port_id);

                atomic64_inc(&fnic_stats->misc_stats.rport_not_ready);
                sc->result = DID_NO_CONNECT << 16;
                done(sc);
                return 0;
        }

        if (rp->rp_state != RPORT_ST_READY) {
                FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
                        "rport 0x%x in state 0x%x, returning DID_IMM_RETRY\n",
                        rport->port_id, rp->rp_state);

                sc->result = DID_IMM_RETRY << 16;
                done(sc);
                return 0;
        }

        if (lp->state != LPORT_ST_READY || !(lp->link_up))
                return SCSI_MLQUEUE_HOST_BUSY;

        atomic_inc(&fnic->in_flight);

        /*
         * Release host lock, use driver resource specific locks from here.
         * Don't re-enable interrupts in case they were disabled prior to the
         * caller disabling them.
         */
        spin_unlock(lp->host->host_lock);
        CMD_STATE(sc) = FNIC_IOREQ_NOT_INITED;
        CMD_FLAGS(sc) = FNIC_NO_FLAGS;

        /* Get a new io_req for this SCSI IO */
        io_req = mempool_alloc(fnic->io_req_pool, GFP_ATOMIC);
        if (!io_req) {
                atomic64_inc(&fnic_stats->io_stats.alloc_failures);
                ret = SCSI_MLQUEUE_HOST_BUSY;
                goto out;
        }
        memset(io_req, 0, sizeof(*io_req));

        /* Map the data buffer */
        sg_count = scsi_dma_map(sc);
        if (sg_count < 0) {
                FNIC_TRACE(fnic_queuecommand, sc->device->host->host_no,
                          sc->request->tag, sc, 0, sc->cmnd[0],
                          sg_count, CMD_STATE(sc));
                mempool_free(io_req, fnic->io_req_pool);
                ret = SCSI_MLQUEUE_HOST_BUSY;
                goto out;
        }

        /* Determine the type of scatter/gather list we need */
        io_req->sgl_cnt = sg_count;
        io_req->sgl_type = FNIC_SGL_CACHE_DFLT;
        if (sg_count > FNIC_DFLT_SG_DESC_CNT)
                io_req->sgl_type = FNIC_SGL_CACHE_MAX;

        if (sg_count) {
                io_req->sgl_list =
                        mempool_alloc(fnic->io_sgl_pool[io_req->sgl_type],
                                      GFP_ATOMIC);
                if (!io_req->sgl_list) {
                        atomic64_inc(&fnic_stats->io_stats.alloc_failures);
                        ret = SCSI_MLQUEUE_HOST_BUSY;
                        scsi_dma_unmap(sc);
                        mempool_free(io_req, fnic->io_req_pool);
                        goto out;
                }

                /* Cache sgl list allocated address before alignment */
                io_req->sgl_list_alloc = io_req->sgl_list;
                ptr = (unsigned long) io_req->sgl_list;
                if (ptr % FNIC_SG_DESC_ALIGN) {
                        io_req->sgl_list = (struct host_sg_desc *)
                                (((unsigned long) ptr
                                  + FNIC_SG_DESC_ALIGN - 1)
                                 & ~(FNIC_SG_DESC_ALIGN - 1));
                }
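                /*
                 * Round-up-to-alignment sketch, assuming for illustration
                 * that FNIC_SG_DESC_ALIGN is 16: a ptr of 0x1008 becomes
                 * (0x1008 + 15) & ~15 == 0x1010.
                 */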
        }

        /*
         * Acquire the io lock before marking the IO initialized.
         */

        io_lock = fnic_io_lock_hash(fnic, sc);
        spin_lock_irqsave(io_lock, flags);

        /* initialize rest of io_req */
        io_lock_acquired = 1;
        io_req->port_id = rport->port_id;
        io_req->start_time = jiffies;
        CMD_STATE(sc) = FNIC_IOREQ_CMD_PENDING;
        CMD_SP(sc) = (char *)io_req;
        CMD_FLAGS(sc) |= FNIC_IO_INITIALIZED;
        sc->scsi_done = done;

        /* create copy wq desc and enqueue it */
        wq = &fnic->wq_copy[0];
        ret = fnic_queue_wq_copy_desc(fnic, wq, io_req, sc, sg_count);
        if (ret) {
                /*
                 * In case another thread cancelled the request,
                 * refetch the pointer under the lock.
                 */
                FNIC_TRACE(fnic_queuecommand, sc->device->host->host_no,
                          sc->request->tag, sc, 0, 0, 0,
                          (((u64)CMD_FLAGS(sc) << 32) | CMD_STATE(sc)));
                io_req = (struct fnic_io_req *)CMD_SP(sc);
                CMD_SP(sc) = NULL;
                CMD_STATE(sc) = FNIC_IOREQ_CMD_COMPLETE;
                spin_unlock_irqrestore(io_lock, flags);
                if (io_req) {
                        fnic_release_ioreq_buf(fnic, io_req, sc);
                        mempool_free(io_req, fnic->io_req_pool);
                }
                atomic_dec(&fnic->in_flight);
                /* acquire host lock before returning to SCSI */
                spin_lock(lp->host->host_lock);
                return ret;
        } else {
                atomic64_inc(&fnic_stats->io_stats.active_ios);
                atomic64_inc(&fnic_stats->io_stats.num_ios);
                if (atomic64_read(&fnic_stats->io_stats.active_ios) >
                          atomic64_read(&fnic_stats->io_stats.max_active_ios))
                        atomic64_set(&fnic_stats->io_stats.max_active_ios,
                             atomic64_read(&fnic_stats->io_stats.active_ios));

                /* REVISIT: Use per IO lock in the final code */
                CMD_FLAGS(sc) |= FNIC_IO_ISSUED;
        }
out:
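        /* Pack the opcode and key CDB bytes into one u64 for the trace ring. */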
        cmd_trace = ((u64)sc->cmnd[0] << 56 | (u64)sc->cmnd[7] << 40 |
                        (u64)sc->cmnd[8] << 32 | (u64)sc->cmnd[2] << 24 |
                        (u64)sc->cmnd[3] << 16 | (u64)sc->cmnd[4] << 8 |
                        sc->cmnd[5]);

        FNIC_TRACE(fnic_queuecommand, sc->device->host->host_no,
                  sc->request->tag, sc, io_req,
                  sg_count, cmd_trace,
                  (((u64)CMD_FLAGS(sc) << 32) | CMD_STATE(sc)));

        /* only if we issued the IO do we hold the io lock */
        if (io_lock_acquired)
                spin_unlock_irqrestore(io_lock, flags);

        atomic_dec(&fnic->in_flight);
        /* acquire host lock before returning to SCSI */
        spin_lock(lp->host->host_lock);
        return ret;
}

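/*
 * DEF_SCSI_QCMD() (from <scsi/scsi_host.h>) generates the
 * fnic_queuecommand() wrapper, which takes host_lock with interrupts
 * disabled around fnic_queuecommand_lck(), satisfying the locking
 * contract noted above.
 */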
DEF_SCSI_QCMD(fnic_queuecommand)

/*
 * fnic_fcpio_fw_reset_cmpl_handler
 * Routine to handle fw reset completion
 */
static int fnic_fcpio_fw_reset_cmpl_handler(struct fnic *fnic,
                                            struct fcpio_fw_req *desc)
{
        u8 type;
        u8 hdr_status;
        struct fcpio_tag tag;
        int ret = 0;
        unsigned long flags;
        struct reset_stats *reset_stats = &fnic->fnic_stats.reset_stats;

        fcpio_header_dec(&desc->hdr, &type, &hdr_status, &tag);

        atomic64_inc(&reset_stats->fw_reset_completions);

        /* Clean up all outstanding io requests */
        fnic_cleanup_io(fnic, SCSI_NO_TAG);

        atomic64_set(&fnic->fnic_stats.fw_stats.active_fw_reqs, 0);
        atomic64_set(&fnic->fnic_stats.io_stats.active_ios, 0);
        atomic64_set(&fnic->io_cmpl_skip, 0);

        spin_lock_irqsave(&fnic->fnic_lock, flags);

        /* fnic should be in FC_TRANS_ETH_MODE */
        if (fnic->state == FNIC_IN_FC_TRANS_ETH_MODE) {
                /* Check status of reset completion */
                if (!hdr_status) {
                        FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
                                      "reset cmpl success\n");
                        /* Ready to send flogi out */
                        fnic->state = FNIC_IN_ETH_MODE;
                } else {
                        FNIC_SCSI_DBG(KERN_DEBUG,
                                      fnic->lport->host,
                                      "fnic fw_reset: failed %s\n",
                                      fnic_fcpio_status_to_str(hdr_status));

                        /*
                         * Unable to change to eth mode, cannot send out flogi
                         * Change state to fc mode, so that subsequent Flogi
                         * requests from libFC will cause more attempts to
                         * reset the firmware. Free the cached flogi
                         */
                        fnic->state = FNIC_IN_FC_MODE;
                        atomic64_inc(&reset_stats->fw_reset_failures);
                        ret = -1;
                }
        } else {
                FNIC_SCSI_DBG(KERN_DEBUG,
                              fnic->lport->host,
                              "Unexpected state %s while processing reset cmpl\n",
                              fnic_state_to_str(fnic->state));
                atomic64_inc(&reset_stats->fw_reset_failures);
                ret = -1;
        }

        /* Thread removing device blocks till firmware reset is complete */
        if (fnic->remove_wait)
                complete(fnic->remove_wait);

        /*
         * If fnic is being removed, or fw reset failed
         * free the flogi frame. Else, send it out
         */
        if (fnic->remove_wait || ret) {
                spin_unlock_irqrestore(&fnic->fnic_lock, flags);
                skb_queue_purge(&fnic->tx_queue);
                goto reset_cmpl_handler_end;
        }

        spin_unlock_irqrestore(&fnic->fnic_lock, flags);

        fnic_flush_tx(fnic);

 reset_cmpl_handler_end:
        fnic_clear_state_flags(fnic, FNIC_FLAGS_FWRESET);

        return ret;
}

/*
 * fnic_fcpio_flogi_reg_cmpl_handler
 * Routine to handle flogi register completion
 */
static int fnic_fcpio_flogi_reg_cmpl_handler(struct fnic *fnic,
                                             struct fcpio_fw_req *desc)
{
        u8 type;
        u8 hdr_status;
        struct fcpio_tag tag;
        int ret = 0;
        unsigned long flags;

        fcpio_header_dec(&desc->hdr, &type, &hdr_status, &tag);

        /* Update fnic state based on status of flogi reg completion */
        spin_lock_irqsave(&fnic->fnic_lock, flags);

        if (fnic->state == FNIC_IN_ETH_TRANS_FC_MODE) {

                /* Check flogi registration completion status */
                if (!hdr_status) {
                        FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
                                      "flogi reg succeeded\n");
                        fnic->state = FNIC_IN_FC_MODE;
                } else {
                        FNIC_SCSI_DBG(KERN_DEBUG,
                                      fnic->lport->host,
                                      "fnic flogi reg: failed %s\n",
                                      fnic_fcpio_status_to_str(hdr_status));
                        fnic->state = FNIC_IN_ETH_MODE;
                        ret = -1;
                }
        } else {
                FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
                              "Unexpected fnic state %s while processing flogi reg completion\n",
                              fnic_state_to_str(fnic->state));
                ret = -1;
        }

        if (!ret) {
                if (fnic->stop_rx_link_events) {
                        spin_unlock_irqrestore(&fnic->fnic_lock, flags);
                        goto reg_cmpl_handler_end;
                }
                spin_unlock_irqrestore(&fnic->fnic_lock, flags);

                fnic_flush_tx(fnic);
                queue_work(fnic_event_queue, &fnic->frame_work);
        } else {
                spin_unlock_irqrestore(&fnic->fnic_lock, flags);
        }

reg_cmpl_handler_end:
        return ret;
}

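/*
 * An acked request_out index is valid only if it lies in the half-open ring
 * interval [to_clean_index, to_use_index); the two branches below cover the
 * unwrapped and wrapped layouts of that interval.
 */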
static inline int is_ack_index_in_range(struct vnic_wq_copy *wq,
                                        u16 request_out)
{
        if (wq->to_clean_index <= wq->to_use_index) {
                /* out of range, stale request_out index */
                if (request_out < wq->to_clean_index ||
                    request_out >= wq->to_use_index)
                        return 0;
        } else {
                /* out of range, stale request_out index */
                if (request_out < wq->to_clean_index &&
                    request_out >= wq->to_use_index)
                        return 0;
        }
        /* request_out index is in range */
        return 1;
}


/*
 * Mark that ack received and store the Ack index. If there are multiple
 * acks received before Tx thread cleans it up, the latest value will be
 * used which is correct behavior. This state should be in the copy Wq
 * instead of in the fnic
 */
static inline void fnic_fcpio_ack_handler(struct fnic *fnic,
                                          unsigned int cq_index,
                                          struct fcpio_fw_req *desc)
{
        struct vnic_wq_copy *wq;
        u16 request_out = desc->u.ack.request_out;
        unsigned long flags;
        u64 *ox_id_tag = (u64 *)(void *)desc;

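        /*
         * Completion queues are laid out as [raw WQs][RQs][copy WQs], so
         * subtracting raw_wq_count and rq_count from cq_index recovers the
         * copy WQ this ack belongs to.
         */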
        /* mark the ack state */
        wq = &fnic->wq_copy[cq_index - fnic->raw_wq_count - fnic->rq_count];
        spin_lock_irqsave(&fnic->wq_copy_lock[0], flags);

        fnic->fnic_stats.misc_stats.last_ack_time = jiffies;
        if (is_ack_index_in_range(wq, request_out)) {
                fnic->fw_ack_index[0] = request_out;
                fnic->fw_ack_recd[0] = 1;
        } else {
                atomic64_inc(
                        &fnic->fnic_stats.misc_stats.ack_index_out_of_range);
        }

        spin_unlock_irqrestore(&fnic->wq_copy_lock[0], flags);
        FNIC_TRACE(fnic_fcpio_ack_handler,
                  fnic->lport->host->host_no, 0, 0, ox_id_tag[2], ox_id_tag[3],
                  ox_id_tag[4], ox_id_tag[5]);
}

/*
 * fnic_fcpio_icmnd_cmpl_handler
 * Routine to handle icmnd completions
 */
static void fnic_fcpio_icmnd_cmpl_handler(struct fnic *fnic,
                                         struct fcpio_fw_req *desc)
{
        u8 type;
        u8 hdr_status;
        struct fcpio_tag tag;
        u32 id;
        u64 xfer_len = 0;
        struct fcpio_icmnd_cmpl *icmnd_cmpl;
        struct fnic_io_req *io_req;
        struct scsi_cmnd *sc;
        struct fnic_stats *fnic_stats = &fnic->fnic_stats;
        unsigned long flags;
        spinlock_t *io_lock;
        u64 cmd_trace;
        unsigned long start_time;
        unsigned long io_duration_time;

        /* Decode the cmpl description to get the io_req id */
        fcpio_header_dec(&desc->hdr, &type, &hdr_status, &tag);
        fcpio_tag_id_dec(&tag, &id);
        icmnd_cmpl = &desc->u.icmnd_cmpl;

        if (id >= fnic->fnic_max_tag_id) {
                shost_printk(KERN_ERR, fnic->lport->host,
                             "Tag out of range tag %x hdr status = %s\n",
                             id, fnic_fcpio_status_to_str(hdr_status));
                return;
        }

        sc = scsi_host_find_tag(fnic->lport->host, id);
        WARN_ON_ONCE(!sc);
        if (!sc) {
                atomic64_inc(&fnic_stats->io_stats.sc_null);
                shost_printk(KERN_ERR, fnic->lport->host,
                          "icmnd_cmpl sc is null - "
                          "hdr status = %s tag = 0x%x desc = 0x%p\n",
                          fnic_fcpio_status_to_str(hdr_status), id, desc);
                FNIC_TRACE(fnic_fcpio_icmnd_cmpl_handler,
                          fnic->lport->host->host_no, id,
                          ((u64)icmnd_cmpl->_resvd0[1] << 16 |
                          (u64)icmnd_cmpl->_resvd0[0]),
                          ((u64)hdr_status << 16 |
                          (u64)icmnd_cmpl->scsi_status << 8 |
                          (u64)icmnd_cmpl->flags), desc,
                          (u64)icmnd_cmpl->residual, 0);
                return;
        }

        io_lock = fnic_io_lock_hash(fnic, sc);
        spin_lock_irqsave(io_lock, flags);
        io_req = (struct fnic_io_req *)CMD_SP(sc);
        WARN_ON_ONCE(!io_req);
        if (!io_req) {
                atomic64_inc(&fnic_stats->io_stats.ioreq_null);
                CMD_FLAGS(sc) |= FNIC_IO_REQ_NULL;
                spin_unlock_irqrestore(io_lock, flags);
                shost_printk(KERN_ERR, fnic->lport->host,
                          "icmnd_cmpl io_req is null - "
                          "hdr status = %s tag = 0x%x sc 0x%p\n",
                          fnic_fcpio_status_to_str(hdr_status), id, sc);
                return;
        }
        start_time = io_req->start_time;

        /* firmware completed the io */
        io_req->io_completed = 1;

        /*
         *  if SCSI-ML has already issued abort on this command,
         *  set completion of the IO. The abts path will clean it up
         */
        if (CMD_STATE(sc) == FNIC_IOREQ_ABTS_PENDING) {

                /*
                 * set the FNIC_IO_DONE so that this doesn't get
                 * flagged as 'out of order' if it was not aborted
                 */
                CMD_FLAGS(sc) |= FNIC_IO_DONE;
                CMD_FLAGS(sc) |= FNIC_IO_ABTS_PENDING;
                if (hdr_status == FCPIO_ABORTED)
                        CMD_FLAGS(sc) |= FNIC_IO_ABORTED;
                spin_unlock_irqrestore(io_lock, flags);

                FNIC_SCSI_DBG(KERN_INFO, fnic->lport->host,
                          "icmnd_cmpl abts pending "
                          "hdr status = %s tag = 0x%x sc = 0x%p "
                          "scsi_status = %x residual = %d\n",
                          fnic_fcpio_status_to_str(hdr_status),
                          id, sc,
                          icmnd_cmpl->scsi_status,
                          icmnd_cmpl->residual);
                return;
        }

        /* Mark the IO as complete */
        CMD_STATE(sc) = FNIC_IOREQ_CMD_COMPLETE;

        switch (hdr_status) {
        case FCPIO_SUCCESS:
                sc->result = (DID_OK << 16) | icmnd_cmpl->scsi_status;
                xfer_len = scsi_bufflen(sc);
                scsi_set_resid(sc, icmnd_cmpl->residual);

                if (icmnd_cmpl->flags & FCPIO_ICMND_CMPL_RESID_UNDER)
                        xfer_len -= icmnd_cmpl->residual;

                if (icmnd_cmpl->scsi_status == SAM_STAT_CHECK_CONDITION)
                        atomic64_inc(&fnic_stats->misc_stats.check_condition);

                if (icmnd_cmpl->scsi_status == SAM_STAT_TASK_SET_FULL)
                        atomic64_inc(&fnic_stats->misc_stats.queue_fulls);
                break;

        case FCPIO_TIMEOUT:          /* request was timed out */
                atomic64_inc(&fnic_stats->misc_stats.fcpio_timeout);
                sc->result = (DID_TIME_OUT << 16) | icmnd_cmpl->scsi_status;
                break;

        case FCPIO_ABORTED:          /* request was aborted */
                atomic64_inc(&fnic_stats->misc_stats.fcpio_aborted);
                sc->result = (DID_ERROR << 16) | icmnd_cmpl->scsi_status;
                break;

        case FCPIO_DATA_CNT_MISMATCH: /* recv/sent more/less data than exp. */
                atomic64_inc(&fnic_stats->misc_stats.data_count_mismatch);
                scsi_set_resid(sc, icmnd_cmpl->residual);
                sc->result = (DID_ERROR << 16) | icmnd_cmpl->scsi_status;
                break;

        case FCPIO_OUT_OF_RESOURCE:  /* out of resources to complete request */
                atomic64_inc(&fnic_stats->fw_stats.fw_out_of_resources);
                sc->result = (DID_REQUEUE << 16) | icmnd_cmpl->scsi_status;
                break;

        case FCPIO_IO_NOT_FOUND:     /* requested I/O was not found */
                atomic64_inc(&fnic_stats->io_stats.io_not_found);
                sc->result = (DID_ERROR << 16) | icmnd_cmpl->scsi_status;
                break;

        case FCPIO_SGL_INVALID:      /* request was aborted due to sgl error */
                atomic64_inc(&fnic_stats->misc_stats.sgl_invalid);
                sc->result = (DID_ERROR << 16) | icmnd_cmpl->scsi_status;
                break;

        case FCPIO_FW_ERR:           /* request was terminated due to fw error */
                atomic64_inc(&fnic_stats->fw_stats.io_fw_errs);
                sc->result = (DID_ERROR << 16) | icmnd_cmpl->scsi_status;
                break;

        case FCPIO_MSS_INVALID:      /* request was aborted due to mss error */
                atomic64_inc(&fnic_stats->misc_stats.mss_invalid);
                sc->result = (DID_ERROR << 16) | icmnd_cmpl->scsi_status;
                break;

        case FCPIO_INVALID_HEADER:   /* header contains invalid data */
        case FCPIO_INVALID_PARAM:    /* some parameter in request invalid */
        case FCPIO_REQ_NOT_SUPPORTED:/* request type is not supported */
        default:
                sc->result = (DID_ERROR << 16) | icmnd_cmpl->scsi_status;
                break;
        }

        /* Break link with the SCSI command */
        CMD_SP(sc) = NULL;
        CMD_FLAGS(sc) |= FNIC_IO_DONE;

        spin_unlock_irqrestore(io_lock, flags);

        if (hdr_status != FCPIO_SUCCESS) {
                atomic64_inc(&fnic_stats->io_stats.io_failures);
                shost_printk(KERN_ERR, fnic->lport->host, "hdr status = %s\n",
                             fnic_fcpio_status_to_str(hdr_status));
        }

        fnic_release_ioreq_buf(fnic, io_req, sc);

        mempool_free(io_req, fnic->io_req_pool);

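        /*
         * Pack the completion into a single u64 for the trace buffer:
         * hdr_status in the top byte, then scsi_status, fcpio flags and
         * selected CDB bytes below it.
         */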
        cmd_trace = ((u64)hdr_status << 56) |
                  (u64)icmnd_cmpl->scsi_status << 48 |
                  (u64)icmnd_cmpl->flags << 40 | (u64)sc->cmnd[0] << 32 |
                  (u64)sc->cmnd[2] << 24 | (u64)sc->cmnd[3] << 16 |
                  (u64)sc->cmnd[4] << 8 | sc->cmnd[5];

        FNIC_TRACE(fnic_fcpio_icmnd_cmpl_handler,
                  sc->device->host->host_no, id, sc,
                  ((u64)icmnd_cmpl->_resvd0[1] << 56 |
                  (u64)icmnd_cmpl->_resvd0[0] << 48 |
                  jiffies_to_msecs(jiffies - start_time)),
                  desc, cmd_trace,
                  (((u64)CMD_FLAGS(sc) << 32) | CMD_STATE(sc)));

        if (sc->sc_data_direction == DMA_FROM_DEVICE) {
                fnic->lport->host_stats.fcp_input_requests++;
                fnic->fcp_input_bytes += xfer_len;
        } else if (sc->sc_data_direction == DMA_TO_DEVICE) {
                fnic->lport->host_stats.fcp_output_requests++;
                fnic->fcp_output_bytes += xfer_len;
        } else {
                fnic->lport->host_stats.fcp_control_requests++;
        }

        atomic64_dec(&fnic_stats->io_stats.active_ios);
        if (atomic64_read(&fnic->io_cmpl_skip))
                atomic64_dec(&fnic->io_cmpl_skip);
        else
                atomic64_inc(&fnic_stats->io_stats.io_completions);

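        /* Bucket the IO service time (in ms) into the latency histogram. */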
        io_duration_time = jiffies_to_msecs(jiffies) -
                                jiffies_to_msecs(start_time);

        if (io_duration_time <= 10)
                atomic64_inc(&fnic_stats->io_stats.io_btw_0_to_10_msec);
        else if (io_duration_time <= 100)
                atomic64_inc(&fnic_stats->io_stats.io_btw_10_to_100_msec);
        else if (io_duration_time <= 500)
                atomic64_inc(&fnic_stats->io_stats.io_btw_100_to_500_msec);
        else if (io_duration_time <= 5000)
                atomic64_inc(&fnic_stats->io_stats.io_btw_500_to_5000_msec);
        else if (io_duration_time <= 10000)
                atomic64_inc(&fnic_stats->io_stats.io_btw_5000_to_10000_msec);
        else if (io_duration_time <= 30000)
                atomic64_inc(&fnic_stats->io_stats.io_btw_10000_to_30000_msec);
        else {
                atomic64_inc(&fnic_stats->io_stats.io_greater_than_30000_msec);

                if (io_duration_time >
                    atomic64_read(&fnic_stats->io_stats.current_max_io_time))
                        atomic64_set(&fnic_stats->io_stats.current_max_io_time,
                                     io_duration_time);
        }

        /* Call SCSI completion function to complete the IO */
        if (sc->scsi_done)
                sc->scsi_done(sc);
}

/*
 * fnic_fcpio_itmf_cmpl_handler
 * Routine to handle itmf completions
 */
static void fnic_fcpio_itmf_cmpl_handler(struct fnic *fnic,
                                        struct fcpio_fw_req *desc)
{
        u8 type;
        u8 hdr_status;
        struct fcpio_tag tag;
        u32 id;
        struct scsi_cmnd *sc;
        struct fnic_io_req *io_req;
        struct fnic_stats *fnic_stats = &fnic->fnic_stats;
        struct abort_stats *abts_stats = &fnic->fnic_stats.abts_stats;
        struct terminate_stats *term_stats = &fnic->fnic_stats.term_stats;
        struct misc_stats *misc_stats = &fnic->fnic_stats.misc_stats;
        unsigned long flags;
        spinlock_t *io_lock;
        unsigned long start_time;

        fcpio_header_dec(&desc->hdr, &type, &hdr_status, &tag);
        fcpio_tag_id_dec(&tag, &id);

        if ((id & FNIC_TAG_MASK) >= fnic->fnic_max_tag_id) {
                shost_printk(KERN_ERR, fnic->lport->host,
                             "Tag out of range tag %x hdr status = %s\n",
                             id, fnic_fcpio_status_to_str(hdr_status));
                return;
        }

        sc = scsi_host_find_tag(fnic->lport->host, id & FNIC_TAG_MASK);
        WARN_ON_ONCE(!sc);
        if (!sc) {
                atomic64_inc(&fnic_stats->io_stats.sc_null);
                shost_printk(KERN_ERR, fnic->lport->host,
                          "itmf_cmpl sc is null - hdr status = %s tag = 0x%x\n",
                          fnic_fcpio_status_to_str(hdr_status), id);
                return;
        }
        io_lock = fnic_io_lock_hash(fnic, sc);
        spin_lock_irqsave(io_lock, flags);
        io_req = (struct fnic_io_req *)CMD_SP(sc);
        WARN_ON_ONCE(!io_req);
        if (!io_req) {
                atomic64_inc(&fnic_stats->io_stats.ioreq_null);
                CMD_FLAGS(sc) |= FNIC_IO_ABT_TERM_REQ_NULL;
                spin_unlock_irqrestore(io_lock, flags);
                shost_printk(KERN_ERR, fnic->lport->host,
                          "itmf_cmpl io_req is null - "
                          "hdr status = %s tag = 0x%x sc 0x%p\n",
                          fnic_fcpio_status_to_str(hdr_status), id, sc);
                return;
        }
        start_time = io_req->start_time;

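        /*
         * The itmf tag encodes the request type in its high bits
         * (FNIC_TAG_ABORT, FNIC_TAG_DEV_RST); FNIC_TAG_MASK recovers the
         * SCSI tag. Both bits set means this is the abort/terminate phase
         * of a device reset.
         */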
1113        if ((id & FNIC_TAG_ABORT) && (id & FNIC_TAG_DEV_RST)) {
1114                /* Abort and terminate completion of device reset req */
1115                /* REVISIT : Add asserts about various flags */
1116                FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
1117                              "dev reset abts cmpl recd. id %x status %s\n",
1118                              id, fnic_fcpio_status_to_str(hdr_status));
1119                CMD_STATE(sc) = FNIC_IOREQ_ABTS_COMPLETE;
1120                CMD_ABTS_STATUS(sc) = hdr_status;
1121                CMD_FLAGS(sc) |= FNIC_DEV_RST_DONE;
1122                if (io_req->abts_done)
1123                        complete(io_req->abts_done);
1124                spin_unlock_irqrestore(io_lock, flags);
1125        } else if (id & FNIC_TAG_ABORT) {
1126                /* Completion of abort cmd */
1127                switch (hdr_status) {
1128                case FCPIO_SUCCESS:
1129                        break;
1130                case FCPIO_TIMEOUT:
1131                        if (CMD_FLAGS(sc) & FNIC_IO_ABTS_ISSUED)
1132                                atomic64_inc(&abts_stats->abort_fw_timeouts);
1133                        else
1134                                atomic64_inc(
1135                                        &term_stats->terminate_fw_timeouts);
1136                        break;
1137                case FCPIO_ITMF_REJECTED:
1138                        FNIC_SCSI_DBG(KERN_INFO, fnic->lport->host,
1139                                "abort reject recd. id %d\n",
1140                                (int)(id & FNIC_TAG_MASK));
1141                        break;
1142                case FCPIO_IO_NOT_FOUND:
1143                        if (CMD_FLAGS(sc) & FNIC_IO_ABTS_ISSUED)
1144                                atomic64_inc(&abts_stats->abort_io_not_found);
1145                        else
1146                                atomic64_inc(
1147                                        &term_stats->terminate_io_not_found);
1148                        break;
1149                default:
1150                        if (CMD_FLAGS(sc) & FNIC_IO_ABTS_ISSUED)
1151                                atomic64_inc(&abts_stats->abort_failures);
1152                        else
1153                                atomic64_inc(
1154                                        &term_stats->terminate_failures);
1155                        break;
1156                }
1157                if (CMD_STATE(sc) != FNIC_IOREQ_ABTS_PENDING) {
1158                        /* This is a late completion. Ignore it */
1159                        spin_unlock_irqrestore(io_lock, flags);
1160                        return;
1161                }
1162
1163                CMD_FLAGS(sc) |= FNIC_IO_ABT_TERM_DONE;
1164                CMD_ABTS_STATUS(sc) = hdr_status;
1165
1166                /* If the status is IO not found consider it as success */
1167                if (hdr_status == FCPIO_IO_NOT_FOUND)
1168                        CMD_ABTS_STATUS(sc) = FCPIO_SUCCESS;
1169
1170                if (!(CMD_FLAGS(sc) & (FNIC_IO_ABORTED | FNIC_IO_DONE)))
1171                        atomic64_inc(&misc_stats->no_icmnd_itmf_cmpls);
1172
1173                FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
1174                              "abts cmpl recd. id %d status %s\n",
1175                              (int)(id & FNIC_TAG_MASK),
1176                              fnic_fcpio_status_to_str(hdr_status));
1177
1178                /*
1179                 * If scsi_eh thread is blocked waiting for abts to complete,
1180                 * signal completion to it. IO will be cleaned in the thread
1181                 * else clean it in this context
1182                 */
1183                if (io_req->abts_done) {
1184                        complete(io_req->abts_done);
1185                        spin_unlock_irqrestore(io_lock, flags);
1186                } else {
1187                        FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
1188                                      "abts cmpl, completing IO\n");
1189                        CMD_SP(sc) = NULL;
1190                        sc->result = (DID_ERROR << 16);
1191
1192                        spin_unlock_irqrestore(io_lock, flags);
1193
1194                        fnic_release_ioreq_buf(fnic, io_req, sc);
1195                        mempool_free(io_req, fnic->io_req_pool);
1196                        if (sc->scsi_done) {
1197                                FNIC_TRACE(fnic_fcpio_itmf_cmpl_handler,
1198                                        sc->device->host->host_no, id,
1199                                        sc,
1200                                        jiffies_to_msecs(jiffies - start_time),
1201                                        desc,
1202                                        (((u64)hdr_status << 40) |
1203                                        (u64)sc->cmnd[0] << 32 |
1204                                        (u64)sc->cmnd[2] << 24 |
1205                                        (u64)sc->cmnd[3] << 16 |
1206                                        (u64)sc->cmnd[4] << 8 | sc->cmnd[5]),
1207                                        (((u64)CMD_FLAGS(sc) << 32) |
1208                                        CMD_STATE(sc)));
1209                                sc->scsi_done(sc);
1210                                atomic64_dec(&fnic_stats->io_stats.active_ios);
1211                                if (atomic64_read(&fnic->io_cmpl_skip))
1212                                        atomic64_dec(&fnic->io_cmpl_skip);
1213                                else
1214                                        atomic64_inc(&fnic_stats->io_stats.io_completions);
1215                        }
1216                }
1217
1218        } else if (id & FNIC_TAG_DEV_RST) {
1219                /* Completion of device reset */
1220                CMD_LR_STATUS(sc) = hdr_status;
1221                if (CMD_STATE(sc) == FNIC_IOREQ_ABTS_PENDING) {
1222                        spin_unlock_irqrestore(io_lock, flags);
1223                        CMD_FLAGS(sc) |= FNIC_DEV_RST_ABTS_PENDING;
1224                        FNIC_TRACE(fnic_fcpio_itmf_cmpl_handler,
1225                                  sc->device->host->host_no, id, sc,
1226                                  jiffies_to_msecs(jiffies - start_time),
1227                                  desc, 0,
1228                                  (((u64)CMD_FLAGS(sc) << 32) | CMD_STATE(sc)));
1229                        FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
1230                                "Terminate pending "
1231                                "dev reset cmpl recd. id %d status %s\n",
1232                                (int)(id & FNIC_TAG_MASK),
1233                                fnic_fcpio_status_to_str(hdr_status));
1234                        return;
1235                }
1236                if (CMD_FLAGS(sc) & FNIC_DEV_RST_TIMED_OUT) {
1237                        /* Need to wait for terminate completion */
1238                        spin_unlock_irqrestore(io_lock, flags);
1239                        FNIC_TRACE(fnic_fcpio_itmf_cmpl_handler,
1240                                  sc->device->host->host_no, id, sc,
1241                                  jiffies_to_msecs(jiffies - start_time),
1242                                  desc, 0,
1243                                  (((u64)CMD_FLAGS(sc) << 32) | CMD_STATE(sc)));
1244                        FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
1245                                "dev reset cmpl recd after time out. "
1246                                "id %d status %s\n",
1247                                (int)(id & FNIC_TAG_MASK),
1248                                fnic_fcpio_status_to_str(hdr_status));
1249                        return;
1250                }
1251                CMD_STATE(sc) = FNIC_IOREQ_CMD_COMPLETE;
1252                CMD_FLAGS(sc) |= FNIC_DEV_RST_DONE;
1253                FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
1254                              "dev reset cmpl recd. id %d status %s\n",
1255                              (int)(id & FNIC_TAG_MASK),
1256                              fnic_fcpio_status_to_str(hdr_status));
1257                if (io_req->dr_done)
1258                        complete(io_req->dr_done);
1259                spin_unlock_irqrestore(io_lock, flags);
1260
1261        } else {
1262                shost_printk(KERN_ERR, fnic->lport->host,
1263                             "Unexpected itmf io state %s tag %x\n",
1264                             fnic_ioreq_state_to_str(CMD_STATE(sc)), id);
1265                spin_unlock_irqrestore(io_lock, flags);
1266        }
1267
1268}
1269
1270/*
1271 * fnic_fcpio_cmpl_handler
1272 * Routine to service the cq for wq_copy
1273 */
1274static int fnic_fcpio_cmpl_handler(struct vnic_dev *vdev,
1275                                   unsigned int cq_index,
1276                                   struct fcpio_fw_req *desc)
1277{
1278        struct fnic *fnic = vnic_dev_priv(vdev);
1279
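        /*
         * First pass is bookkeeping only: each descriptor type that
         * completes a previously posted firmware request decrements
         * active_fw_reqs. FCPIO_ACK is excluded since an ack only
         * confirms that firmware consumed a copy WQ descriptor and
         * does not complete a request.
         */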
1280        switch (desc->hdr.type) {
1281        case FCPIO_ICMND_CMPL: /* fw completed a command */
1282        case FCPIO_ITMF_CMPL: /* fw completed itmf (abort cmd, lun reset)*/
1283        case FCPIO_FLOGI_REG_CMPL: /* fw completed flogi_reg */
1284        case FCPIO_FLOGI_FIP_REG_CMPL: /* fw completed flogi_fip_reg */
1285        case FCPIO_RESET_CMPL: /* fw completed reset */
1286                atomic64_dec(&fnic->fnic_stats.fw_stats.active_fw_reqs);
1287                break;
1288        default:
1289                break;
1290        }
1291
1292        switch (desc->hdr.type) {
1293        case FCPIO_ACK: /* fw copied copy wq desc to its queue */
1294                fnic_fcpio_ack_handler(fnic, cq_index, desc);
1295                break;
1296
1297        case FCPIO_ICMND_CMPL: /* fw completed a command */
1298                fnic_fcpio_icmnd_cmpl_handler(fnic, desc);
1299                break;
1300
1301        case FCPIO_ITMF_CMPL: /* fw completed itmf (abort cmd, lun reset)*/
1302                fnic_fcpio_itmf_cmpl_handler(fnic, desc);
1303                break;
1304
1305        case FCPIO_FLOGI_REG_CMPL: /* fw completed flogi_reg */
1306        case FCPIO_FLOGI_FIP_REG_CMPL: /* fw completed flogi_fip_reg */
1307                fnic_fcpio_flogi_reg_cmpl_handler(fnic, desc);
1308                break;
1309
1310        case FCPIO_RESET_CMPL: /* fw completed reset */
1311                fnic_fcpio_fw_reset_cmpl_handler(fnic, desc);
1312                break;
1313
1314        default:
1315                FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
1316                              "unknown firmware completion type %d\n",
1317                              desc->hdr.type);
1318                break;
1319        }
1320
1321        return 0;
1322}
1323
1324/*
1325 * fnic_wq_copy_cmpl_handler
1326 * Routine to service completions from the copy WQ completion queues
1327 */
1328int fnic_wq_copy_cmpl_handler(struct fnic *fnic, int copy_work_to_do)
1329{
1330        unsigned int wq_work_done = 0;
1331        unsigned int i, cq_index;
1332        unsigned int cur_work_done;
1333        struct misc_stats *misc_stats = &fnic->fnic_stats.misc_stats;
1334        u64 start_jiffies = 0;
1335        u64 end_jiffies = 0;
1336        u64 delta_jiffies = 0;
1337        u64 delta_ms = 0;
1338
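        /*
         * Service one completion queue per copy WQ; the copy CQs are
         * laid out after the raw WQ and RQ completion queues in
         * fnic->cq[], hence the cq_index offset below.
         */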
1339        for (i = 0; i < fnic->wq_copy_count; i++) {
1340                cq_index = i + fnic->raw_wq_count + fnic->rq_count;
1341
1342                start_jiffies = jiffies;
1343                cur_work_done = vnic_cq_copy_service(&fnic->cq[cq_index],
1344                                                     fnic_fcpio_cmpl_handler,
1345                                                     copy_work_to_do);
1346                end_jiffies = jiffies;
1347
1348                wq_work_done += cur_work_done;
1349                delta_jiffies = end_jiffies - start_jiffies;
1350                if (delta_jiffies >
1351                        (u64) atomic64_read(&misc_stats->max_isr_jiffies)) {
1352                        atomic64_set(&misc_stats->max_isr_jiffies,
1353                                        delta_jiffies);
1354                        delta_ms = jiffies_to_msecs(delta_jiffies);
1355                        atomic64_set(&misc_stats->max_isr_time_ms, delta_ms);
1356                        atomic64_set(&misc_stats->corr_work_done,
1357                                        cur_work_done);
1358                }
1359        }
1360        return wq_work_done;
1361}
1362
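/*
 * fnic_cleanup_io
 * Walk the tag table and fail every outstanding IO (except exclude_id)
 * back to the mid-layer with DID_TRANSPORT_DISRUPTED. Used when firmware
 * has been reset and will not send individual completions for the
 * outstanding IOs.
 */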
1363static void fnic_cleanup_io(struct fnic *fnic, int exclude_id)
1364{
1365        int i;
1366        struct fnic_io_req *io_req;
1367        unsigned long flags = 0;
1368        struct scsi_cmnd *sc;
1369        spinlock_t *io_lock;
1370        unsigned long start_time = 0;
1371        struct fnic_stats *fnic_stats = &fnic->fnic_stats;
1372
1373        for (i = 0; i < fnic->fnic_max_tag_id; i++) {
1374                if (i == exclude_id)
1375                        continue;
1376
1377                io_lock = fnic_io_lock_tag(fnic, i);
1378                spin_lock_irqsave(io_lock, flags);
1379                sc = scsi_host_find_tag(fnic->lport->host, i);
1380                if (!sc) {
1381                        spin_unlock_irqrestore(io_lock, flags);
1382                        continue;
1383                }
1384
1385                io_req = (struct fnic_io_req *)CMD_SP(sc);
1386                if ((CMD_FLAGS(sc) & FNIC_DEVICE_RESET) &&
1387                        !(CMD_FLAGS(sc) & FNIC_DEV_RST_DONE)) {
1388                        /*
1389                         * We will be here only when FW completes reset
1390                         * without sending completions for outstanding ios.
1391                         */
1392                        CMD_FLAGS(sc) |= FNIC_DEV_RST_DONE;
1393                        if (io_req && io_req->dr_done)
1394                                complete(io_req->dr_done);
1395                        else if (io_req && io_req->abts_done)
1396                                complete(io_req->abts_done);
1397                        spin_unlock_irqrestore(io_lock, flags);
1398                        continue;
1399                } else if (CMD_FLAGS(sc) & FNIC_DEVICE_RESET) {
1400                        spin_unlock_irqrestore(io_lock, flags);
1401                        continue;
1402                }
1403                if (!io_req) {
1404                        spin_unlock_irqrestore(io_lock, flags);
1405                        continue;
1406                }
1407
1408                CMD_SP(sc) = NULL;
1409
1410                spin_unlock_irqrestore(io_lock, flags);
1411
1412                /*
1413                 * If there is a scsi_cmnd associated with this io_req, then
1414                 * free the corresponding state
1415                 */
1416                start_time = io_req->start_time;
1417                fnic_release_ioreq_buf(fnic, io_req, sc);
1418                mempool_free(io_req, fnic->io_req_pool);
1419
1420                sc->result = DID_TRANSPORT_DISRUPTED << 16;
1421                FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
1422                              "%s: tag:0x%x : sc:0x%p duration = %lu DID_TRANSPORT_DISRUPTED\n",
1423                              __func__, sc->request->tag, sc,
1424                              (jiffies - start_time));
1425
1426                if (atomic64_read(&fnic->io_cmpl_skip))
1427                        atomic64_dec(&fnic->io_cmpl_skip);
1428                else
1429                        atomic64_inc(&fnic_stats->io_stats.io_completions);
1430
1431                /* Complete the command to SCSI */
1432                if (sc->scsi_done) {
1433                        if (!(CMD_FLAGS(sc) & FNIC_IO_ISSUED))
1434                                shost_printk(KERN_ERR, fnic->lport->host,
1435                                "Calling done for IO not issued to fw: tag:0x%x sc:0x%p\n",
1436                                 sc->request->tag, sc);
1437
1438                        FNIC_TRACE(fnic_cleanup_io,
1439                                  sc->device->host->host_no, i, sc,
1440                                  jiffies_to_msecs(jiffies - start_time),
1441                                  0, ((u64)sc->cmnd[0] << 32 |
1442                                  (u64)sc->cmnd[2] << 24 |
1443                                  (u64)sc->cmnd[3] << 16 |
1444                                  (u64)sc->cmnd[4] << 8 | sc->cmnd[5]),
1445                                  (((u64)CMD_FLAGS(sc) << 32) | CMD_STATE(sc)));
1446
1447                        sc->scsi_done(sc);
1448                }
1449        }
1450}
1451
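/*
 * fnic_wq_copy_cleanup_handler
 * Called for each descriptor still posted on a copy WQ when the queue is
 * cleaned up; fails the associated IO back to the mid-layer with
 * DID_NO_CONNECT.
 */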
1452void fnic_wq_copy_cleanup_handler(struct vnic_wq_copy *wq,
1453                                  struct fcpio_host_req *desc)
1454{
1455        u32 id;
1456        struct fnic *fnic = vnic_dev_priv(wq->vdev);
1457        struct fnic_io_req *io_req;
1458        struct scsi_cmnd *sc;
1459        unsigned long flags;
1460        spinlock_t *io_lock;
1461        unsigned long start_time = 0;
1462
1463        /* get the tag reference */
1464        fcpio_tag_id_dec(&desc->hdr.tag, &id);
1465        id &= FNIC_TAG_MASK;
1466
1467        if (id >= fnic->fnic_max_tag_id)
1468                return;
1469
1470        sc = scsi_host_find_tag(fnic->lport->host, id);
1471        if (!sc)
1472                return;
1473
1474        io_lock = fnic_io_lock_hash(fnic, sc);
1475        spin_lock_irqsave(io_lock, flags);
1476
1477        /* Get the IO context which this desc refers to */
1478        io_req = (struct fnic_io_req *)CMD_SP(sc);
1479
1480        /* fnic interrupts are turned off by now */
1481
1482        if (!io_req) {
1483                spin_unlock_irqrestore(io_lock, flags);
1484                goto wq_copy_cleanup_scsi_cmd;
1485        }
1486
1487        CMD_SP(sc) = NULL;
1488
1489        spin_unlock_irqrestore(io_lock, flags);
1490
1491        start_time = io_req->start_time;
1492        fnic_release_ioreq_buf(fnic, io_req, sc);
1493        mempool_free(io_req, fnic->io_req_pool);
1494
1495wq_copy_cleanup_scsi_cmd:
1496        sc->result = DID_NO_CONNECT << 16;
1497        FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
1498                      "wq_copy_cleanup_handler: DID_NO_CONNECT\n");
1499
1500        if (sc->scsi_done) {
1501                FNIC_TRACE(fnic_wq_copy_cleanup_handler,
1502                          sc->device->host->host_no, id, sc,
1503                          jiffies_to_msecs(jiffies - start_time),
1504                          0, ((u64)sc->cmnd[0] << 32 |
1505                          (u64)sc->cmnd[2] << 24 | (u64)sc->cmnd[3] << 16 |
1506                          (u64)sc->cmnd[4] << 8 | sc->cmnd[5]),
1507                          (((u64)CMD_FLAGS(sc) << 32) | CMD_STATE(sc)));
1508
1509                sc->scsi_done(sc);
1510        }
1511}
1512
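/*
 * fnic_queue_abort_io_req
 * Post an abort/terminate ITMF descriptor for the given tag on copy WQ 0.
 * Returns 0 on success, 1 if IOs are blocked or no WQ descriptor is
 * available.
 */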
1513static inline int fnic_queue_abort_io_req(struct fnic *fnic, int tag,
1514                                          u32 task_req, u8 *fc_lun,
1515                                          struct fnic_io_req *io_req)
1516{
1517        struct vnic_wq_copy *wq = &fnic->wq_copy[0];
1518        struct Scsi_Host *host = fnic->lport->host;
1519        struct misc_stats *misc_stats = &fnic->fnic_stats.misc_stats;
1520        unsigned long flags;
1521
1522        spin_lock_irqsave(host->host_lock, flags);
1523        if (unlikely(fnic_chk_state_flags_locked(fnic,
1524                                                FNIC_FLAGS_IO_BLOCKED))) {
1525                spin_unlock_irqrestore(host->host_lock, flags);
1526                return 1;
1527        } else
1528                atomic_inc(&fnic->in_flight);
1529        spin_unlock_irqrestore(host->host_lock, flags);
1530
1531        spin_lock_irqsave(&fnic->wq_copy_lock[0], flags);
1532
1533        if (vnic_wq_copy_desc_avail(wq) <= fnic->wq_copy_desc_low[0])
1534                free_wq_copy_descs(fnic, wq);
1535
1536        if (!vnic_wq_copy_desc_avail(wq)) {
1537                spin_unlock_irqrestore(&fnic->wq_copy_lock[0], flags);
1538                atomic_dec(&fnic->in_flight);
1539                FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
1540                        "fnic_queue_abort_io_req: failure: no descriptors\n");
1541                atomic64_inc(&misc_stats->abts_cpwq_alloc_failures);
1542                return 1;
1543        }
1544        fnic_queue_wq_copy_desc_itmf(wq, tag | FNIC_TAG_ABORT,
1545                                     0, task_req, tag, fc_lun, io_req->port_id,
1546                                     fnic->config.ra_tov, fnic->config.ed_tov);
1547
1548        atomic64_inc(&fnic->fnic_stats.fw_stats.active_fw_reqs);
1549        if (atomic64_read(&fnic->fnic_stats.fw_stats.active_fw_reqs) >
1550                  atomic64_read(&fnic->fnic_stats.fw_stats.max_fw_reqs))
1551                atomic64_set(&fnic->fnic_stats.fw_stats.max_fw_reqs,
1552                  atomic64_read(&fnic->fnic_stats.fw_stats.active_fw_reqs));
1553
1554        spin_unlock_irqrestore(&fnic->wq_copy_lock[0], flags);
1555        atomic_dec(&fnic->in_flight);
1556
1557        return 0;
1558}
1559
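/*
 * fnic_rport_exch_reset
 * Walk all outstanding IOs and issue a terminate (local abort) to firmware
 * for each IO still pending on the rport identified by port_id.
 */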
1560static void fnic_rport_exch_reset(struct fnic *fnic, u32 port_id)
1561{
1562        int tag;
1563        int abt_tag;
1564        int term_cnt = 0;
1565        struct fnic_io_req *io_req;
1566        spinlock_t *io_lock;
1567        unsigned long flags;
1568        struct scsi_cmnd *sc;
1569        struct reset_stats *reset_stats = &fnic->fnic_stats.reset_stats;
1570        struct terminate_stats *term_stats = &fnic->fnic_stats.term_stats;
1571        struct scsi_lun fc_lun;
1572        enum fnic_ioreq_state old_ioreq_state;
1573
1574        FNIC_SCSI_DBG(KERN_DEBUG,
1575                      fnic->lport->host,
1576                      "fnic_rport_exch_reset called portid 0x%06x\n",
1577                      port_id);
1578
1579        if (fnic->in_remove)
1580                return;
1581
1582        for (tag = 0; tag < fnic->fnic_max_tag_id; tag++) {
1583                abt_tag = tag;
1584                io_lock = fnic_io_lock_tag(fnic, tag);
1585                spin_lock_irqsave(io_lock, flags);
1586                sc = scsi_host_find_tag(fnic->lport->host, tag);
1587                if (!sc) {
1588                        spin_unlock_irqrestore(io_lock, flags);
1589                        continue;
1590                }
1591
1592                io_req = (struct fnic_io_req *)CMD_SP(sc);
1593
1594                if (!io_req || io_req->port_id != port_id) {
1595                        spin_unlock_irqrestore(io_lock, flags);
1596                        continue;
1597                }
1598
1599                if ((CMD_FLAGS(sc) & FNIC_DEVICE_RESET) &&
1600                        (!(CMD_FLAGS(sc) & FNIC_DEV_RST_ISSUED))) {
1601                        FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
1602                        "fnic_rport_exch_reset dev rst not pending sc 0x%p\n",
1603                        sc);
1604                        spin_unlock_irqrestore(io_lock, flags);
1605                        continue;
1606                }
1607
1608                /*
1609                 * Found IO that is still pending with firmware and
1610                 * belongs to rport that went away
1611                 */
1612                if (CMD_STATE(sc) == FNIC_IOREQ_ABTS_PENDING) {
1613                        spin_unlock_irqrestore(io_lock, flags);
1614                        continue;
1615                }
1616                if (io_req->abts_done) {
1617                        shost_printk(KERN_ERR, fnic->lport->host,
1618                        "fnic_rport_exch_reset: io_req->abts_done is set "
1619                        "state is %s\n",
1620                        fnic_ioreq_state_to_str(CMD_STATE(sc)));
1621                }
1622
1623                if (!(CMD_FLAGS(sc) & FNIC_IO_ISSUED)) {
1624                        shost_printk(KERN_ERR, fnic->lport->host,
1625                                  "rport_exch_reset "
1626                                  "IO not yet issued %p tag 0x%x flags "
1627                                  "%x state %d\n",
1628                                  sc, tag, CMD_FLAGS(sc), CMD_STATE(sc));
1629                }
1630                old_ioreq_state = CMD_STATE(sc);
1631                CMD_STATE(sc) = FNIC_IOREQ_ABTS_PENDING;
1632                CMD_ABTS_STATUS(sc) = FCPIO_INVALID_CODE;
1633                if (CMD_FLAGS(sc) & FNIC_DEVICE_RESET) {
1634                        atomic64_inc(&reset_stats->device_reset_terminates);
1635                        abt_tag = (tag | FNIC_TAG_DEV_RST);
1636                        FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
1637                        "fnic_rport_exch_reset dev rst sc 0x%p\n",
1638                        sc);
1639                }
1640
1641                BUG_ON(io_req->abts_done);
1642
1643                FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
1644                              "fnic_rport_exch_reset: Issuing abts\n");
1645
1646                spin_unlock_irqrestore(io_lock, flags);
1647
1648                /* Now queue the abort command to firmware */
1649                int_to_scsilun(sc->device->lun, &fc_lun);
1650
1651                if (fnic_queue_abort_io_req(fnic, abt_tag,
1652                                            FCPIO_ITMF_ABT_TASK_TERM,
1653                                            fc_lun.scsi_lun, io_req)) {
1654                        /*
1655                         * Revert the cmd state back to old state, if
1656                         * it hasn't changed in between. This cmd will get
1657                         * aborted later by scsi_eh, or cleaned up during
1658                         * lun reset
1659                         */
1660                        spin_lock_irqsave(io_lock, flags);
1661                        if (CMD_STATE(sc) == FNIC_IOREQ_ABTS_PENDING)
1662                                CMD_STATE(sc) = old_ioreq_state;
1663                        spin_unlock_irqrestore(io_lock, flags);
1664                } else {
1665                        spin_lock_irqsave(io_lock, flags);
1666                        if (CMD_FLAGS(sc) & FNIC_DEVICE_RESET)
1667                                CMD_FLAGS(sc) |= FNIC_DEV_RST_TERM_ISSUED;
1668                        else
1669                                CMD_FLAGS(sc) |= FNIC_IO_INTERNAL_TERM_ISSUED;
1670                        spin_unlock_irqrestore(io_lock, flags);
1671                        atomic64_inc(&term_stats->terminates);
1672                        term_cnt++;
1673                }
1674        }
1675        if (term_cnt > atomic64_read(&term_stats->max_terminates))
1676                atomic64_set(&term_stats->max_terminates, term_cnt);
1677
1678}
1679
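/*
 * fnic_terminate_rport_io
 * FC transport callback invoked when an rport goes away; terminates all
 * IOs still outstanding on that rport, mirroring fnic_rport_exch_reset().
 */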
1680void fnic_terminate_rport_io(struct fc_rport *rport)
1681{
1682        int tag;
1683        int abt_tag;
1684        int term_cnt = 0;
1685        struct fnic_io_req *io_req;
1686        spinlock_t *io_lock;
1687        unsigned long flags;
1688        struct scsi_cmnd *sc;
1689        struct scsi_lun fc_lun;
1690        struct fc_rport_libfc_priv *rdata;
1691        struct fc_lport *lport;
1692        struct fnic *fnic;
1693        struct fc_rport *cmd_rport;
1694        struct reset_stats *reset_stats;
1695        struct terminate_stats *term_stats;
1696        enum fnic_ioreq_state old_ioreq_state;
1697
1698        if (!rport) {
1699                printk(KERN_ERR "fnic_terminate_rport_io: rport is NULL\n");
1700                return;
1701        }
1702        rdata = rport->dd_data;
1703
1704        if (!rdata) {
1705                printk(KERN_ERR "fnic_terminate_rport_io: rdata is NULL\n");
1706                return;
1707        }
1708        lport = rdata->local_port;
1709
1710        if (!lport) {
1711                printk(KERN_ERR "fnic_terminate_rport_io: lport is NULL\n");
1712                return;
1713        }
1714        fnic = lport_priv(lport);
1715        FNIC_SCSI_DBG(KERN_DEBUG,
1716                      fnic->lport->host, "fnic_terminate_rport_io called"
1717                      " wwpn 0x%llx, wwnn 0x%llx, rport 0x%p, portid 0x%06x\n",
1718                      rport->port_name, rport->node_name, rport,
1719                      rport->port_id);
1720
1721        if (fnic->in_remove)
1722                return;
1723
1724        reset_stats = &fnic->fnic_stats.reset_stats;
1725        term_stats = &fnic->fnic_stats.term_stats;
1726
1727        for (tag = 0; tag < fnic->fnic_max_tag_id; tag++) {
1728                abt_tag = tag;
1729                io_lock = fnic_io_lock_tag(fnic, tag);
1730                spin_lock_irqsave(io_lock, flags);
1731                sc = scsi_host_find_tag(fnic->lport->host, tag);
1732                if (!sc) {
1733                        spin_unlock_irqrestore(io_lock, flags);
1734                        continue;
1735                }
1736
1737                cmd_rport = starget_to_rport(scsi_target(sc->device));
1738                if (rport != cmd_rport) {
1739                        spin_unlock_irqrestore(io_lock, flags);
1740                        continue;
1741                }
1742
1743                io_req = (struct fnic_io_req *)CMD_SP(sc);
1744
1745                if (!io_req) {
1746                        spin_unlock_irqrestore(io_lock, flags);
1747                        continue;
1748                }
1749
1750                if ((CMD_FLAGS(sc) & FNIC_DEVICE_RESET) &&
1751                        (!(CMD_FLAGS(sc) & FNIC_DEV_RST_ISSUED))) {
1752                        FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
1753                        "fnic_terminate_rport_io dev rst not pending sc 0x%p\n",
1754                        sc);
1755                        spin_unlock_irqrestore(io_lock, flags);
1756                        continue;
1757                }
1758                /*
1759                 * Found IO that is still pending with firmware and
1760                 * belongs to rport that went away
1761                 */
1762                if (CMD_STATE(sc) == FNIC_IOREQ_ABTS_PENDING) {
1763                        spin_unlock_irqrestore(io_lock, flags);
1764                        continue;
1765                }
1766                if (io_req->abts_done) {
1767                        shost_printk(KERN_ERR, fnic->lport->host,
1768                        "fnic_terminate_rport_io: io_req->abts_done is set "
1769                        "state is %s\n",
1770                        fnic_ioreq_state_to_str(CMD_STATE(sc)));
1771                }
1772                if (!(CMD_FLAGS(sc) & FNIC_IO_ISSUED)) {
1773                        FNIC_SCSI_DBG(KERN_INFO, fnic->lport->host,
1774                                  "fnic_terminate_rport_io "
1775                                  "IO not yet issued %p tag 0x%x flags "
1776                                  "%x state %d\n",
1777                                  sc, tag, CMD_FLAGS(sc), CMD_STATE(sc));
1778                }
1779                old_ioreq_state = CMD_STATE(sc);
1780                CMD_STATE(sc) = FNIC_IOREQ_ABTS_PENDING;
1781                CMD_ABTS_STATUS(sc) = FCPIO_INVALID_CODE;
1782                if (CMD_FLAGS(sc) & FNIC_DEVICE_RESET) {
1783                        atomic64_inc(&reset_stats->device_reset_terminates);
1784                        abt_tag = (tag | FNIC_TAG_DEV_RST);
1785                        FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
1786                        "fnic_terminate_rport_io dev rst sc 0x%p\n", sc);
1787                }
1788
1789                BUG_ON(io_req->abts_done);
1790
1791                FNIC_SCSI_DBG(KERN_DEBUG,
1792                              fnic->lport->host,
1793                              "fnic_terminate_rport_io: Issuing abts\n");
1794
1795                spin_unlock_irqrestore(io_lock, flags);
1796
1797                /* Now queue the abort command to firmware */
1798                int_to_scsilun(sc->device->lun, &fc_lun);
1799
1800                if (fnic_queue_abort_io_req(fnic, abt_tag,
1801                                            FCPIO_ITMF_ABT_TASK_TERM,
1802                                            fc_lun.scsi_lun, io_req)) {
1803                        /*
1804                         * Revert the cmd state back to old state, if
1805                         * it hasn't changed in between. This cmd will get
1806                         * aborted later by scsi_eh, or cleaned up during
1807                         * lun reset
1808                         */
1809                        spin_lock_irqsave(io_lock, flags);
1810                        if (CMD_STATE(sc) == FNIC_IOREQ_ABTS_PENDING)
1811                                CMD_STATE(sc) = old_ioreq_state;
1812                        spin_unlock_irqrestore(io_lock, flags);
1813                } else {
1814                        spin_lock_irqsave(io_lock, flags);
1815                        if (CMD_FLAGS(sc) & FNIC_DEVICE_RESET)
1816                                CMD_FLAGS(sc) |= FNIC_DEV_RST_TERM_ISSUED;
1817                        else
1818                                CMD_FLAGS(sc) |= FNIC_IO_INTERNAL_TERM_ISSUED;
1819                        spin_unlock_irqrestore(io_lock, flags);
1820                        atomic64_inc(&term_stats->terminates);
1821                        term_cnt++;
1822                }
1823        }
1824        if (term_cnt > atomic64_read(&term_stats->max_terminates))
1825                atomic64_set(&term_stats->max_terminates, term_cnt);
1826
1827}
1828
1829/*
1830 * This function is exported to SCSI for sending abort cmnds.
1831 * A SCSI IO is represented by a io_req in the driver.
1832 * The ioreq is linked to the SCSI Cmd, thus a link with the ULP's IO.
1833 */
1834int fnic_abort_cmd(struct scsi_cmnd *sc)
1835{
1836        struct fc_lport *lp;
1837        struct fnic *fnic;
1838        struct fnic_io_req *io_req = NULL;
1839        struct fc_rport *rport;
1840        spinlock_t *io_lock;
1841        unsigned long flags;
1842        unsigned long start_time = 0;
1843        int ret = SUCCESS;
1844        u32 task_req = 0;
1845        struct scsi_lun fc_lun;
1846        struct fnic_stats *fnic_stats;
1847        struct abort_stats *abts_stats;
1848        struct terminate_stats *term_stats;
1849        enum fnic_ioreq_state old_ioreq_state;
1850        int tag;
1851        unsigned long abt_issued_time;
1852        DECLARE_COMPLETION_ONSTACK(tm_done);
1853
1854        /* Wait for rport to unblock */
1855        fc_block_scsi_eh(sc);
1856
1857        /* Get local-port, check ready and link up */
1858        lp = shost_priv(sc->device->host);
1859
1860        fnic = lport_priv(lp);
1861        fnic_stats = &fnic->fnic_stats;
1862        abts_stats = &fnic->fnic_stats.abts_stats;
1863        term_stats = &fnic->fnic_stats.term_stats;
1864
1865        rport = starget_to_rport(scsi_target(sc->device));
1866        tag = sc->request->tag;
1867        FNIC_SCSI_DBG(KERN_DEBUG,
1868                fnic->lport->host,
1869                "Abort Cmd called FCID 0x%x, LUN 0x%llx TAG %x flags %x\n",
1870                rport->port_id, sc->device->lun, tag, CMD_FLAGS(sc));
1871
1872        CMD_FLAGS(sc) = FNIC_NO_FLAGS;
1873
1874        if (lp->state != LPORT_ST_READY || !(lp->link_up)) {
1875                ret = FAILED;
1876                goto fnic_abort_cmd_end;
1877        }
1878
1879        /*
1880         * Avoid a race between SCSI issuing the abort and the device
1881         * completing the command.
1882         *
1883         * If the command is already completed by the fw cmpl code,
1884         * we just return SUCCESS from here. This means that the abort
1885         * succeeded. In the SCSI ML, since the timeout for the command has
1886         * already fired, the completion won't actually complete the command
1887         * and it will be considered as an aborted command.
1888         *
1889         * The CMD_SP will not be cleared except while holding io_req_lock.
1890         */
1891        io_lock = fnic_io_lock_hash(fnic, sc);
1892        spin_lock_irqsave(io_lock, flags);
1893        io_req = (struct fnic_io_req *)CMD_SP(sc);
1894        if (!io_req) {
1895                spin_unlock_irqrestore(io_lock, flags);
1896                goto fnic_abort_cmd_end;
1897        }
1898
1899        io_req->abts_done = &tm_done;
1900
1901        if (CMD_STATE(sc) == FNIC_IOREQ_ABTS_PENDING) {
1902                spin_unlock_irqrestore(io_lock, flags);
1903                goto wait_pending;
1904        }
1905
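        /* Bucket how long the command ran before this abort was issued. */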
1906        abt_issued_time = jiffies_to_msecs(jiffies) - jiffies_to_msecs(io_req->start_time);
1907        if (abt_issued_time <= 6000)
1908                atomic64_inc(&abts_stats->abort_issued_btw_0_to_6_sec);
1909        else if (abt_issued_time > 6000 && abt_issued_time <= 20000)
1910                atomic64_inc(&abts_stats->abort_issued_btw_6_to_20_sec);
1911        else if (abt_issued_time > 20000 && abt_issued_time <= 30000)
1912                atomic64_inc(&abts_stats->abort_issued_btw_20_to_30_sec);
1913        else if (abt_issued_time > 30000 && abt_issued_time <= 40000)
1914                atomic64_inc(&abts_stats->abort_issued_btw_30_to_40_sec);
1915        else if (abt_issued_time > 40000 && abt_issued_time <= 50000)
1916                atomic64_inc(&abts_stats->abort_issued_btw_40_to_50_sec);
1917        else if (abt_issued_time > 50000 && abt_issued_time <= 60000)
1918                atomic64_inc(&abts_stats->abort_issued_btw_50_to_60_sec);
1919        else
1920                atomic64_inc(&abts_stats->abort_issued_greater_than_60_sec);
1921
1922        FNIC_SCSI_DBG(KERN_INFO, fnic->lport->host,
1923                "CDB Opcode: %02x Abort issued time: %lu msec\n", sc->cmnd[0], abt_issued_time);
1924        /*
1925         * Command is still pending; we need to abort it.
1926         * If the firmware completes the command after this point, the
1927         * completion won't be delivered to the mid-layer, since the
1928         * abort has already started.
1929         */
1930        old_ioreq_state = CMD_STATE(sc);
1931        CMD_STATE(sc) = FNIC_IOREQ_ABTS_PENDING;
1932        CMD_ABTS_STATUS(sc) = FCPIO_INVALID_CODE;
1933
1934        spin_unlock_irqrestore(io_lock, flags);
1935
1936        /*
1937         * Check readiness of the remote port. If the path to remote
1938         * port is up, then send abts to the remote port to terminate
1939         * the IO. Else, just locally terminate the IO in the firmware
1940         */
1941        if (fc_remote_port_chkready(rport) == 0)
1942                task_req = FCPIO_ITMF_ABT_TASK;
1943        else {
1944                atomic64_inc(&fnic_stats->misc_stats.rport_not_ready);
1945                task_req = FCPIO_ITMF_ABT_TASK_TERM;
1946        }
1947
1948        /* Now queue the abort command to firmware */
1949        int_to_scsilun(sc->device->lun, &fc_lun);
1950
1951        if (fnic_queue_abort_io_req(fnic, sc->request->tag, task_req,
1952                                    fc_lun.scsi_lun, io_req)) {
1953                spin_lock_irqsave(io_lock, flags);
1954                if (CMD_STATE(sc) == FNIC_IOREQ_ABTS_PENDING)
1955                        CMD_STATE(sc) = old_ioreq_state;
1956                io_req = (struct fnic_io_req *)CMD_SP(sc);
1957                if (io_req)
1958                        io_req->abts_done = NULL;
1959                spin_unlock_irqrestore(io_lock, flags);
1960                ret = FAILED;
1961                goto fnic_abort_cmd_end;
1962        }
1963        if (task_req == FCPIO_ITMF_ABT_TASK) {
1964                CMD_FLAGS(sc) |= FNIC_IO_ABTS_ISSUED;
1965                atomic64_inc(&fnic_stats->abts_stats.aborts);
1966        } else {
1967                CMD_FLAGS(sc) |= FNIC_IO_TERM_ISSUED;
1968                atomic64_inc(&fnic_stats->term_stats.terminates);
1969        }
1970
1971        /*
1972         * We queued an abort IO, wait for its completion.
1973         * Once the firmware completes the abort command, it will
1974         * wake up this thread.
1975         */
1976 wait_pending:
1977        wait_for_completion_timeout(&tm_done,
1978                                    msecs_to_jiffies
1979                                    (2 * fnic->config.ra_tov +
1980                                     fnic->config.ed_tov));
1981
1982        /* Check the abort status */
1983        spin_lock_irqsave(io_lock, flags);
1984
1985        io_req = (struct fnic_io_req *)CMD_SP(sc);
1986        if (!io_req) {
1987                atomic64_inc(&fnic_stats->io_stats.ioreq_null);
1988                spin_unlock_irqrestore(io_lock, flags);
1989                CMD_FLAGS(sc) |= FNIC_IO_ABT_TERM_REQ_NULL;
1990                ret = FAILED;
1991                goto fnic_abort_cmd_end;
1992        }
1993        io_req->abts_done = NULL;
1994
1995        /* fw did not complete abort, timed out */
1996        if (CMD_ABTS_STATUS(sc) == FCPIO_INVALID_CODE) {
1997                spin_unlock_irqrestore(io_lock, flags);
1998                if (task_req == FCPIO_ITMF_ABT_TASK) {
1999                        atomic64_inc(&abts_stats->abort_drv_timeouts);
2000                } else {
2001                        atomic64_inc(&term_stats->terminate_drv_timeouts);
2002                }
2003                CMD_FLAGS(sc) |= FNIC_IO_ABT_TERM_TIMED_OUT;
2004                ret = FAILED;
2005                goto fnic_abort_cmd_end;
2006        }
2007
2008        /* IO out of order */
2009
2010        if (!(CMD_FLAGS(sc) & (FNIC_IO_ABORTED | FNIC_IO_DONE))) {
2011                spin_unlock_irqrestore(io_lock, flags);
2012                FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
2013                        "Issuing Host reset due to out of order IO\n");
2014
2015                ret = FAILED;
2016                goto fnic_abort_cmd_end;
2017        }
2018
2019        CMD_STATE(sc) = FNIC_IOREQ_ABTS_COMPLETE;
2020
2021        start_time = io_req->start_time;
2022        /*
2023         * firmware completed the abort, check the status,
2024         * free the io_req if successful. If abort fails,
2025         * Device reset will clean the I/O.
2026         */
2027        if (CMD_ABTS_STATUS(sc) == FCPIO_SUCCESS)
2028                CMD_SP(sc) = NULL;
2029        else {
2030                ret = FAILED;
2031                spin_unlock_irqrestore(io_lock, flags);
2032                goto fnic_abort_cmd_end;
2033        }
2034
2035        spin_unlock_irqrestore(io_lock, flags);
2036
2037        fnic_release_ioreq_buf(fnic, io_req, sc);
2038        mempool_free(io_req, fnic->io_req_pool);
2039
2040        if (sc->scsi_done) {
2041                /* Call SCSI completion function to complete the IO */
2042                sc->result = (DID_ABORT << 16);
2043                sc->scsi_done(sc);
2044                atomic64_dec(&fnic_stats->io_stats.active_ios);
2045                if (atomic64_read(&fnic->io_cmpl_skip))
2046                        atomic64_dec(&fnic->io_cmpl_skip);
2047                else
2048                        atomic64_inc(&fnic_stats->io_stats.io_completions);
2049        }
2050
2051fnic_abort_cmd_end:
2052        FNIC_TRACE(fnic_abort_cmd, sc->device->host->host_no,
2053                  sc->request->tag, sc,
2054                  jiffies_to_msecs(jiffies - start_time),
2055                  0, ((u64)sc->cmnd[0] << 32 |
2056                  (u64)sc->cmnd[2] << 24 | (u64)sc->cmnd[3] << 16 |
2057                  (u64)sc->cmnd[4] << 8 | sc->cmnd[5]),
2058                  (((u64)CMD_FLAGS(sc) << 32) | CMD_STATE(sc)));
2059
2060        FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
2061                      "Returning from abort cmd type %x %s\n", task_req,
2062                      (ret == SUCCESS) ?
2063                      "SUCCESS" : "FAILED");
2064        return ret;
2065}
2066
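/*
 * fnic_queue_dr_io_req
 * Post a LUN reset ITMF descriptor for this command on copy WQ 0.
 * Returns 0 on success, FAILED if IOs are blocked, or -EAGAIN if no WQ
 * descriptor is available.
 */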
2067static inline int fnic_queue_dr_io_req(struct fnic *fnic,
2068                                       struct scsi_cmnd *sc,
2069                                       struct fnic_io_req *io_req)
2070{
2071        struct vnic_wq_copy *wq = &fnic->wq_copy[0];
2072        struct Scsi_Host *host = fnic->lport->host;
2073        struct misc_stats *misc_stats = &fnic->fnic_stats.misc_stats;
2074        struct scsi_lun fc_lun;
2075        int ret = 0;
2076        unsigned long intr_flags;
2077
2078        spin_lock_irqsave(host->host_lock, intr_flags);
2079        if (unlikely(fnic_chk_state_flags_locked(fnic,
2080                                                FNIC_FLAGS_IO_BLOCKED))) {
2081                spin_unlock_irqrestore(host->host_lock, intr_flags);
2082                return FAILED;
2083        } else
2084                atomic_inc(&fnic->in_flight);
2085        spin_unlock_irqrestore(host->host_lock, intr_flags);
2086
2087        spin_lock_irqsave(&fnic->wq_copy_lock[0], intr_flags);
2088
2089        if (vnic_wq_copy_desc_avail(wq) <= fnic->wq_copy_desc_low[0])
2090                free_wq_copy_descs(fnic, wq);
2091
2092        if (!vnic_wq_copy_desc_avail(wq)) {
2093                FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
2094                          "queue_dr_io_req failure - no descriptors\n");
2095                atomic64_inc(&misc_stats->devrst_cpwq_alloc_failures);
2096                ret = -EAGAIN;
2097                goto lr_io_req_end;
2098        }
2099
2100        /* fill in the lun info */
2101        int_to_scsilun(sc->device->lun, &fc_lun);
2102
2103        fnic_queue_wq_copy_desc_itmf(wq, sc->request->tag | FNIC_TAG_DEV_RST,
2104                                     0, FCPIO_ITMF_LUN_RESET, SCSI_NO_TAG,
2105                                     fc_lun.scsi_lun, io_req->port_id,
2106                                     fnic->config.ra_tov, fnic->config.ed_tov);
2107
2108        atomic64_inc(&fnic->fnic_stats.fw_stats.active_fw_reqs);
2109        if (atomic64_read(&fnic->fnic_stats.fw_stats.active_fw_reqs) >
2110                  atomic64_read(&fnic->fnic_stats.fw_stats.max_fw_reqs))
2111                atomic64_set(&fnic->fnic_stats.fw_stats.max_fw_reqs,
2112                  atomic64_read(&fnic->fnic_stats.fw_stats.active_fw_reqs));
2113
2114lr_io_req_end:
2115        spin_unlock_irqrestore(&fnic->wq_copy_lock[0], intr_flags);
2116        atomic_dec(&fnic->in_flight);
2117
2118        return ret;
2119}
2120
2121/*
2122 * Clean up any pending aborts on the lun
2123 * For each outstanding IO on this lun whose abort is not yet completed by
2124 * fw, issue a local abort and wait for the abort to complete. Return 0 if
2125 * all commands were successfully aborted, 1 otherwise.
2126 */
2127static int fnic_clean_pending_aborts(struct fnic *fnic,
2128                                     struct scsi_cmnd *lr_sc,
2129                                     bool new_sc)
2130
2131{
2132        int tag, abt_tag;
2133        struct fnic_io_req *io_req;
2134        spinlock_t *io_lock;
2135        unsigned long flags;
2136        int ret = 0;
2137        struct scsi_cmnd *sc;
2138        struct scsi_lun fc_lun;
2139        struct scsi_device *lun_dev = lr_sc->device;
2140        DECLARE_COMPLETION_ONSTACK(tm_done);
2141        enum fnic_ioreq_state old_ioreq_state;
2142
2143        for (tag = 0; tag < fnic->fnic_max_tag_id; tag++) {
2144                io_lock = fnic_io_lock_tag(fnic, tag);
2145                spin_lock_irqsave(io_lock, flags);
2146                sc = scsi_host_find_tag(fnic->lport->host, tag);
2147                /*
2148                 * Skip the lun reset cmd if it was issued using a new SC,
2149                 * and skip cmds that do not belong to this lun.
2150                 */
2151                if (!sc || ((sc == lr_sc) && new_sc) || sc->device != lun_dev) {
2152                        spin_unlock_irqrestore(io_lock, flags);
2153                        continue;
2154                }
2155
2156                io_req = (struct fnic_io_req *)CMD_SP(sc);
2157
2158                if (!io_req || sc->device != lun_dev) {
2159                        spin_unlock_irqrestore(io_lock, flags);
2160                        continue;
2161                }
2162
2163                /*
2164                 * Found IO that is still pending with firmware and
2165                 * belongs to the LUN that we are resetting
2166                 */
2167                FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
2168                              "Found IO in %s on lun\n",
2169                              fnic_ioreq_state_to_str(CMD_STATE(sc)));
2170
2171                if (CMD_STATE(sc) == FNIC_IOREQ_ABTS_PENDING) {
2172                        spin_unlock_irqrestore(io_lock, flags);
2173                        continue;
2174                }
2175                if ((CMD_FLAGS(sc) & FNIC_DEVICE_RESET) &&
2176                        (!(CMD_FLAGS(sc) & FNIC_DEV_RST_ISSUED))) {
2177                        FNIC_SCSI_DBG(KERN_INFO, fnic->lport->host,
2178                                "%s dev rst not pending sc 0x%p\n", __func__,
2179                                sc);
2180                        spin_unlock_irqrestore(io_lock, flags);
2181                        continue;
2182                }
2183
2184                if (io_req->abts_done)
2185                        shost_printk(KERN_ERR, fnic->lport->host,
2186                          "%s: io_req->abts_done is set state is %s\n",
2187                          __func__, fnic_ioreq_state_to_str(CMD_STATE(sc)));
2188                old_ioreq_state = CMD_STATE(sc);
2189                /*
2190                 * Any pending IO issued prior to the reset is expected to
2191                 * be in the abts-pending state; if it is not, set
2192                 * FNIC_IOREQ_ABTS_PENDING to mark the IO as abort pending.
2193                 * When the IO completes, the completion is handed over to
2194                 * and handled by this function.
2195                 */
2196                CMD_STATE(sc) = FNIC_IOREQ_ABTS_PENDING;
2197
2198                BUG_ON(io_req->abts_done);
2199
2200                abt_tag = tag;
2201                if (CMD_FLAGS(sc) & FNIC_DEVICE_RESET) {
2202                        abt_tag |= FNIC_TAG_DEV_RST;
2203                        FNIC_SCSI_DBG(KERN_INFO, fnic->lport->host,
2204                                  "%s: dev rst sc 0x%p\n", __func__, sc);
2205                }
2206
2207                CMD_ABTS_STATUS(sc) = FCPIO_INVALID_CODE;
2208                io_req->abts_done = &tm_done;
2209                spin_unlock_irqrestore(io_lock, flags);
2210
2211                /* Now queue the abort command to firmware */
2212                int_to_scsilun(sc->device->lun, &fc_lun);
2213
2214                if (fnic_queue_abort_io_req(fnic, abt_tag,
2215                                            FCPIO_ITMF_ABT_TASK_TERM,
2216                                            fc_lun.scsi_lun, io_req)) {
2217                        spin_lock_irqsave(io_lock, flags);
2218                        io_req = (struct fnic_io_req *)CMD_SP(sc);
2219                        if (io_req)
2220                                io_req->abts_done = NULL;
2221                        if (CMD_STATE(sc) == FNIC_IOREQ_ABTS_PENDING)
2222                                CMD_STATE(sc) = old_ioreq_state;
2223                        spin_unlock_irqrestore(io_lock, flags);
2224                        ret = 1;
2225                        goto clean_pending_aborts_end;
2226                } else {
2227                        spin_lock_irqsave(io_lock, flags);
2228                        if (CMD_FLAGS(sc) & FNIC_DEVICE_RESET)
2229                                CMD_FLAGS(sc) |= FNIC_DEV_RST_TERM_ISSUED;
2230                        spin_unlock_irqrestore(io_lock, flags);
2231                }
2232                CMD_FLAGS(sc) |= FNIC_IO_INTERNAL_TERM_ISSUED;
2233
2234                wait_for_completion_timeout(&tm_done,
2235                                            msecs_to_jiffies
2236                                            (fnic->config.ed_tov));
2237
2238                /* Recheck cmd state to check if it is now aborted */
2239                spin_lock_irqsave(io_lock, flags);
2240                io_req = (struct fnic_io_req *)CMD_SP(sc);
2241                if (!io_req) {
2242                        spin_unlock_irqrestore(io_lock, flags);
2243                        CMD_FLAGS(sc) |= FNIC_IO_ABT_TERM_REQ_NULL;
2244                        continue;
2245                }
2246
2247                io_req->abts_done = NULL;
2248
2249                /* if abort is still pending with fw, fail */
2250                if (CMD_ABTS_STATUS(sc) == FCPIO_INVALID_CODE) {
2251                        spin_unlock_irqrestore(io_lock, flags);
2252                        CMD_FLAGS(sc) |= FNIC_IO_ABT_TERM_DONE;
2253                        ret = 1;
2254                        goto clean_pending_aborts_end;
2255                }
2256                CMD_STATE(sc) = FNIC_IOREQ_ABTS_COMPLETE;
2257
2258                /* original sc used for lr is handled by dev reset code */
2259                if (sc != lr_sc)
2260                        CMD_SP(sc) = NULL;
2261                spin_unlock_irqrestore(io_lock, flags);
2262
2263                /* original sc used for lr is handled by dev reset code */
2264                if (sc != lr_sc) {
2265                        fnic_release_ioreq_buf(fnic, io_req, sc);
2266                        mempool_free(io_req, fnic->io_req_pool);
2267                }
2268
2269                /*
2270                 * Any IO returned during the reset needs scsi_done called
2271                 * on it to return the scsi_cmnd to the upper layer.
2272                 */
2273                if (sc->scsi_done) {
2274                        /* Set result to let upper SCSI layer retry */
2275                        sc->result = DID_RESET << 16;
2276                        sc->scsi_done(sc);
2277                }
2278        }
2279
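        /* Give firmware time to complete the terminates issued above. */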
2280        msleep(2 * fnic->config.ed_tov);
2281
2282        /* walk again to check, if IOs are still pending in fw */
2283        if (fnic_is_abts_pending(fnic, lr_sc))
2284                ret = 1;
2285
2286clean_pending_aborts_end:
2287        return ret;
2288}
2289
2290/**
2291 * fnic_scsi_host_start_tag
2292 * Allocates tagid from host's tag list
2293 **/
2294static inline int
2295fnic_scsi_host_start_tag(struct fnic *fnic, struct scsi_cmnd *sc)
2296{
2297        struct request_queue *q = sc->request->q;
2298        struct request *dummy;
2299
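        /*
         * Borrow a tag from the block layer by allocating a throwaway
         * request on this device's queue; the dummy request is stashed
         * in host_scribble so fnic_scsi_host_end_tag() can free it.
         */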
2300        dummy = blk_mq_alloc_request(q, REQ_OP_WRITE, BLK_MQ_REQ_NOWAIT);
2301        if (IS_ERR(dummy))
2302                return SCSI_NO_TAG;
2303
2304        sc->tag = sc->request->tag = dummy->tag;
2305        sc->host_scribble = (unsigned char *)dummy;
2306
2307        return dummy->tag;
2308}
2309
2310/**
2311 * fnic_scsi_host_end_tag
2312 * frees tag allocated by fnic_scsi_host_start_tag.
2313 **/
2314static inline void
2315fnic_scsi_host_end_tag(struct fnic *fnic, struct scsi_cmnd *sc)
2316{
2317        struct request *dummy = (struct request *)sc->host_scribble;
2318
2319        blk_mq_free_request(dummy);
2320}
2321
2322/*
2323 * SCSI Eh thread issues a Lun Reset when one or more commands on a LUN
2324 * fail to get aborted. It calls driver's eh_device_reset with a SCSI command
2325 * on the LUN.
2326 */
2327int fnic_device_reset(struct scsi_cmnd *sc)
2328{
2329        struct fc_lport *lp;
2330        struct fnic *fnic;
2331        struct fnic_io_req *io_req = NULL;
2332        struct fc_rport *rport;
2333        int status;
2334        int ret = FAILED;
2335        spinlock_t *io_lock;
2336        unsigned long flags;
2337        unsigned long start_time = 0;
2338        struct scsi_lun fc_lun;
2339        struct fnic_stats *fnic_stats;
2340        struct reset_stats *reset_stats;
2341        int tag = 0;
2342        DECLARE_COMPLETION_ONSTACK(tm_done);
2343        int tag_gen_flag = 0;   /* to track tags allocated by fnic driver */
2344        bool new_sc = false;
2345
2346        /* Wait for rport to unblock */
2347        fc_block_scsi_eh(sc);
2348
2349        /* Get local-port, check ready and link up */
2350        lp = shost_priv(sc->device->host);
2351
2352        fnic = lport_priv(lp);
2353        fnic_stats = &fnic->fnic_stats;
2354        reset_stats = &fnic->fnic_stats.reset_stats;
2355
2356        atomic64_inc(&reset_stats->device_resets);
2357
2358        rport = starget_to_rport(scsi_target(sc->device));
2359        FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
2360                      "Device reset called FCID 0x%x, LUN 0x%llx sc 0x%p\n",
2361                      rport->port_id, sc->device->lun, sc);
2362
2363        if (lp->state != LPORT_ST_READY || !(lp->link_up))
2364                goto fnic_device_reset_end;
2365
2366        /* Check if remote port up */
2367        if (fc_remote_port_chkready(rport)) {
2368                atomic64_inc(&fnic_stats->misc_stats.rport_not_ready);
2369                goto fnic_device_reset_end;
2370        }
2371
2372        CMD_FLAGS(sc) = FNIC_DEVICE_RESET;
2373        /* Allocate tag if not present */
2374
2375        tag = sc->request->tag;
2376        if (unlikely(tag < 0)) {
2377                /*
2378                 * Really should fix the midlayer to pass in a proper
2379                 * request for ioctls...
2380                 */
2381                tag = fnic_scsi_host_start_tag(fnic, sc);
2382                if (unlikely(tag == SCSI_NO_TAG))
2383                        goto fnic_device_reset_end;
2384                tag_gen_flag = 1;
2385                new_sc = 1;
2386        }
2387        io_lock = fnic_io_lock_hash(fnic, sc);
2388        spin_lock_irqsave(io_lock, flags);
2389        io_req = (struct fnic_io_req *)CMD_SP(sc);
2390
2391        /*
2392         * If there is a io_req attached to this command, then use it,
2393         * else allocate a new one.
2394         */
2395        if (!io_req) {
2396                io_req = mempool_alloc(fnic->io_req_pool, GFP_ATOMIC);
2397                if (!io_req) {
2398                        spin_unlock_irqrestore(io_lock, flags);
2399                        goto fnic_device_reset_end;
2400                }
2401                memset(io_req, 0, sizeof(*io_req));
2402                io_req->port_id = rport->port_id;
2403                CMD_SP(sc) = (char *)io_req;
2404        }
2405        io_req->dr_done = &tm_done;
2406        CMD_STATE(sc) = FNIC_IOREQ_CMD_PENDING;
2407        CMD_LR_STATUS(sc) = FCPIO_INVALID_CODE;
2408        spin_unlock_irqrestore(io_lock, flags);
2409
2410        FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host, "TAG %x\n", tag);
2411
2412        /*
2413         * issue the device reset, if enqueue failed, clean up the ioreq
2414         * and break assoc with scsi cmd
2415         */
2416        if (fnic_queue_dr_io_req(fnic, sc, io_req)) {
2417                spin_lock_irqsave(io_lock, flags);
2418                io_req = (struct fnic_io_req *)CMD_SP(sc);
2419                if (io_req)
2420                        io_req->dr_done = NULL;
2421                goto fnic_device_reset_clean;
2422        }
2423        spin_lock_irqsave(io_lock, flags);
2424        CMD_FLAGS(sc) |= FNIC_DEV_RST_ISSUED;
2425        spin_unlock_irqrestore(io_lock, flags);
2426
2427        /*
2428         * Wait on the local completion for LUN reset.  The io_req may be
2429         * freed while we wait since we hold no lock.
2430         */
2431        wait_for_completion_timeout(&tm_done,
2432                                    msecs_to_jiffies(FNIC_LUN_RESET_TIMEOUT));
2433
2434        spin_lock_irqsave(io_lock, flags);
2435        io_req = (struct fnic_io_req *)CMD_SP(sc);
2436        if (!io_req) {
2437                spin_unlock_irqrestore(io_lock, flags);
2438                FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
2439                                "io_req is null tag 0x%x sc 0x%p\n", tag, sc);
2440                goto fnic_device_reset_end;
2441        }
2442        io_req->dr_done = NULL;
2443
2444        status = CMD_LR_STATUS(sc);
2445
2446        /*
2447         * If lun reset not completed, bail out with failed. io_req
2448         * gets cleaned up during higher levels of EH
2449         */
2450        if (status == FCPIO_INVALID_CODE) {
2451                atomic64_inc(&reset_stats->device_reset_timeouts);
2452                FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
2453                              "Device reset timed out\n");
2454                CMD_FLAGS(sc) |= FNIC_DEV_RST_TIMED_OUT;
2455                spin_unlock_irqrestore(io_lock, flags);
2456                int_to_scsilun(sc->device->lun, &fc_lun);
2457                /*
2458                 * Issue abort and terminate on device reset request.
2459                 * If q'ing of terminate fails, retry it after a delay.
2460                 */
2461                while (1) {
2462                        spin_lock_irqsave(io_lock, flags);
2463                        if (CMD_FLAGS(sc) & FNIC_DEV_RST_TERM_ISSUED) {
2464                                spin_unlock_irqrestore(io_lock, flags);
2465                                break;
2466                        }
2467                        spin_unlock_irqrestore(io_lock, flags);
2468                        if (fnic_queue_abort_io_req(fnic,
2469                                tag | FNIC_TAG_DEV_RST,
2470                                FCPIO_ITMF_ABT_TASK_TERM,
2471                                fc_lun.scsi_lun, io_req)) {
2472                                wait_for_completion_timeout(&tm_done,
2473                                msecs_to_jiffies(FNIC_ABT_TERM_DELAY_TIMEOUT));
2474                        } else {
2475                                spin_lock_irqsave(io_lock, flags);
2476                                CMD_FLAGS(sc) |= FNIC_DEV_RST_TERM_ISSUED;
2477                                CMD_STATE(sc) = FNIC_IOREQ_ABTS_PENDING;
2478                                io_req->abts_done = &tm_done;
2479                                spin_unlock_irqrestore(io_lock, flags);
2480                                FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
2481                                "Abort and terminate issued on Device reset "
2482                                "tag 0x%x sc 0x%p\n", tag, sc);
2483                                break;
2484                        }
2485                }
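                /*
                 * Wait for the terminate to complete: if firmware has not
                 * yet posted FNIC_DEV_RST_DONE, wait one more timeout and
                 * fall through to the status check; otherwise clean up.
                 */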
2486                while (1) {
2487                        spin_lock_irqsave(io_lock, flags);
2488                        if (!(CMD_FLAGS(sc) & FNIC_DEV_RST_DONE)) {
2489                                spin_unlock_irqrestore(io_lock, flags);
2490                                wait_for_completion_timeout(&tm_done,
2491                                msecs_to_jiffies(FNIC_LUN_RESET_TIMEOUT));
2492                                break;
2493                        } else {
2494                                io_req = (struct fnic_io_req *)CMD_SP(sc);
2495                                io_req->abts_done = NULL;
2496                                goto fnic_device_reset_clean;
2497                        }
2498                }
2499        } else {
2500                spin_unlock_irqrestore(io_lock, flags);
2501        }
2502
2503        /* Completed, but not successful, clean up the io_req, return fail */
2504        if (status != FCPIO_SUCCESS) {
2505                spin_lock_irqsave(io_lock, flags);
2506                FNIC_SCSI_DBG(KERN_DEBUG,
2507                              fnic->lport->host,
2508                              "Device reset completed - failed\n");
2509                io_req = (struct fnic_io_req *)CMD_SP(sc);
2510                goto fnic_device_reset_clean;
2511        }
2512
2513        /*
2514         * Clean up any aborts on this lun that have still not
2515         * completed. If any of these fail, then LUN reset fails.
2516         * clean_pending_aborts cleans all cmds on this lun except
2517         * the lun reset cmd. If all cmds get cleaned, the lun reset
2518         * succeeds
2519         */
2520        if (fnic_clean_pending_aborts(fnic, sc, new_sc)) {
2521                spin_lock_irqsave(io_lock, flags);
2522                io_req = (struct fnic_io_req *)CMD_SP(sc);
2523                FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
2524                              "Device reset failed"
2525                              " since could not abort all IOs\n");
2526                goto fnic_device_reset_clean;
2527        }
2528
2529        /* Clean lun reset command */
2530        spin_lock_irqsave(io_lock, flags);
2531        io_req = (struct fnic_io_req *)CMD_SP(sc);
2532        if (io_req)
2533                /* Completed, and successful */
2534                ret = SUCCESS;
2535
2536fnic_device_reset_clean:
2537        if (io_req)
2538                CMD_SP(sc) = NULL;
2539
2540        spin_unlock_irqrestore(io_lock, flags);
2541
2542        if (io_req) {
2543                start_time = io_req->start_time;
2544                fnic_release_ioreq_buf(fnic, io_req, sc);
2545                mempool_free(io_req, fnic->io_req_pool);
2546        }
2547
2548fnic_device_reset_end:
2549        FNIC_TRACE(fnic_device_reset, sc->device->host->host_no,
2550                  sc->request->tag, sc,
2551                  jiffies_to_msecs(jiffies - start_time),
2552                  0, ((u64)sc->cmnd[0] << 32 |
2553                  (u64)sc->cmnd[2] << 24 | (u64)sc->cmnd[3] << 16 |
2554                  (u64)sc->cmnd[4] << 8 | sc->cmnd[5]),
2555                  (((u64)CMD_FLAGS(sc) << 32) | CMD_STATE(sc)));
2556
2557        /* free tag if it is allocated */
2558        if (unlikely(tag_gen_flag))
2559                fnic_scsi_host_end_tag(fnic, sc);
2560
2561        FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
2562                      "Returning from device reset %s\n",
2563                      (ret == SUCCESS) ?
2564                      "SUCCESS" : "FAILED");
2565
2566        if (ret == FAILED)
2567                atomic64_inc(&reset_stats->device_reset_failures);
2568
2569        return ret;
2570}
2571
2572/* Clean up all IOs, clean up libFC local port */
2573int fnic_reset(struct Scsi_Host *shost)
2574{
2575        struct fc_lport *lp;
2576        struct fnic *fnic;
2577        int ret = 0;
2578        struct reset_stats *reset_stats;
2579
2580        lp = shost_priv(shost);
2581        fnic = lport_priv(lp);
2582        reset_stats = &fnic->fnic_stats.reset_stats;
2583
2584        FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
2585                      "fnic_reset called\n");
2586
2587        atomic64_inc(&reset_stats->fnic_resets);
2588
2589        /*
2590         * Reset the local port; this cleans up libFC exchanges,
2591         * resets remote port sessions and, if the link is up, begins FLOGI
2592         */
2593        ret = fc_lport_reset(lp);
2594
2595        FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
2596                      "Returning from fnic reset %s\n",
2597                      (ret == 0) ?
2598                      "SUCCESS" : "FAILED");
2599
2600        if (ret == 0)
2601                atomic64_inc(&reset_stats->fnic_reset_completions);
2602        else
2603                atomic64_inc(&reset_stats->fnic_reset_failures);
2604
2605        return ret;
2606}
2607
2608/*
2609 * SCSI error handling calls the driver's eh_host_reset handler if all
2610 * prior error-handling levels return FAILED. If the host reset
2611 * completes successfully and the link is up, then fabric login
2612 * begins.
2613 *
2614 * Host reset is the highest level of error recovery; if it fails,
2615 * the host is taken offline by the SCSI midlayer.
2616 */
2617int fnic_host_reset(struct scsi_cmnd *sc)
2618{
2619        int ret;
2620        unsigned long wait_host_tmo;
2621        struct Scsi_Host *shost = sc->device->host;
2622        struct fc_lport *lp = shost_priv(shost);
2623        struct fnic *fnic = lport_priv(lp);
2624        unsigned long flags;
2625
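            /*
             * Serialize host resets: if one is already in progress, return
             * SUCCESS immediately rather than starting another.
             */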
2626        spin_lock_irqsave(&fnic->fnic_lock, flags);
2627        if (!fnic->internal_reset_inprogress) {
2628                fnic->internal_reset_inprogress = true;
2629        } else {
2630                spin_unlock_irqrestore(&fnic->fnic_lock, flags);
2631                FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
2632                        "host reset in progress; skipping another host reset\n");
2633                return SUCCESS;
2634        }
2635        spin_unlock_irqrestore(&fnic->fnic_lock, flags);
2636
2637        /*
2638         * If fnic_reset is successful, wait for the fabric login to
2639         * complete: scsi-ml sends a TUR to every device after a successful
2640         * host reset, so the fabric should be up before we return.
2641         */
2642        ret = (fnic_reset(shost) == 0) ? SUCCESS : FAILED;
2643        if (ret == SUCCESS) {
2644                wait_host_tmo = jiffies + FNIC_HOST_RESET_SETTLE_TIME * HZ;
2645                ret = FAILED;
2646                while (time_before(jiffies, wait_host_tmo)) {
2647                        if ((lp->state == LPORT_ST_READY) &&
2648                            (lp->link_up)) {
2649                                ret = SUCCESS;
2650                                break;
2651                        }
2652                        ssleep(1);
2653                }
2654        }
2655
2656        spin_lock_irqsave(&fnic->fnic_lock, flags);
2657        fnic->internal_reset_inprogress = false;
2658        spin_unlock_irqrestore(&fnic->fnic_lock, flags);
2659        return ret;
2660}
2661
2662/*
2663 * This function is called from libFC when the host is being removed.
2664 */
2665void fnic_scsi_abort_io(struct fc_lport *lp)
2666{
2667        int err = 0;
2668        unsigned long flags;
2669        enum fnic_state old_state;
2670        struct fnic *fnic = lport_priv(lp);
2671        DECLARE_COMPLETION_ONSTACK(remove_wait);
2672
2673        /* Issue firmware reset for fnic, wait for reset to complete */
2674retry_fw_reset:
2675        spin_lock_irqsave(&fnic->fnic_lock, flags);
2676        if (unlikely(fnic->state == FNIC_IN_FC_TRANS_ETH_MODE)) {
2677                /* fw reset is in progress, poll for its completion */
2678                spin_unlock_irqrestore(&fnic->fnic_lock, flags);
2679                msleep(100);
2680                goto retry_fw_reset;
2681        }
2682
2683        fnic->remove_wait = &remove_wait;
2684        old_state = fnic->state;
2685        fnic->state = FNIC_IN_FC_TRANS_ETH_MODE;
2686        fnic_update_mac_locked(fnic, fnic->ctlr.ctl_src_addr);
2687        spin_unlock_irqrestore(&fnic->fnic_lock, flags);
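    /*
     * Issue the firmware reset outside the lock; on failure, roll the state
     * machine back and clear fnic->remove_wait so a late firmware completion
     * cannot signal the on-stack completion.
     */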
2688
2689        err = fnic_fw_reset_handler(fnic);
2690        if (err) {
2691                spin_lock_irqsave(&fnic->fnic_lock, flags);
2692                if (fnic->state == FNIC_IN_FC_TRANS_ETH_MODE)
2693                        fnic->state = old_state;
2694                fnic->remove_wait = NULL;
2695                spin_unlock_irqrestore(&fnic->fnic_lock, flags);
2696                return;
2697        }
2698
2699        /* Wait for fw reset to complete; its completion handler signals remove_wait */
2700        wait_for_completion_timeout(&remove_wait,
2701                                    msecs_to_jiffies(FNIC_RMDEVICE_TIMEOUT));
2702
2703        spin_lock_irqsave(&fnic->fnic_lock, flags);
2704        fnic->remove_wait = NULL;
2705        FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
2706                      "fnic_scsi_abort_io %s\n",
2707                      (fnic->state == FNIC_IN_ETH_MODE) ?
2708                      "SUCCESS" : "FAILED");
2709        spin_unlock_irqrestore(&fnic->fnic_lock, flags);
2710
2711}
2712
2713/*
2714 * This function is called from libFC to clean up driver IO state on link down.
2715 */
2716void fnic_scsi_cleanup(struct fc_lport *lp)
2717{
2718        unsigned long flags;
2719        enum fnic_state old_state;
2720        struct fnic *fnic = lport_priv(lp);
2721
2722        /* issue fw reset */
2723retry_fw_reset:
2724        spin_lock_irqsave(&fnic->fnic_lock, flags);
2725        if (unlikely(fnic->state == FNIC_IN_FC_TRANS_ETH_MODE)) {
2726                /* fw reset is in progress, poll for its completion */
2727                spin_unlock_irqrestore(&fnic->fnic_lock, flags);
2728                msleep(100);
2729                goto retry_fw_reset;
2730        }
2731        old_state = fnic->state;
2732        fnic->state = FNIC_IN_FC_TRANS_ETH_MODE;
2733        fnic_update_mac_locked(fnic, fnic->ctlr.ctl_src_addr);
2734        spin_unlock_irqrestore(&fnic->fnic_lock, flags);
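    /* If issuing the firmware reset fails, restore the previous fnic state */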
2735
2736        if (fnic_fw_reset_handler(fnic)) {
2737                spin_lock_irqsave(&fnic->fnic_lock, flags);
2738                if (fnic->state == FNIC_IN_FC_TRANS_ETH_MODE)
2739                        fnic->state = old_state;
2740                spin_unlock_irqrestore(&fnic->fnic_lock, flags);
2741        }
2742
2743}
2744
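/*
 * Intentionally empty: presumably kept so the libFC template always has a
 * valid cleanup hook to call.
 */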
2745void fnic_empty_scsi_cleanup(struct fc_lport *lp)
2746{
2747}
2748
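/*
 * Exchange-manager reset, keyed on (sid, did):
 *   sid != 0            - nothing driver-specific to do; just reset libFC
 *                         exchanges
 *   sid == 0, did != 0  - reset this driver's exchanges for that remote
 *                         port, then reset libFC exchanges
 *   sid == 0, did == 0  - link down or device removal: clean up all driver
 *                         IO state, then reset libFC exchanges
 */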
2749void fnic_exch_mgr_reset(struct fc_lport *lp, u32 sid, u32 did)
2750{
2751        struct fnic *fnic = lport_priv(lp);
2752
2753        /* Non-zero sid: nothing driver-specific to do, just reset libFC exchanges */
2754        if (sid)
2755                goto call_fc_exch_mgr_reset;
2756
2757        if (did) {
2758                fnic_rport_exch_reset(fnic, did);
2759                goto call_fc_exch_mgr_reset;
2760        }
2761
2762        /*
2763         * sid = 0, did = 0
2764         * link down or device being removed
2765         */
2766        if (!fnic->in_remove)
2767                fnic_scsi_cleanup(lp);
2768        else
2769                fnic_scsi_abort_io(lp);
2770
2771        /* call libFC exch mgr reset to reset its exchanges */
2772call_fc_exch_mgr_reset:
2773        fc_exch_mgr_reset(lp, sid, did);
2774
2775}
2776
2777/*
2778 * fnic_is_abts_pending() is a helper function that walks the tag map to
2779 * check whether any IO still has an abort (ABTS) pending; it returns
2780 * 1 (true) if at least one such IO is found, otherwise 0 (false).
2781 * If @lr_sc is non-NULL, only IOs belonging to that LUN (excluding
2782 * @lr_sc itself) are checked; otherwise all IOs are checked.
2783 */
2784int fnic_is_abts_pending(struct fnic *fnic, struct scsi_cmnd *lr_sc)
2785{
2786        int tag;
2787        struct fnic_io_req *io_req;
2788        spinlock_t *io_lock;
2789        unsigned long flags;
2790        int ret = 0;
2791        struct scsi_cmnd *sc;
2792        struct scsi_device *lun_dev = NULL;
2793
2794        if (lr_sc)
2795                lun_dev = lr_sc->device;
2796
2797        /* walk again to check if IOs are still pending in fw */
2798        for (tag = 0; tag < fnic->fnic_max_tag_id; tag++) {
2799                sc = scsi_host_find_tag(fnic->lport->host, tag);
2800                /*
2801                 * Ignore the LUN reset command itself and commands that
2802                 * do not belong to this LUN
2803                 */
2804                if (!sc || (lr_sc && (sc->device != lun_dev || sc == lr_sc)))
2805                        continue;
2806
2807                io_lock = fnic_io_lock_hash(fnic, sc);
2808                spin_lock_irqsave(io_lock, flags);
2809
2810                io_req = (struct fnic_io_req *)CMD_SP(sc);
2811
2812                if (!io_req || sc->device != lun_dev) {
2813                        spin_unlock_irqrestore(io_lock, flags);
2814                        continue;
2815                }
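                    /*
                     * Note: when @lr_sc is NULL, lun_dev is NULL too, so the
                     * sc->device != lun_dev check above skips every command;
                     * callers appear to always pass the LUN reset command here.
                     */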
2816
2817                /*
2818                 * Found IO that is still pending with firmware and
2819                 * belongs to the LUN that we are resetting
2820                 */
2821                FNIC_SCSI_DBG(KERN_INFO, fnic->lport->host,
2822                              "Found IO in state %s on lun\n",
2823                              fnic_ioreq_state_to_str(CMD_STATE(sc)));
2824
2825                if (CMD_STATE(sc) == FNIC_IOREQ_ABTS_PENDING)
2826                        ret = 1;
2827                spin_unlock_irqrestore(io_lock, flags);
2828        }
2829
2830        return ret;
2831}
2832