linux/drivers/scsi/fnic/fnic_scsi.c
/*
 * Copyright 2008 Cisco Systems, Inc.  All rights reserved.
 * Copyright 2007 Nuova Systems, Inc.  All rights reserved.
 *
 * This program is free software; you may redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; version 2 of the License.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include <linux/mempool.h>
#include <linux/errno.h>
#include <linux/init.h>
#include <linux/workqueue.h>
#include <linux/pci.h>
#include <linux/scatterlist.h>
#include <linux/skbuff.h>
#include <linux/spinlock.h>
#include <linux/if_ether.h>
#include <linux/if_vlan.h>
#include <linux/delay.h>
#include <linux/gfp.h>
#include <scsi/scsi.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_tcq.h>
#include <scsi/fc/fc_els.h>
#include <scsi/fc/fc_fcoe.h>
#include <scsi/libfc.h>
#include <scsi/fc_frame.h>
#include "fnic_io.h"
#include "fnic.h"

const char *fnic_state_str[] = {
        [FNIC_IN_FC_MODE] =           "FNIC_IN_FC_MODE",
        [FNIC_IN_FC_TRANS_ETH_MODE] = "FNIC_IN_FC_TRANS_ETH_MODE",
        [FNIC_IN_ETH_MODE] =          "FNIC_IN_ETH_MODE",
        [FNIC_IN_ETH_TRANS_FC_MODE] = "FNIC_IN_ETH_TRANS_FC_MODE",
};

static const char *fnic_ioreq_state_str[] = {
        [FNIC_IOREQ_NOT_INITED] = "FNIC_IOREQ_NOT_INITED",
        [FNIC_IOREQ_CMD_PENDING] = "FNIC_IOREQ_CMD_PENDING",
        [FNIC_IOREQ_ABTS_PENDING] = "FNIC_IOREQ_ABTS_PENDING",
        [FNIC_IOREQ_ABTS_COMPLETE] = "FNIC_IOREQ_ABTS_COMPLETE",
        [FNIC_IOREQ_CMD_COMPLETE] = "FNIC_IOREQ_CMD_COMPLETE",
};

static const char *fcpio_status_str[] = {
        [FCPIO_SUCCESS] = "FCPIO_SUCCESS", /*0x0*/
        [FCPIO_INVALID_HEADER] = "FCPIO_INVALID_HEADER",
        [FCPIO_OUT_OF_RESOURCE] = "FCPIO_OUT_OF_RESOURCE",
        [FCPIO_INVALID_PARAM] = "FCPIO_INVALID_PARAM",
        [FCPIO_REQ_NOT_SUPPORTED] = "FCPIO_REQ_NOT_SUPPORTED",
        [FCPIO_IO_NOT_FOUND] = "FCPIO_IO_NOT_FOUND",
        [FCPIO_ABORTED] = "FCPIO_ABORTED", /*0x41*/
        [FCPIO_TIMEOUT] = "FCPIO_TIMEOUT",
        [FCPIO_SGL_INVALID] = "FCPIO_SGL_INVALID",
        [FCPIO_MSS_INVALID] = "FCPIO_MSS_INVALID",
        [FCPIO_DATA_CNT_MISMATCH] = "FCPIO_DATA_CNT_MISMATCH",
        [FCPIO_FW_ERR] = "FCPIO_FW_ERR",
        [FCPIO_ITMF_REJECTED] = "FCPIO_ITMF_REJECTED",
        [FCPIO_ITMF_FAILED] = "FCPIO_ITMF_FAILED",
        [FCPIO_ITMF_INCORRECT_LUN] = "FCPIO_ITMF_INCORRECT_LUN",
        [FCPIO_CMND_REJECTED] = "FCPIO_CMND_REJECTED",
        [FCPIO_NO_PATH_AVAIL] = "FCPIO_NO_PATH_AVAIL",
        [FCPIO_PATH_FAILED] = "FCPIO_PATH_FAILED",
        [FCPIO_LUNMAP_CHNG_PEND] = "FCPIO_LUNMAP_CHNG_PEND",
};

const char *fnic_state_to_str(unsigned int state)
{
        if (state >= ARRAY_SIZE(fnic_state_str) || !fnic_state_str[state])
                return "unknown";

        return fnic_state_str[state];
}

static const char *fnic_ioreq_state_to_str(unsigned int state)
{
        if (state >= ARRAY_SIZE(fnic_ioreq_state_str) ||
            !fnic_ioreq_state_str[state])
                return "unknown";

        return fnic_ioreq_state_str[state];
}

static const char *fnic_fcpio_status_to_str(unsigned int status)
{
        if (status >= ARRAY_SIZE(fcpio_status_str) || !fcpio_status_str[status])
                return "unknown";

        return fcpio_status_str[status];
}

static void fnic_cleanup_io(struct fnic *fnic, int exclude_id);

static inline spinlock_t *fnic_io_lock_hash(struct fnic *fnic,
                                            struct scsi_cmnd *sc)
{
        u32 hash = sc->request->tag & (FNIC_IO_LOCKS - 1);

        return &fnic->io_req_lock[hash];
}

static inline spinlock_t *fnic_io_lock_tag(struct fnic *fnic,
                                           int tag)
{
        return &fnic->io_req_lock[tag & (FNIC_IO_LOCKS - 1)];
}
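
/*
 * Worked example for the two helpers above (assuming FNIC_IO_LOCKS is a
 * power of two, e.g. 64, so the mask is a cheap modulo): tags 5, 69 and
 * 133 all map to lock index 5, i.e. commands whose tags differ by a
 * multiple of FNIC_IO_LOCKS share (and contend on) the same spinlock.
 */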

/*
 * Unmap the data buffer and sense buffer for an io_req,
 * also unmap and free the device-private scatter/gather list.
 */
static void fnic_release_ioreq_buf(struct fnic *fnic,
                                   struct fnic_io_req *io_req,
                                   struct scsi_cmnd *sc)
{
        if (io_req->sgl_list_pa)
                dma_unmap_single(&fnic->pdev->dev, io_req->sgl_list_pa,
                                 sizeof(io_req->sgl_list[0]) * io_req->sgl_cnt,
                                 DMA_TO_DEVICE);
        scsi_dma_unmap(sc);

        if (io_req->sgl_cnt)
                mempool_free(io_req->sgl_list_alloc,
                             fnic->io_sgl_pool[io_req->sgl_type]);
        if (io_req->sense_buf_pa)
                dma_unmap_single(&fnic->pdev->dev, io_req->sense_buf_pa,
                                 SCSI_SENSE_BUFFERSIZE, DMA_FROM_DEVICE);
}

/* Free up Copy Wq descriptors. Called with copy_wq lock held */
static int free_wq_copy_descs(struct fnic *fnic, struct vnic_wq_copy *wq)
{
        /* if no Ack received from firmware, then nothing to clean */
        if (!fnic->fw_ack_recd[0])
                return 1;

        /*
         * Update desc_available count based on number of freed descriptors
         * Account for wraparound
         */
        if (wq->to_clean_index <= fnic->fw_ack_index[0])
                wq->ring.desc_avail += (fnic->fw_ack_index[0]
                                        - wq->to_clean_index + 1);
        else
                wq->ring.desc_avail += (wq->ring.desc_count
                                        - wq->to_clean_index
                                        + fnic->fw_ack_index[0] + 1);

        /*
         * just bump clean index to ack_index+1 accounting for wraparound
         * this will essentially free up all descriptors between
         * to_clean_index and fw_ack_index, both inclusive
         */
        wq->to_clean_index =
                (fnic->fw_ack_index[0] + 1) % wq->ring.desc_count;

        /* we have processed the acks received so far */
        fnic->fw_ack_recd[0] = 0;
        return 0;
}
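
/*
 * Worked example of the wraparound accounting above (hypothetical ring
 * state): with desc_count = 8, to_clean_index = 6 and fw_ack_index = 1,
 * the else branch frees 8 - 6 + 1 + 1 = 4 descriptors (ring slots 6, 7,
 * 0 and 1), and to_clean_index advances to (1 + 1) % 8 = 2.
 */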

/*
 * __fnic_set_state_flags
 * Sets/Clears bits in fnic's state_flags
 */
void
__fnic_set_state_flags(struct fnic *fnic, unsigned long st_flags,
                        unsigned long clearbits)
{
        unsigned long flags = 0;
        unsigned long host_lock_flags = 0;

        spin_lock_irqsave(&fnic->fnic_lock, flags);
        spin_lock_irqsave(fnic->lport->host->host_lock, host_lock_flags);

        if (clearbits)
                fnic->state_flags &= ~st_flags;
        else
                fnic->state_flags |= st_flags;

        spin_unlock_irqrestore(fnic->lport->host->host_lock, host_lock_flags);
        spin_unlock_irqrestore(&fnic->fnic_lock, flags);
}
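
/*
 * Callers in this file use the fnic_set_state_flags()/
 * fnic_clear_state_flags() wrappers (expected to be defined in fnic.h)
 * rather than calling this directly, passing clearbits as 0 or 1
 * respectively. A sketch of the intended usage:
 *
 *      fnic_set_state_flags(fnic, FNIC_FLAGS_FWRESET);
 *      ... issue reset to firmware ...
 *      fnic_clear_state_flags(fnic, FNIC_FLAGS_FWRESET);
 */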

/*
 * fnic_fw_reset_handler
 * Routine to send reset msg to fw
 */
int fnic_fw_reset_handler(struct fnic *fnic)
{
        struct vnic_wq_copy *wq = &fnic->wq_copy[0];
        int ret = 0;
        unsigned long flags;

        /* indicate fwreset to io path */
        fnic_set_state_flags(fnic, FNIC_FLAGS_FWRESET);

        skb_queue_purge(&fnic->frame_queue);
        skb_queue_purge(&fnic->tx_queue);

        /* wait for io cmpl */
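        /*
         * Note: schedule_timeout() below is called without first setting
         * the task state to TASK_(UN)INTERRUPTIBLE, so it returns
         * immediately and this loop effectively busy-polls until the
         * in_flight count drops to zero.
         */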
        while (atomic_read(&fnic->in_flight))
                schedule_timeout(msecs_to_jiffies(1));

        spin_lock_irqsave(&fnic->wq_copy_lock[0], flags);

        if (vnic_wq_copy_desc_avail(wq) <= fnic->wq_copy_desc_low[0])
                free_wq_copy_descs(fnic, wq);

        if (!vnic_wq_copy_desc_avail(wq)) {
                ret = -EAGAIN;
        } else {
                fnic_queue_wq_copy_desc_fw_reset(wq, SCSI_NO_TAG);
                atomic64_inc(&fnic->fnic_stats.fw_stats.active_fw_reqs);
                if (atomic64_read(&fnic->fnic_stats.fw_stats.active_fw_reqs) >
                          atomic64_read(&fnic->fnic_stats.fw_stats.max_fw_reqs))
                        atomic64_set(&fnic->fnic_stats.fw_stats.max_fw_reqs,
                                atomic64_read(
                                  &fnic->fnic_stats.fw_stats.active_fw_reqs));
        }

        spin_unlock_irqrestore(&fnic->wq_copy_lock[0], flags);

        if (!ret) {
                atomic64_inc(&fnic->fnic_stats.reset_stats.fw_resets);
                FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
                              "Issued fw reset\n");
        } else {
                fnic_clear_state_flags(fnic, FNIC_FLAGS_FWRESET);
                FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
                              "Failed to issue fw reset\n");
        }

        return ret;
}

/*
 * fnic_flogi_reg_handler
 * Routine to send flogi register msg to fw
 */
int fnic_flogi_reg_handler(struct fnic *fnic, u32 fc_id)
{
        struct vnic_wq_copy *wq = &fnic->wq_copy[0];
        enum fcpio_flogi_reg_format_type format;
        struct fc_lport *lp = fnic->lport;
        u8 gw_mac[ETH_ALEN];
        int ret = 0;
        unsigned long flags;

        spin_lock_irqsave(&fnic->wq_copy_lock[0], flags);

        if (vnic_wq_copy_desc_avail(wq) <= fnic->wq_copy_desc_low[0])
                free_wq_copy_descs(fnic, wq);

        if (!vnic_wq_copy_desc_avail(wq)) {
                ret = -EAGAIN;
                goto flogi_reg_ioreq_end;
        }

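        /*
         * Pick the gateway MAC for the registration: in map_dest mode the
         * destination MAC is derived from the FC ID, so an all-ones
         * placeholder is registered; otherwise the gateway/FCF address
         * discovered by FIP (ctlr.dest_addr) is used.
         */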
        if (fnic->ctlr.map_dest) {
                memset(gw_mac, 0xff, ETH_ALEN);
                format = FCPIO_FLOGI_REG_DEF_DEST;
        } else {
                memcpy(gw_mac, fnic->ctlr.dest_addr, ETH_ALEN);
                format = FCPIO_FLOGI_REG_GW_DEST;
        }

        if ((fnic->config.flags & VFCF_FIP_CAPABLE) && !fnic->ctlr.map_dest) {
                fnic_queue_wq_copy_desc_fip_reg(wq, SCSI_NO_TAG,
                                                fc_id, gw_mac,
                                                fnic->data_src_addr,
                                                lp->r_a_tov, lp->e_d_tov);
                FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
                              "FLOGI FIP reg issued fcid %x src %pM dest %pM\n",
                              fc_id, fnic->data_src_addr, gw_mac);
        } else {
                fnic_queue_wq_copy_desc_flogi_reg(wq, SCSI_NO_TAG,
                                                  format, fc_id, gw_mac);
                FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
                              "FLOGI reg issued fcid %x map %d dest %pM\n",
                              fc_id, fnic->ctlr.map_dest, gw_mac);
        }

        atomic64_inc(&fnic->fnic_stats.fw_stats.active_fw_reqs);
        if (atomic64_read(&fnic->fnic_stats.fw_stats.active_fw_reqs) >
                  atomic64_read(&fnic->fnic_stats.fw_stats.max_fw_reqs))
                atomic64_set(&fnic->fnic_stats.fw_stats.max_fw_reqs,
                  atomic64_read(&fnic->fnic_stats.fw_stats.active_fw_reqs));

flogi_reg_ioreq_end:
        spin_unlock_irqrestore(&fnic->wq_copy_lock[0], flags);
        return ret;
}

/*
 * fnic_queue_wq_copy_desc
 * Routine to enqueue a wq copy desc
 */
static inline int fnic_queue_wq_copy_desc(struct fnic *fnic,
                                          struct vnic_wq_copy *wq,
                                          struct fnic_io_req *io_req,
                                          struct scsi_cmnd *sc,
                                          int sg_count)
{
        struct scatterlist *sg;
        struct fc_rport *rport = starget_to_rport(scsi_target(sc->device));
        struct fc_rport_libfc_priv *rp = rport->dd_data;
        struct host_sg_desc *desc;
        struct misc_stats *misc_stats = &fnic->fnic_stats.misc_stats;
        unsigned int i;
        unsigned long intr_flags;
        int flags;
        u8 exch_flags;
        struct scsi_lun fc_lun;

        if (sg_count) {
                /* For each SGE, create a device desc entry */
                desc = io_req->sgl_list;
                for_each_sg(scsi_sglist(sc), sg, sg_count, i) {
                        desc->addr = cpu_to_le64(sg_dma_address(sg));
                        desc->len = cpu_to_le32(sg_dma_len(sg));
                        desc->_resvd = 0;
                        desc++;
                }

                io_req->sgl_list_pa = dma_map_single(&fnic->pdev->dev,
                                io_req->sgl_list,
                                sizeof(io_req->sgl_list[0]) * sg_count,
                                DMA_TO_DEVICE);
                if (dma_mapping_error(&fnic->pdev->dev, io_req->sgl_list_pa)) {
                        printk(KERN_ERR "DMA mapping failed\n");
                        return SCSI_MLQUEUE_HOST_BUSY;
                }
        }

        io_req->sense_buf_pa = dma_map_single(&fnic->pdev->dev,
                                              sc->sense_buffer,
                                              SCSI_SENSE_BUFFERSIZE,
                                              DMA_FROM_DEVICE);
        if (dma_mapping_error(&fnic->pdev->dev, io_req->sense_buf_pa)) {
                dma_unmap_single(&fnic->pdev->dev, io_req->sgl_list_pa,
                                sizeof(io_req->sgl_list[0]) * sg_count,
                                DMA_TO_DEVICE);
                printk(KERN_ERR "DMA mapping failed\n");
                return SCSI_MLQUEUE_HOST_BUSY;
        }

        int_to_scsilun(sc->device->lun, &fc_lun);

        /* Enqueue the descriptor in the Copy WQ */
        spin_lock_irqsave(&fnic->wq_copy_lock[0], intr_flags);

        if (vnic_wq_copy_desc_avail(wq) <= fnic->wq_copy_desc_low[0])
                free_wq_copy_descs(fnic, wq);

        if (unlikely(!vnic_wq_copy_desc_avail(wq))) {
                spin_unlock_irqrestore(&fnic->wq_copy_lock[0], intr_flags);
                FNIC_SCSI_DBG(KERN_INFO, fnic->lport->host,
                          "fnic_queue_wq_copy_desc failure - no descriptors\n");
                atomic64_inc(&misc_stats->io_cpwq_alloc_failures);
                return SCSI_MLQUEUE_HOST_BUSY;
        }

        flags = 0;
        if (sc->sc_data_direction == DMA_FROM_DEVICE)
                flags = FCPIO_ICMND_RDDATA;
        else if (sc->sc_data_direction == DMA_TO_DEVICE)
                flags = FCPIO_ICMND_WRDATA;

        exch_flags = 0;
        if ((fnic->config.flags & VFCF_FCP_SEQ_LVL_ERR) &&
            (rp->flags & FC_RP_FLAGS_RETRY))
                exch_flags |= FCPIO_ICMND_SRFLAG_RETRY;

        fnic_queue_wq_copy_desc_icmnd_16(wq, sc->request->tag,
                                         0, exch_flags, io_req->sgl_cnt,
                                         SCSI_SENSE_BUFFERSIZE,
                                         io_req->sgl_list_pa,
                                         io_req->sense_buf_pa,
                                         0, /* scsi cmd ref, always 0 */
                                         FCPIO_ICMND_PTA_SIMPLE,
                                                /* scsi pri and tag */
                                         flags, /* command flags */
                                         sc->cmnd, sc->cmd_len,
                                         scsi_bufflen(sc),
                                         fc_lun.scsi_lun, io_req->port_id,
                                         rport->maxframe_size, rp->r_a_tov,
                                         rp->e_d_tov);

        atomic64_inc(&fnic->fnic_stats.fw_stats.active_fw_reqs);
        if (atomic64_read(&fnic->fnic_stats.fw_stats.active_fw_reqs) >
                  atomic64_read(&fnic->fnic_stats.fw_stats.max_fw_reqs))
                atomic64_set(&fnic->fnic_stats.fw_stats.max_fw_reqs,
                  atomic64_read(&fnic->fnic_stats.fw_stats.active_fw_reqs));

        spin_unlock_irqrestore(&fnic->wq_copy_lock[0], intr_flags);
        return 0;
}

/*
 * fnic_queuecommand
 * Routine to send a scsi cdb
 * Called with host_lock held and interrupts disabled.
 */
static int fnic_queuecommand_lck(struct scsi_cmnd *sc,
                                 void (*done)(struct scsi_cmnd *))
{
        struct fc_lport *lp = shost_priv(sc->device->host);
        struct fc_rport *rport;
        struct fnic_io_req *io_req = NULL;
        struct fnic *fnic = lport_priv(lp);
        struct fnic_stats *fnic_stats = &fnic->fnic_stats;
        struct vnic_wq_copy *wq;
        int ret;
        u64 cmd_trace;
        int sg_count = 0;
        unsigned long flags = 0;
        unsigned long ptr;
        spinlock_t *io_lock = NULL;
        int io_lock_acquired = 0;
        struct fc_rport_libfc_priv *rp;

        if (unlikely(fnic_chk_state_flags_locked(fnic, FNIC_FLAGS_IO_BLOCKED)))
                return SCSI_MLQUEUE_HOST_BUSY;

        rport = starget_to_rport(scsi_target(sc->device));
        if (!rport) {
                FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
                                "returning DID_NO_CONNECT for IO as rport is NULL\n");
                sc->result = DID_NO_CONNECT << 16;
                done(sc);
                return 0;
        }

        ret = fc_remote_port_chkready(rport);
        if (ret) {
                FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
                                "rport is not ready\n");
                atomic64_inc(&fnic_stats->misc_stats.rport_not_ready);
                sc->result = ret;
                done(sc);
                return 0;
        }

        rp = rport->dd_data;
        if (!rp || rp->rp_state == RPORT_ST_DELETE) {
                FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
                        "rport 0x%x removed, returning DID_NO_CONNECT\n",
                        rport->port_id);

                atomic64_inc(&fnic_stats->misc_stats.rport_not_ready);
                sc->result = DID_NO_CONNECT << 16;
                done(sc);
                return 0;
        }

        if (rp->rp_state != RPORT_ST_READY) {
                FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
                        "rport 0x%x in state 0x%x, returning DID_IMM_RETRY\n",
                        rport->port_id, rp->rp_state);

                sc->result = DID_IMM_RETRY << 16;
                done(sc);
                return 0;
        }

        if (lp->state != LPORT_ST_READY || !(lp->link_up))
                return SCSI_MLQUEUE_HOST_BUSY;

        atomic_inc(&fnic->in_flight);

        /*
         * Release host lock, use driver resource specific locks from here.
         * Don't re-enable interrupts in case they were disabled prior to the
         * caller disabling them.
         */
        spin_unlock(lp->host->host_lock);
        CMD_STATE(sc) = FNIC_IOREQ_NOT_INITED;
        CMD_FLAGS(sc) = FNIC_NO_FLAGS;

        /* Get a new io_req for this SCSI IO */
        io_req = mempool_alloc(fnic->io_req_pool, GFP_ATOMIC);
        if (!io_req) {
                atomic64_inc(&fnic_stats->io_stats.alloc_failures);
                ret = SCSI_MLQUEUE_HOST_BUSY;
                goto out;
        }
        memset(io_req, 0, sizeof(*io_req));

        /* Map the data buffer */
        sg_count = scsi_dma_map(sc);
        if (sg_count < 0) {
                FNIC_TRACE(fnic_queuecommand, sc->device->host->host_no,
                          sc->request->tag, sc, 0, sc->cmnd[0],
                          sg_count, CMD_STATE(sc));
                mempool_free(io_req, fnic->io_req_pool);
                goto out;
        }

        /* Determine the type of scatter/gather list we need */
        io_req->sgl_cnt = sg_count;
        io_req->sgl_type = FNIC_SGL_CACHE_DFLT;
        if (sg_count > FNIC_DFLT_SG_DESC_CNT)
                io_req->sgl_type = FNIC_SGL_CACHE_MAX;

        if (sg_count) {
                io_req->sgl_list =
                        mempool_alloc(fnic->io_sgl_pool[io_req->sgl_type],
                                      GFP_ATOMIC);
                if (!io_req->sgl_list) {
                        atomic64_inc(&fnic_stats->io_stats.alloc_failures);
                        ret = SCSI_MLQUEUE_HOST_BUSY;
                        scsi_dma_unmap(sc);
                        mempool_free(io_req, fnic->io_req_pool);
                        goto out;
                }

                /* Cache sgl list allocated address before alignment */
                io_req->sgl_list_alloc = io_req->sgl_list;
                ptr = (unsigned long) io_req->sgl_list;
                if (ptr % FNIC_SG_DESC_ALIGN) {
                        io_req->sgl_list = (struct host_sg_desc *)
                                (((unsigned long) ptr
                                  + FNIC_SG_DESC_ALIGN - 1)
                                 & ~(FNIC_SG_DESC_ALIGN - 1));
                }
        }
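
        /*
         * Example of the alignment fixup above (assuming FNIC_SG_DESC_ALIGN
         * is a power of two such as 16): an allocation returned at address
         * 0x1008 is rounded up to 0x1010; sgl_list_alloc keeps the original
         * pointer so mempool_free() can later be handed exactly what the
         * pool gave out.
         */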

        /*
         * Acquire the io lock before marking the IO initialized.
         */
        io_lock = fnic_io_lock_hash(fnic, sc);
        spin_lock_irqsave(io_lock, flags);

        /* initialize rest of io_req */
        io_lock_acquired = 1;
        io_req->port_id = rport->port_id;
        io_req->start_time = jiffies;
        CMD_STATE(sc) = FNIC_IOREQ_CMD_PENDING;
        CMD_SP(sc) = (char *)io_req;
        CMD_FLAGS(sc) |= FNIC_IO_INITIALIZED;
        sc->scsi_done = done;

        /* create copy wq desc and enqueue it */
        wq = &fnic->wq_copy[0];
        ret = fnic_queue_wq_copy_desc(fnic, wq, io_req, sc, sg_count);
        if (ret) {
                /*
                 * In case another thread cancelled the request,
                 * refetch the pointer under the lock.
                 */
                FNIC_TRACE(fnic_queuecommand, sc->device->host->host_no,
                          sc->request->tag, sc, 0, 0, 0,
                          (((u64)CMD_FLAGS(sc) << 32) | CMD_STATE(sc)));
                io_req = (struct fnic_io_req *)CMD_SP(sc);
                CMD_SP(sc) = NULL;
                CMD_STATE(sc) = FNIC_IOREQ_CMD_COMPLETE;
                spin_unlock_irqrestore(io_lock, flags);
                if (io_req) {
                        fnic_release_ioreq_buf(fnic, io_req, sc);
                        mempool_free(io_req, fnic->io_req_pool);
                }
                atomic_dec(&fnic->in_flight);
                /* acquire host lock before returning to SCSI */
                spin_lock(lp->host->host_lock);
                return ret;
        } else {
                atomic64_inc(&fnic_stats->io_stats.active_ios);
                atomic64_inc(&fnic_stats->io_stats.num_ios);
                if (atomic64_read(&fnic_stats->io_stats.active_ios) >
                          atomic64_read(&fnic_stats->io_stats.max_active_ios))
                        atomic64_set(&fnic_stats->io_stats.max_active_ios,
                             atomic64_read(&fnic_stats->io_stats.active_ios));

                /* REVISIT: Use per IO lock in the final code */
                CMD_FLAGS(sc) |= FNIC_IO_ISSUED;
        }
out:
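        /*
         * Pack selected CDB bytes into one u64 for the trace buffer:
         * opcode (cmnd[0]) in bits 63:56, cmnd[7] in 47:40, cmnd[8] in
         * 39:32, and cmnd[2..5] in the low 32 bits - a compact
         * fingerprint of the command rather than the full CDB.
         */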
        cmd_trace = ((u64)sc->cmnd[0] << 56 | (u64)sc->cmnd[7] << 40 |
                        (u64)sc->cmnd[8] << 32 | (u64)sc->cmnd[2] << 24 |
                        (u64)sc->cmnd[3] << 16 | (u64)sc->cmnd[4] << 8 |
                        sc->cmnd[5]);

        FNIC_TRACE(fnic_queuecommand, sc->device->host->host_no,
                  sc->request->tag, sc, io_req,
                  sg_count, cmd_trace,
                  (((u64)CMD_FLAGS(sc) << 32) | CMD_STATE(sc)));

        /* We hold the io lock only if we issued the IO */
        if (io_lock_acquired)
                spin_unlock_irqrestore(io_lock, flags);

        atomic_dec(&fnic->in_flight);
        /* acquire host lock before returning to SCSI */
        spin_lock(lp->host->host_lock);
        return ret;
}

DEF_SCSI_QCMD(fnic_queuecommand)

/*
 * fnic_fcpio_fw_reset_cmpl_handler
 * Routine to handle fw reset completion
 */
static int fnic_fcpio_fw_reset_cmpl_handler(struct fnic *fnic,
                                            struct fcpio_fw_req *desc)
{
        u8 type;
        u8 hdr_status;
        struct fcpio_tag tag;
        int ret = 0;
        unsigned long flags;
        struct reset_stats *reset_stats = &fnic->fnic_stats.reset_stats;

        fcpio_header_dec(&desc->hdr, &type, &hdr_status, &tag);

        atomic64_inc(&reset_stats->fw_reset_completions);

        /* Clean up all outstanding io requests */
        fnic_cleanup_io(fnic, SCSI_NO_TAG);

        atomic64_set(&fnic->fnic_stats.fw_stats.active_fw_reqs, 0);
        atomic64_set(&fnic->fnic_stats.io_stats.active_ios, 0);
        atomic64_set(&fnic->io_cmpl_skip, 0);

        spin_lock_irqsave(&fnic->fnic_lock, flags);

        /* fnic should be in FC_TRANS_ETH_MODE */
        if (fnic->state == FNIC_IN_FC_TRANS_ETH_MODE) {
                /* Check status of reset completion */
                if (!hdr_status) {
                        FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
                                      "reset cmpl success\n");
                        /* Ready to send flogi out */
                        fnic->state = FNIC_IN_ETH_MODE;
                } else {
                        FNIC_SCSI_DBG(KERN_DEBUG,
                                      fnic->lport->host,
                                      "fnic fw_reset failed: %s\n",
                                      fnic_fcpio_status_to_str(hdr_status));

                        /*
                         * Unable to change to eth mode, cannot send out flogi
                         * Change state to fc mode, so that subsequent Flogi
                         * requests from libFC will cause more attempts to
                         * reset the firmware. Free the cached flogi
                         */
                        fnic->state = FNIC_IN_FC_MODE;
                        atomic64_inc(&reset_stats->fw_reset_failures);
                        ret = -1;
                }
        } else {
                FNIC_SCSI_DBG(KERN_DEBUG,
                              fnic->lport->host,
                              "Unexpected state %s while processing"
                              " reset cmpl\n", fnic_state_to_str(fnic->state));
                atomic64_inc(&reset_stats->fw_reset_failures);
                ret = -1;
        }

        /* Thread removing device blocks till firmware reset is complete */
        if (fnic->remove_wait)
                complete(fnic->remove_wait);

        /*
         * If fnic is being removed, or fw reset failed
         * free the flogi frame. Else, send it out
         */
        if (fnic->remove_wait || ret) {
                spin_unlock_irqrestore(&fnic->fnic_lock, flags);
                skb_queue_purge(&fnic->tx_queue);
                goto reset_cmpl_handler_end;
        }

        spin_unlock_irqrestore(&fnic->fnic_lock, flags);

        fnic_flush_tx(fnic);

 reset_cmpl_handler_end:
        fnic_clear_state_flags(fnic, FNIC_FLAGS_FWRESET);

        return ret;
}

/*
 * fnic_fcpio_flogi_reg_cmpl_handler
 * Routine to handle flogi register completion
 */
static int fnic_fcpio_flogi_reg_cmpl_handler(struct fnic *fnic,
                                             struct fcpio_fw_req *desc)
{
        u8 type;
        u8 hdr_status;
        struct fcpio_tag tag;
        int ret = 0;
        unsigned long flags;

        fcpio_header_dec(&desc->hdr, &type, &hdr_status, &tag);

        /* Update fnic state based on status of flogi reg completion */
        spin_lock_irqsave(&fnic->fnic_lock, flags);

        if (fnic->state == FNIC_IN_ETH_TRANS_FC_MODE) {

                /* Check flogi registration completion status */
                if (!hdr_status) {
                        FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
                                      "flogi reg succeeded\n");
                        fnic->state = FNIC_IN_FC_MODE;
                } else {
                        FNIC_SCSI_DBG(KERN_DEBUG,
                                      fnic->lport->host,
                                      "fnic flogi reg failed: %s\n",
                                      fnic_fcpio_status_to_str(hdr_status));
                        fnic->state = FNIC_IN_ETH_MODE;
                        ret = -1;
                }
        } else {
                FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
                              "Unexpected fnic state %s while"
                              " processing flogi reg completion\n",
                              fnic_state_to_str(fnic->state));
                ret = -1;
        }

        if (!ret) {
                if (fnic->stop_rx_link_events) {
                        spin_unlock_irqrestore(&fnic->fnic_lock, flags);
                        goto reg_cmpl_handler_end;
                }
                spin_unlock_irqrestore(&fnic->fnic_lock, flags);

                fnic_flush_tx(fnic);
                queue_work(fnic_event_queue, &fnic->frame_work);
        } else {
                spin_unlock_irqrestore(&fnic->fnic_lock, flags);
        }

reg_cmpl_handler_end:
        return ret;
}

static inline int is_ack_index_in_range(struct vnic_wq_copy *wq,
                                        u16 request_out)
{
        if (wq->to_clean_index <= wq->to_use_index) {
                /* out of range, stale request_out index */
                if (request_out < wq->to_clean_index ||
                    request_out >= wq->to_use_index)
                        return 0;
        } else {
                /* out of range, stale request_out index */
                if (request_out < wq->to_clean_index &&
                    request_out >= wq->to_use_index)
                        return 0;
        }
        /* request_out index is in range */
        return 1;
}
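
/*
 * Worked example (hypothetical ring state): with to_clean_index = 2 and
 * to_use_index = 6, only request_out values 2..5 are in range; in the
 * wrapped case, to_clean_index = 6 and to_use_index = 2, the in-range
 * values are 6, 7, 0 and 1. Anything else is a stale index from an old
 * ack.
 */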

/*
 * Mark that ack received and store the Ack index. If there are multiple
 * acks received before Tx thread cleans it up, the latest value will be
 * used which is correct behavior. This state should be in the copy Wq
 * instead of in the fnic
 */
static inline void fnic_fcpio_ack_handler(struct fnic *fnic,
                                          unsigned int cq_index,
                                          struct fcpio_fw_req *desc)
{
        struct vnic_wq_copy *wq;
        u16 request_out = desc->u.ack.request_out;
        unsigned long flags;
        u64 *ox_id_tag = (u64 *)(void *)desc;

        /* mark the ack state */
        wq = &fnic->wq_copy[cq_index - fnic->raw_wq_count - fnic->rq_count];
        spin_lock_irqsave(&fnic->wq_copy_lock[0], flags);

        fnic->fnic_stats.misc_stats.last_ack_time = jiffies;
        if (is_ack_index_in_range(wq, request_out)) {
                fnic->fw_ack_index[0] = request_out;
                fnic->fw_ack_recd[0] = 1;
        } else {
                atomic64_inc(
                        &fnic->fnic_stats.misc_stats.ack_index_out_of_range);
        }

        spin_unlock_irqrestore(&fnic->wq_copy_lock[0], flags);
        FNIC_TRACE(fnic_fcpio_ack_handler,
                  fnic->lport->host->host_no, 0, 0, ox_id_tag[2], ox_id_tag[3],
                  ox_id_tag[4], ox_id_tag[5]);
}

/*
 * fnic_fcpio_icmnd_cmpl_handler
 * Routine to handle icmnd completions
 */
static void fnic_fcpio_icmnd_cmpl_handler(struct fnic *fnic,
                                         struct fcpio_fw_req *desc)
{
        u8 type;
        u8 hdr_status;
        struct fcpio_tag tag;
        u32 id;
        u64 xfer_len = 0;
        struct fcpio_icmnd_cmpl *icmnd_cmpl;
        struct fnic_io_req *io_req;
        struct scsi_cmnd *sc;
        struct fnic_stats *fnic_stats = &fnic->fnic_stats;
        unsigned long flags;
        spinlock_t *io_lock;
        u64 cmd_trace;
        unsigned long start_time;
        unsigned long io_duration_time;

        /* Decode the cmpl description to get the io_req id */
        fcpio_header_dec(&desc->hdr, &type, &hdr_status, &tag);
        fcpio_tag_id_dec(&tag, &id);
        icmnd_cmpl = &desc->u.icmnd_cmpl;

        if (id >= fnic->fnic_max_tag_id) {
                shost_printk(KERN_ERR, fnic->lport->host,
                             "Tag out of range tag %x hdr status = %s\n",
                             id, fnic_fcpio_status_to_str(hdr_status));
                return;
        }

        sc = scsi_host_find_tag(fnic->lport->host, id);
        WARN_ON_ONCE(!sc);
        if (!sc) {
                atomic64_inc(&fnic_stats->io_stats.sc_null);
                shost_printk(KERN_ERR, fnic->lport->host,
                          "icmnd_cmpl sc is null - "
                          "hdr status = %s tag = 0x%x desc = 0x%p\n",
                          fnic_fcpio_status_to_str(hdr_status), id, desc);
                FNIC_TRACE(fnic_fcpio_icmnd_cmpl_handler,
                          fnic->lport->host->host_no, id,
                          ((u64)icmnd_cmpl->_resvd0[1] << 16 |
                          (u64)icmnd_cmpl->_resvd0[0]),
                          ((u64)hdr_status << 16 |
                          (u64)icmnd_cmpl->scsi_status << 8 |
                          (u64)icmnd_cmpl->flags), desc,
                          (u64)icmnd_cmpl->residual, 0);
                return;
        }

        io_lock = fnic_io_lock_hash(fnic, sc);
        spin_lock_irqsave(io_lock, flags);
        io_req = (struct fnic_io_req *)CMD_SP(sc);
        WARN_ON_ONCE(!io_req);
        if (!io_req) {
                atomic64_inc(&fnic_stats->io_stats.ioreq_null);
                CMD_FLAGS(sc) |= FNIC_IO_REQ_NULL;
                spin_unlock_irqrestore(io_lock, flags);
                shost_printk(KERN_ERR, fnic->lport->host,
                          "icmnd_cmpl io_req is null - "
                          "hdr status = %s tag = 0x%x sc 0x%p\n",
                          fnic_fcpio_status_to_str(hdr_status), id, sc);
                return;
        }
        start_time = io_req->start_time;

        /* firmware completed the io */
        io_req->io_completed = 1;

        /*
         * if SCSI-ML has already issued abort on this command,
         * set completion of the IO. The abts path will clean it up
         */
        if (CMD_STATE(sc) == FNIC_IOREQ_ABTS_PENDING) {

                /*
                 * set the FNIC_IO_DONE so that this doesn't get
                 * flagged as 'out of order' if it was not aborted
                 */
                CMD_FLAGS(sc) |= FNIC_IO_DONE;
                CMD_FLAGS(sc) |= FNIC_IO_ABTS_PENDING;
                spin_unlock_irqrestore(io_lock, flags);
                if (hdr_status == FCPIO_ABORTED)
                        CMD_FLAGS(sc) |= FNIC_IO_ABORTED;

                FNIC_SCSI_DBG(KERN_INFO, fnic->lport->host,
                          "icmnd_cmpl abts pending "
                          "hdr status = %s tag = 0x%x sc = 0x%p "
                          "scsi_status = %x residual = %d\n",
                          fnic_fcpio_status_to_str(hdr_status),
                          id, sc,
                          icmnd_cmpl->scsi_status,
                          icmnd_cmpl->residual);
                return;
        }

        /* Mark the IO as complete */
        CMD_STATE(sc) = FNIC_IOREQ_CMD_COMPLETE;

        icmnd_cmpl = &desc->u.icmnd_cmpl;

        switch (hdr_status) {
        case FCPIO_SUCCESS:
                sc->result = (DID_OK << 16) | icmnd_cmpl->scsi_status;
                xfer_len = scsi_bufflen(sc);
                scsi_set_resid(sc, icmnd_cmpl->residual);

                if (icmnd_cmpl->flags & FCPIO_ICMND_CMPL_RESID_UNDER)
                        xfer_len -= icmnd_cmpl->residual;

                if (icmnd_cmpl->scsi_status == SAM_STAT_CHECK_CONDITION)
                        atomic64_inc(&fnic_stats->misc_stats.check_condition);

                if (icmnd_cmpl->scsi_status == SAM_STAT_TASK_SET_FULL)
                        atomic64_inc(&fnic_stats->misc_stats.queue_fulls);
                break;

        case FCPIO_TIMEOUT:          /* request was timed out */
                atomic64_inc(&fnic_stats->misc_stats.fcpio_timeout);
                sc->result = (DID_TIME_OUT << 16) | icmnd_cmpl->scsi_status;
                break;

        case FCPIO_ABORTED:          /* request was aborted */
                atomic64_inc(&fnic_stats->misc_stats.fcpio_aborted);
                sc->result = (DID_ERROR << 16) | icmnd_cmpl->scsi_status;
                break;

        case FCPIO_DATA_CNT_MISMATCH: /* recv/sent more/less data than exp. */
                atomic64_inc(&fnic_stats->misc_stats.data_count_mismatch);
                scsi_set_resid(sc, icmnd_cmpl->residual);
                sc->result = (DID_ERROR << 16) | icmnd_cmpl->scsi_status;
                break;

        case FCPIO_OUT_OF_RESOURCE:  /* out of resources to complete request */
                atomic64_inc(&fnic_stats->fw_stats.fw_out_of_resources);
                sc->result = (DID_REQUEUE << 16) | icmnd_cmpl->scsi_status;
                break;

        case FCPIO_IO_NOT_FOUND:     /* requested I/O was not found */
                atomic64_inc(&fnic_stats->io_stats.io_not_found);
                sc->result = (DID_ERROR << 16) | icmnd_cmpl->scsi_status;
                break;

        case FCPIO_SGL_INVALID:      /* request was aborted due to sgl error */
                atomic64_inc(&fnic_stats->misc_stats.sgl_invalid);
                sc->result = (DID_ERROR << 16) | icmnd_cmpl->scsi_status;
                break;

        case FCPIO_FW_ERR:           /* request was terminated due fw error */
                atomic64_inc(&fnic_stats->fw_stats.io_fw_errs);
                sc->result = (DID_ERROR << 16) | icmnd_cmpl->scsi_status;
                break;

        case FCPIO_MSS_INVALID:      /* request was aborted due to mss error */
                atomic64_inc(&fnic_stats->misc_stats.mss_invalid);
                sc->result = (DID_ERROR << 16) | icmnd_cmpl->scsi_status;
                break;

        case FCPIO_INVALID_HEADER:   /* header contains invalid data */
        case FCPIO_INVALID_PARAM:    /* some parameter in request invalid */
        case FCPIO_REQ_NOT_SUPPORTED:/* request type is not supported */
        default:
                sc->result = (DID_ERROR << 16) | icmnd_cmpl->scsi_status;
                break;
        }

        /* Break link with the SCSI command */
        CMD_SP(sc) = NULL;
        CMD_FLAGS(sc) |= FNIC_IO_DONE;

        spin_unlock_irqrestore(io_lock, flags);

        if (hdr_status != FCPIO_SUCCESS) {
                atomic64_inc(&fnic_stats->io_stats.io_failures);
                shost_printk(KERN_ERR, fnic->lport->host, "hdr status = %s\n",
                             fnic_fcpio_status_to_str(hdr_status));
        }

        fnic_release_ioreq_buf(fnic, io_req, sc);

        mempool_free(io_req, fnic->io_req_pool);

        cmd_trace = ((u64)hdr_status << 56) |
                  (u64)icmnd_cmpl->scsi_status << 48 |
                  (u64)icmnd_cmpl->flags << 40 | (u64)sc->cmnd[0] << 32 |
                  (u64)sc->cmnd[2] << 24 | (u64)sc->cmnd[3] << 16 |
                  (u64)sc->cmnd[4] << 8 | sc->cmnd[5];

        FNIC_TRACE(fnic_fcpio_icmnd_cmpl_handler,
                  sc->device->host->host_no, id, sc,
                  ((u64)icmnd_cmpl->_resvd0[1] << 56 |
                  (u64)icmnd_cmpl->_resvd0[0] << 48 |
                  jiffies_to_msecs(jiffies - start_time)),
                  desc, cmd_trace,
                  (((u64)CMD_FLAGS(sc) << 32) | CMD_STATE(sc)));

        if (sc->sc_data_direction == DMA_FROM_DEVICE) {
                fnic->lport->host_stats.fcp_input_requests++;
                fnic->fcp_input_bytes += xfer_len;
        } else if (sc->sc_data_direction == DMA_TO_DEVICE) {
                fnic->lport->host_stats.fcp_output_requests++;
                fnic->fcp_output_bytes += xfer_len;
        } else {
                fnic->lport->host_stats.fcp_control_requests++;
        }

        atomic64_dec(&fnic_stats->io_stats.active_ios);
        if (atomic64_read(&fnic->io_cmpl_skip))
                atomic64_dec(&fnic->io_cmpl_skip);
        else
                atomic64_inc(&fnic_stats->io_stats.io_completions);

        /* io_req was freed above; use the cached start_time */
        io_duration_time = jiffies_to_msecs(jiffies) -
                                jiffies_to_msecs(start_time);

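        /*
         * Bucket this IO's latency (in ms) into the cumulative histogram
         * kept in io_stats: 0-10, 10-100, 100-500, 500-5000, 5000-10000,
         * 10000-30000 and >30000 ms, tracking the maximum value seen.
         */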
        if (io_duration_time <= 10)
                atomic64_inc(&fnic_stats->io_stats.io_btw_0_to_10_msec);
        else if (io_duration_time <= 100)
                atomic64_inc(&fnic_stats->io_stats.io_btw_10_to_100_msec);
        else if (io_duration_time <= 500)
                atomic64_inc(&fnic_stats->io_stats.io_btw_100_to_500_msec);
        else if (io_duration_time <= 5000)
                atomic64_inc(&fnic_stats->io_stats.io_btw_500_to_5000_msec);
        else if (io_duration_time <= 10000)
                atomic64_inc(&fnic_stats->io_stats.io_btw_5000_to_10000_msec);
        else if (io_duration_time <= 30000)
                atomic64_inc(&fnic_stats->io_stats.io_btw_10000_to_30000_msec);
        else {
                atomic64_inc(&fnic_stats->io_stats.io_greater_than_30000_msec);

                if (io_duration_time >
                    atomic64_read(&fnic_stats->io_stats.current_max_io_time))
                        atomic64_set(&fnic_stats->io_stats.current_max_io_time,
                                     io_duration_time);
        }

        /* Call SCSI completion function to complete the IO */
        if (sc->scsi_done)
                sc->scsi_done(sc);
}

/*
 * fnic_fcpio_itmf_cmpl_handler
 * Routine to handle itmf completions
 */
static void fnic_fcpio_itmf_cmpl_handler(struct fnic *fnic,
                                        struct fcpio_fw_req *desc)
{
        u8 type;
        u8 hdr_status;
        struct fcpio_tag tag;
        u32 id;
        struct scsi_cmnd *sc;
        struct fnic_io_req *io_req;
        struct fnic_stats *fnic_stats = &fnic->fnic_stats;
        struct abort_stats *abts_stats = &fnic->fnic_stats.abts_stats;
        struct terminate_stats *term_stats = &fnic->fnic_stats.term_stats;
        struct misc_stats *misc_stats = &fnic->fnic_stats.misc_stats;
        unsigned long flags;
        spinlock_t *io_lock;
        unsigned long start_time;

        fcpio_header_dec(&desc->hdr, &type, &hdr_status, &tag);
        fcpio_tag_id_dec(&tag, &id);

        if ((id & FNIC_TAG_MASK) >= fnic->fnic_max_tag_id) {
                shost_printk(KERN_ERR, fnic->lport->host,
                             "Tag out of range tag %x hdr status = %s\n",
                             id, fnic_fcpio_status_to_str(hdr_status));
                return;
        }

        sc = scsi_host_find_tag(fnic->lport->host, id & FNIC_TAG_MASK);
        WARN_ON_ONCE(!sc);
        if (!sc) {
                atomic64_inc(&fnic_stats->io_stats.sc_null);
                shost_printk(KERN_ERR, fnic->lport->host,
                          "itmf_cmpl sc is null - hdr status = %s tag = 0x%x\n",
                          fnic_fcpio_status_to_str(hdr_status), id);
                return;
        }
        io_lock = fnic_io_lock_hash(fnic, sc);
        spin_lock_irqsave(io_lock, flags);
        io_req = (struct fnic_io_req *)CMD_SP(sc);
        WARN_ON_ONCE(!io_req);
        if (!io_req) {
                atomic64_inc(&fnic_stats->io_stats.ioreq_null);
                spin_unlock_irqrestore(io_lock, flags);
                CMD_FLAGS(sc) |= FNIC_IO_ABT_TERM_REQ_NULL;
                shost_printk(KERN_ERR, fnic->lport->host,
                          "itmf_cmpl io_req is null - "
                          "hdr status = %s tag = 0x%x sc 0x%p\n",
                          fnic_fcpio_status_to_str(hdr_status), id, sc);
                return;
        }
        start_time = io_req->start_time;

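        /*
         * The itmf tag encodes what was requested: the FNIC_TAG_ABORT
         * and/or FNIC_TAG_DEV_RST flag bits (defined in fnic.h) are OR'd
         * into the SCSI tag, and FNIC_TAG_MASK recovers the plain tag.
         * Both bits set means this is the abort/terminate completion for
         * a device reset request.
         */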
        if ((id & FNIC_TAG_ABORT) && (id & FNIC_TAG_DEV_RST)) {
                /* Abort and terminate completion of device reset req */
                /* REVISIT : Add asserts about various flags */
                FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
                              "dev reset abts cmpl recd. id %x status %s\n",
                              id, fnic_fcpio_status_to_str(hdr_status));
                CMD_STATE(sc) = FNIC_IOREQ_ABTS_COMPLETE;
                CMD_ABTS_STATUS(sc) = hdr_status;
                CMD_FLAGS(sc) |= FNIC_DEV_RST_DONE;
                if (io_req->abts_done)
                        complete(io_req->abts_done);
                spin_unlock_irqrestore(io_lock, flags);
        } else if (id & FNIC_TAG_ABORT) {
                /* Completion of abort cmd */
                switch (hdr_status) {
                case FCPIO_SUCCESS:
                        break;
                case FCPIO_TIMEOUT:
                        if (CMD_FLAGS(sc) & FNIC_IO_ABTS_ISSUED)
                                atomic64_inc(&abts_stats->abort_fw_timeouts);
                        else
                                atomic64_inc(
                                        &term_stats->terminate_fw_timeouts);
                        break;
                case FCPIO_ITMF_REJECTED:
                        FNIC_SCSI_DBG(KERN_INFO, fnic->lport->host,
                                "abort reject recd. id %d\n",
                                (int)(id & FNIC_TAG_MASK));
                        break;
                case FCPIO_IO_NOT_FOUND:
                        if (CMD_FLAGS(sc) & FNIC_IO_ABTS_ISSUED)
                                atomic64_inc(&abts_stats->abort_io_not_found);
                        else
                                atomic64_inc(
                                        &term_stats->terminate_io_not_found);
                        break;
                default:
                        if (CMD_FLAGS(sc) & FNIC_IO_ABTS_ISSUED)
                                atomic64_inc(&abts_stats->abort_failures);
                        else
                                atomic64_inc(
                                        &term_stats->terminate_failures);
                        break;
                }
                if (CMD_STATE(sc) != FNIC_IOREQ_ABTS_PENDING) {
                        /* This is a late completion. Ignore it */
                        spin_unlock_irqrestore(io_lock, flags);
                        return;
                }

                CMD_FLAGS(sc) |= FNIC_IO_ABT_TERM_DONE;
                CMD_ABTS_STATUS(sc) = hdr_status;

                /* If the status is IO not found consider it as success */
                if (hdr_status == FCPIO_IO_NOT_FOUND)
                        CMD_ABTS_STATUS(sc) = FCPIO_SUCCESS;

                if (!(CMD_FLAGS(sc) & (FNIC_IO_ABORTED | FNIC_IO_DONE)))
                        atomic64_inc(&misc_stats->no_icmnd_itmf_cmpls);

                FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
                              "abts cmpl recd. id %d status %s\n",
                              (int)(id & FNIC_TAG_MASK),
                              fnic_fcpio_status_to_str(hdr_status));

                /*
                 * If scsi_eh thread is blocked waiting for abts to complete,
                 * signal completion to it. IO will be cleaned in the thread,
                 * else clean it in this context.
                 */
                if (io_req->abts_done) {
                        complete(io_req->abts_done);
                        spin_unlock_irqrestore(io_lock, flags);
                } else {
                        FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
                                      "abts cmpl, completing IO\n");
                        CMD_SP(sc) = NULL;
                        sc->result = (DID_ERROR << 16);

                        spin_unlock_irqrestore(io_lock, flags);

                        fnic_release_ioreq_buf(fnic, io_req, sc);
                        mempool_free(io_req, fnic->io_req_pool);
                        if (sc->scsi_done) {
                                FNIC_TRACE(fnic_fcpio_itmf_cmpl_handler,
                                        sc->device->host->host_no, id,
                                        sc,
                                        jiffies_to_msecs(jiffies - start_time),
                                        desc,
                                        (((u64)hdr_status << 40) |
                                        (u64)sc->cmnd[0] << 32 |
                                        (u64)sc->cmnd[2] << 24 |
                                        (u64)sc->cmnd[3] << 16 |
                                        (u64)sc->cmnd[4] << 8 | sc->cmnd[5]),
                                        (((u64)CMD_FLAGS(sc) << 32) |
                                        CMD_STATE(sc)));
                                sc->scsi_done(sc);
                                atomic64_dec(&fnic_stats->io_stats.active_ios);
                                if (atomic64_read(&fnic->io_cmpl_skip))
                                        atomic64_dec(&fnic->io_cmpl_skip);
                                else
                                        atomic64_inc(&fnic_stats->io_stats.io_completions);
                        }
                }

        } else if (id & FNIC_TAG_DEV_RST) {
                /* Completion of device reset */
                CMD_LR_STATUS(sc) = hdr_status;
                if (CMD_STATE(sc) == FNIC_IOREQ_ABTS_PENDING) {
                        spin_unlock_irqrestore(io_lock, flags);
                        CMD_FLAGS(sc) |= FNIC_DEV_RST_ABTS_PENDING;
                        FNIC_TRACE(fnic_fcpio_itmf_cmpl_handler,
                                  sc->device->host->host_no, id, sc,
                                  jiffies_to_msecs(jiffies - start_time),
                                  desc, 0,
                                  (((u64)CMD_FLAGS(sc) << 32) | CMD_STATE(sc)));
                        FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
                                "Terminate pending "
                                "dev reset cmpl recd. id %d status %s\n",
                                (int)(id & FNIC_TAG_MASK),
                                fnic_fcpio_status_to_str(hdr_status));
                        return;
                }
                if (CMD_FLAGS(sc) & FNIC_DEV_RST_TIMED_OUT) {
                        /* Need to wait for terminate completion */
                        spin_unlock_irqrestore(io_lock, flags);
                        FNIC_TRACE(fnic_fcpio_itmf_cmpl_handler,
                                  sc->device->host->host_no, id, sc,
                                  jiffies_to_msecs(jiffies - start_time),
                                  desc, 0,
                                  (((u64)CMD_FLAGS(sc) << 32) | CMD_STATE(sc)));
                        FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
                                "dev reset cmpl recd after time out. "
                                "id %d status %s\n",
                                (int)(id & FNIC_TAG_MASK),
                                fnic_fcpio_status_to_str(hdr_status));
                        return;
                }
                CMD_STATE(sc) = FNIC_IOREQ_CMD_COMPLETE;
                CMD_FLAGS(sc) |= FNIC_DEV_RST_DONE;
                FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
                              "dev reset cmpl recd. id %d status %s\n",
                              (int)(id & FNIC_TAG_MASK),
                              fnic_fcpio_status_to_str(hdr_status));
                if (io_req->dr_done)
                        complete(io_req->dr_done);
                spin_unlock_irqrestore(io_lock, flags);

        } else {
                shost_printk(KERN_ERR, fnic->lport->host,
                             "Unexpected itmf io state %s tag %x\n",
                             fnic_ioreq_state_to_str(CMD_STATE(sc)), id);
                spin_unlock_irqrestore(io_lock, flags);
        }
}
1264
1265/*
1266 * fnic_fcpio_cmpl_handler
1267 * Routine to service the cq for wq_copy
1268 */
1269static int fnic_fcpio_cmpl_handler(struct vnic_dev *vdev,
1270                                   unsigned int cq_index,
1271                                   struct fcpio_fw_req *desc)
1272{
1273        struct fnic *fnic = vnic_dev_priv(vdev);
1274
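    /*
     * First pass only adjusts the in-flight firmware request count:
     * every completion-type descriptor retires one active fw request,
     * while ACKs (and unknown types) do not.
     */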
1275        switch (desc->hdr.type) {
1276        case FCPIO_ICMND_CMPL: /* fw completed a command */
1277        case FCPIO_ITMF_CMPL: /* fw completed itmf (abort cmd, lun reset)*/
1278        case FCPIO_FLOGI_REG_CMPL: /* fw completed flogi_reg */
1279        case FCPIO_FLOGI_FIP_REG_CMPL: /* fw completed flogi_fip_reg */
1280        case FCPIO_RESET_CMPL: /* fw completed reset */
1281                atomic64_dec(&fnic->fnic_stats.fw_stats.active_fw_reqs);
1282                break;
1283        default:
1284                break;
1285        }
1286
1287        switch (desc->hdr.type) {
1288        case FCPIO_ACK: /* fw copied copy wq desc to its queue */
1289                fnic_fcpio_ack_handler(fnic, cq_index, desc);
1290                break;
1291
1292        case FCPIO_ICMND_CMPL: /* fw completed a command */
1293                fnic_fcpio_icmnd_cmpl_handler(fnic, desc);
1294                break;
1295
1296        case FCPIO_ITMF_CMPL: /* fw completed itmf (abort cmd, lun reset)*/
1297                fnic_fcpio_itmf_cmpl_handler(fnic, desc);
1298                break;
1299
1300        case FCPIO_FLOGI_REG_CMPL: /* fw completed flogi_reg */
1301        case FCPIO_FLOGI_FIP_REG_CMPL: /* fw completed flogi_fip_reg */
1302                fnic_fcpio_flogi_reg_cmpl_handler(fnic, desc);
1303                break;
1304
1305        case FCPIO_RESET_CMPL: /* fw completed reset */
1306                fnic_fcpio_fw_reset_cmpl_handler(fnic, desc);
1307                break;
1308
1309        default:
1310                FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
1311                              "unknown firmware completion type %d\n",
1312                              desc->hdr.type);
1313                break;
1314        }
1315
1316        return 0;
1317}
1318
1319/*
1320 * fnic_wq_copy_cmpl_handler
1321 * Routine to service completions on the copy WQ completion queues
1322 */
1323int fnic_wq_copy_cmpl_handler(struct fnic *fnic, int copy_work_to_do)
1324{
1325        unsigned int wq_work_done = 0;
1326        unsigned int i, cq_index;
1327        unsigned int cur_work_done;
1328        struct misc_stats *misc_stats = &fnic->fnic_stats.misc_stats;
1329        u64 start_jiffies = 0;
1330        u64 end_jiffies = 0;
1331        u64 delta_jiffies = 0;
1332        u64 delta_ms = 0;
1333
1334        for (i = 0; i < fnic->wq_copy_count; i++) {
1335                cq_index = i + fnic->raw_wq_count + fnic->rq_count;
1336
1337                start_jiffies = jiffies;
1338                cur_work_done = vnic_cq_copy_service(&fnic->cq[cq_index],
1339                                                     fnic_fcpio_cmpl_handler,
1340                                                     copy_work_to_do);
1341                end_jiffies = jiffies;
1342
1343                wq_work_done += cur_work_done;
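                    /*
                     * Track the longest single service pass for ISR
                     * statistics: max jiffies/ms spent in
                     * vnic_cq_copy_service() and the work done in that pass.
                     */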
1344                delta_jiffies = end_jiffies - start_jiffies;
1345                if (delta_jiffies >
1346                        (u64) atomic64_read(&misc_stats->max_isr_jiffies)) {
1347                        atomic64_set(&misc_stats->max_isr_jiffies,
1348                                        delta_jiffies);
1349                        delta_ms = jiffies_to_msecs(delta_jiffies);
1350                        atomic64_set(&misc_stats->max_isr_time_ms, delta_ms);
1351                        atomic64_set(&misc_stats->corr_work_done,
1352                                        cur_work_done);
1353                }
1354        }
1355        return wq_work_done;
1356}
1357
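    /*
     * fnic_cleanup_io
     * Walk the whole tag space (skipping exclude_id) and complete every
     * outstanding IO back to the mid-layer with DID_TRANSPORT_DISRUPTED.
     * IOs with a pending device reset are woken via dr_done/abts_done
     * instead. Used when firmware reset completes without per-IO
     * completions.
     */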
1358static void fnic_cleanup_io(struct fnic *fnic, int exclude_id)
1359{
1360        int i;
1361        struct fnic_io_req *io_req;
1362        unsigned long flags = 0;
1363        struct scsi_cmnd *sc;
1364        spinlock_t *io_lock;
1365        unsigned long start_time = 0;
1366        struct fnic_stats *fnic_stats = &fnic->fnic_stats;
1367
1368        for (i = 0; i < fnic->fnic_max_tag_id; i++) {
1369                if (i == exclude_id)
1370                        continue;
1371
1372                io_lock = fnic_io_lock_tag(fnic, i);
1373                spin_lock_irqsave(io_lock, flags);
1374                sc = scsi_host_find_tag(fnic->lport->host, i);
1375                if (!sc) {
1376                        spin_unlock_irqrestore(io_lock, flags);
1377                        continue;
1378                }
1379
1380                io_req = (struct fnic_io_req *)CMD_SP(sc);
1381                if ((CMD_FLAGS(sc) & FNIC_DEVICE_RESET) &&
1382                        !(CMD_FLAGS(sc) & FNIC_DEV_RST_DONE)) {
1383                        /*
1384                         * We will be here only when FW completes reset
1385                         * without sending completions for outstanding ios.
1386                         */
1387                        CMD_FLAGS(sc) |= FNIC_DEV_RST_DONE;
1388                        if (io_req && io_req->dr_done)
1389                                complete(io_req->dr_done);
1390                        else if (io_req && io_req->abts_done)
1391                                complete(io_req->abts_done);
1392                        spin_unlock_irqrestore(io_lock, flags);
1393                        continue;
1394                } else if (CMD_FLAGS(sc) & FNIC_DEVICE_RESET) {
1395                        spin_unlock_irqrestore(io_lock, flags);
1396                        continue;
1397                }
1398                if (!io_req) {
1399                        spin_unlock_irqrestore(io_lock, flags);
1400                        goto cleanup_scsi_cmd;
1401                }
1402
1403                CMD_SP(sc) = NULL;
1404
1405                spin_unlock_irqrestore(io_lock, flags);
1406
1407                /*
1408                 * If there is a scsi_cmnd associated with this io_req, then
1409                 * free the corresponding state
1410                 */
1411                start_time = io_req->start_time;
1412                fnic_release_ioreq_buf(fnic, io_req, sc);
1413                mempool_free(io_req, fnic->io_req_pool);
1414
1415cleanup_scsi_cmd:
1416                sc->result = DID_TRANSPORT_DISRUPTED << 16;
1417                FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
1418                              "%s: tag:0x%x : sc:0x%p duration = %lu DID_TRANSPORT_DISRUPTED\n",
1419                              __func__, sc->request->tag, sc,
1420                              (jiffies - start_time));
1421
1422                if (atomic64_read(&fnic->io_cmpl_skip))
1423                        atomic64_dec(&fnic->io_cmpl_skip);
1424                else
1425                        atomic64_inc(&fnic_stats->io_stats.io_completions);
1426
1427                /* Complete the command to SCSI */
1428                if (sc->scsi_done) {
1429                        if (!(CMD_FLAGS(sc) & FNIC_IO_ISSUED))
1430                                shost_printk(KERN_ERR, fnic->lport->host,
1431                                "Calling done for IO not issued to fw: tag:0x%x sc:0x%p\n",
1432                                 sc->request->tag, sc);
1433
1434                        FNIC_TRACE(fnic_cleanup_io,
1435                                  sc->device->host->host_no, i, sc,
1436                                  jiffies_to_msecs(jiffies - start_time),
1437                                  0, ((u64)sc->cmnd[0] << 32 |
1438                                  (u64)sc->cmnd[2] << 24 |
1439                                  (u64)sc->cmnd[3] << 16 |
1440                                  (u64)sc->cmnd[4] << 8 | sc->cmnd[5]),
1441                                  (((u64)CMD_FLAGS(sc) << 32) | CMD_STATE(sc)));
1442
1443                        sc->scsi_done(sc);
1444                }
1445        }
1446}
1447
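    /*
     * fnic_wq_copy_cleanup_handler
     * Per-descriptor cleanup callback for the copy WQ: frees the io_req
     * tied to the descriptor's tag and completes the scsi_cmnd with
     * DID_NO_CONNECT.
     */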
1448void fnic_wq_copy_cleanup_handler(struct vnic_wq_copy *wq,
1449                                  struct fcpio_host_req *desc)
1450{
1451        u32 id;
1452        struct fnic *fnic = vnic_dev_priv(wq->vdev);
1453        struct fnic_io_req *io_req;
1454        struct scsi_cmnd *sc;
1455        unsigned long flags;
1456        spinlock_t *io_lock;
1457        unsigned long start_time = 0;
1458
1459        /* get the tag reference */
1460        fcpio_tag_id_dec(&desc->hdr.tag, &id);
1461        id &= FNIC_TAG_MASK;
1462
1463        if (id >= fnic->fnic_max_tag_id)
1464                return;
1465
1466        sc = scsi_host_find_tag(fnic->lport->host, id);
1467        if (!sc)
1468                return;
1469
1470        io_lock = fnic_io_lock_hash(fnic, sc);
1471        spin_lock_irqsave(io_lock, flags);
1472
1473        /* Get the IO context which this desc refers to */
1474        io_req = (struct fnic_io_req *)CMD_SP(sc);
1475
1476        /* fnic interrupts are turned off by now */
1477
1478        if (!io_req) {
1479                spin_unlock_irqrestore(io_lock, flags);
1480                goto wq_copy_cleanup_scsi_cmd;
1481        }
1482
1483        CMD_SP(sc) = NULL;
1484
1485        spin_unlock_irqrestore(io_lock, flags);
1486
1487        start_time = io_req->start_time;
1488        fnic_release_ioreq_buf(fnic, io_req, sc);
1489        mempool_free(io_req, fnic->io_req_pool);
1490
1491wq_copy_cleanup_scsi_cmd:
1492        sc->result = DID_NO_CONNECT << 16;
1493        FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host, "wq_copy_cleanup_handler:"
1494                      " DID_NO_CONNECT\n");
1495
1496        if (sc->scsi_done) {
1497                FNIC_TRACE(fnic_wq_copy_cleanup_handler,
1498                          sc->device->host->host_no, id, sc,
1499                          jiffies_to_msecs(jiffies - start_time),
1500                          0, ((u64)sc->cmnd[0] << 32 |
1501                          (u64)sc->cmnd[2] << 24 | (u64)sc->cmnd[3] << 16 |
1502                          (u64)sc->cmnd[4] << 8 | sc->cmnd[5]),
1503                          (((u64)CMD_FLAGS(sc) << 32) | CMD_STATE(sc)));
1504
1505                sc->scsi_done(sc);
1506        }
1507}
1508
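    /*
     * fnic_queue_abort_io_req
     * Queue an abort/terminate task management request on copy WQ 0.
     * Returns 0 on success, 1 if IOs are blocked or no WQ descriptor
     * is available.
     */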
1509static inline int fnic_queue_abort_io_req(struct fnic *fnic, int tag,
1510                                          u32 task_req, u8 *fc_lun,
1511                                          struct fnic_io_req *io_req)
1512{
1513        struct vnic_wq_copy *wq = &fnic->wq_copy[0];
1514        struct Scsi_Host *host = fnic->lport->host;
1515        struct misc_stats *misc_stats = &fnic->fnic_stats.misc_stats;
1516        unsigned long flags;
1517
1518        spin_lock_irqsave(host->host_lock, flags);
1519        if (unlikely(fnic_chk_state_flags_locked(fnic,
1520                                                FNIC_FLAGS_IO_BLOCKED))) {
1521                spin_unlock_irqrestore(host->host_lock, flags);
1522                return 1;
1523        } else
1524                atomic_inc(&fnic->in_flight);
1525        spin_unlock_irqrestore(host->host_lock, flags);
1526
1527        spin_lock_irqsave(&fnic->wq_copy_lock[0], flags);
1528
1529        if (vnic_wq_copy_desc_avail(wq) <= fnic->wq_copy_desc_low[0])
1530                free_wq_copy_descs(fnic, wq);
1531
1532        if (!vnic_wq_copy_desc_avail(wq)) {
1533                spin_unlock_irqrestore(&fnic->wq_copy_lock[0], flags);
1534                atomic_dec(&fnic->in_flight);
1535                FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
1536                        "fnic_queue_abort_io_req: failure: no descriptors\n");
1537                atomic64_inc(&misc_stats->abts_cpwq_alloc_failures);
1538                return 1;
1539        }
1540        fnic_queue_wq_copy_desc_itmf(wq, tag | FNIC_TAG_ABORT,
1541                                     0, task_req, tag, fc_lun, io_req->port_id,
1542                                     fnic->config.ra_tov, fnic->config.ed_tov);
1543
1544        atomic64_inc(&fnic->fnic_stats.fw_stats.active_fw_reqs);
1545        if (atomic64_read(&fnic->fnic_stats.fw_stats.active_fw_reqs) >
1546                  atomic64_read(&fnic->fnic_stats.fw_stats.max_fw_reqs))
1547                atomic64_set(&fnic->fnic_stats.fw_stats.max_fw_reqs,
1548                  atomic64_read(&fnic->fnic_stats.fw_stats.active_fw_reqs));
1549
1550        spin_unlock_irqrestore(&fnic->wq_copy_lock[0], flags);
1551        atomic_dec(&fnic->in_flight);
1552
1553        return 0;
1554}
1555
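    /*
     * fnic_rport_exch_reset
     * Walk the tag space and issue a terminate to firmware for every IO
     * still pending on the rport identified by port_id, reverting the
     * command state if the terminate cannot be queued.
     */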
1556static void fnic_rport_exch_reset(struct fnic *fnic, u32 port_id)
1557{
1558        int tag;
1559        int abt_tag;
1560        int term_cnt = 0;
1561        struct fnic_io_req *io_req;
1562        spinlock_t *io_lock;
1563        unsigned long flags;
1564        struct scsi_cmnd *sc;
1565        struct reset_stats *reset_stats = &fnic->fnic_stats.reset_stats;
1566        struct terminate_stats *term_stats = &fnic->fnic_stats.term_stats;
1567        struct scsi_lun fc_lun;
1568        enum fnic_ioreq_state old_ioreq_state;
1569
1570        FNIC_SCSI_DBG(KERN_DEBUG,
1571                      fnic->lport->host,
1572                      "fnic_rport_exch_reset called portid 0x%06x\n",
1573                      port_id);
1574
1575        if (fnic->in_remove)
1576                return;
1577
1578        for (tag = 0; tag < fnic->fnic_max_tag_id; tag++) {
1579                abt_tag = tag;
1580                io_lock = fnic_io_lock_tag(fnic, tag);
1581                spin_lock_irqsave(io_lock, flags);
1582                sc = scsi_host_find_tag(fnic->lport->host, tag);
1583                if (!sc) {
1584                        spin_unlock_irqrestore(io_lock, flags);
1585                        continue;
1586                }
1587
1588                io_req = (struct fnic_io_req *)CMD_SP(sc);
1589
1590                if (!io_req || io_req->port_id != port_id) {
1591                        spin_unlock_irqrestore(io_lock, flags);
1592                        continue;
1593                }
1594
1595                if ((CMD_FLAGS(sc) & FNIC_DEVICE_RESET) &&
1596                        (!(CMD_FLAGS(sc) & FNIC_DEV_RST_ISSUED))) {
1597                        FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
1598                        "fnic_rport_exch_reset dev rst not pending sc 0x%p\n",
1599                        sc);
1600                        spin_unlock_irqrestore(io_lock, flags);
1601                        continue;
1602                }
1603
1604                /*
1605                 * Found IO that is still pending with firmware and
1606                 * belongs to rport that went away
1607                 */
1608                if (CMD_STATE(sc) == FNIC_IOREQ_ABTS_PENDING) {
1609                        spin_unlock_irqrestore(io_lock, flags);
1610                        continue;
1611                }
1612                if (io_req->abts_done) {
1613                        shost_printk(KERN_ERR, fnic->lport->host,
1614                        "fnic_rport_exch_reset: io_req->abts_done is set "
1615                        "state is %s\n",
1616                        fnic_ioreq_state_to_str(CMD_STATE(sc)));
1617                }
1618
1619                if (!(CMD_FLAGS(sc) & FNIC_IO_ISSUED)) {
1620                        shost_printk(KERN_ERR, fnic->lport->host,
1621                                  "rport_exch_reset "
1622                                  "IO not yet issued %p tag 0x%x flags "
1623                                  "%x state %d\n",
1624                                  sc, tag, CMD_FLAGS(sc), CMD_STATE(sc));
1625                }
1626                old_ioreq_state = CMD_STATE(sc);
1627                CMD_STATE(sc) = FNIC_IOREQ_ABTS_PENDING;
1628                CMD_ABTS_STATUS(sc) = FCPIO_INVALID_CODE;
1629                if (CMD_FLAGS(sc) & FNIC_DEVICE_RESET) {
1630                        atomic64_inc(&reset_stats->device_reset_terminates);
1631                        abt_tag = (tag | FNIC_TAG_DEV_RST);
1632                        FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
1633                        "fnic_rport_exch_reset dev rst sc 0x%p\n",
1634                        sc);
1635                }
1636
1637                BUG_ON(io_req->abts_done);
1638
1639                FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
1640                              "fnic_rport_exch_reset: Issuing abts\n");
1641
1642                spin_unlock_irqrestore(io_lock, flags);
1643
1644                /* Now queue the abort command to firmware */
1645                int_to_scsilun(sc->device->lun, &fc_lun);
1646
1647                if (fnic_queue_abort_io_req(fnic, abt_tag,
1648                                            FCPIO_ITMF_ABT_TASK_TERM,
1649                                            fc_lun.scsi_lun, io_req)) {
1650                        /*
1651                         * Revert the cmd state back to old state, if
1652                         * it hasn't changed in between. This cmd will get
1653                         * aborted later by scsi_eh, or cleaned up during
1654                         * lun reset
1655                         */
1656                        spin_lock_irqsave(io_lock, flags);
1657                        if (CMD_STATE(sc) == FNIC_IOREQ_ABTS_PENDING)
1658                                CMD_STATE(sc) = old_ioreq_state;
1659                        spin_unlock_irqrestore(io_lock, flags);
1660                } else {
1661                        spin_lock_irqsave(io_lock, flags);
1662                        if (CMD_FLAGS(sc) & FNIC_DEVICE_RESET)
1663                                CMD_FLAGS(sc) |= FNIC_DEV_RST_TERM_ISSUED;
1664                        else
1665                                CMD_FLAGS(sc) |= FNIC_IO_INTERNAL_TERM_ISSUED;
1666                        spin_unlock_irqrestore(io_lock, flags);
1667                        atomic64_inc(&term_stats->terminates);
1668                        term_cnt++;
1669                }
1670        }
1671        if (term_cnt > atomic64_read(&term_stats->max_terminates))
1672                atomic64_set(&term_stats->max_terminates, term_cnt);
1673
1674}
1675
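    /*
     * fnic_terminate_rport_io
     * Registered as the FC transport's terminate_rport_io callback.
     * Mirrors fnic_rport_exch_reset(), but matches commands by rport
     * rather than by port_id.
     */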
1676void fnic_terminate_rport_io(struct fc_rport *rport)
1677{
1678        int tag;
1679        int abt_tag;
1680        int term_cnt = 0;
1681        struct fnic_io_req *io_req;
1682        spinlock_t *io_lock;
1683        unsigned long flags;
1684        struct scsi_cmnd *sc;
1685        struct scsi_lun fc_lun;
1686        struct fc_rport_libfc_priv *rdata;
1687        struct fc_lport *lport;
1688        struct fnic *fnic;
1689        struct fc_rport *cmd_rport;
1690        struct reset_stats *reset_stats;
1691        struct terminate_stats *term_stats;
1692        enum fnic_ioreq_state old_ioreq_state;
1693
1694        if (!rport) {
1695                printk(KERN_ERR "fnic_terminate_rport_io: rport is NULL\n");
1696                return;
1697        }
1698        rdata = rport->dd_data;
1699
1700        if (!rdata) {
1701                printk(KERN_ERR "fnic_terminate_rport_io: rdata is NULL\n");
1702                return;
1703        }
1704        lport = rdata->local_port;
1705
1706        if (!lport) {
1707                printk(KERN_ERR "fnic_terminate_rport_io: lport is NULL\n");
1708                return;
1709        }
1710        fnic = lport_priv(lport);
1711        FNIC_SCSI_DBG(KERN_DEBUG,
1712                      fnic->lport->host, "fnic_terminate_rport_io called"
1713                      " wwpn 0x%llx, wwnn 0x%llx, rport 0x%p, portid 0x%06x\n",
1714                      rport->port_name, rport->node_name, rport,
1715                      rport->port_id);
1716
1717        if (fnic->in_remove)
1718                return;
1719
1720        reset_stats = &fnic->fnic_stats.reset_stats;
1721        term_stats = &fnic->fnic_stats.term_stats;
1722
1723        for (tag = 0; tag < fnic->fnic_max_tag_id; tag++) {
1724                abt_tag = tag;
1725                io_lock = fnic_io_lock_tag(fnic, tag);
1726                spin_lock_irqsave(io_lock, flags);
1727                sc = scsi_host_find_tag(fnic->lport->host, tag);
1728                if (!sc) {
1729                        spin_unlock_irqrestore(io_lock, flags);
1730                        continue;
1731                }
1732
1733                cmd_rport = starget_to_rport(scsi_target(sc->device));
1734                if (rport != cmd_rport) {
1735                        spin_unlock_irqrestore(io_lock, flags);
1736                        continue;
1737                }
1738
1739                io_req = (struct fnic_io_req *)CMD_SP(sc);
1740
1741                if (!io_req) {
1742                        spin_unlock_irqrestore(io_lock, flags);
1743                        continue;
1744                }
1745
1746                if ((CMD_FLAGS(sc) & FNIC_DEVICE_RESET) &&
1747                        (!(CMD_FLAGS(sc) & FNIC_DEV_RST_ISSUED))) {
1748                        FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
1749                        "fnic_terminate_rport_io dev rst not pending sc 0x%p\n",
1750                        sc);
1751                        spin_unlock_irqrestore(io_lock, flags);
1752                        continue;
1753                }
1754                /*
1755                 * Found IO that is still pending with firmware and
1756                 * belongs to rport that went away
1757                 */
1758                if (CMD_STATE(sc) == FNIC_IOREQ_ABTS_PENDING) {
1759                        spin_unlock_irqrestore(io_lock, flags);
1760                        continue;
1761                }
1762                if (io_req->abts_done) {
1763                        shost_printk(KERN_ERR, fnic->lport->host,
1764                        "fnic_terminate_rport_io: io_req->abts_done is set "
1765                        "state is %s\n",
1766                        fnic_ioreq_state_to_str(CMD_STATE(sc)));
1767                }
1768                if (!(CMD_FLAGS(sc) & FNIC_IO_ISSUED)) {
1769                        FNIC_SCSI_DBG(KERN_INFO, fnic->lport->host,
1770                                  "fnic_terminate_rport_io "
1771                                  "IO not yet issued %p tag 0x%x flags "
1772                                  "%x state %d\n",
1773                                  sc, tag, CMD_FLAGS(sc), CMD_STATE(sc));
1774                }
1775                old_ioreq_state = CMD_STATE(sc);
1776                CMD_STATE(sc) = FNIC_IOREQ_ABTS_PENDING;
1777                CMD_ABTS_STATUS(sc) = FCPIO_INVALID_CODE;
1778                if (CMD_FLAGS(sc) & FNIC_DEVICE_RESET) {
1779                        atomic64_inc(&reset_stats->device_reset_terminates);
1780                        abt_tag = (tag | FNIC_TAG_DEV_RST);
1781                        FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
1782                        "fnic_terminate_rport_io dev rst sc 0x%p\n", sc);
1783                }
1784
1785                BUG_ON(io_req->abts_done);
1786
1787                FNIC_SCSI_DBG(KERN_DEBUG,
1788                              fnic->lport->host,
1789                              "fnic_terminate_rport_io: Issuing abts\n");
1790
1791                spin_unlock_irqrestore(io_lock, flags);
1792
1793                /* Now queue the abort command to firmware */
1794                int_to_scsilun(sc->device->lun, &fc_lun);
1795
1796                if (fnic_queue_abort_io_req(fnic, abt_tag,
1797                                            FCPIO_ITMF_ABT_TASK_TERM,
1798                                            fc_lun.scsi_lun, io_req)) {
1799                        /*
1800                         * Revert the cmd state back to old state, if
1801                         * it hasn't changed in between. This cmd will get
1802                         * aborted later by scsi_eh, or cleaned up during
1803                         * lun reset
1804                         */
1805                        spin_lock_irqsave(io_lock, flags);
1806                        if (CMD_STATE(sc) == FNIC_IOREQ_ABTS_PENDING)
1807                                CMD_STATE(sc) = old_ioreq_state;
1808                        spin_unlock_irqrestore(io_lock, flags);
1809                } else {
1810                        spin_lock_irqsave(io_lock, flags);
1811                        if (CMD_FLAGS(sc) & FNIC_DEVICE_RESET)
1812                                CMD_FLAGS(sc) |= FNIC_DEV_RST_TERM_ISSUED;
1813                        else
1814                                CMD_FLAGS(sc) |= FNIC_IO_INTERNAL_TERM_ISSUED;
1815                        spin_unlock_irqrestore(io_lock, flags);
1816                        atomic64_inc(&term_stats->terminates);
1817                        term_cnt++;
1818                }
1819        }
1820        if (term_cnt > atomic64_read(&term_stats->max_terminates))
1821                atomic64_set(&term_stats->max_terminates, term_cnt);
1822
1823}
1824
1825/*
1826 * This function is exported to SCSI for sending abort cmnds.
1827 * A SCSI IO is represented by an io_req in the driver.
1828 * The io_req is linked to the SCSI cmnd, providing the link to the ULP's IO.
1829 */
1830int fnic_abort_cmd(struct scsi_cmnd *sc)
1831{
1832        struct fc_lport *lp;
1833        struct fnic *fnic;
1834        struct fnic_io_req *io_req = NULL;
1835        struct fc_rport *rport;
1836        spinlock_t *io_lock;
1837        unsigned long flags;
1838        unsigned long start_time = 0;
1839        int ret = SUCCESS;
1840        u32 task_req = 0;
1841        struct scsi_lun fc_lun;
1842        struct fnic_stats *fnic_stats;
1843        struct abort_stats *abts_stats;
1844        struct terminate_stats *term_stats;
1845        enum fnic_ioreq_state old_ioreq_state;
1846        int tag;
1847        unsigned long abt_issued_time;
1848        DECLARE_COMPLETION_ONSTACK(tm_done);
1849
1850        /* Wait for rport to unblock */
1851        fc_block_scsi_eh(sc);
1852
1853        /* Get local-port, check ready and link up */
1854        lp = shost_priv(sc->device->host);
1855
1856        fnic = lport_priv(lp);
1857        fnic_stats = &fnic->fnic_stats;
1858        abts_stats = &fnic->fnic_stats.abts_stats;
1859        term_stats = &fnic->fnic_stats.term_stats;
1860
1861        rport = starget_to_rport(scsi_target(sc->device));
1862        tag = sc->request->tag;
1863        FNIC_SCSI_DBG(KERN_DEBUG,
1864                fnic->lport->host,
1865                "Abort Cmd called FCID 0x%x, LUN 0x%llx TAG %x flags %x\n",
1866                rport->port_id, sc->device->lun, tag, CMD_FLAGS(sc));
1867
1868        CMD_FLAGS(sc) = FNIC_NO_FLAGS;
1869
1870        if (lp->state != LPORT_ST_READY || !(lp->link_up)) {
1871                ret = FAILED;
1872                goto fnic_abort_cmd_end;
1873        }
1874
1875        /*
1876         * Avoid a race between SCSI issuing the abort and the device
1877         * completing the command.
1878         *
1879         * If the command is already completed by the fw cmpl code,
1880         * we just return SUCCESS from here. This means that the abort
1881 * succeeded. In the SCSI ML, since the timeout for the command has
1882 * happened, the completion won't actually complete the command
1883 * and it will be considered as an aborted command.
1884         *
1885         * The CMD_SP will not be cleared except while holding io_req_lock.
1886         */
1887        io_lock = fnic_io_lock_hash(fnic, sc);
1888        spin_lock_irqsave(io_lock, flags);
1889        io_req = (struct fnic_io_req *)CMD_SP(sc);
1890        if (!io_req) {
1891                spin_unlock_irqrestore(io_lock, flags);
1892                goto fnic_abort_cmd_end;
1893        }
1894
1895        io_req->abts_done = &tm_done;
1896
1897        if (CMD_STATE(sc) == FNIC_IOREQ_ABTS_PENDING) {
1898                spin_unlock_irqrestore(io_lock, flags);
1899                goto wait_pending;
1900        }
1901
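            /*
             * Bucket the elapsed time from IO start to abort issue into
             * the per-interval abort statistics counters.
             */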
1902        abt_issued_time = jiffies_to_msecs(jiffies) - jiffies_to_msecs(io_req->start_time);
1903        if (abt_issued_time <= 6000)
1904                atomic64_inc(&abts_stats->abort_issued_btw_0_to_6_sec);
1905        else if (abt_issued_time > 6000 && abt_issued_time <= 20000)
1906                atomic64_inc(&abts_stats->abort_issued_btw_6_to_20_sec);
1907        else if (abt_issued_time > 20000 && abt_issued_time <= 30000)
1908                atomic64_inc(&abts_stats->abort_issued_btw_20_to_30_sec);
1909        else if (abt_issued_time > 30000 && abt_issued_time <= 40000)
1910                atomic64_inc(&abts_stats->abort_issued_btw_30_to_40_sec);
1911        else if (abt_issued_time > 40000 && abt_issued_time <= 50000)
1912                atomic64_inc(&abts_stats->abort_issued_btw_40_to_50_sec);
1913        else if (abt_issued_time > 50000 && abt_issued_time <= 60000)
1914                atomic64_inc(&abts_stats->abort_issued_btw_50_to_60_sec);
1915        else
1916                atomic64_inc(&abts_stats->abort_issued_greater_than_60_sec);
1917
1918        FNIC_SCSI_DBG(KERN_INFO, fnic->lport->host,
1919                "CDB Opcode: %02x Abort issued time: %lu msec\n", sc->cmnd[0], abt_issued_time);
1920        /*
1921         * Command is still pending, need to abort it
1922         * If the firmware completes the command after this point,
1923         * the completion won't be passed up to the mid-layer, since abort
1924         * has already started.
1925         */
1926        old_ioreq_state = CMD_STATE(sc);
1927        CMD_STATE(sc) = FNIC_IOREQ_ABTS_PENDING;
1928        CMD_ABTS_STATUS(sc) = FCPIO_INVALID_CODE;
1929
1930        spin_unlock_irqrestore(io_lock, flags);
1931
1932        /*
1933         * Check readiness of the remote port. If the path to remote
1934         * port is up, then send abts to the remote port to terminate
1935         * the IO. Else, just locally terminate the IO in the firmware
1936         */
1937        if (fc_remote_port_chkready(rport) == 0)
1938                task_req = FCPIO_ITMF_ABT_TASK;
1939        else {
1940                atomic64_inc(&fnic_stats->misc_stats.rport_not_ready);
1941                task_req = FCPIO_ITMF_ABT_TASK_TERM;
1942        }
1943
1944        /* Now queue the abort command to firmware */
1945        int_to_scsilun(sc->device->lun, &fc_lun);
1946
1947        if (fnic_queue_abort_io_req(fnic, sc->request->tag, task_req,
1948                                    fc_lun.scsi_lun, io_req)) {
1949                spin_lock_irqsave(io_lock, flags);
1950                if (CMD_STATE(sc) == FNIC_IOREQ_ABTS_PENDING)
1951                        CMD_STATE(sc) = old_ioreq_state;
1952                io_req = (struct fnic_io_req *)CMD_SP(sc);
1953                if (io_req)
1954                        io_req->abts_done = NULL;
1955                spin_unlock_irqrestore(io_lock, flags);
1956                ret = FAILED;
1957                goto fnic_abort_cmd_end;
1958        }
1959        if (task_req == FCPIO_ITMF_ABT_TASK) {
1960                CMD_FLAGS(sc) |= FNIC_IO_ABTS_ISSUED;
1961                atomic64_inc(&fnic_stats->abts_stats.aborts);
1962        } else {
1963                CMD_FLAGS(sc) |= FNIC_IO_TERM_ISSUED;
1964                atomic64_inc(&fnic_stats->term_stats.terminates);
1965        }
1966
1967        /*
1968         * We queued an abort IO, wait for its completion.
1969         * Once the firmware completes the abort command, it will
1970         * wake up this thread.
1971         */
1972 wait_pending:
1973        wait_for_completion_timeout(&tm_done,
1974                                    msecs_to_jiffies
1975                                    (2 * fnic->config.ra_tov +
1976                                     fnic->config.ed_tov));
1977
1978        /* Check the abort status */
1979        spin_lock_irqsave(io_lock, flags);
1980
1981        io_req = (struct fnic_io_req *)CMD_SP(sc);
1982        if (!io_req) {
1983                atomic64_inc(&fnic_stats->io_stats.ioreq_null);
1984                spin_unlock_irqrestore(io_lock, flags);
1985                CMD_FLAGS(sc) |= FNIC_IO_ABT_TERM_REQ_NULL;
1986                ret = FAILED;
1987                goto fnic_abort_cmd_end;
1988        }
1989        io_req->abts_done = NULL;
1990
1991        /* fw did not complete abort, timed out */
1992        if (CMD_ABTS_STATUS(sc) == FCPIO_INVALID_CODE) {
1993                spin_unlock_irqrestore(io_lock, flags);
1994                if (task_req == FCPIO_ITMF_ABT_TASK) {
1995                        atomic64_inc(&abts_stats->abort_drv_timeouts);
1996                } else {
1997                        atomic64_inc(&term_stats->terminate_drv_timeouts);
1998                }
1999                CMD_FLAGS(sc) |= FNIC_IO_ABT_TERM_TIMED_OUT;
2000                ret = FAILED;
2001                goto fnic_abort_cmd_end;
2002        }
2003
2004        /* IO out of order */
2005
2006        if (!(CMD_FLAGS(sc) & (FNIC_IO_ABORTED | FNIC_IO_DONE))) {
2007                spin_unlock_irqrestore(io_lock, flags);
2008                FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
2009                        "Issuing Host reset due to out of order IO\n");
2010
2011                ret = FAILED;
2012                goto fnic_abort_cmd_end;
2013        }
2014
2015        CMD_STATE(sc) = FNIC_IOREQ_ABTS_COMPLETE;
2016
2017        start_time = io_req->start_time;
2018        /*
2019         * firmware completed the abort, check the status,
2020         * free the io_req if successful. If abort fails,
2021         * Device reset will clean the I/O.
2022         */
2023        if (CMD_ABTS_STATUS(sc) == FCPIO_SUCCESS)
2024                CMD_SP(sc) = NULL;
2025        else {
2026                ret = FAILED;
2027                spin_unlock_irqrestore(io_lock, flags);
2028                goto fnic_abort_cmd_end;
2029        }
2030
2031        spin_unlock_irqrestore(io_lock, flags);
2032
2033        fnic_release_ioreq_buf(fnic, io_req, sc);
2034        mempool_free(io_req, fnic->io_req_pool);
2035
2036        if (sc->scsi_done) {
2037                /* Call SCSI completion function to complete the IO */
2038                sc->result = (DID_ABORT << 16);
2039                sc->scsi_done(sc);
2040                atomic64_dec(&fnic_stats->io_stats.active_ios);
2041                if (atomic64_read(&fnic->io_cmpl_skip))
2042                        atomic64_dec(&fnic->io_cmpl_skip);
2043                else
2044                        atomic64_inc(&fnic_stats->io_stats.io_completions);
2045        }
2046
2047fnic_abort_cmd_end:
2048        FNIC_TRACE(fnic_abort_cmd, sc->device->host->host_no,
2049                  sc->request->tag, sc,
2050                  jiffies_to_msecs(jiffies - start_time),
2051                  0, ((u64)sc->cmnd[0] << 32 |
2052                  (u64)sc->cmnd[2] << 24 | (u64)sc->cmnd[3] << 16 |
2053                  (u64)sc->cmnd[4] << 8 | sc->cmnd[5]),
2054                  (((u64)CMD_FLAGS(sc) << 32) | CMD_STATE(sc)));
2055
2056        FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
2057                      "Returning from abort cmd type %x %s\n", task_req,
2058                      (ret == SUCCESS) ?
2059                      "SUCCESS" : "FAILED");
2060        return ret;
2061}
2062
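    /*
     * fnic_queue_dr_io_req
     * Queue a LUN RESET task management request on copy WQ 0. Returns 0
     * on success, FAILED if IOs are blocked, or -EAGAIN if no WQ
     * descriptor is available.
     */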
2063static inline int fnic_queue_dr_io_req(struct fnic *fnic,
2064                                       struct scsi_cmnd *sc,
2065                                       struct fnic_io_req *io_req)
2066{
2067        struct vnic_wq_copy *wq = &fnic->wq_copy[0];
2068        struct Scsi_Host *host = fnic->lport->host;
2069        struct misc_stats *misc_stats = &fnic->fnic_stats.misc_stats;
2070        struct scsi_lun fc_lun;
2071        int ret = 0;
2072        unsigned long intr_flags;
2073
2074        spin_lock_irqsave(host->host_lock, intr_flags);
2075        if (unlikely(fnic_chk_state_flags_locked(fnic,
2076                                                FNIC_FLAGS_IO_BLOCKED))) {
2077                spin_unlock_irqrestore(host->host_lock, intr_flags);
2078                return FAILED;
2079        } else
2080                atomic_inc(&fnic->in_flight);
2081        spin_unlock_irqrestore(host->host_lock, intr_flags);
2082
2083        spin_lock_irqsave(&fnic->wq_copy_lock[0], intr_flags);
2084
2085        if (vnic_wq_copy_desc_avail(wq) <= fnic->wq_copy_desc_low[0])
2086                free_wq_copy_descs(fnic, wq);
2087
2088        if (!vnic_wq_copy_desc_avail(wq)) {
2089                FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
2090                          "queue_dr_io_req failure - no descriptors\n");
2091                atomic64_inc(&misc_stats->devrst_cpwq_alloc_failures);
2092                ret = -EAGAIN;
2093                goto lr_io_req_end;
2094        }
2095
2096        /* fill in the lun info */
2097        int_to_scsilun(sc->device->lun, &fc_lun);
2098
2099        fnic_queue_wq_copy_desc_itmf(wq, sc->request->tag | FNIC_TAG_DEV_RST,
2100                                     0, FCPIO_ITMF_LUN_RESET, SCSI_NO_TAG,
2101                                     fc_lun.scsi_lun, io_req->port_id,
2102                                     fnic->config.ra_tov, fnic->config.ed_tov);
2103
2104        atomic64_inc(&fnic->fnic_stats.fw_stats.active_fw_reqs);
2105        if (atomic64_read(&fnic->fnic_stats.fw_stats.active_fw_reqs) >
2106                  atomic64_read(&fnic->fnic_stats.fw_stats.max_fw_reqs))
2107                atomic64_set(&fnic->fnic_stats.fw_stats.max_fw_reqs,
2108                  atomic64_read(&fnic->fnic_stats.fw_stats.active_fw_reqs));
2109
2110lr_io_req_end:
2111        spin_unlock_irqrestore(&fnic->wq_copy_lock[0], intr_flags);
2112        atomic_dec(&fnic->in_flight);
2113
2114        return ret;
2115}
2116
2117/*
2118 * Clean up any pending aborts on the lun
2119 * For each outstanding IO on this lun, whose abort is not completed by fw,
2120 * issue a local abort. Wait for abort to complete. Return 0 if all commands
2121 * successfully aborted, 1 otherwise
2122 */
2123static int fnic_clean_pending_aborts(struct fnic *fnic,
2124                                     struct scsi_cmnd *lr_sc,
2125                                     bool new_sc)
2127{
2128        int tag, abt_tag;
2129        struct fnic_io_req *io_req;
2130        spinlock_t *io_lock;
2131        unsigned long flags;
2132        int ret = 0;
2133        struct scsi_cmnd *sc;
2134        struct scsi_lun fc_lun;
2135        struct scsi_device *lun_dev = lr_sc->device;
2136        DECLARE_COMPLETION_ONSTACK(tm_done);
2137        enum fnic_ioreq_state old_ioreq_state;
2138
2139        for (tag = 0; tag < fnic->fnic_max_tag_id; tag++) {
2140                io_lock = fnic_io_lock_tag(fnic, tag);
2141                spin_lock_irqsave(io_lock, flags);
2142                sc = scsi_host_find_tag(fnic->lport->host, tag);
2143                /*
2144                 * Skip the lun reset cmd itself when it was issued on
2145                 * a new SC, and skip cmds not belonging to this lun.
2146                 */
2147                if (!sc || ((sc == lr_sc) && new_sc) || sc->device != lun_dev) {
2148                        spin_unlock_irqrestore(io_lock, flags);
2149                        continue;
2150                }
2151
2152                io_req = (struct fnic_io_req *)CMD_SP(sc);
2153
2154                if (!io_req) {
2155                        spin_unlock_irqrestore(io_lock, flags);
2156                        continue;
2157                }
2158
2159                /*
2160                 * Found IO that is still pending with firmware and
2161                 * belongs to the LUN that we are resetting
2162                 */
2163                FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
2164                              "Found IO in %s on lun\n",
2165                              fnic_ioreq_state_to_str(CMD_STATE(sc)));
2166
2167                if (CMD_STATE(sc) == FNIC_IOREQ_ABTS_PENDING) {
2168                        spin_unlock_irqrestore(io_lock, flags);
2169                        continue;
2170                }
2171                if ((CMD_FLAGS(sc) & FNIC_DEVICE_RESET) &&
2172                        (!(CMD_FLAGS(sc) & FNIC_DEV_RST_ISSUED))) {
2173                        FNIC_SCSI_DBG(KERN_INFO, fnic->lport->host,
2174                                "%s dev rst not pending sc 0x%p\n", __func__,
2175                                sc);
2176                        spin_unlock_irqrestore(io_lock, flags);
2177                        continue;
2178                }
2179
2180                if (io_req->abts_done)
2181                        shost_printk(KERN_ERR, fnic->lport->host,
2182                          "%s: io_req->abts_done is set state is %s\n",
2183                          __func__, fnic_ioreq_state_to_str(CMD_STATE(sc)));
2184                old_ioreq_state = CMD_STATE(sc);
2185                /*
2186                 * Any pending IO issued prior to reset is expected to
2187                 * be in abts pending state; if not, set
2188                 * FNIC_IOREQ_ABTS_PENDING here to mark the IO as abort
2189                 * pending. When the IO completes, it will be handed
2190                 * over and handled in this function.
2191                 */
2192                CMD_STATE(sc) = FNIC_IOREQ_ABTS_PENDING;
2193
2194                BUG_ON(io_req->abts_done);
2195
2196                abt_tag = tag;
2197                if (CMD_FLAGS(sc) & FNIC_DEVICE_RESET) {
2198                        abt_tag |= FNIC_TAG_DEV_RST;
2199                        FNIC_SCSI_DBG(KERN_INFO, fnic->lport->host,
2200                                  "%s: dev rst sc 0x%p\n", __func__, sc);
2201                }
2202
2203                CMD_ABTS_STATUS(sc) = FCPIO_INVALID_CODE;
2204                io_req->abts_done = &tm_done;
2205                spin_unlock_irqrestore(io_lock, flags);
2206
2207                /* Now queue the abort command to firmware */
2208                int_to_scsilun(sc->device->lun, &fc_lun);
2209
2210                if (fnic_queue_abort_io_req(fnic, abt_tag,
2211                                            FCPIO_ITMF_ABT_TASK_TERM,
2212                                            fc_lun.scsi_lun, io_req)) {
2213                        spin_lock_irqsave(io_lock, flags);
2214                        io_req = (struct fnic_io_req *)CMD_SP(sc);
2215                        if (io_req)
2216                                io_req->abts_done = NULL;
2217                        if (CMD_STATE(sc) == FNIC_IOREQ_ABTS_PENDING)
2218                                CMD_STATE(sc) = old_ioreq_state;
2219                        spin_unlock_irqrestore(io_lock, flags);
2220                        ret = 1;
2221                        goto clean_pending_aborts_end;
2222                } else {
2223                        spin_lock_irqsave(io_lock, flags);
2224                        if (CMD_FLAGS(sc) & FNIC_DEVICE_RESET)
2225                                CMD_FLAGS(sc) |= FNIC_DEV_RST_TERM_ISSUED;
2226                        spin_unlock_irqrestore(io_lock, flags);
2227                }
2228                CMD_FLAGS(sc) |= FNIC_IO_INTERNAL_TERM_ISSUED;
2229
2230                wait_for_completion_timeout(&tm_done,
2231                                            msecs_to_jiffies
2232                                            (fnic->config.ed_tov));
2233
2234                /* Recheck cmd state to check if it is now aborted */
2235                spin_lock_irqsave(io_lock, flags);
2236                io_req = (struct fnic_io_req *)CMD_SP(sc);
2237                if (!io_req) {
2238                        spin_unlock_irqrestore(io_lock, flags);
2239                        CMD_FLAGS(sc) |= FNIC_IO_ABT_TERM_REQ_NULL;
2240                        continue;
2241                }
2242
2243                io_req->abts_done = NULL;
2244
2245                /* if abort is still pending with fw, fail */
2246                if (CMD_ABTS_STATUS(sc) == FCPIO_INVALID_CODE) {
2247                        spin_unlock_irqrestore(io_lock, flags);
2248                        CMD_FLAGS(sc) |= FNIC_IO_ABT_TERM_DONE;
2249                        ret = 1;
2250                        goto clean_pending_aborts_end;
2251                }
2252                CMD_STATE(sc) = FNIC_IOREQ_ABTS_COMPLETE;
2253
2254                /* original sc used for lr is handled by dev reset code */
2255                if (sc != lr_sc)
2256                        CMD_SP(sc) = NULL;
2257                spin_unlock_irqrestore(io_lock, flags);
2258
2259                /* original sc used for lr is handled by dev reset code */
2260                if (sc != lr_sc) {
2261                        fnic_release_ioreq_buf(fnic, io_req, sc);
2262                        mempool_free(io_req, fnic->io_req_pool);
2263                }
2264
2265                /*
2266                 * Any IO completed during reset needs scsi_done to be
2267                 * called to return the scsi_cmnd to the upper layer.
2268                 */
2269                if (sc->scsi_done) {
2270                        /* Set result to let upper SCSI layer retry */
2271                        sc->result = DID_RESET << 16;
2272                        sc->scsi_done(sc);
2273                }
2274        }
2275
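        /* Give terminated IOs up to 2 * ed_tov to drain before rechecking. */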
2276        schedule_timeout_uninterruptible(msecs_to_jiffies(2 * fnic->config.ed_tov));
2277
2278        /* walk again to check, if IOs are still pending in fw */
2279        if (fnic_is_abts_pending(fnic, lr_sc))
2280                ret = 1;
2281
2282clean_pending_aborts_end:
2283        return ret;
2284}
2285
2286/**
2287 * fnic_scsi_host_start_tag
2288 * Allocates a tag from the host's tag list
2289 **/
2290static inline int
2291fnic_scsi_host_start_tag(struct fnic *fnic, struct scsi_cmnd *sc)
2292{
2293        struct request_queue *q = sc->request->q;
2294        struct request *dummy;
2295
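            /*
             * Borrow a tag by allocating a throwaway blk-mq request on the
             * same queue; it is freed again in fnic_scsi_host_end_tag().
             */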
2296        dummy = blk_mq_alloc_request(q, REQ_OP_WRITE, BLK_MQ_REQ_NOWAIT);
2297        if (IS_ERR(dummy))
2298                return SCSI_NO_TAG;
2299
2300        sc->tag = sc->request->tag = dummy->tag;
2301        sc->host_scribble = (unsigned char *)dummy;
2302
2303        return dummy->tag;
2304}
2305
2306/**
2307 * fnic_scsi_host_end_tag
2308 * Frees the tag allocated by fnic_scsi_host_start_tag().
2309 **/
2310static inline void
2311fnic_scsi_host_end_tag(struct fnic *fnic, struct scsi_cmnd *sc)
2312{
2313        struct request *dummy = (struct request *)sc->host_scribble;
2314
2315        blk_mq_free_request(dummy);
2316}
2317
2318/*
2319 * SCSI Eh thread issues a Lun Reset when one or more commands on a LUN
2320 * fail to get aborted. It calls driver's eh_device_reset with a SCSI command
2321 * on the LUN.
2322 */
2323int fnic_device_reset(struct scsi_cmnd *sc)
2324{
2325        struct fc_lport *lp;
2326        struct fnic *fnic;
2327        struct fnic_io_req *io_req = NULL;
2328        struct fc_rport *rport;
2329        int status;
2330        int ret = FAILED;
2331        spinlock_t *io_lock;
2332        unsigned long flags;
2333        unsigned long start_time = 0;
2334        struct scsi_lun fc_lun;
2335        struct fnic_stats *fnic_stats;
2336        struct reset_stats *reset_stats;
2337        int tag = 0;
2338        DECLARE_COMPLETION_ONSTACK(tm_done);
2339        int tag_gen_flag = 0;   /* to track tags allocated by fnic driver */
2340        bool new_sc = false;
2341
2342        /* Wait for rport to unblock */
2343        fc_block_scsi_eh(sc);
2344
2345        /* Get local-port, check ready and link up */
2346        lp = shost_priv(sc->device->host);
2347
2348        fnic = lport_priv(lp);
2349        fnic_stats = &fnic->fnic_stats;
2350        reset_stats = &fnic->fnic_stats.reset_stats;
2351
2352        atomic64_inc(&reset_stats->device_resets);
2353
2354        rport = starget_to_rport(scsi_target(sc->device));
2355        FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
2356                      "Device reset called FCID 0x%x, LUN 0x%llx sc 0x%p\n",
2357                      rport->port_id, sc->device->lun, sc);
2358
2359        if (lp->state != LPORT_ST_READY || !(lp->link_up))
2360                goto fnic_device_reset_end;
2361
2362        /* Check if remote port up */
2363        if (fc_remote_port_chkready(rport)) {
2364                atomic64_inc(&fnic_stats->misc_stats.rport_not_ready);
2365                goto fnic_device_reset_end;
2366        }
2367
2368        CMD_FLAGS(sc) = FNIC_DEVICE_RESET;
2369        /* Allocate tag if not present */
2370
2371        tag = sc->request->tag;
2372        if (unlikely(tag < 0)) {
2373                /*
2374                 * Really should fix the midlayer to pass in a proper
2375                 * request for ioctls...
2376                 */
2377                tag = fnic_scsi_host_start_tag(fnic, sc);
2378                if (unlikely(tag == SCSI_NO_TAG))
2379                        goto fnic_device_reset_end;
2380                tag_gen_flag = 1;
2381                new_sc = true;
2382        }
2383        io_lock = fnic_io_lock_hash(fnic, sc);
2384        spin_lock_irqsave(io_lock, flags);
2385        io_req = (struct fnic_io_req *)CMD_SP(sc);
2386
2387        /*
2388         * If there is a io_req attached to this command, then use it,
2389         * else allocate a new one.
2390         */
2391        if (!io_req) {
2392                io_req = mempool_alloc(fnic->io_req_pool, GFP_ATOMIC);
2393                if (!io_req) {
2394                        spin_unlock_irqrestore(io_lock, flags);
2395                        goto fnic_device_reset_end;
2396                }
2397                memset(io_req, 0, sizeof(*io_req));
2398                io_req->port_id = rport->port_id;
2399                CMD_SP(sc) = (char *)io_req;
2400        }
2401        io_req->dr_done = &tm_done;
2402        CMD_STATE(sc) = FNIC_IOREQ_CMD_PENDING;
2403        CMD_LR_STATUS(sc) = FCPIO_INVALID_CODE;
2404        spin_unlock_irqrestore(io_lock, flags);
2405
2406        FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host, "TAG %x\n", tag);
2407
2408        /*
2409         * issue the device reset, if enqueue failed, clean up the ioreq
2410         * and break assoc with scsi cmd
2411         */
2412        if (fnic_queue_dr_io_req(fnic, sc, io_req)) {
2413                spin_lock_irqsave(io_lock, flags);
2414                io_req = (struct fnic_io_req *)CMD_SP(sc);
2415                if (io_req)
2416                        io_req->dr_done = NULL;
2417                goto fnic_device_reset_clean;
2418        }
2419        spin_lock_irqsave(io_lock, flags);
2420        CMD_FLAGS(sc) |= FNIC_DEV_RST_ISSUED;
2421        spin_unlock_irqrestore(io_lock, flags);
2422
2423        /*
2424         * Wait on the local completion for LUN reset.  The io_req may be
2425         * freed while we wait since we hold no lock.
2426         */
2427        wait_for_completion_timeout(&tm_done,
2428                                    msecs_to_jiffies(FNIC_LUN_RESET_TIMEOUT));
2429
2430        spin_lock_irqsave(io_lock, flags);
2431        io_req = (struct fnic_io_req *)CMD_SP(sc);
2432        if (!io_req) {
2433                spin_unlock_irqrestore(io_lock, flags);
2434                FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
2435                                "io_req is null tag 0x%x sc 0x%p\n", tag, sc);
2436                goto fnic_device_reset_end;
2437        }
2438        io_req->dr_done = NULL;
2439
2440        status = CMD_LR_STATUS(sc);
2441
2442        /*
2443         * If lun reset not completed, bail out with failed. io_req
2444         * gets cleaned up during higher levels of EH
2445         */
2446        if (status == FCPIO_INVALID_CODE) {
2447                atomic64_inc(&reset_stats->device_reset_timeouts);
2448                FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
2449                              "Device reset timed out\n");
2450                CMD_FLAGS(sc) |= FNIC_DEV_RST_TIMED_OUT;
2451                spin_unlock_irqrestore(io_lock, flags);
2452                int_to_scsilun(sc->device->lun, &fc_lun);
2453                /*
2454                 * Issue abort and terminate on device reset request.
2455                 * If queuing of the terminate fails, retry it after a delay.
2456                 */
2457                while (1) {
2458                        spin_lock_irqsave(io_lock, flags);
2459                        if (CMD_FLAGS(sc) & FNIC_DEV_RST_TERM_ISSUED) {
2460                                spin_unlock_irqrestore(io_lock, flags);
2461                                break;
2462                        }
2463                        spin_unlock_irqrestore(io_lock, flags);
2464                        if (fnic_queue_abort_io_req(fnic,
2465                                tag | FNIC_TAG_DEV_RST,
2466                                FCPIO_ITMF_ABT_TASK_TERM,
2467                                fc_lun.scsi_lun, io_req)) {
2468                                wait_for_completion_timeout(&tm_done,
2469                                msecs_to_jiffies(FNIC_ABT_TERM_DELAY_TIMEOUT));
2470                        } else {
2471                                spin_lock_irqsave(io_lock, flags);
2472                                CMD_FLAGS(sc) |= FNIC_DEV_RST_TERM_ISSUED;
2473                                CMD_STATE(sc) = FNIC_IOREQ_ABTS_PENDING;
2474                                io_req->abts_done = &tm_done;
2475                                spin_unlock_irqrestore(io_lock, flags);
2476                                FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
2477                                "Abort and terminate issued on Device reset "
2478                                "tag 0x%x sc 0x%p\n", tag, sc);
2479                                break;
2480                        }
2481                }
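                /*
                 * If the dev reset has not completed yet, wait once more
                 * on tm_done for the terminate to finish; otherwise clear
                 * abts_done and go straight to cleanup.
                 */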
2482                while (1) {
2483                        spin_lock_irqsave(io_lock, flags);
2484                        if (!(CMD_FLAGS(sc) & FNIC_DEV_RST_DONE)) {
2485                                spin_unlock_irqrestore(io_lock, flags);
2486                                wait_for_completion_timeout(&tm_done,
2487                                msecs_to_jiffies(FNIC_LUN_RESET_TIMEOUT));
2488                                break;
2489                        } else {
2490                                io_req = (struct fnic_io_req *)CMD_SP(sc);
2491                                io_req->abts_done = NULL;
2492                                goto fnic_device_reset_clean;
2493                        }
2494                }
2495        } else {
2496                spin_unlock_irqrestore(io_lock, flags);
2497        }
2498
2499        /* Completed, but not successful, clean up the io_req, return fail */
2500        if (status != FCPIO_SUCCESS) {
2501                spin_lock_irqsave(io_lock, flags);
2502                FNIC_SCSI_DBG(KERN_DEBUG,
2503                              fnic->lport->host,
2504                              "Device reset completed - failed\n");
2505                io_req = (struct fnic_io_req *)CMD_SP(sc);
2506                goto fnic_device_reset_clean;
2507        }
2508
2509        /*
2510         * Clean up any aborts on this lun that have still not
2511         * completed. If any of these fail, then LUN reset fails.
2512         * clean_pending_aborts cleans all cmds on this lun except
2513         * the lun reset cmd. If all cmds get cleaned, the lun reset
2514         * succeeds
2515         */
2516        if (fnic_clean_pending_aborts(fnic, sc, new_sc)) {
2517                spin_lock_irqsave(io_lock, flags);
2518                io_req = (struct fnic_io_req *)CMD_SP(sc);
2519                FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
2520                              "Device reset failed"
2521                              " since could not abort all IOs\n");
2522                goto fnic_device_reset_clean;
2523        }
2524
2525        /* Clean lun reset command */
2526        spin_lock_irqsave(io_lock, flags);
2527        io_req = (struct fnic_io_req *)CMD_SP(sc);
2528        if (io_req)
2529                /* Completed, and successful */
2530                ret = SUCCESS;
2531
2532fnic_device_reset_clean:
2533        if (io_req)
2534                CMD_SP(sc) = NULL;
2535
2536        spin_unlock_irqrestore(io_lock, flags);
2537
2538        if (io_req) {
2539                start_time = io_req->start_time;
2540                fnic_release_ioreq_buf(fnic, io_req, sc);
2541                mempool_free(io_req, fnic->io_req_pool);
2542        }
2543
2544fnic_device_reset_end:
2545        FNIC_TRACE(fnic_device_reset, sc->device->host->host_no,
2546                  sc->request->tag, sc,
2547                  jiffies_to_msecs(jiffies - start_time),
2548                  0, ((u64)sc->cmnd[0] << 32 |
2549                  (u64)sc->cmnd[2] << 24 | (u64)sc->cmnd[3] << 16 |
2550                  (u64)sc->cmnd[4] << 8 | sc->cmnd[5]),
2551                  (((u64)CMD_FLAGS(sc) << 32) | CMD_STATE(sc)));
2552
2553        /* free tag if it is allocated */
2554        if (unlikely(tag_gen_flag))
2555                fnic_scsi_host_end_tag(fnic, sc);
2556
2557        FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
2558                      "Returning from device reset %s\n",
2559                      (ret == SUCCESS) ?
2560                      "SUCCESS" : "FAILED");
2561
2562        if (ret == FAILED)
2563                atomic64_inc(&reset_stats->device_reset_failures);
2564
2565        return ret;
2566}
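
/*
 * A minimal sketch (not part of the driver) of the CDB packing used in
 * the FNIC_TRACE call above: the opcode lands in bits 39:32 and CDB
 * bytes 2..5 fill the low 32 bits (byte 1 is skipped). The helper name
 * is an assumption made for illustration only.
 */
static inline u64 fnic_trace_pack_cdb(const struct scsi_cmnd *sc)
{
        return ((u64)sc->cmnd[0] << 32) |
               ((u64)sc->cmnd[2] << 24) | ((u64)sc->cmnd[3] << 16) |
               ((u64)sc->cmnd[4] << 8) | sc->cmnd[5];
}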
2567
2568/* Clean up all IOs, clean up libFC local port */
2569int fnic_reset(struct Scsi_Host *shost)
2570{
2571        struct fc_lport *lp;
2572        struct fnic *fnic;
2573        int ret = 0;
2574        struct reset_stats *reset_stats;
2575
2576        lp = shost_priv(shost);
2577        fnic = lport_priv(lp);
2578        reset_stats = &fnic->fnic_stats.reset_stats;
2579
2580        FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
2581                      "fnic_reset called\n");
2582
2583        atomic64_inc(&reset_stats->fnic_resets);
2584
2585        /*
2586         * Reset the local port; this cleans up libFC exchanges,
2587         * resets remote port sessions and, if the link is up, begins FLOGI
2588         */
2589        ret = fc_lport_reset(lp);
2590
2591        FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
2592                      "Returning from fnic reset %s\n",
2593                      (ret == 0) ?
2594                      "SUCCESS" : "FAILED");
2595
2596        if (ret == 0)
2597                atomic64_inc(&reset_stats->fnic_reset_completions);
2598        else
2599                atomic64_inc(&reset_stats->fnic_reset_failures);
2600
2601        return ret;
2602}
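
/*
 * Sketch of the stats pattern used in fnic_reset() above (the function
 * name here is illustrative, not driver API; struct reset_stats and its
 * fields are the ones used above). atomic64_t counters can be bumped
 * without holding fnic_lock.
 */
static void example_account_reset(struct reset_stats *rs, int err)
{
        atomic64_inc(&rs->fnic_resets);
        if (err == 0)
                atomic64_inc(&rs->fnic_reset_completions);
        else
                atomic64_inc(&rs->fnic_reset_failures);
}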
2603
2604/*
2605 * SCSI error handling calls the driver's eh_host_reset if all prior
2606 * error handling levels return FAILED. If the host reset completes
2607 * successfully, and if the link is up, then fabric login begins.
2608 *
2609 * Host reset is the highest level of error recovery. If this fails,
2610 * then the host is taken offline by SCSI.
2611 *
2612 */
2613int fnic_host_reset(struct scsi_cmnd *sc)
2614{
2615        int ret;
2616        unsigned long wait_host_tmo;
2617        struct Scsi_Host *shost = sc->device->host;
2618        struct fc_lport *lp = shost_priv(shost);
2619        struct fnic *fnic = lport_priv(lp);
2620        unsigned long flags;
2621
2622        spin_lock_irqsave(&fnic->fnic_lock, flags);
2623        if (fnic->internal_reset_inprogress == 0) {
2624                fnic->internal_reset_inprogress = 1;
2625        } else {
2626                spin_unlock_irqrestore(&fnic->fnic_lock, flags);
2627                FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
2628                        "host reset in progress, skipping another host reset\n");
2629                return SUCCESS;
2630        }
2631        spin_unlock_irqrestore(&fnic->fnic_lock, flags);
2632
2633        /*
2634         * If fnic_reset is successful, wait for fabric login to complete.
2635         * scsi-ml sends a TUR to every device if the host reset is
2636         * successful, so the fabric should be up before we return to SCSI.
2637         */
2638        ret = (fnic_reset(shost) == 0) ? SUCCESS : FAILED;
2639        if (ret == SUCCESS) {
2640                wait_host_tmo = jiffies + FNIC_HOST_RESET_SETTLE_TIME * HZ;
2641                ret = FAILED;
2642                while (time_before(jiffies, wait_host_tmo)) {
2643                        if ((lp->state == LPORT_ST_READY) &&
2644                            (lp->link_up)) {
2645                                ret = SUCCESS;
2646                                break;
2647                        }
2648                        ssleep(1);
2649                }
2650        }
2651
2652        spin_lock_irqsave(&fnic->fnic_lock, flags);
2653        fnic->internal_reset_inprogress = 0;
2654        spin_unlock_irqrestore(&fnic->fnic_lock, flags);
2655        return ret;
2656}
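
/*
 * Sketch of the settle-time wait in fnic_host_reset() above, shown in
 * isolation (the function name and signature are illustrative
 * assumptions): poll the lport state once per second until fabric login
 * finishes or the settle window expires.
 */
static int example_wait_fabric_ready(struct fc_lport *lp,
                                     unsigned long settle_secs)
{
        unsigned long deadline = jiffies + settle_secs * HZ;

        while (time_before(jiffies, deadline)) {
                if (lp->state == LPORT_ST_READY && lp->link_up)
                        return SUCCESS;
                ssleep(1);
        }
        return FAILED;
}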
2657
2658/*
2659 * This function is called from libFC when the host is removed.
2660 */
2661void fnic_scsi_abort_io(struct fc_lport *lp)
2662{
2663        int err = 0;
2664        unsigned long flags;
2665        enum fnic_state old_state;
2666        struct fnic *fnic = lport_priv(lp);
2667        DECLARE_COMPLETION_ONSTACK(remove_wait);
2668
2669        /* Issue firmware reset for fnic, wait for reset to complete */
2670retry_fw_reset:
2671        spin_lock_irqsave(&fnic->fnic_lock, flags);
2672        if (unlikely(fnic->state == FNIC_IN_FC_TRANS_ETH_MODE)) {
2673                /* fw reset is in progress, poll for its completion */
2674                spin_unlock_irqrestore(&fnic->fnic_lock, flags);
2675                schedule_timeout_uninterruptible(msecs_to_jiffies(100));
2676                goto retry_fw_reset;
2677        }
2678
2679        fnic->remove_wait = &remove_wait;
2680        old_state = fnic->state;
2681        fnic->state = FNIC_IN_FC_TRANS_ETH_MODE;
2682        fnic_update_mac_locked(fnic, fnic->ctlr.ctl_src_addr);
2683        spin_unlock_irqrestore(&fnic->fnic_lock, flags);
2684
2685        err = fnic_fw_reset_handler(fnic);
2686        if (err) {
2687                spin_lock_irqsave(&fnic->fnic_lock, flags);
2688                if (fnic->state == FNIC_IN_FC_TRANS_ETH_MODE)
2689                        fnic->state = old_state;
2690                fnic->remove_wait = NULL;
2691                spin_unlock_irqrestore(&fnic->fnic_lock, flags);
2692                return;
2693        }
2694
2695        /* Wait for firmware reset to complete */
2696        wait_for_completion_timeout(&remove_wait,
2697                                    msecs_to_jiffies(FNIC_RMDEVICE_TIMEOUT));
2698
2699        spin_lock_irqsave(&fnic->fnic_lock, flags);
2700        fnic->remove_wait = NULL;
2701        FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
2702                      "fnic_scsi_abort_io %s\n",
2703                      (fnic->state == FNIC_IN_ETH_MODE) ?
2704                      "SUCCESS" : "FAILED");
2705        spin_unlock_irqrestore(&fnic->fnic_lock, flags);
2706
2707}
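
/*
 * Sketch of the remove_wait handshake above (illustrative helper; the
 * completer side is assumed to be the firmware-reset completion path,
 * which is not shown in this part of the file). Returns true if the
 * completion fired before the timeout.
 */
static bool example_wait_fw_reset(struct completion *remove_wait,
                                  unsigned int timeout_ms)
{
        return wait_for_completion_timeout(remove_wait,
                                           msecs_to_jiffies(timeout_ms)) != 0;
}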
2708
2709/*
2710 * This function is called from libFC to clean up driver IO state on link down.
2711 */
2712void fnic_scsi_cleanup(struct fc_lport *lp)
2713{
2714        unsigned long flags;
2715        enum fnic_state old_state;
2716        struct fnic *fnic = lport_priv(lp);
2717
2718        /* issue fw reset */
2719retry_fw_reset:
2720        spin_lock_irqsave(&fnic->fnic_lock, flags);
2721        if (unlikely(fnic->state == FNIC_IN_FC_TRANS_ETH_MODE)) {
2722                /* fw reset is in progress, poll for its completion */
2723                spin_unlock_irqrestore(&fnic->fnic_lock, flags);
2724                schedule_timeout_uninterruptible(msecs_to_jiffies(100));
2725                goto retry_fw_reset;
2726        }
2727        old_state = fnic->state;
2728        fnic->state = FNIC_IN_FC_TRANS_ETH_MODE;
2729        fnic_update_mac_locked(fnic, fnic->ctlr.ctl_src_addr);
2730        spin_unlock_irqrestore(&fnic->fnic_lock, flags);
2731
2732        if (fnic_fw_reset_handler(fnic)) {
2733                spin_lock_irqsave(&fnic->fnic_lock, flags);
2734                if (fnic->state == FNIC_IN_FC_TRANS_ETH_MODE)
2735                        fnic->state = old_state;
2736                spin_unlock_irqrestore(&fnic->fnic_lock, flags);
2737        }
2738
2739}
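
/*
 * Sketch of the retry_fw_reset poll pattern shared by the two functions
 * above (the helper name is illustrative): drop the lock, sleep briefly,
 * then retake it and recheck until a concurrent firmware reset finishes.
 */
static void example_wait_fw_transition(struct fnic *fnic)
{
        unsigned long flags;

        spin_lock_irqsave(&fnic->fnic_lock, flags);
        while (fnic->state == FNIC_IN_FC_TRANS_ETH_MODE) {
                spin_unlock_irqrestore(&fnic->fnic_lock, flags);
                schedule_timeout_uninterruptible(msecs_to_jiffies(100));
                spin_lock_irqsave(&fnic->fnic_lock, flags);
        }
        spin_unlock_irqrestore(&fnic->fnic_lock, flags);
}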
2740
2741void fnic_empty_scsi_cleanup(struct fc_lport *lp)
2742{
2743}
2744
2745void fnic_exch_mgr_reset(struct fc_lport *lp, u32 sid, u32 did)
2746{
2747        struct fnic *fnic = lport_priv(lp);
2748
2749        /* Non-zero sid: no fnic-specific cleanup needed */
2750        if (sid)
2751                goto call_fc_exch_mgr_reset;
2752
2753        if (did) {
2754                fnic_rport_exch_reset(fnic, did);
2755                goto call_fc_exch_mgr_reset;
2756        }
2757
2758        /*
2759         * sid = 0, did = 0
2760         * link down or device being removed
2761         */
2762        if (!fnic->in_remove)
2763                fnic_scsi_cleanup(lp);
2764        else
2765                fnic_scsi_abort_io(lp);
2766
2767        /* call libFC exch mgr reset to reset its exchanges */
2768call_fc_exch_mgr_reset:
2769        fc_exch_mgr_reset(lp, sid, did);
2770
2771}
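
/*
 * Illustrative call shapes for the dispatch above (examples only, not
 * taken from callers in this file):
 *
 *   fnic_exch_mgr_reset(lp, sid, did)  sid != 0: libFC exchange reset only
 *   fnic_exch_mgr_reset(lp, 0, did)    did != 0: reset that rport's
 *                                      exchanges first, then libFC reset
 *   fnic_exch_mgr_reset(lp, 0, 0)      link down or host remove: full
 *                                      SCSI cleanup, then libFC reset
 */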
2772
2773/*
2774 * fnic_is_abts_pending() is a helper function that walks the tag map
2775 * to check whether any IOs are pending. It returns 1 (true) if at
2776 * least one is found, and 0 (false) otherwise.
2777 * If @lr_sc is non-NULL, only IOs on that particular LUN are checked;
2778 * otherwise, all IOs are checked.
2779 */
2780int fnic_is_abts_pending(struct fnic *fnic, struct scsi_cmnd *lr_sc)
2781{
2782        int tag;
2783        struct fnic_io_req *io_req;
2784        spinlock_t *io_lock;
2785        unsigned long flags;
2786        int ret = 0;
2787        struct scsi_cmnd *sc;
2788        struct scsi_device *lun_dev = NULL;
2789
2790        if (lr_sc)
2791                lun_dev = lr_sc->device;
2792
2793        /* walk again to check if IOs are still pending in fw */
2794        for (tag = 0; tag < fnic->fnic_max_tag_id; tag++) {
2795                sc = scsi_host_find_tag(fnic->lport->host, tag);
2796                /*
2797                 * ignore this lun reset cmd or cmds that do not belong to
2798                 * this lun
2799                 */
2800                if (!sc || (lr_sc && (sc->device != lun_dev || sc == lr_sc)))
2801                        continue;
2802
2803                io_lock = fnic_io_lock_hash(fnic, sc);
2804                spin_lock_irqsave(io_lock, flags);
2805
2806                io_req = (struct fnic_io_req *)CMD_SP(sc);
2807
2808                if (!io_req || sc->device != lun_dev) {
2809                        spin_unlock_irqrestore(io_lock, flags);
2810                        continue;
2811                }
2812
2813                /*
2814                 * Found IO that is still pending with firmware and
2815                 * belongs to the LUN that we are resetting
2816                 */
2817                FNIC_SCSI_DBG(KERN_INFO, fnic->lport->host,
2818                              "Found IO in %s on lun\n",
2819                              fnic_ioreq_state_to_str(CMD_STATE(sc)));
2820
2821                if (CMD_STATE(sc) == FNIC_IOREQ_ABTS_PENDING)
2822                        ret = 1;
2823                spin_unlock_irqrestore(io_lock, flags);
2824        }
2825
2826        return ret;
2827}
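
/*
 * Illustrative usage (hypothetical caller; in this driver the real
 * consumer is the pending-abort cleanup path earlier in the file):
 * treat a LUN as quiesced only once no command on it still has an
 * abort outstanding in firmware.
 */
static bool example_lun_quiesced(struct fnic *fnic, struct scsi_cmnd *lr_sc)
{
        return fnic_is_abts_pending(fnic, lr_sc) == 0;
}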
2828