linux/drivers/scsi/fnic/fnic_scsi.c
<<
>>
Prefs
   1/*
   2 * Copyright 2008 Cisco Systems, Inc.  All rights reserved.
   3 * Copyright 2007 Nuova Systems, Inc.  All rights reserved.
   4 *
   5 * This program is free software; you may redistribute it and/or modify
   6 * it under the terms of the GNU General Public License as published by
   7 * the Free Software Foundation; version 2 of the License.
   8 *
   9 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
  10 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
  11 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
  12 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
  13 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
  14 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
  15 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
  16 * SOFTWARE.
  17 */
  18#include <linux/mempool.h>
  19#include <linux/errno.h>
  20#include <linux/init.h>
  21#include <linux/workqueue.h>
  22#include <linux/pci.h>
  23#include <linux/scatterlist.h>
  24#include <linux/skbuff.h>
  25#include <linux/spinlock.h>
  26#include <linux/if_ether.h>
  27#include <linux/if_vlan.h>
  28#include <linux/delay.h>
  29#include <linux/gfp.h>
  30#include <scsi/scsi.h>
  31#include <scsi/scsi_host.h>
  32#include <scsi/scsi_device.h>
  33#include <scsi/scsi_cmnd.h>
  34#include <scsi/scsi_tcq.h>
  35#include <scsi/fc/fc_els.h>
  36#include <scsi/fc/fc_fcoe.h>
  37#include <scsi/libfc.h>
  38#include <scsi/fc_frame.h>
  39#include "fnic_io.h"
  40#include "fnic.h"
  41
/*
 * Printable names for the fnic link/transition states, indexed by the
 * FNIC_IN_* state value.  Non-static: shared with other fnic source files.
 */
const char *fnic_state_str[] = {
	[FNIC_IN_FC_MODE] =           "FNIC_IN_FC_MODE",
	[FNIC_IN_FC_TRANS_ETH_MODE] = "FNIC_IN_FC_TRANS_ETH_MODE",
	[FNIC_IN_ETH_MODE] =          "FNIC_IN_ETH_MODE",
	[FNIC_IN_ETH_TRANS_FC_MODE] = "FNIC_IN_ETH_TRANS_FC_MODE",
};
  48
/*
 * Printable names for the per-IO request states, indexed by the
 * FNIC_IOREQ_* state value stored in CMD_STATE(sc).
 */
static const char *fnic_ioreq_state_str[] = {
	[FNIC_IOREQ_NOT_INITED] = "FNIC_IOREQ_NOT_INITED",
	[FNIC_IOREQ_CMD_PENDING] = "FNIC_IOREQ_CMD_PENDING",
	[FNIC_IOREQ_ABTS_PENDING] = "FNIC_IOREQ_ABTS_PENDING",
	[FNIC_IOREQ_ABTS_COMPLETE] = "FNIC_IOREQ_ABTS_COMPLETE",
	[FNIC_IOREQ_CMD_COMPLETE] = "FNIC_IOREQ_CMD_COMPLETE",
};
  56
  57static const char *fcpio_status_str[] =  {
  58        [FCPIO_SUCCESS] = "FCPIO_SUCCESS", /*0x0*/
  59        [FCPIO_INVALID_HEADER] = "FCPIO_INVALID_HEADER",
  60        [FCPIO_OUT_OF_RESOURCE] = "FCPIO_OUT_OF_RESOURCE",
  61        [FCPIO_INVALID_PARAM] = "FCPIO_INVALID_PARAM]",
  62        [FCPIO_REQ_NOT_SUPPORTED] = "FCPIO_REQ_NOT_SUPPORTED",
  63        [FCPIO_IO_NOT_FOUND] = "FCPIO_IO_NOT_FOUND",
  64        [FCPIO_ABORTED] = "FCPIO_ABORTED", /*0x41*/
  65        [FCPIO_TIMEOUT] = "FCPIO_TIMEOUT",
  66        [FCPIO_SGL_INVALID] = "FCPIO_SGL_INVALID",
  67        [FCPIO_MSS_INVALID] = "FCPIO_MSS_INVALID",
  68        [FCPIO_DATA_CNT_MISMATCH] = "FCPIO_DATA_CNT_MISMATCH",
  69        [FCPIO_FW_ERR] = "FCPIO_FW_ERR",
  70        [FCPIO_ITMF_REJECTED] = "FCPIO_ITMF_REJECTED",
  71        [FCPIO_ITMF_FAILED] = "FCPIO_ITMF_FAILED",
  72        [FCPIO_ITMF_INCORRECT_LUN] = "FCPIO_ITMF_INCORRECT_LUN",
  73        [FCPIO_CMND_REJECTED] = "FCPIO_CMND_REJECTED",
  74        [FCPIO_NO_PATH_AVAIL] = "FCPIO_NO_PATH_AVAIL",
  75        [FCPIO_PATH_FAILED] = "FCPIO_PATH_FAILED",
  76        [FCPIO_LUNMAP_CHNG_PEND] = "FCPIO_LUNHMAP_CHNG_PEND",
  77};
  78
  79const char *fnic_state_to_str(unsigned int state)
  80{
  81        if (state >= ARRAY_SIZE(fnic_state_str) || !fnic_state_str[state])
  82                return "unknown";
  83
  84        return fnic_state_str[state];
  85}
  86
  87static const char *fnic_ioreq_state_to_str(unsigned int state)
  88{
  89        if (state >= ARRAY_SIZE(fnic_ioreq_state_str) ||
  90            !fnic_ioreq_state_str[state])
  91                return "unknown";
  92
  93        return fnic_ioreq_state_str[state];
  94}
  95
  96static const char *fnic_fcpio_status_to_str(unsigned int status)
  97{
  98        if (status >= ARRAY_SIZE(fcpio_status_str) || !fcpio_status_str[status])
  99                return "unknown";
 100
 101        return fcpio_status_str[status];
 102}
 103
 104static void fnic_cleanup_io(struct fnic *fnic, int exclude_id);
 105
 106static inline spinlock_t *fnic_io_lock_hash(struct fnic *fnic,
 107                                            struct scsi_cmnd *sc)
 108{
 109        u32 hash = sc->request->tag & (FNIC_IO_LOCKS - 1);
 110
 111        return &fnic->io_req_lock[hash];
 112}
 113
 114static inline spinlock_t *fnic_io_lock_tag(struct fnic *fnic,
 115                                            int tag)
 116{
 117        return &fnic->io_req_lock[tag & (FNIC_IO_LOCKS - 1)];
 118}
 119
/*
 * Unmap the data buffer and sense buffer for an io_req,
 * also unmap and free the device-private scatter/gather list.
 */
static void fnic_release_ioreq_buf(struct fnic *fnic,
				   struct fnic_io_req *io_req,
				   struct scsi_cmnd *sc)
{
	/* SGL table was DMA-mapped TO the device; 0 means it was never mapped */
	if (io_req->sgl_list_pa)
		pci_unmap_single(fnic->pdev, io_req->sgl_list_pa,
				 sizeof(io_req->sgl_list[0]) * io_req->sgl_cnt,
				 PCI_DMA_TODEVICE);
	/* Unmap the command's data buffers (no-op if none were mapped) */
	scsi_dma_unmap(sc);

	/* Free the (possibly alignment-adjusted) SGL allocation back to its pool */
	if (io_req->sgl_cnt)
		mempool_free(io_req->sgl_list_alloc,
			     fnic->io_sgl_pool[io_req->sgl_type]);
	/* Sense buffer was mapped FROM the device for autosense data */
	if (io_req->sense_buf_pa)
		pci_unmap_single(fnic->pdev, io_req->sense_buf_pa,
				 SCSI_SENSE_BUFFERSIZE, PCI_DMA_FROMDEVICE);
}
 141
/*
 * Free up Copy Wq descriptors. Called with copy_wq lock held.
 *
 * Returns 0 if descriptors were reclaimed, 1 if there was nothing to do
 * (no firmware ack received since the last reclaim).
 */
static int free_wq_copy_descs(struct fnic *fnic, struct vnic_wq_copy *wq)
{
	/* if no Ack received from firmware, then nothing to clean */
	if (!fnic->fw_ack_recd[0])
		return 1;

	/*
	 * Update desc_available count based on number of freed descriptors
	 * Account for wraparound
	 */
	if (wq->to_clean_index <= fnic->fw_ack_index[0])
		wq->ring.desc_avail += (fnic->fw_ack_index[0]
					- wq->to_clean_index + 1);
	else
		/* ack index wrapped past the end of the ring */
		wq->ring.desc_avail += (wq->ring.desc_count
					- wq->to_clean_index
					+ fnic->fw_ack_index[0] + 1);

	/*
	 * just bump clean index to ack_index+1 accounting for wraparound
	 * this will essentially free up all descriptors between
	 * to_clean_index and fw_ack_index, both inclusive
	 */
	wq->to_clean_index =
		(fnic->fw_ack_index[0] + 1) % wq->ring.desc_count;

	/* we have processed the acks received so far */
	fnic->fw_ack_recd[0] = 0;
	return 0;
}
 173
 174
/**
 * __fnic_set_state_flags
 * Sets/Clears bits in fnic's state_flags
 *
 * @st_flags:  bit mask to set or clear
 * @clearbits: non-zero to clear the bits in st_flags, zero to set them
 *
 * NOTE(review): state_flags is guarded by the SCSI host lock; the
 * spin_is_locked() check is used to avoid self-deadlock when the caller
 * already holds it.  This pattern is racy (another context holding the
 * lock makes this path skip locking) and spin_is_locked() is always
 * false on !CONFIG_SMP builds — verify all call sites' lock context
 * before relying on it.
 **/
void
__fnic_set_state_flags(struct fnic *fnic, unsigned long st_flags,
			unsigned long clearbits)
{
	struct Scsi_Host *host = fnic->lport->host;
	int sh_locked = spin_is_locked(host->host_lock);
	unsigned long flags = 0;

	if (!sh_locked)
		spin_lock_irqsave(host->host_lock, flags);

	if (clearbits)
		fnic->state_flags &= ~st_flags;
	else
		fnic->state_flags |= st_flags;

	if (!sh_locked)
		spin_unlock_irqrestore(host->host_lock, flags);

	return;
}
 200
 201
 202/*
 203 * fnic_fw_reset_handler
 204 * Routine to send reset msg to fw
 205 */
 206int fnic_fw_reset_handler(struct fnic *fnic)
 207{
 208        struct vnic_wq_copy *wq = &fnic->wq_copy[0];
 209        int ret = 0;
 210        unsigned long flags;
 211
 212        /* indicate fwreset to io path */
 213        fnic_set_state_flags(fnic, FNIC_FLAGS_FWRESET);
 214
 215        skb_queue_purge(&fnic->frame_queue);
 216        skb_queue_purge(&fnic->tx_queue);
 217
 218        /* wait for io cmpl */
 219        while (atomic_read(&fnic->in_flight))
 220                schedule_timeout(msecs_to_jiffies(1));
 221
 222        spin_lock_irqsave(&fnic->wq_copy_lock[0], flags);
 223
 224        if (vnic_wq_copy_desc_avail(wq) <= fnic->wq_copy_desc_low[0])
 225                free_wq_copy_descs(fnic, wq);
 226
 227        if (!vnic_wq_copy_desc_avail(wq))
 228                ret = -EAGAIN;
 229        else {
 230                fnic_queue_wq_copy_desc_fw_reset(wq, SCSI_NO_TAG);
 231                atomic64_inc(&fnic->fnic_stats.fw_stats.active_fw_reqs);
 232                if (atomic64_read(&fnic->fnic_stats.fw_stats.active_fw_reqs) >
 233                          atomic64_read(&fnic->fnic_stats.fw_stats.max_fw_reqs))
 234                        atomic64_set(&fnic->fnic_stats.fw_stats.max_fw_reqs,
 235                                atomic64_read(
 236                                  &fnic->fnic_stats.fw_stats.active_fw_reqs));
 237        }
 238
 239        spin_unlock_irqrestore(&fnic->wq_copy_lock[0], flags);
 240
 241        if (!ret) {
 242                atomic64_inc(&fnic->fnic_stats.reset_stats.fw_resets);
 243                FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
 244                              "Issued fw reset\n");
 245        } else {
 246                fnic_clear_state_flags(fnic, FNIC_FLAGS_FWRESET);
 247                FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
 248                              "Failed to issue fw reset\n");
 249        }
 250
 251        return ret;
 252}
 253
 254
 255/*
 256 * fnic_flogi_reg_handler
 257 * Routine to send flogi register msg to fw
 258 */
/*
 * fnic_flogi_reg_handler
 * Routine to send flogi register msg to fw
 *
 * @fc_id: FC address assigned by the fabric during FLOGI.
 *
 * Registers the assigned FC ID and gateway MAC with the firmware so it can
 * frame/deframe FCoE traffic.  Returns 0 on success, -EAGAIN if no copy WQ
 * descriptor is available.
 */
int fnic_flogi_reg_handler(struct fnic *fnic, u32 fc_id)
{
	struct vnic_wq_copy *wq = &fnic->wq_copy[0];
	enum fcpio_flogi_reg_format_type format;
	struct fc_lport *lp = fnic->lport;
	u8 gw_mac[ETH_ALEN];
	int ret = 0;
	unsigned long flags;

	spin_lock_irqsave(&fnic->wq_copy_lock[0], flags);

	/* reclaim acked descriptors if we are running low */
	if (vnic_wq_copy_desc_avail(wq) <= fnic->wq_copy_desc_low[0])
		free_wq_copy_descs(fnic, wq);

	if (!vnic_wq_copy_desc_avail(wq)) {
		ret = -EAGAIN;
		goto flogi_reg_ioreq_end;
	}

	/*
	 * map_dest set: no FCF/gateway known, use broadcast destination MAC;
	 * otherwise use the discovered gateway MAC.
	 */
	if (fnic->ctlr.map_dest) {
		memset(gw_mac, 0xff, ETH_ALEN);
		format = FCPIO_FLOGI_REG_DEF_DEST;
	} else {
		memcpy(gw_mac, fnic->ctlr.dest_addr, ETH_ALEN);
		format = FCPIO_FLOGI_REG_GW_DEST;
	}

	/* FIP-capable adapters with a known gateway use the extended FIP reg */
	if ((fnic->config.flags & VFCF_FIP_CAPABLE) && !fnic->ctlr.map_dest) {
		fnic_queue_wq_copy_desc_fip_reg(wq, SCSI_NO_TAG,
						fc_id, gw_mac,
						fnic->data_src_addr,
						lp->r_a_tov, lp->e_d_tov);
		FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
			      "FLOGI FIP reg issued fcid %x src %pM dest %pM\n",
			      fc_id, fnic->data_src_addr, gw_mac);
	} else {
		fnic_queue_wq_copy_desc_flogi_reg(wq, SCSI_NO_TAG,
						  format, fc_id, gw_mac);
		FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
			      "FLOGI reg issued fcid %x map %d dest %pM\n",
			      fc_id, fnic->ctlr.map_dest, gw_mac);
	}

	/* track high-water mark of outstanding fw requests */
	atomic64_inc(&fnic->fnic_stats.fw_stats.active_fw_reqs);
	if (atomic64_read(&fnic->fnic_stats.fw_stats.active_fw_reqs) >
		  atomic64_read(&fnic->fnic_stats.fw_stats.max_fw_reqs))
		atomic64_set(&fnic->fnic_stats.fw_stats.max_fw_reqs,
		  atomic64_read(&fnic->fnic_stats.fw_stats.active_fw_reqs));

flogi_reg_ioreq_end:
	spin_unlock_irqrestore(&fnic->wq_copy_lock[0], flags);
	return ret;
}
 312
 313/*
 314 * fnic_queue_wq_copy_desc
 315 * Routine to enqueue a wq copy desc
 316 */
 317static inline int fnic_queue_wq_copy_desc(struct fnic *fnic,
 318                                          struct vnic_wq_copy *wq,
 319                                          struct fnic_io_req *io_req,
 320                                          struct scsi_cmnd *sc,
 321                                          int sg_count)
 322{
 323        struct scatterlist *sg;
 324        struct fc_rport *rport = starget_to_rport(scsi_target(sc->device));
 325        struct fc_rport_libfc_priv *rp = rport->dd_data;
 326        struct host_sg_desc *desc;
 327        struct misc_stats *misc_stats = &fnic->fnic_stats.misc_stats;
 328        u8 pri_tag = 0;
 329        unsigned int i;
 330        unsigned long intr_flags;
 331        int flags;
 332        u8 exch_flags;
 333        struct scsi_lun fc_lun;
 334        char msg[2];
 335        int r;
 336
 337        if (sg_count) {
 338                /* For each SGE, create a device desc entry */
 339                desc = io_req->sgl_list;
 340                for_each_sg(scsi_sglist(sc), sg, sg_count, i) {
 341                        desc->addr = cpu_to_le64(sg_dma_address(sg));
 342                        desc->len = cpu_to_le32(sg_dma_len(sg));
 343                        desc->_resvd = 0;
 344                        desc++;
 345                }
 346
 347                io_req->sgl_list_pa = pci_map_single
 348                        (fnic->pdev,
 349                         io_req->sgl_list,
 350                         sizeof(io_req->sgl_list[0]) * sg_count,
 351                         PCI_DMA_TODEVICE);
 352
 353                r = pci_dma_mapping_error(fnic->pdev, io_req->sgl_list_pa);
 354                if (r) {
 355                        printk(KERN_ERR "PCI mapping failed with error %d\n", r);
 356                        return SCSI_MLQUEUE_HOST_BUSY;
 357                }
 358        }
 359
 360        io_req->sense_buf_pa = pci_map_single(fnic->pdev,
 361                                              sc->sense_buffer,
 362                                              SCSI_SENSE_BUFFERSIZE,
 363                                              PCI_DMA_FROMDEVICE);
 364
 365        r = pci_dma_mapping_error(fnic->pdev, io_req->sense_buf_pa);
 366        if (r) {
 367                pci_unmap_single(fnic->pdev, io_req->sgl_list_pa,
 368                                sizeof(io_req->sgl_list[0]) * sg_count,
 369                                PCI_DMA_TODEVICE);
 370                printk(KERN_ERR "PCI mapping failed with error %d\n", r);
 371                return SCSI_MLQUEUE_HOST_BUSY;
 372        }
 373
 374        int_to_scsilun(sc->device->lun, &fc_lun);
 375
 376        pri_tag = FCPIO_ICMND_PTA_SIMPLE;
 377        msg[0] = MSG_SIMPLE_TAG;
 378        scsi_populate_tag_msg(sc, msg);
 379        if (msg[0] == MSG_ORDERED_TAG)
 380                pri_tag = FCPIO_ICMND_PTA_ORDERED;
 381
 382        /* Enqueue the descriptor in the Copy WQ */
 383        spin_lock_irqsave(&fnic->wq_copy_lock[0], intr_flags);
 384
 385        if (vnic_wq_copy_desc_avail(wq) <= fnic->wq_copy_desc_low[0])
 386                free_wq_copy_descs(fnic, wq);
 387
 388        if (unlikely(!vnic_wq_copy_desc_avail(wq))) {
 389                spin_unlock_irqrestore(&fnic->wq_copy_lock[0], intr_flags);
 390                FNIC_SCSI_DBG(KERN_INFO, fnic->lport->host,
 391                          "fnic_queue_wq_copy_desc failure - no descriptors\n");
 392                atomic64_inc(&misc_stats->io_cpwq_alloc_failures);
 393                return SCSI_MLQUEUE_HOST_BUSY;
 394        }
 395
 396        flags = 0;
 397        if (sc->sc_data_direction == DMA_FROM_DEVICE)
 398                flags = FCPIO_ICMND_RDDATA;
 399        else if (sc->sc_data_direction == DMA_TO_DEVICE)
 400                flags = FCPIO_ICMND_WRDATA;
 401
 402        exch_flags = 0;
 403        if ((fnic->config.flags & VFCF_FCP_SEQ_LVL_ERR) &&
 404            (rp->flags & FC_RP_FLAGS_RETRY))
 405                exch_flags |= FCPIO_ICMND_SRFLAG_RETRY;
 406
 407        fnic_queue_wq_copy_desc_icmnd_16(wq, sc->request->tag,
 408                                         0, exch_flags, io_req->sgl_cnt,
 409                                         SCSI_SENSE_BUFFERSIZE,
 410                                         io_req->sgl_list_pa,
 411                                         io_req->sense_buf_pa,
 412                                         0, /* scsi cmd ref, always 0 */
 413                                         pri_tag, /* scsi pri and tag */
 414                                         flags, /* command flags */
 415                                         sc->cmnd, sc->cmd_len,
 416                                         scsi_bufflen(sc),
 417                                         fc_lun.scsi_lun, io_req->port_id,
 418                                         rport->maxframe_size, rp->r_a_tov,
 419                                         rp->e_d_tov);
 420
 421        atomic64_inc(&fnic->fnic_stats.fw_stats.active_fw_reqs);
 422        if (atomic64_read(&fnic->fnic_stats.fw_stats.active_fw_reqs) >
 423                  atomic64_read(&fnic->fnic_stats.fw_stats.max_fw_reqs))
 424                atomic64_set(&fnic->fnic_stats.fw_stats.max_fw_reqs,
 425                  atomic64_read(&fnic->fnic_stats.fw_stats.active_fw_reqs));
 426
 427        spin_unlock_irqrestore(&fnic->wq_copy_lock[0], intr_flags);
 428        return 0;
 429}
 430
 431/*
 432 * fnic_queuecommand
 433 * Routine to send a scsi cdb
 434 * Called with host_lock held and interrupts disabled.
 435 */
 436static int fnic_queuecommand_lck(struct scsi_cmnd *sc, void (*done)(struct scsi_cmnd *))
 437{
 438        struct fc_lport *lp = shost_priv(sc->device->host);
 439        struct fc_rport *rport;
 440        struct fnic_io_req *io_req = NULL;
 441        struct fnic *fnic = lport_priv(lp);
 442        struct fnic_stats *fnic_stats = &fnic->fnic_stats;
 443        struct vnic_wq_copy *wq;
 444        int ret;
 445        u64 cmd_trace;
 446        int sg_count = 0;
 447        unsigned long flags = 0;
 448        unsigned long ptr;
 449        spinlock_t *io_lock = NULL;
 450        int io_lock_acquired = 0;
 451        struct fc_rport_libfc_priv *rp;
 452
 453        if (unlikely(fnic_chk_state_flags_locked(fnic, FNIC_FLAGS_IO_BLOCKED)))
 454                return SCSI_MLQUEUE_HOST_BUSY;
 455
 456        rport = starget_to_rport(scsi_target(sc->device));
 457        if (!rport) {
 458                FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
 459                                "returning DID_NO_CONNECT for IO as rport is NULL\n");
 460                sc->result = DID_NO_CONNECT << 16;
 461                done(sc);
 462                return 0;
 463        }
 464
 465        ret = fc_remote_port_chkready(rport);
 466        if (ret) {
 467                FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
 468                                "rport is not ready\n");
 469                atomic64_inc(&fnic_stats->misc_stats.rport_not_ready);
 470                sc->result = ret;
 471                done(sc);
 472                return 0;
 473        }
 474
 475        rp = rport->dd_data;
 476        if (!rp || rp->rp_state == RPORT_ST_DELETE) {
 477                FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
 478                        "rport 0x%x removed, returning DID_NO_CONNECT\n",
 479                        rport->port_id);
 480
 481                atomic64_inc(&fnic_stats->misc_stats.rport_not_ready);
 482                sc->result = DID_NO_CONNECT<<16;
 483                done(sc);
 484                return 0;
 485        }
 486
 487        if (rp->rp_state != RPORT_ST_READY) {
 488                FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
 489                        "rport 0x%x in state 0x%x, returning DID_IMM_RETRY\n",
 490                        rport->port_id, rp->rp_state);
 491
 492                sc->result = DID_IMM_RETRY << 16;
 493                done(sc);
 494                return 0;
 495        }
 496
 497        if (lp->state != LPORT_ST_READY || !(lp->link_up))
 498                return SCSI_MLQUEUE_HOST_BUSY;
 499
 500        atomic_inc(&fnic->in_flight);
 501
 502        /*
 503         * Release host lock, use driver resource specific locks from here.
 504         * Don't re-enable interrupts in case they were disabled prior to the
 505         * caller disabling them.
 506         */
 507        spin_unlock(lp->host->host_lock);
 508        CMD_STATE(sc) = FNIC_IOREQ_NOT_INITED;
 509        CMD_FLAGS(sc) = FNIC_NO_FLAGS;
 510
 511        /* Get a new io_req for this SCSI IO */
 512        io_req = mempool_alloc(fnic->io_req_pool, GFP_ATOMIC);
 513        if (!io_req) {
 514                atomic64_inc(&fnic_stats->io_stats.alloc_failures);
 515                ret = SCSI_MLQUEUE_HOST_BUSY;
 516                goto out;
 517        }
 518        memset(io_req, 0, sizeof(*io_req));
 519
 520        /* Map the data buffer */
 521        sg_count = scsi_dma_map(sc);
 522        if (sg_count < 0) {
 523                FNIC_TRACE(fnic_queuecommand, sc->device->host->host_no,
 524                          sc->request->tag, sc, 0, sc->cmnd[0],
 525                          sg_count, CMD_STATE(sc));
 526                mempool_free(io_req, fnic->io_req_pool);
 527                goto out;
 528        }
 529
 530        /* Determine the type of scatter/gather list we need */
 531        io_req->sgl_cnt = sg_count;
 532        io_req->sgl_type = FNIC_SGL_CACHE_DFLT;
 533        if (sg_count > FNIC_DFLT_SG_DESC_CNT)
 534                io_req->sgl_type = FNIC_SGL_CACHE_MAX;
 535
 536        if (sg_count) {
 537                io_req->sgl_list =
 538                        mempool_alloc(fnic->io_sgl_pool[io_req->sgl_type],
 539                                      GFP_ATOMIC);
 540                if (!io_req->sgl_list) {
 541                        atomic64_inc(&fnic_stats->io_stats.alloc_failures);
 542                        ret = SCSI_MLQUEUE_HOST_BUSY;
 543                        scsi_dma_unmap(sc);
 544                        mempool_free(io_req, fnic->io_req_pool);
 545                        goto out;
 546                }
 547
 548                /* Cache sgl list allocated address before alignment */
 549                io_req->sgl_list_alloc = io_req->sgl_list;
 550                ptr = (unsigned long) io_req->sgl_list;
 551                if (ptr % FNIC_SG_DESC_ALIGN) {
 552                        io_req->sgl_list = (struct host_sg_desc *)
 553                                (((unsigned long) ptr
 554                                  + FNIC_SG_DESC_ALIGN - 1)
 555                                 & ~(FNIC_SG_DESC_ALIGN - 1));
 556                }
 557        }
 558
 559        /*
 560        * Will acquire lock defore setting to IO initialized.
 561        */
 562
 563        io_lock = fnic_io_lock_hash(fnic, sc);
 564        spin_lock_irqsave(io_lock, flags);
 565
 566        /* initialize rest of io_req */
 567        io_lock_acquired = 1;
 568        io_req->port_id = rport->port_id;
 569        io_req->start_time = jiffies;
 570        CMD_STATE(sc) = FNIC_IOREQ_CMD_PENDING;
 571        CMD_SP(sc) = (char *)io_req;
 572        CMD_FLAGS(sc) |= FNIC_IO_INITIALIZED;
 573        sc->scsi_done = done;
 574
 575        /* create copy wq desc and enqueue it */
 576        wq = &fnic->wq_copy[0];
 577        ret = fnic_queue_wq_copy_desc(fnic, wq, io_req, sc, sg_count);
 578        if (ret) {
 579                /*
 580                 * In case another thread cancelled the request,
 581                 * refetch the pointer under the lock.
 582                 */
 583                FNIC_TRACE(fnic_queuecommand, sc->device->host->host_no,
 584                          sc->request->tag, sc, 0, 0, 0,
 585                          (((u64)CMD_FLAGS(sc) << 32) | CMD_STATE(sc)));
 586                io_req = (struct fnic_io_req *)CMD_SP(sc);
 587                CMD_SP(sc) = NULL;
 588                CMD_STATE(sc) = FNIC_IOREQ_CMD_COMPLETE;
 589                spin_unlock_irqrestore(io_lock, flags);
 590                if (io_req) {
 591                        fnic_release_ioreq_buf(fnic, io_req, sc);
 592                        mempool_free(io_req, fnic->io_req_pool);
 593                }
 594                atomic_dec(&fnic->in_flight);
 595                /* acquire host lock before returning to SCSI */
 596                spin_lock(lp->host->host_lock);
 597                return ret;
 598        } else {
 599                atomic64_inc(&fnic_stats->io_stats.active_ios);
 600                atomic64_inc(&fnic_stats->io_stats.num_ios);
 601                if (atomic64_read(&fnic_stats->io_stats.active_ios) >
 602                          atomic64_read(&fnic_stats->io_stats.max_active_ios))
 603                        atomic64_set(&fnic_stats->io_stats.max_active_ios,
 604                             atomic64_read(&fnic_stats->io_stats.active_ios));
 605
 606                /* REVISIT: Use per IO lock in the final code */
 607                CMD_FLAGS(sc) |= FNIC_IO_ISSUED;
 608        }
 609out:
 610        cmd_trace = ((u64)sc->cmnd[0] << 56 | (u64)sc->cmnd[7] << 40 |
 611                        (u64)sc->cmnd[8] << 32 | (u64)sc->cmnd[2] << 24 |
 612                        (u64)sc->cmnd[3] << 16 | (u64)sc->cmnd[4] << 8 |
 613                        sc->cmnd[5]);
 614
 615        FNIC_TRACE(fnic_queuecommand, sc->device->host->host_no,
 616                  sc->request->tag, sc, io_req,
 617                  sg_count, cmd_trace,
 618                  (((u64)CMD_FLAGS(sc) >> 32) | CMD_STATE(sc)));
 619
 620        /* if only we issued IO, will we have the io lock */
 621        if (io_lock_acquired)
 622                spin_unlock_irqrestore(io_lock, flags);
 623
 624        atomic_dec(&fnic->in_flight);
 625        /* acquire host lock before returning to SCSI */
 626        spin_lock(lp->host->host_lock);
 627        return ret;
 628}
 629
 630DEF_SCSI_QCMD(fnic_queuecommand)
 631
/*
 * fnic_fcpio_fw_reset_cmpl_handler
 * Routine to handle fw reset completion
 *
 * Cleans up all outstanding IOs, resets the fw-request counters, and moves
 * the fnic state machine from FC_TRANS_ETH_MODE to ETH_MODE on success (or
 * back to FC_MODE on failure).  Returns 0 on success, -1 on failure or on
 * an unexpected state.
 */
static int fnic_fcpio_fw_reset_cmpl_handler(struct fnic *fnic,
					    struct fcpio_fw_req *desc)
{
	u8 type;
	u8 hdr_status;
	struct fcpio_tag tag;
	int ret = 0;
	unsigned long flags;
	struct reset_stats *reset_stats = &fnic->fnic_stats.reset_stats;

	fcpio_header_dec(&desc->hdr, &type, &hdr_status, &tag);

	atomic64_inc(&reset_stats->fw_reset_completions);

	/* Clean up all outstanding io requests */
	fnic_cleanup_io(fnic, SCSI_NO_TAG);

	/* fw was reset: no requests can be outstanding any more */
	atomic64_set(&fnic->fnic_stats.fw_stats.active_fw_reqs, 0);
	atomic64_set(&fnic->fnic_stats.io_stats.active_ios, 0);
	atomic64_set(&fnic->io_cmpl_skip, 0);

	spin_lock_irqsave(&fnic->fnic_lock, flags);

	/* fnic should be in FC_TRANS_ETH_MODE */
	if (fnic->state == FNIC_IN_FC_TRANS_ETH_MODE) {
		/* Check status of reset completion */
		if (!hdr_status) {
			FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
				      "reset cmpl success\n");
			/* Ready to send flogi out */
			fnic->state = FNIC_IN_ETH_MODE;
		} else {
			FNIC_SCSI_DBG(KERN_DEBUG,
				      fnic->lport->host,
				      "fnic fw_reset : failed %s\n",
				      fnic_fcpio_status_to_str(hdr_status));

			/*
			 * Unable to change to eth mode, cannot send out flogi
			 * Change state to fc mode, so that subsequent Flogi
			 * requests from libFC will cause more attempts to
			 * reset the firmware. Free the cached flogi
			 */
			fnic->state = FNIC_IN_FC_MODE;
			atomic64_inc(&reset_stats->fw_reset_failures);
			ret = -1;
		}
	} else {
		FNIC_SCSI_DBG(KERN_DEBUG,
			      fnic->lport->host,
			      "Unexpected state %s while processing"
			      " reset cmpl\n", fnic_state_to_str(fnic->state));
		atomic64_inc(&reset_stats->fw_reset_failures);
		ret = -1;
	}

	/* Thread removing device blocks till firmware reset is complete */
	if (fnic->remove_wait)
		complete(fnic->remove_wait);

	/*
	 * If fnic is being removed, or fw reset failed
	 * free the flogi frame. Else, send it out
	 */
	if (fnic->remove_wait || ret) {
		spin_unlock_irqrestore(&fnic->fnic_lock, flags);
		skb_queue_purge(&fnic->tx_queue);
		goto reset_cmpl_handler_end;
	}

	spin_unlock_irqrestore(&fnic->fnic_lock, flags);

	/* push out any frames queued while the reset was in progress */
	fnic_flush_tx(fnic);

 reset_cmpl_handler_end:
	fnic_clear_state_flags(fnic, FNIC_FLAGS_FWRESET);

	return ret;
}
 715
/*
 * fnic_fcpio_flogi_reg_cmpl_handler
 * Routine to handle flogi register completion
 *
 * On success, moves the state machine from ETH_TRANS_FC_MODE to FC_MODE,
 * flushes queued tx frames and kicks the frame-processing work.  Returns
 * 0 on success, -1 on a failed registration or unexpected state.
 */
static int fnic_fcpio_flogi_reg_cmpl_handler(struct fnic *fnic,
					     struct fcpio_fw_req *desc)
{
	u8 type;
	u8 hdr_status;
	struct fcpio_tag tag;
	int ret = 0;
	unsigned long flags;

	fcpio_header_dec(&desc->hdr, &type, &hdr_status, &tag);

	/* Update fnic state based on status of flogi reg completion */
	spin_lock_irqsave(&fnic->fnic_lock, flags);

	if (fnic->state == FNIC_IN_ETH_TRANS_FC_MODE) {

		/* Check flogi registration completion status */
		if (!hdr_status) {
			FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
				      "flog reg succeeded\n");
			fnic->state = FNIC_IN_FC_MODE;
		} else {
			FNIC_SCSI_DBG(KERN_DEBUG,
				      fnic->lport->host,
				      "fnic flogi reg :failed %s\n",
				      fnic_fcpio_status_to_str(hdr_status));
			/* stay in eth mode so libFC can retry FLOGI */
			fnic->state = FNIC_IN_ETH_MODE;
			ret = -1;
		}
	} else {
		FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
			      "Unexpected fnic state %s while"
			      " processing flogi reg completion\n",
			      fnic_state_to_str(fnic->state));
		ret = -1;
	}

	/* note: each branch below drops fnic_lock exactly once */
	if (!ret) {
		if (fnic->stop_rx_link_events) {
			spin_unlock_irqrestore(&fnic->fnic_lock, flags);
			goto reg_cmpl_handler_end;
		}
		spin_unlock_irqrestore(&fnic->fnic_lock, flags);

		fnic_flush_tx(fnic);
		queue_work(fnic_event_queue, &fnic->frame_work);
	} else {
		spin_unlock_irqrestore(&fnic->fnic_lock, flags);
	}

reg_cmpl_handler_end:
	return ret;
}
 773
 774static inline int is_ack_index_in_range(struct vnic_wq_copy *wq,
 775                                        u16 request_out)
 776{
 777        if (wq->to_clean_index <= wq->to_use_index) {
 778                /* out of range, stale request_out index */
 779                if (request_out < wq->to_clean_index ||
 780                    request_out >= wq->to_use_index)
 781                        return 0;
 782        } else {
 783                /* out of range, stale request_out index */
 784                if (request_out < wq->to_clean_index &&
 785                    request_out >= wq->to_use_index)
 786                        return 0;
 787        }
 788        /* request_out index is in range */
 789        return 1;
 790}
 791
 792
 793/*
 794 * Mark that ack received and store the Ack index. If there are multiple
 795 * acks received before Tx thread cleans it up, the latest value will be
 796 * used which is correct behavior. This state should be in the copy Wq
 797 * instead of in the fnic
 798 */
 799static inline void fnic_fcpio_ack_handler(struct fnic *fnic,
 800                                          unsigned int cq_index,
 801                                          struct fcpio_fw_req *desc)
 802{
 803        struct vnic_wq_copy *wq;
 804        u16 request_out = desc->u.ack.request_out;
 805        unsigned long flags;
 806        u64 *ox_id_tag = (u64 *)(void *)desc;
 807
 808        /* mark the ack state */
 809        wq = &fnic->wq_copy[cq_index - fnic->raw_wq_count - fnic->rq_count];
 810        spin_lock_irqsave(&fnic->wq_copy_lock[0], flags);
 811
 812        fnic->fnic_stats.misc_stats.last_ack_time = jiffies;
 813        if (is_ack_index_in_range(wq, request_out)) {
 814                fnic->fw_ack_index[0] = request_out;
 815                fnic->fw_ack_recd[0] = 1;
 816        } else
 817                atomic64_inc(
 818                        &fnic->fnic_stats.misc_stats.ack_index_out_of_range);
 819
 820        spin_unlock_irqrestore(&fnic->wq_copy_lock[0], flags);
 821        FNIC_TRACE(fnic_fcpio_ack_handler,
 822                  fnic->lport->host->host_no, 0, 0, ox_id_tag[2], ox_id_tag[3],
 823                  ox_id_tag[4], ox_id_tag[5]);
 824}
 825
 826/*
 827 * fnic_fcpio_icmnd_cmpl_handler
 828 * Routine to handle icmnd completions
 829 */
 830static void fnic_fcpio_icmnd_cmpl_handler(struct fnic *fnic,
 831                                         struct fcpio_fw_req *desc)
 832{
 833        u8 type;
 834        u8 hdr_status;
 835        struct fcpio_tag tag;
 836        u32 id;
 837        u64 xfer_len = 0;
 838        struct fcpio_icmnd_cmpl *icmnd_cmpl;
 839        struct fnic_io_req *io_req;
 840        struct scsi_cmnd *sc;
 841        struct fnic_stats *fnic_stats = &fnic->fnic_stats;
 842        unsigned long flags;
 843        spinlock_t *io_lock;
 844        u64 cmd_trace;
 845        unsigned long start_time;
 846        unsigned long io_duration_time;
 847
 848        /* Decode the cmpl description to get the io_req id */
 849        fcpio_header_dec(&desc->hdr, &type, &hdr_status, &tag);
 850        fcpio_tag_id_dec(&tag, &id);
 851        icmnd_cmpl = &desc->u.icmnd_cmpl;
 852
 853        if (id >= fnic->fnic_max_tag_id) {
 854                shost_printk(KERN_ERR, fnic->lport->host,
 855                        "Tag out of range tag %x hdr status = %s\n",
 856                             id, fnic_fcpio_status_to_str(hdr_status));
 857                return;
 858        }
 859
 860        sc = scsi_host_find_tag(fnic->lport->host, id);
 861        WARN_ON_ONCE(!sc);
 862        if (!sc) {
 863                atomic64_inc(&fnic_stats->io_stats.sc_null);
 864                shost_printk(KERN_ERR, fnic->lport->host,
 865                          "icmnd_cmpl sc is null - "
 866                          "hdr status = %s tag = 0x%x desc = 0x%p\n",
 867                          fnic_fcpio_status_to_str(hdr_status), id, desc);
 868                FNIC_TRACE(fnic_fcpio_icmnd_cmpl_handler,
 869                          fnic->lport->host->host_no, id,
 870                          ((u64)icmnd_cmpl->_resvd0[1] << 16 |
 871                          (u64)icmnd_cmpl->_resvd0[0]),
 872                          ((u64)hdr_status << 16 |
 873                          (u64)icmnd_cmpl->scsi_status << 8 |
 874                          (u64)icmnd_cmpl->flags), desc,
 875                          (u64)icmnd_cmpl->residual, 0);
 876                return;
 877        }
 878
 879        io_lock = fnic_io_lock_hash(fnic, sc);
 880        spin_lock_irqsave(io_lock, flags);
 881        io_req = (struct fnic_io_req *)CMD_SP(sc);
 882        WARN_ON_ONCE(!io_req);
 883        if (!io_req) {
 884                atomic64_inc(&fnic_stats->io_stats.ioreq_null);
 885                CMD_FLAGS(sc) |= FNIC_IO_REQ_NULL;
 886                spin_unlock_irqrestore(io_lock, flags);
 887                shost_printk(KERN_ERR, fnic->lport->host,
 888                          "icmnd_cmpl io_req is null - "
 889                          "hdr status = %s tag = 0x%x sc 0x%p\n",
 890                          fnic_fcpio_status_to_str(hdr_status), id, sc);
 891                return;
 892        }
 893        start_time = io_req->start_time;
 894
 895        /* firmware completed the io */
 896        io_req->io_completed = 1;
 897
 898        /*
 899         *  if SCSI-ML has already issued abort on this command,
 900         *  set completion of the IO. The abts path will clean it up
 901         */
 902        if (CMD_STATE(sc) == FNIC_IOREQ_ABTS_PENDING) {
 903
 904                /*
 905                 * set the FNIC_IO_DONE so that this doesn't get
 906                 * flagged as 'out of order' if it was not aborted
 907                 */
 908                CMD_FLAGS(sc) |= FNIC_IO_DONE;
 909                CMD_FLAGS(sc) |= FNIC_IO_ABTS_PENDING;
 910                spin_unlock_irqrestore(io_lock, flags);
 911                if(FCPIO_ABORTED == hdr_status)
 912                        CMD_FLAGS(sc) |= FNIC_IO_ABORTED;
 913
 914                FNIC_SCSI_DBG(KERN_INFO, fnic->lport->host,
 915                        "icmnd_cmpl abts pending "
 916                          "hdr status = %s tag = 0x%x sc = 0x%p"
 917                          "scsi_status = %x residual = %d\n",
 918                          fnic_fcpio_status_to_str(hdr_status),
 919                          id, sc,
 920                          icmnd_cmpl->scsi_status,
 921                          icmnd_cmpl->residual);
 922                return;
 923        }
 924
 925        /* Mark the IO as complete */
 926        CMD_STATE(sc) = FNIC_IOREQ_CMD_COMPLETE;
 927
 928        icmnd_cmpl = &desc->u.icmnd_cmpl;
 929
 930        switch (hdr_status) {
 931        case FCPIO_SUCCESS:
 932                sc->result = (DID_OK << 16) | icmnd_cmpl->scsi_status;
 933                xfer_len = scsi_bufflen(sc);
 934                scsi_set_resid(sc, icmnd_cmpl->residual);
 935
 936                if (icmnd_cmpl->flags & FCPIO_ICMND_CMPL_RESID_UNDER)
 937                        xfer_len -= icmnd_cmpl->residual;
 938
 939                if (icmnd_cmpl->scsi_status == SAM_STAT_CHECK_CONDITION)
 940                        atomic64_inc(&fnic_stats->misc_stats.check_condition);
 941
 942                if (icmnd_cmpl->scsi_status == SAM_STAT_TASK_SET_FULL)
 943                        atomic64_inc(&fnic_stats->misc_stats.queue_fulls);
 944                break;
 945
 946        case FCPIO_TIMEOUT:          /* request was timed out */
 947                atomic64_inc(&fnic_stats->misc_stats.fcpio_timeout);
 948                sc->result = (DID_TIME_OUT << 16) | icmnd_cmpl->scsi_status;
 949                break;
 950
 951        case FCPIO_ABORTED:          /* request was aborted */
 952                atomic64_inc(&fnic_stats->misc_stats.fcpio_aborted);
 953                sc->result = (DID_ERROR << 16) | icmnd_cmpl->scsi_status;
 954                break;
 955
 956        case FCPIO_DATA_CNT_MISMATCH: /* recv/sent more/less data than exp. */
 957                atomic64_inc(&fnic_stats->misc_stats.data_count_mismatch);
 958                scsi_set_resid(sc, icmnd_cmpl->residual);
 959                sc->result = (DID_ERROR << 16) | icmnd_cmpl->scsi_status;
 960                break;
 961
 962        case FCPIO_OUT_OF_RESOURCE:  /* out of resources to complete request */
 963                atomic64_inc(&fnic_stats->fw_stats.fw_out_of_resources);
 964                sc->result = (DID_REQUEUE << 16) | icmnd_cmpl->scsi_status;
 965                break;
 966
 967        case FCPIO_IO_NOT_FOUND:     /* requested I/O was not found */
 968                atomic64_inc(&fnic_stats->io_stats.io_not_found);
 969                sc->result = (DID_ERROR << 16) | icmnd_cmpl->scsi_status;
 970                break;
 971
 972        case FCPIO_SGL_INVALID:      /* request was aborted due to sgl error */
 973                atomic64_inc(&fnic_stats->misc_stats.sgl_invalid);
 974                sc->result = (DID_ERROR << 16) | icmnd_cmpl->scsi_status;
 975                break;
 976
 977        case FCPIO_FW_ERR:           /* request was terminated due fw error */
 978                atomic64_inc(&fnic_stats->fw_stats.io_fw_errs);
 979                sc->result = (DID_ERROR << 16) | icmnd_cmpl->scsi_status;
 980                break;
 981
 982        case FCPIO_MSS_INVALID:      /* request was aborted due to mss error */
 983                atomic64_inc(&fnic_stats->misc_stats.mss_invalid);
 984                sc->result = (DID_ERROR << 16) | icmnd_cmpl->scsi_status;
 985                break;
 986
 987        case FCPIO_INVALID_HEADER:   /* header contains invalid data */
 988        case FCPIO_INVALID_PARAM:    /* some parameter in request invalid */
 989        case FCPIO_REQ_NOT_SUPPORTED:/* request type is not supported */
 990        default:
 991                sc->result = (DID_ERROR << 16) | icmnd_cmpl->scsi_status;
 992                break;
 993        }
 994
 995        /* Break link with the SCSI command */
 996        CMD_SP(sc) = NULL;
 997        CMD_FLAGS(sc) |= FNIC_IO_DONE;
 998
 999        spin_unlock_irqrestore(io_lock, flags);
1000
1001        if (hdr_status != FCPIO_SUCCESS) {
1002                atomic64_inc(&fnic_stats->io_stats.io_failures);
1003                shost_printk(KERN_ERR, fnic->lport->host, "hdr status = %s\n",
1004                             fnic_fcpio_status_to_str(hdr_status));
1005        }
1006
1007        fnic_release_ioreq_buf(fnic, io_req, sc);
1008
1009        mempool_free(io_req, fnic->io_req_pool);
1010
1011        cmd_trace = ((u64)hdr_status << 56) |
1012                  (u64)icmnd_cmpl->scsi_status << 48 |
1013                  (u64)icmnd_cmpl->flags << 40 | (u64)sc->cmnd[0] << 32 |
1014                  (u64)sc->cmnd[2] << 24 | (u64)sc->cmnd[3] << 16 |
1015                  (u64)sc->cmnd[4] << 8 | sc->cmnd[5];
1016
1017        FNIC_TRACE(fnic_fcpio_icmnd_cmpl_handler,
1018                  sc->device->host->host_no, id, sc,
1019                  ((u64)icmnd_cmpl->_resvd0[1] << 56 |
1020                  (u64)icmnd_cmpl->_resvd0[0] << 48 |
1021                  jiffies_to_msecs(jiffies - start_time)),
1022                  desc, cmd_trace,
1023                  (((u64)CMD_FLAGS(sc) << 32) | CMD_STATE(sc)));
1024
1025        if (sc->sc_data_direction == DMA_FROM_DEVICE) {
1026                fnic->lport->host_stats.fcp_input_requests++;
1027                fnic->fcp_input_bytes += xfer_len;
1028        } else if (sc->sc_data_direction == DMA_TO_DEVICE) {
1029                fnic->lport->host_stats.fcp_output_requests++;
1030                fnic->fcp_output_bytes += xfer_len;
1031        } else
1032                fnic->lport->host_stats.fcp_control_requests++;
1033
1034        atomic64_dec(&fnic_stats->io_stats.active_ios);
1035        if (atomic64_read(&fnic->io_cmpl_skip))
1036                atomic64_dec(&fnic->io_cmpl_skip);
1037        else
1038                atomic64_inc(&fnic_stats->io_stats.io_completions);
1039
1040
1041        io_duration_time = jiffies_to_msecs(jiffies) - jiffies_to_msecs(io_req->start_time);
1042
1043        if(io_duration_time <= 10)
1044                atomic64_inc(&fnic_stats->io_stats.io_btw_0_to_10_msec);
1045        else if(io_duration_time <= 100)
1046                atomic64_inc(&fnic_stats->io_stats.io_btw_10_to_100_msec);
1047        else if(io_duration_time <= 500)
1048                atomic64_inc(&fnic_stats->io_stats.io_btw_100_to_500_msec);
1049        else if(io_duration_time <= 5000)
1050                atomic64_inc(&fnic_stats->io_stats.io_btw_500_to_5000_msec);
1051        else if(io_duration_time <= 10000)
1052                atomic64_inc(&fnic_stats->io_stats.io_btw_5000_to_10000_msec);
1053        else if(io_duration_time <= 30000)
1054                atomic64_inc(&fnic_stats->io_stats.io_btw_10000_to_30000_msec);
1055        else {
1056                atomic64_inc(&fnic_stats->io_stats.io_greater_than_30000_msec);
1057
1058                if(io_duration_time > atomic64_read(&fnic_stats->io_stats.current_max_io_time))
1059                        atomic64_set(&fnic_stats->io_stats.current_max_io_time, io_duration_time);
1060        }
1061
1062        /* Call SCSI completion function to complete the IO */
1063        if (sc->scsi_done)
1064                sc->scsi_done(sc);
1065}
1066
1067/* fnic_fcpio_itmf_cmpl_handler
1068 * Routine to handle itmf completions
1069 */
static void fnic_fcpio_itmf_cmpl_handler(struct fnic *fnic,
					struct fcpio_fw_req *desc)
{
	u8 type;
	u8 hdr_status;
	struct fcpio_tag tag;
	u32 id;
	struct scsi_cmnd *sc;
	struct fnic_io_req *io_req;
	struct fnic_stats *fnic_stats = &fnic->fnic_stats;
	struct abort_stats *abts_stats = &fnic->fnic_stats.abts_stats;
	struct terminate_stats *term_stats = &fnic->fnic_stats.term_stats;
	struct misc_stats *misc_stats = &fnic->fnic_stats.misc_stats;
	unsigned long flags;
	spinlock_t *io_lock;
	unsigned long start_time;

	/*
	 * The tag id carries both the command tag (low FNIC_TAG_MASK bits)
	 * and flag bits (FNIC_TAG_ABORT / FNIC_TAG_DEV_RST) identifying
	 * which kind of task-management request this completes.
	 */
	fcpio_header_dec(&desc->hdr, &type, &hdr_status, &tag);
	fcpio_tag_id_dec(&tag, &id);

	if ((id & FNIC_TAG_MASK) >= fnic->fnic_max_tag_id) {
		shost_printk(KERN_ERR, fnic->lport->host,
		"Tag out of range tag %x hdr status = %s\n",
		id, fnic_fcpio_status_to_str(hdr_status));
		return;
	}

	sc = scsi_host_find_tag(fnic->lport->host, id & FNIC_TAG_MASK);
	WARN_ON_ONCE(!sc);
	if (!sc) {
		atomic64_inc(&fnic_stats->io_stats.sc_null);
		shost_printk(KERN_ERR, fnic->lport->host,
			  "itmf_cmpl sc is null - hdr status = %s tag = 0x%x\n",
			  fnic_fcpio_status_to_str(hdr_status), id);
		return;
	}
	io_lock = fnic_io_lock_hash(fnic, sc);
	spin_lock_irqsave(io_lock, flags);
	io_req = (struct fnic_io_req *)CMD_SP(sc);
	WARN_ON_ONCE(!io_req);
	if (!io_req) {
		atomic64_inc(&fnic_stats->io_stats.ioreq_null);
		spin_unlock_irqrestore(io_lock, flags);
		CMD_FLAGS(sc) |= FNIC_IO_ABT_TERM_REQ_NULL;
		shost_printk(KERN_ERR, fnic->lport->host,
			  "itmf_cmpl io_req is null - "
			  "hdr status = %s tag = 0x%x sc 0x%p\n",
			  fnic_fcpio_status_to_str(hdr_status), id, sc);
		return;
	}
	start_time = io_req->start_time;

	if ((id & FNIC_TAG_ABORT) && (id & FNIC_TAG_DEV_RST)) {
		/* Abort and terminate completion of device reset req */
		/* REVISIT : Add asserts about various flags */
		FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
			      "dev reset abts cmpl recd. id %x status %s\n",
			      id, fnic_fcpio_status_to_str(hdr_status));
		CMD_STATE(sc) = FNIC_IOREQ_ABTS_COMPLETE;
		CMD_ABTS_STATUS(sc) = hdr_status;
		CMD_FLAGS(sc) |= FNIC_DEV_RST_DONE;
		/* wake the eh thread blocked on this abort, if any */
		if (io_req->abts_done)
			complete(io_req->abts_done);
		spin_unlock_irqrestore(io_lock, flags);
	} else if (id & FNIC_TAG_ABORT) {
		/* Completion of abort cmd */
		/* account the failure mode per abort vs terminate */
		switch (hdr_status) {
		case FCPIO_SUCCESS:
			break;
		case FCPIO_TIMEOUT:
			if (CMD_FLAGS(sc) & FNIC_IO_ABTS_ISSUED)
				atomic64_inc(&abts_stats->abort_fw_timeouts);
			else
				atomic64_inc(
					&term_stats->terminate_fw_timeouts);
			break;
		case FCPIO_ITMF_REJECTED:
			FNIC_SCSI_DBG(KERN_INFO, fnic->lport->host,
				"abort reject recd. id %d\n",
				(int)(id & FNIC_TAG_MASK));
			break;
		case FCPIO_IO_NOT_FOUND:
			if (CMD_FLAGS(sc) & FNIC_IO_ABTS_ISSUED)
				atomic64_inc(&abts_stats->abort_io_not_found);
			else
				atomic64_inc(
					&term_stats->terminate_io_not_found);
			break;
		default:
			if (CMD_FLAGS(sc) & FNIC_IO_ABTS_ISSUED)
				atomic64_inc(&abts_stats->abort_failures);
			else
				atomic64_inc(
					&term_stats->terminate_failures);
			break;
		}
		if (CMD_STATE(sc) != FNIC_IOREQ_ABTS_PENDING) {
			/* This is a late completion. Ignore it */
			spin_unlock_irqrestore(io_lock, flags);
			return;
		}

		CMD_FLAGS(sc) |= FNIC_IO_ABT_TERM_DONE;
		CMD_ABTS_STATUS(sc) = hdr_status;

		/* If the status is IO not found consider it as success */
		if (hdr_status == FCPIO_IO_NOT_FOUND)
			CMD_ABTS_STATUS(sc) = FCPIO_SUCCESS;

		if (!(CMD_FLAGS(sc) & (FNIC_IO_ABORTED | FNIC_IO_DONE)))
			atomic64_inc(&misc_stats->no_icmnd_itmf_cmpls);

		FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
			      "abts cmpl recd. id %d status %s\n",
			      (int)(id & FNIC_TAG_MASK),
			      fnic_fcpio_status_to_str(hdr_status));

		/*
		 * If scsi_eh thread is blocked waiting for abts to complete,
		 * signal completion to it. IO will be cleaned in the thread
		 * else clean it in this context
		 */
		if (io_req->abts_done) {
			complete(io_req->abts_done);
			spin_unlock_irqrestore(io_lock, flags);
		} else {
			FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
				      "abts cmpl, completing IO\n");
			/* unlink io_req before dropping the lock, then free */
			CMD_SP(sc) = NULL;
			sc->result = (DID_ERROR << 16);

			spin_unlock_irqrestore(io_lock, flags);

			fnic_release_ioreq_buf(fnic, io_req, sc);
			mempool_free(io_req, fnic->io_req_pool);
			if (sc->scsi_done) {
				FNIC_TRACE(fnic_fcpio_itmf_cmpl_handler,
					sc->device->host->host_no, id,
					sc,
					jiffies_to_msecs(jiffies - start_time),
					desc,
					(((u64)hdr_status << 40) |
					(u64)sc->cmnd[0] << 32 |
					(u64)sc->cmnd[2] << 24 |
					(u64)sc->cmnd[3] << 16 |
					(u64)sc->cmnd[4] << 8 | sc->cmnd[5]),
					(((u64)CMD_FLAGS(sc) << 32) |
					CMD_STATE(sc)));
				sc->scsi_done(sc);
				atomic64_dec(&fnic_stats->io_stats.active_ios);
				if (atomic64_read(&fnic->io_cmpl_skip))
					atomic64_dec(&fnic->io_cmpl_skip);
				else
					atomic64_inc(&fnic_stats->io_stats.io_completions);
			}
		}

	} else if (id & FNIC_TAG_DEV_RST) {
		/* Completion of device reset */
		CMD_LR_STATUS(sc) = hdr_status;
		if (CMD_STATE(sc) == FNIC_IOREQ_ABTS_PENDING) {
			/* a terminate is still outstanding for this LUN reset;
			 * defer cleanup to the abort completion path */
			spin_unlock_irqrestore(io_lock, flags);
			CMD_FLAGS(sc) |= FNIC_DEV_RST_ABTS_PENDING;
			FNIC_TRACE(fnic_fcpio_itmf_cmpl_handler,
				  sc->device->host->host_no, id, sc,
				  jiffies_to_msecs(jiffies - start_time),
				  desc, 0,
				  (((u64)CMD_FLAGS(sc) << 32) | CMD_STATE(sc)));
			FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
				"Terminate pending "
				"dev reset cmpl recd. id %d status %s\n",
				(int)(id & FNIC_TAG_MASK),
				fnic_fcpio_status_to_str(hdr_status));
			return;
		}
		if (CMD_FLAGS(sc) & FNIC_DEV_RST_TIMED_OUT) {
			/* Need to wait for terminate completion */
			spin_unlock_irqrestore(io_lock, flags);
			FNIC_TRACE(fnic_fcpio_itmf_cmpl_handler,
				  sc->device->host->host_no, id, sc,
				  jiffies_to_msecs(jiffies - start_time),
				  desc, 0,
				  (((u64)CMD_FLAGS(sc) << 32) | CMD_STATE(sc)));
			FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
				"dev reset cmpl recd after time out. "
				"id %d status %s\n",
				(int)(id & FNIC_TAG_MASK),
				fnic_fcpio_status_to_str(hdr_status));
			return;
		}
		CMD_STATE(sc) = FNIC_IOREQ_CMD_COMPLETE;
		CMD_FLAGS(sc) |= FNIC_DEV_RST_DONE;
		FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
			      "dev reset cmpl recd. id %d status %s\n",
			      (int)(id & FNIC_TAG_MASK),
			      fnic_fcpio_status_to_str(hdr_status));
		/* wake the device-reset waiter, if any */
		if (io_req->dr_done)
			complete(io_req->dr_done);
		spin_unlock_irqrestore(io_lock, flags);

	} else {
		/* neither ABORT nor DEV_RST flag set: unexpected tag */
		shost_printk(KERN_ERR, fnic->lport->host,
			     "Unexpected itmf io state %s tag %x\n",
			     fnic_ioreq_state_to_str(CMD_STATE(sc)), id);
		spin_unlock_irqrestore(io_lock, flags);
	}

}
1278
1279/*
1280 * fnic_fcpio_cmpl_handler
1281 * Routine to service the cq for wq_copy
1282 */
1283static int fnic_fcpio_cmpl_handler(struct vnic_dev *vdev,
1284                                   unsigned int cq_index,
1285                                   struct fcpio_fw_req *desc)
1286{
1287        struct fnic *fnic = vnic_dev_priv(vdev);
1288
1289        switch (desc->hdr.type) {
1290        case FCPIO_ICMND_CMPL: /* fw completed a command */
1291        case FCPIO_ITMF_CMPL: /* fw completed itmf (abort cmd, lun reset)*/
1292        case FCPIO_FLOGI_REG_CMPL: /* fw completed flogi_reg */
1293        case FCPIO_FLOGI_FIP_REG_CMPL: /* fw completed flogi_fip_reg */
1294        case FCPIO_RESET_CMPL: /* fw completed reset */
1295                atomic64_dec(&fnic->fnic_stats.fw_stats.active_fw_reqs);
1296                break;
1297        default:
1298                break;
1299        }
1300
1301        switch (desc->hdr.type) {
1302        case FCPIO_ACK: /* fw copied copy wq desc to its queue */
1303                fnic_fcpio_ack_handler(fnic, cq_index, desc);
1304                break;
1305
1306        case FCPIO_ICMND_CMPL: /* fw completed a command */
1307                fnic_fcpio_icmnd_cmpl_handler(fnic, desc);
1308                break;
1309
1310        case FCPIO_ITMF_CMPL: /* fw completed itmf (abort cmd, lun reset)*/
1311                fnic_fcpio_itmf_cmpl_handler(fnic, desc);
1312                break;
1313
1314        case FCPIO_FLOGI_REG_CMPL: /* fw completed flogi_reg */
1315        case FCPIO_FLOGI_FIP_REG_CMPL: /* fw completed flogi_fip_reg */
1316                fnic_fcpio_flogi_reg_cmpl_handler(fnic, desc);
1317                break;
1318
1319        case FCPIO_RESET_CMPL: /* fw completed reset */
1320                fnic_fcpio_fw_reset_cmpl_handler(fnic, desc);
1321                break;
1322
1323        default:
1324                FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
1325                              "firmware completion type %d\n",
1326                              desc->hdr.type);
1327                break;
1328        }
1329
1330        return 0;
1331}
1332
1333/*
1334 * fnic_wq_copy_cmpl_handler
1335 * Routine to process wq copy
1336 */
1337int fnic_wq_copy_cmpl_handler(struct fnic *fnic, int copy_work_to_do)
1338{
1339        unsigned int wq_work_done = 0;
1340        unsigned int i, cq_index;
1341        unsigned int cur_work_done;
1342
1343        for (i = 0; i < fnic->wq_copy_count; i++) {
1344                cq_index = i + fnic->raw_wq_count + fnic->rq_count;
1345                cur_work_done = vnic_cq_copy_service(&fnic->cq[cq_index],
1346                                                     fnic_fcpio_cmpl_handler,
1347                                                     copy_work_to_do);
1348                wq_work_done += cur_work_done;
1349        }
1350        return wq_work_done;
1351}
1352
/*
 * Walk every outstanding tag (except exclude_id) and clean up its IO
 * state. Used after a firmware reset completes without per-IO
 * completions: device-reset waiters are woken, all other IOs are freed
 * and completed to the midlayer with DID_TRANSPORT_DISRUPTED.
 */
static void fnic_cleanup_io(struct fnic *fnic, int exclude_id)
{
	int i;
	struct fnic_io_req *io_req;
	unsigned long flags = 0;
	struct scsi_cmnd *sc;
	spinlock_t *io_lock;
	unsigned long start_time = 0;
	struct fnic_stats *fnic_stats = &fnic->fnic_stats;

	for (i = 0; i < fnic->fnic_max_tag_id; i++) {
		if (i == exclude_id)
			continue;

		io_lock = fnic_io_lock_tag(fnic, i);
		spin_lock_irqsave(io_lock, flags);
		sc = scsi_host_find_tag(fnic->lport->host, i);
		if (!sc) {
			spin_unlock_irqrestore(io_lock, flags);
			continue;
		}

		io_req = (struct fnic_io_req *)CMD_SP(sc);
		if ((CMD_FLAGS(sc) & FNIC_DEVICE_RESET) &&
			!(CMD_FLAGS(sc) & FNIC_DEV_RST_DONE)) {
			/*
			 * We will be here only when FW completes reset
			 * without sending completions for outstanding ios.
			 */
			CMD_FLAGS(sc) |= FNIC_DEV_RST_DONE;
			if (io_req && io_req->dr_done)
				complete(io_req->dr_done);
			else if (io_req && io_req->abts_done)
				complete(io_req->abts_done);
			spin_unlock_irqrestore(io_lock, flags);
			continue;
		} else if (CMD_FLAGS(sc) & FNIC_DEVICE_RESET) {
			/* device reset already fully done; leave it alone */
			spin_unlock_irqrestore(io_lock, flags);
			continue;
		}
		if (!io_req) {
			/* no io_req to free; still complete the command */
			spin_unlock_irqrestore(io_lock, flags);
			goto cleanup_scsi_cmd;
		}

		/* unlink io_req from the command before dropping the lock */
		CMD_SP(sc) = NULL;

		spin_unlock_irqrestore(io_lock, flags);

		/*
		 * If there is a scsi_cmnd associated with this io_req, then
		 * free the corresponding state
		 */
		start_time = io_req->start_time;
		fnic_release_ioreq_buf(fnic, io_req, sc);
		mempool_free(io_req, fnic->io_req_pool);

cleanup_scsi_cmd:
		/*
		 * NOTE(review): on the !io_req path start_time keeps its
		 * value from a previous iteration (or 0), so the traced
		 * duration below may be inaccurate for that case — confirm
		 * whether that is intended.
		 */
		sc->result = DID_TRANSPORT_DISRUPTED << 16;
		FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host, "fnic_cleanup_io:"
			      " DID_TRANSPORT_DISRUPTED\n");

		if (atomic64_read(&fnic->io_cmpl_skip))
			atomic64_dec(&fnic->io_cmpl_skip);
		else
			atomic64_inc(&fnic_stats->io_stats.io_completions);

		/* Complete the command to SCSI */
		if (sc->scsi_done) {
			FNIC_TRACE(fnic_cleanup_io,
				  sc->device->host->host_no, i, sc,
				  jiffies_to_msecs(jiffies - start_time),
				  0, ((u64)sc->cmnd[0] << 32 |
				  (u64)sc->cmnd[2] << 24 |
				  (u64)sc->cmnd[3] << 16 |
				  (u64)sc->cmnd[4] << 8 | sc->cmnd[5]),
				  (((u64)CMD_FLAGS(sc) << 32) | CMD_STATE(sc)));

			sc->scsi_done(sc);
		}
	}
}
1435
/*
 * fnic_wq_copy_cleanup_handler
 * Clean up a single copy-WQ descriptor that will never reach the
 * firmware: tear down the associated io_req (if any) and complete the
 * SCSI command back to the midlayer with DID_NO_CONNECT.
 * Called per-descriptor while the copy work queue is being cleaned up;
 * fnic interrupts are already disabled at this point.
 */
void fnic_wq_copy_cleanup_handler(struct vnic_wq_copy *wq,
				  struct fcpio_host_req *desc)
{
	u32 id;
	struct fnic *fnic = vnic_dev_priv(wq->vdev);
	struct fnic_io_req *io_req;
	struct scsi_cmnd *sc;
	unsigned long flags;
	spinlock_t *io_lock;
	unsigned long start_time = 0;

	/* get the tag reference */
	fcpio_tag_id_dec(&desc->hdr.tag, &id);
	id &= FNIC_TAG_MASK;

	/* Tag outside the host's tag space: nothing to clean up */
	if (id >= fnic->fnic_max_tag_id)
		return;

	sc = scsi_host_find_tag(fnic->lport->host, id);
	if (!sc)
		return;

	io_lock = fnic_io_lock_hash(fnic, sc);
	spin_lock_irqsave(io_lock, flags);

	/* Get the IO context which this desc refers to */
	io_req = (struct fnic_io_req *)CMD_SP(sc);

	/* fnic interrupts are turned off by now */

	if (!io_req) {
		/* io_req already torn down elsewhere; still fail the cmd */
		spin_unlock_irqrestore(io_lock, flags);
		goto wq_copy_cleanup_scsi_cmd;
	}

	/* Detach io_req from the command while holding the io_lock */
	CMD_SP(sc) = NULL;

	spin_unlock_irqrestore(io_lock, flags);

	/* Release DMA mappings and free the io_req outside the lock */
	start_time = io_req->start_time;
	fnic_release_ioreq_buf(fnic, io_req, sc);
	mempool_free(io_req, fnic->io_req_pool);

wq_copy_cleanup_scsi_cmd:
	sc->result = DID_NO_CONNECT << 16;
	FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host, "wq_copy_cleanup_handler:"
		      " DID_NO_CONNECT\n");

	/* Complete the command to the SCSI midlayer, with trace record */
	if (sc->scsi_done) {
		FNIC_TRACE(fnic_wq_copy_cleanup_handler,
			  sc->device->host->host_no, id, sc,
			  jiffies_to_msecs(jiffies - start_time),
			  0, ((u64)sc->cmnd[0] << 32 |
			  (u64)sc->cmnd[2] << 24 | (u64)sc->cmnd[3] << 16 |
			  (u64)sc->cmnd[4] << 8 | sc->cmnd[5]),
			  (((u64)CMD_FLAGS(sc) << 32) | CMD_STATE(sc)));

		sc->scsi_done(sc);
	}
}
1496
/*
 * fnic_queue_abort_io_req
 * Post an abort/terminate task-management request (task_req is one of
 * the FCPIO_ITMF_* codes) for the IO identified by @tag on copy WQ 0.
 *
 * Returns 0 if the descriptor was queued to the firmware, 1 if IOs are
 * currently blocked or no WQ descriptor was available; the caller is
 * responsible for retrying or failing the abort.
 */
static inline int fnic_queue_abort_io_req(struct fnic *fnic, int tag,
					  u32 task_req, u8 *fc_lun,
					  struct fnic_io_req *io_req)
{
	struct vnic_wq_copy *wq = &fnic->wq_copy[0];
	struct Scsi_Host *host = fnic->lport->host;
	struct misc_stats *misc_stats = &fnic->fnic_stats.misc_stats;
	unsigned long flags;

	/*
	 * Check the IO-blocked flag and bump in_flight atomically under
	 * host_lock so a concurrent blocker sees a consistent count.
	 */
	spin_lock_irqsave(host->host_lock, flags);
	if (unlikely(fnic_chk_state_flags_locked(fnic,
						FNIC_FLAGS_IO_BLOCKED))) {
		spin_unlock_irqrestore(host->host_lock, flags);
		return 1;
	} else
		atomic_inc(&fnic->in_flight);
	spin_unlock_irqrestore(host->host_lock, flags);

	spin_lock_irqsave(&fnic->wq_copy_lock[0], flags);

	/* Reclaim completed descriptors when running low */
	if (vnic_wq_copy_desc_avail(wq) <= fnic->wq_copy_desc_low[0])
		free_wq_copy_descs(fnic, wq);

	if (!vnic_wq_copy_desc_avail(wq)) {
		spin_unlock_irqrestore(&fnic->wq_copy_lock[0], flags);
		atomic_dec(&fnic->in_flight);
		FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
			"fnic_queue_abort_io_req: failure: no descriptors\n");
		atomic64_inc(&misc_stats->abts_cpwq_alloc_failures);
		return 1;
	}
	/* Queue the ITMF descriptor; tag is marked as an abort request */
	fnic_queue_wq_copy_desc_itmf(wq, tag | FNIC_TAG_ABORT,
				     0, task_req, tag, fc_lun, io_req->port_id,
				     fnic->config.ra_tov, fnic->config.ed_tov);

	/* Track the high-water mark of outstanding firmware requests */
	atomic64_inc(&fnic->fnic_stats.fw_stats.active_fw_reqs);
	if (atomic64_read(&fnic->fnic_stats.fw_stats.active_fw_reqs) >
		  atomic64_read(&fnic->fnic_stats.fw_stats.max_fw_reqs))
		atomic64_set(&fnic->fnic_stats.fw_stats.max_fw_reqs,
		  atomic64_read(&fnic->fnic_stats.fw_stats.active_fw_reqs));

	spin_unlock_irqrestore(&fnic->wq_copy_lock[0], flags);
	atomic_dec(&fnic->in_flight);

	return 0;
}
1543
/*
 * fnic_rport_exch_reset
 * Walk every tag on the host and, for each IO still pending with the
 * firmware that belongs to the remote port @port_id (which has gone
 * away), issue a terminate (local abort) to the firmware.  The per-tag
 * io_lock is held while inspecting/updating command state and dropped
 * around the firmware queueing call.
 */
static void fnic_rport_exch_reset(struct fnic *fnic, u32 port_id)
{
	int tag;
	int abt_tag;
	int term_cnt = 0;
	struct fnic_io_req *io_req;
	spinlock_t *io_lock;
	unsigned long flags;
	struct scsi_cmnd *sc;
	struct reset_stats *reset_stats = &fnic->fnic_stats.reset_stats;
	struct terminate_stats *term_stats = &fnic->fnic_stats.term_stats;
	struct scsi_lun fc_lun;
	enum fnic_ioreq_state old_ioreq_state;

	FNIC_SCSI_DBG(KERN_DEBUG,
		      fnic->lport->host,
		      "fnic_rport_exch_reset called portid 0x%06x\n",
		      port_id);

	/* Device removal in progress: cleanup happens elsewhere */
	if (fnic->in_remove)
		return;

	for (tag = 0; tag < fnic->fnic_max_tag_id; tag++) {
		abt_tag = tag;
		io_lock = fnic_io_lock_tag(fnic, tag);
		spin_lock_irqsave(io_lock, flags);
		sc = scsi_host_find_tag(fnic->lport->host, tag);
		if (!sc) {
			spin_unlock_irqrestore(io_lock, flags);
			continue;
		}

		io_req = (struct fnic_io_req *)CMD_SP(sc);

		/* Skip IOs that belong to a different remote port */
		if (!io_req || io_req->port_id != port_id) {
			spin_unlock_irqrestore(io_lock, flags);
			continue;
		}

		/* Skip device-reset commands not yet issued to the fw */
		if ((CMD_FLAGS(sc) & FNIC_DEVICE_RESET) &&
			(!(CMD_FLAGS(sc) & FNIC_DEV_RST_ISSUED))) {
			FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
			"fnic_rport_exch_reset dev rst not pending sc 0x%p\n",
			sc);
			spin_unlock_irqrestore(io_lock, flags);
			continue;
		}

		/*
		 * Found IO that is still pending with firmware and
		 * belongs to rport that went away
		 */
		if (CMD_STATE(sc) == FNIC_IOREQ_ABTS_PENDING) {
			/* An abort is already in flight for this IO */
			spin_unlock_irqrestore(io_lock, flags);
			continue;
		}
		if (io_req->abts_done) {
			/* Should not happen: no waiter expected here */
			shost_printk(KERN_ERR, fnic->lport->host,
			"fnic_rport_exch_reset: io_req->abts_done is set "
			"state is %s\n",
			fnic_ioreq_state_to_str(CMD_STATE(sc)));
		}

		if (!(CMD_FLAGS(sc) & FNIC_IO_ISSUED)) {
			shost_printk(KERN_ERR, fnic->lport->host,
				  "rport_exch_reset "
				  "IO not yet issued %p tag 0x%x flags "
				  "%x state %d\n",
				  sc, tag, CMD_FLAGS(sc), CMD_STATE(sc));
		}
		/* Remember old state so it can be restored on queue failure */
		old_ioreq_state = CMD_STATE(sc);
		CMD_STATE(sc) = FNIC_IOREQ_ABTS_PENDING;
		CMD_ABTS_STATUS(sc) = FCPIO_INVALID_CODE;
		if (CMD_FLAGS(sc) & FNIC_DEVICE_RESET) {
			atomic64_inc(&reset_stats->device_reset_terminates);
			abt_tag = (tag | FNIC_TAG_DEV_RST);
			FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
			"fnic_rport_exch_reset dev rst sc 0x%p\n",
			sc);
		}

		BUG_ON(io_req->abts_done);

		FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
			      "fnic_rport_reset_exch: Issuing abts\n");

		/* Drop io_lock before queueing to the firmware */
		spin_unlock_irqrestore(io_lock, flags);

		/* Now queue the abort command to firmware */
		int_to_scsilun(sc->device->lun, &fc_lun);

		if (fnic_queue_abort_io_req(fnic, abt_tag,
					    FCPIO_ITMF_ABT_TASK_TERM,
					    fc_lun.scsi_lun, io_req)) {
			/*
			 * Revert the cmd state back to old state, if
			 * it hasn't changed in between. This cmd will get
			 * aborted later by scsi_eh, or cleaned up during
			 * lun reset
			 */
			spin_lock_irqsave(io_lock, flags);
			if (CMD_STATE(sc) == FNIC_IOREQ_ABTS_PENDING)
				CMD_STATE(sc) = old_ioreq_state;
			spin_unlock_irqrestore(io_lock, flags);
		} else {
			/* Terminate queued: record it in the cmd flags */
			spin_lock_irqsave(io_lock, flags);
			if (CMD_FLAGS(sc) & FNIC_DEVICE_RESET)
				CMD_FLAGS(sc) |= FNIC_DEV_RST_TERM_ISSUED;
			else
				CMD_FLAGS(sc) |= FNIC_IO_INTERNAL_TERM_ISSUED;
			spin_unlock_irqrestore(io_lock, flags);
			atomic64_inc(&term_stats->terminates);
			term_cnt++;
		}
	}
	/* Update the max-terminates-per-reset statistic */
	if (term_cnt > atomic64_read(&term_stats->max_terminates))
		atomic64_set(&term_stats->max_terminates, term_cnt);

}
1663
/*
 * fnic_terminate_rport_io
 * fc_transport callback invoked when a remote port goes away: walk all
 * tags and issue a firmware terminate for every IO still outstanding on
 * that rport.  Mirrors fnic_rport_exch_reset() but matches IOs by the
 * scsi_cmnd's target rport rather than by FC port id.
 */
void fnic_terminate_rport_io(struct fc_rport *rport)
{
	int tag;
	int abt_tag;
	int term_cnt = 0;
	struct fnic_io_req *io_req;
	spinlock_t *io_lock;
	unsigned long flags;
	struct scsi_cmnd *sc;
	struct scsi_lun fc_lun;
	struct fc_rport_libfc_priv *rdata;
	struct fc_lport *lport;
	struct fnic *fnic;
	struct fc_rport *cmd_rport;
	struct reset_stats *reset_stats;
	struct terminate_stats *term_stats;
	enum fnic_ioreq_state old_ioreq_state;

	/* Defensive NULL checks: walk rport -> rdata -> lport -> fnic */
	if (!rport) {
		printk(KERN_ERR "fnic_terminate_rport_io: rport is NULL\n");
		return;
	}
	rdata = rport->dd_data;

	if (!rdata) {
		printk(KERN_ERR "fnic_terminate_rport_io: rdata is NULL\n");
		return;
	}
	lport = rdata->local_port;

	if (!lport) {
		printk(KERN_ERR "fnic_terminate_rport_io: lport is NULL\n");
		return;
	}
	fnic = lport_priv(lport);
	FNIC_SCSI_DBG(KERN_DEBUG,
		      fnic->lport->host, "fnic_terminate_rport_io called"
		      " wwpn 0x%llx, wwnn0x%llx, rport 0x%p, portid 0x%06x\n",
		      rport->port_name, rport->node_name, rport,
		      rport->port_id);

	/* Device removal in progress: cleanup happens elsewhere */
	if (fnic->in_remove)
		return;

	reset_stats = &fnic->fnic_stats.reset_stats;
	term_stats = &fnic->fnic_stats.term_stats;

	for (tag = 0; tag < fnic->fnic_max_tag_id; tag++) {
		abt_tag = tag;
		io_lock = fnic_io_lock_tag(fnic, tag);
		spin_lock_irqsave(io_lock, flags);
		sc = scsi_host_find_tag(fnic->lport->host, tag);
		if (!sc) {
			spin_unlock_irqrestore(io_lock, flags);
			continue;
		}

		/* Only touch commands destined for the departing rport */
		cmd_rport = starget_to_rport(scsi_target(sc->device));
		if (rport != cmd_rport) {
			spin_unlock_irqrestore(io_lock, flags);
			continue;
		}

		io_req = (struct fnic_io_req *)CMD_SP(sc);

		/*
		 * NOTE(review): the rport != cmd_rport half of this test is
		 * redundant - it was already checked just above and neither
		 * side can change under io_lock; only !io_req matters here.
		 */
		if (!io_req || rport != cmd_rport) {
			spin_unlock_irqrestore(io_lock, flags);
			continue;
		}

		/* Skip device-reset commands not yet issued to the fw */
		if ((CMD_FLAGS(sc) & FNIC_DEVICE_RESET) &&
			(!(CMD_FLAGS(sc) & FNIC_DEV_RST_ISSUED))) {
			FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
			"fnic_terminate_rport_io dev rst not pending sc 0x%p\n",
			sc);
			spin_unlock_irqrestore(io_lock, flags);
			continue;
		}
		/*
		 * Found IO that is still pending with firmware and
		 * belongs to rport that went away
		 */
		if (CMD_STATE(sc) == FNIC_IOREQ_ABTS_PENDING) {
			/* An abort is already in flight for this IO */
			spin_unlock_irqrestore(io_lock, flags);
			continue;
		}
		if (io_req->abts_done) {
			/* Should not happen: no waiter expected here */
			shost_printk(KERN_ERR, fnic->lport->host,
			"fnic_terminate_rport_io: io_req->abts_done is set "
			"state is %s\n",
			fnic_ioreq_state_to_str(CMD_STATE(sc)));
		}
		if (!(CMD_FLAGS(sc) & FNIC_IO_ISSUED)) {
			FNIC_SCSI_DBG(KERN_INFO, fnic->lport->host,
				  "fnic_terminate_rport_io "
				  "IO not yet issued %p tag 0x%x flags "
				  "%x state %d\n",
				  sc, tag, CMD_FLAGS(sc), CMD_STATE(sc));
		}
		/* Remember old state so it can be restored on queue failure */
		old_ioreq_state = CMD_STATE(sc);
		CMD_STATE(sc) = FNIC_IOREQ_ABTS_PENDING;
		CMD_ABTS_STATUS(sc) = FCPIO_INVALID_CODE;
		if (CMD_FLAGS(sc) & FNIC_DEVICE_RESET) {
			atomic64_inc(&reset_stats->device_reset_terminates);
			abt_tag = (tag | FNIC_TAG_DEV_RST);
			FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
			"fnic_terminate_rport_io dev rst sc 0x%p\n", sc);
		}

		BUG_ON(io_req->abts_done);

		FNIC_SCSI_DBG(KERN_DEBUG,
			      fnic->lport->host,
			      "fnic_terminate_rport_io: Issuing abts\n");

		/* Drop io_lock before queueing to the firmware */
		spin_unlock_irqrestore(io_lock, flags);

		/* Now queue the abort command to firmware */
		int_to_scsilun(sc->device->lun, &fc_lun);

		if (fnic_queue_abort_io_req(fnic, abt_tag,
					    FCPIO_ITMF_ABT_TASK_TERM,
					    fc_lun.scsi_lun, io_req)) {
			/*
			 * Revert the cmd state back to old state, if
			 * it hasn't changed in between. This cmd will get
			 * aborted later by scsi_eh, or cleaned up during
			 * lun reset
			 */
			spin_lock_irqsave(io_lock, flags);
			if (CMD_STATE(sc) == FNIC_IOREQ_ABTS_PENDING)
				CMD_STATE(sc) = old_ioreq_state;
			spin_unlock_irqrestore(io_lock, flags);
		} else {
			/* Terminate queued: record it in the cmd flags */
			spin_lock_irqsave(io_lock, flags);
			if (CMD_FLAGS(sc) & FNIC_DEVICE_RESET)
				CMD_FLAGS(sc) |= FNIC_DEV_RST_TERM_ISSUED;
			else
				CMD_FLAGS(sc) |= FNIC_IO_INTERNAL_TERM_ISSUED;
			spin_unlock_irqrestore(io_lock, flags);
			atomic64_inc(&term_stats->terminates);
			term_cnt++;
		}
	}
	/* Update the max-terminates-per-reset statistic */
	if (term_cnt > atomic64_read(&term_stats->max_terminates))
		atomic64_set(&term_stats->max_terminates, term_cnt);

}
1812
1813/*
1814 * This function is exported to SCSI for sending abort cmnds.
1815 * A SCSI IO is represented by a io_req in the driver.
1816 * The ioreq is linked to the SCSI Cmd, thus a link with the ULP's IO.
1817 */
int fnic_abort_cmd(struct scsi_cmnd *sc)
{
	struct fc_lport *lp;
	struct fnic *fnic;
	struct fnic_io_req *io_req = NULL;
	struct fc_rport *rport;
	spinlock_t *io_lock;
	unsigned long flags;
	unsigned long start_time = 0;
	int ret = SUCCESS;
	u32 task_req = 0;
	struct scsi_lun fc_lun;
	struct fnic_stats *fnic_stats;
	struct abort_stats *abts_stats;
	struct terminate_stats *term_stats;
	int tag;
	unsigned long abt_issued_time;
	/* Completion signalled by the fw abort-completion path */
	DECLARE_COMPLETION_ONSTACK(tm_done);

	/* Wait for rport to unblock */
	fc_block_scsi_eh(sc);

	/* Get local-port, check ready and link up */
	lp = shost_priv(sc->device->host);

	fnic = lport_priv(lp);
	fnic_stats = &fnic->fnic_stats;
	abts_stats = &fnic->fnic_stats.abts_stats;
	term_stats = &fnic->fnic_stats.term_stats;

	rport = starget_to_rport(scsi_target(sc->device));
	tag = sc->request->tag;
	FNIC_SCSI_DBG(KERN_DEBUG,
		fnic->lport->host,
		"Abort Cmd called FCID 0x%x, LUN 0x%x TAG %x flags %x\n",
		rport->port_id, sc->device->lun, tag, CMD_FLAGS(sc));

	/* Reset the command flags before starting the abort sequence */
	CMD_FLAGS(sc) = FNIC_NO_FLAGS;

	/* Cannot abort via the fabric if the lport is not up */
	if (lp->state != LPORT_ST_READY || !(lp->link_up)) {
		ret = FAILED;
		goto fnic_abort_cmd_end;
	}

	/*
	 * Avoid a race between SCSI issuing the abort and the device
	 * completing the command.
	 *
	 * If the command is already completed by the fw cmpl code,
	 * we just return SUCCESS from here. This means that the abort
	 * succeeded. In the SCSI ML, since the timeout for command has
	 * happened, the completion wont actually complete the command
	 * and it will be considered as an aborted command
	 *
	 * The CMD_SP will not be cleared except while holding io_req_lock.
	 */
	io_lock = fnic_io_lock_hash(fnic, sc);
	spin_lock_irqsave(io_lock, flags);
	io_req = (struct fnic_io_req *)CMD_SP(sc);
	if (!io_req) {
		/* Already completed by firmware: abort succeeds trivially */
		spin_unlock_irqrestore(io_lock, flags);
		goto fnic_abort_cmd_end;
	}

	io_req->abts_done = &tm_done;

	if (CMD_STATE(sc) == FNIC_IOREQ_ABTS_PENDING) {
		/* An abort is already outstanding; just wait for it */
		spin_unlock_irqrestore(io_lock, flags);
		goto wait_pending;
	}

	/* Bucket how long the IO ran before the abort was requested */
	abt_issued_time = jiffies_to_msecs(jiffies) - jiffies_to_msecs(io_req->start_time);
	if (abt_issued_time <= 6000)
		atomic64_inc(&abts_stats->abort_issued_btw_0_to_6_sec);
	else if (abt_issued_time > 6000 && abt_issued_time <= 20000)
		atomic64_inc(&abts_stats->abort_issued_btw_6_to_20_sec);
	else if (abt_issued_time > 20000 && abt_issued_time <= 30000)
		atomic64_inc(&abts_stats->abort_issued_btw_20_to_30_sec);
	else if (abt_issued_time > 30000 && abt_issued_time <= 40000)
		atomic64_inc(&abts_stats->abort_issued_btw_30_to_40_sec);
	else if (abt_issued_time > 40000 && abt_issued_time <= 50000)
		atomic64_inc(&abts_stats->abort_issued_btw_40_to_50_sec);
	else if (abt_issued_time > 50000 && abt_issued_time <= 60000)
		atomic64_inc(&abts_stats->abort_issued_btw_50_to_60_sec);
	else
		atomic64_inc(&abts_stats->abort_issued_greater_than_60_sec);

	FNIC_SCSI_DBG(KERN_INFO, fnic->lport->host,
		"CBD Opcode: %02x Abort issued time: %lu msec\n", sc->cmnd[0], abt_issued_time);
	/*
	 * Command is still pending, need to abort it
	 * If the firmware completes the command after this point,
	 * the completion wont be done till mid-layer, since abort
	 * has already started.
	 */
	CMD_STATE(sc) = FNIC_IOREQ_ABTS_PENDING;
	CMD_ABTS_STATUS(sc) = FCPIO_INVALID_CODE;

	spin_unlock_irqrestore(io_lock, flags);

	/*
	 * Check readiness of the remote port. If the path to remote
	 * port is up, then send abts to the remote port to terminate
	 * the IO. Else, just locally terminate the IO in the firmware
	 */
	if (fc_remote_port_chkready(rport) == 0)
		task_req = FCPIO_ITMF_ABT_TASK;
	else {
		atomic64_inc(&fnic_stats->misc_stats.rport_not_ready);
		task_req = FCPIO_ITMF_ABT_TASK_TERM;
	}

	/* Now queue the abort command to firmware */
	int_to_scsilun(sc->device->lun, &fc_lun);

	if (fnic_queue_abort_io_req(fnic, sc->request->tag, task_req,
				    fc_lun.scsi_lun, io_req)) {
		/* Could not queue: detach the waiter and fail the abort */
		spin_lock_irqsave(io_lock, flags);
		io_req = (struct fnic_io_req *)CMD_SP(sc);
		if (io_req)
			io_req->abts_done = NULL;
		spin_unlock_irqrestore(io_lock, flags);
		ret = FAILED;
		goto fnic_abort_cmd_end;
	}
	if (task_req == FCPIO_ITMF_ABT_TASK) {
		CMD_FLAGS(sc) |= FNIC_IO_ABTS_ISSUED;
		atomic64_inc(&fnic_stats->abts_stats.aborts);
	} else {
		CMD_FLAGS(sc) |= FNIC_IO_TERM_ISSUED;
		atomic64_inc(&fnic_stats->term_stats.terminates);
	}

	/*
	 * We queued an abort IO, wait for its completion.
	 * Once the firmware completes the abort command, it will
	 * wake up this thread.
	 */
 wait_pending:
	wait_for_completion_timeout(&tm_done,
				    msecs_to_jiffies
				    (2 * fnic->config.ra_tov +
				     fnic->config.ed_tov));

	/* Check the abort status */
	spin_lock_irqsave(io_lock, flags);

	io_req = (struct fnic_io_req *)CMD_SP(sc);
	if (!io_req) {
		/* io_req disappeared while we waited: treat as failure */
		atomic64_inc(&fnic_stats->io_stats.ioreq_null);
		spin_unlock_irqrestore(io_lock, flags);
		CMD_FLAGS(sc) |= FNIC_IO_ABT_TERM_REQ_NULL;
		ret = FAILED;
		goto fnic_abort_cmd_end;
	}
	io_req->abts_done = NULL;

	/* fw did not complete abort, timed out */
	if (CMD_ABTS_STATUS(sc) == FCPIO_INVALID_CODE) {
		spin_unlock_irqrestore(io_lock, flags);
		if (task_req == FCPIO_ITMF_ABT_TASK) {
			FNIC_SCSI_DBG(KERN_INFO,
				fnic->lport->host, "Abort Driver Timeout\n");
			atomic64_inc(&abts_stats->abort_drv_timeouts);
		} else {
			FNIC_SCSI_DBG(KERN_INFO, fnic->lport->host,
				"Terminate Driver Timeout\n");
			atomic64_inc(&term_stats->terminate_drv_timeouts);
		}
		CMD_FLAGS(sc) |= FNIC_IO_ABT_TERM_TIMED_OUT;
		ret = FAILED;
		goto fnic_abort_cmd_end;
	}

	/* IO out of order */

	if (!(CMD_FLAGS(sc) & (FNIC_IO_ABORTED | FNIC_IO_DONE))) {
		spin_unlock_irqrestore(io_lock, flags);
		FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
			"Issuing Host reset due to out of order IO\n");

		ret = FAILED;
		goto fnic_abort_cmd_end;
	}

	CMD_STATE(sc) = FNIC_IOREQ_ABTS_COMPLETE;

	start_time = io_req->start_time;
	/*
	 * firmware completed the abort, check the status,
	 * free the io_req if successful. If abort fails,
	 * Device reset will clean the I/O.
	 */
	if (CMD_ABTS_STATUS(sc) == FCPIO_SUCCESS)
		CMD_SP(sc) = NULL;
	else {
		ret = FAILED;
		spin_unlock_irqrestore(io_lock, flags);
		goto fnic_abort_cmd_end;
	}

	spin_unlock_irqrestore(io_lock, flags);

	/* Tear down DMA mappings and free the io_req outside the lock */
	fnic_release_ioreq_buf(fnic, io_req, sc);
	mempool_free(io_req, fnic->io_req_pool);

	if (sc->scsi_done) {
	/* Call SCSI completion function to complete the IO */
		sc->result = (DID_ABORT << 16);
		sc->scsi_done(sc);
		atomic64_dec(&fnic_stats->io_stats.active_ios);
		if (atomic64_read(&fnic->io_cmpl_skip))
			atomic64_dec(&fnic->io_cmpl_skip);
		else
			atomic64_inc(&fnic_stats->io_stats.io_completions);
	}

fnic_abort_cmd_end:
	/* Record a trace entry for this abort attempt before returning */
	FNIC_TRACE(fnic_abort_cmd, sc->device->host->host_no,
		  sc->request->tag, sc,
		  jiffies_to_msecs(jiffies - start_time),
		  0, ((u64)sc->cmnd[0] << 32 |
		  (u64)sc->cmnd[2] << 24 | (u64)sc->cmnd[3] << 16 |
		  (u64)sc->cmnd[4] << 8 | sc->cmnd[5]),
		  (((u64)CMD_FLAGS(sc) << 32) | CMD_STATE(sc)));

	FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
		      "Returning from abort cmd type %x %s\n", task_req,
		      (ret == SUCCESS) ?
		      "SUCCESS" : "FAILED");
	return ret;
}
2050
/*
 * fnic_queue_dr_io_req
 * Post a LUN-reset (device reset) task-management request for @sc's LUN
 * on copy WQ 0.
 *
 * Returns 0 on success, FAILED if IOs are blocked, or -EAGAIN if no WQ
 * descriptor was available.
 */
static inline int fnic_queue_dr_io_req(struct fnic *fnic,
				       struct scsi_cmnd *sc,
				       struct fnic_io_req *io_req)
{
	struct vnic_wq_copy *wq = &fnic->wq_copy[0];
	struct Scsi_Host *host = fnic->lport->host;
	struct misc_stats *misc_stats = &fnic->fnic_stats.misc_stats;
	struct scsi_lun fc_lun;
	int ret = 0;
	unsigned long intr_flags;

	/*
	 * Check the IO-blocked flag and bump in_flight atomically under
	 * host_lock so a concurrent blocker sees a consistent count.
	 */
	spin_lock_irqsave(host->host_lock, intr_flags);
	if (unlikely(fnic_chk_state_flags_locked(fnic,
						FNIC_FLAGS_IO_BLOCKED))) {
		spin_unlock_irqrestore(host->host_lock, intr_flags);
		return FAILED;
	} else
		atomic_inc(&fnic->in_flight);
	spin_unlock_irqrestore(host->host_lock, intr_flags);

	spin_lock_irqsave(&fnic->wq_copy_lock[0], intr_flags);

	/* Reclaim completed descriptors when running low */
	if (vnic_wq_copy_desc_avail(wq) <= fnic->wq_copy_desc_low[0])
		free_wq_copy_descs(fnic, wq);

	if (!vnic_wq_copy_desc_avail(wq)) {
		FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
			  "queue_dr_io_req failure - no descriptors\n");
		atomic64_inc(&misc_stats->devrst_cpwq_alloc_failures);
		ret = -EAGAIN;
		goto lr_io_req_end;
	}

	/* fill in the lun info */
	int_to_scsilun(sc->device->lun, &fc_lun);

	/* Queue the LUN reset ITMF; tag is marked as a device reset */
	fnic_queue_wq_copy_desc_itmf(wq, sc->request->tag | FNIC_TAG_DEV_RST,
				     0, FCPIO_ITMF_LUN_RESET, SCSI_NO_TAG,
				     fc_lun.scsi_lun, io_req->port_id,
				     fnic->config.ra_tov, fnic->config.ed_tov);

	/* Track the high-water mark of outstanding firmware requests */
	atomic64_inc(&fnic->fnic_stats.fw_stats.active_fw_reqs);
	if (atomic64_read(&fnic->fnic_stats.fw_stats.active_fw_reqs) >
		  atomic64_read(&fnic->fnic_stats.fw_stats.max_fw_reqs))
		atomic64_set(&fnic->fnic_stats.fw_stats.max_fw_reqs,
		  atomic64_read(&fnic->fnic_stats.fw_stats.active_fw_reqs));

lr_io_req_end:
	spin_unlock_irqrestore(&fnic->wq_copy_lock[0], intr_flags);
	atomic_dec(&fnic->in_flight);

	return ret;
}
2104
2105/*
2106 * Clean up any pending aborts on the lun
2107 * For each outstanding IO on this lun, whose abort is not completed by fw,
2108 * issue a local abort. Wait for abort to complete. Return 0 if all commands
2109 * successfully aborted, 1 otherwise
2110 */
2111static int fnic_clean_pending_aborts(struct fnic *fnic,
2112                                     struct scsi_cmnd *lr_sc,
2113                                         bool new_sc)
2114
2115{
2116        int tag, abt_tag;
2117        struct fnic_io_req *io_req;
2118        spinlock_t *io_lock;
2119        unsigned long flags;
2120        int ret = 0;
2121        struct scsi_cmnd *sc;
2122        struct scsi_lun fc_lun;
2123        struct scsi_device *lun_dev = lr_sc->device;
2124        DECLARE_COMPLETION_ONSTACK(tm_done);
2125        enum fnic_ioreq_state old_ioreq_state;
2126
2127        for (tag = 0; tag < fnic->fnic_max_tag_id; tag++) {
2128                io_lock = fnic_io_lock_tag(fnic, tag);
2129                spin_lock_irqsave(io_lock, flags);
2130                sc = scsi_host_find_tag(fnic->lport->host, tag);
2131                /*
2132                 * ignore this lun reset cmd if issued using new SC
2133                 * or cmds that do not belong to this lun
2134                 */
2135                if (!sc || ((sc == lr_sc) && new_sc) || sc->device != lun_dev) {
2136                        spin_unlock_irqrestore(io_lock, flags);
2137                        continue;
2138                }
2139
2140                io_req = (struct fnic_io_req *)CMD_SP(sc);
2141
2142                if (!io_req || sc->device != lun_dev) {
2143                        spin_unlock_irqrestore(io_lock, flags);
2144                        continue;
2145                }
2146
2147                /*
2148                 * Found IO that is still pending with firmware and
2149                 * belongs to the LUN that we are resetting
2150                 */
2151                FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
2152                              "Found IO in %s on lun\n",
2153                              fnic_ioreq_state_to_str(CMD_STATE(sc)));
2154
2155                if (CMD_STATE(sc) == FNIC_IOREQ_ABTS_PENDING) {
2156                        spin_unlock_irqrestore(io_lock, flags);
2157                        continue;
2158                }
2159                if ((CMD_FLAGS(sc) & FNIC_DEVICE_RESET) &&
2160                        (!(CMD_FLAGS(sc) & FNIC_DEV_RST_ISSUED))) {
2161                        FNIC_SCSI_DBG(KERN_INFO, fnic->lport->host,
2162                                "%s dev rst not pending sc 0x%p\n", __func__,
2163                                sc);
2164                        spin_unlock_irqrestore(io_lock, flags);
2165                        continue;
2166                }
2167
2168                if (io_req->abts_done)
2169                        shost_printk(KERN_ERR, fnic->lport->host,
2170                          "%s: io_req->abts_done is set state is %s\n",
2171                          __func__, fnic_ioreq_state_to_str(CMD_STATE(sc)));
2172                old_ioreq_state = CMD_STATE(sc);
2173                /*
2174                 * Any pending IO issued prior to reset is expected to be
2175                 * in abts pending state, if not we need to set
2176                 * FNIC_IOREQ_ABTS_PENDING to indicate the IO is abort pending.
2177                 * When IO is completed, the IO will be handed over and
2178                 * handled in this function.
2179                 */
2180                CMD_STATE(sc) = FNIC_IOREQ_ABTS_PENDING;
2181
2182                BUG_ON(io_req->abts_done);
2183
2184                abt_tag = tag;
2185                if (CMD_FLAGS(sc) & FNIC_DEVICE_RESET) {
2186                        abt_tag |= FNIC_TAG_DEV_RST;
2187                        FNIC_SCSI_DBG(KERN_INFO, fnic->lport->host,
2188                                  "%s: dev rst sc 0x%p\n", __func__, sc);
2189                }
2190
2191                CMD_ABTS_STATUS(sc) = FCPIO_INVALID_CODE;
2192                io_req->abts_done = &tm_done;
2193                spin_unlock_irqrestore(io_lock, flags);
2194
2195                /* Now queue the abort command to firmware */
2196                int_to_scsilun(sc->device->lun, &fc_lun);
2197
2198                if (fnic_queue_abort_io_req(fnic, abt_tag,
2199                                            FCPIO_ITMF_ABT_TASK_TERM,
2200                                            fc_lun.scsi_lun, io_req)) {
2201                        spin_lock_irqsave(io_lock, flags);
2202                        io_req = (struct fnic_io_req *)CMD_SP(sc);
2203                        if (io_req)
2204                                io_req->abts_done = NULL;
2205                        if (CMD_STATE(sc) == FNIC_IOREQ_ABTS_PENDING)
2206                                CMD_STATE(sc) = old_ioreq_state;
2207                        spin_unlock_irqrestore(io_lock, flags);
2208                        ret = 1;
2209                        goto clean_pending_aborts_end;
2210                } else {
2211                        spin_lock_irqsave(io_lock, flags);
2212                        if (CMD_FLAGS(sc) & FNIC_DEVICE_RESET)
2213                                CMD_FLAGS(sc) |= FNIC_DEV_RST_TERM_ISSUED;
2214                        spin_unlock_irqrestore(io_lock, flags);
2215                }
2216                CMD_FLAGS(sc) |= FNIC_IO_INTERNAL_TERM_ISSUED;
2217
2218                wait_for_completion_timeout(&tm_done,
2219                                            msecs_to_jiffies
2220                                            (fnic->config.ed_tov));
2221
2222                /* Recheck cmd state to check if it is now aborted */
2223                spin_lock_irqsave(io_lock, flags);
2224                io_req = (struct fnic_io_req *)CMD_SP(sc);
2225                if (!io_req) {
2226                        spin_unlock_irqrestore(io_lock, flags);
2227                        CMD_FLAGS(sc) |= FNIC_IO_ABT_TERM_REQ_NULL;
2228                        continue;
2229                }
2230
2231                io_req->abts_done = NULL;
2232
2233                /* if abort is still pending with fw, fail */
2234                if (CMD_ABTS_STATUS(sc) == FCPIO_INVALID_CODE) {
2235                        spin_unlock_irqrestore(io_lock, flags);
2236                        CMD_FLAGS(sc) |= FNIC_IO_ABT_TERM_DONE;
2237                        ret = 1;
2238                        goto clean_pending_aborts_end;
2239                }
2240                CMD_STATE(sc) = FNIC_IOREQ_ABTS_COMPLETE;
2241
2242                /* original sc used for lr is handled by dev reset code */
2243                if (sc != lr_sc)
2244                        CMD_SP(sc) = NULL;
2245                spin_unlock_irqrestore(io_lock, flags);
2246
2247                /* original sc used for lr is handled by dev reset code */
2248                if (sc != lr_sc) {
2249                        fnic_release_ioreq_buf(fnic, io_req, sc);
2250                        mempool_free(io_req, fnic->io_req_pool);
2251                }
2252
2253                /*
2254                 * Any IO is returned during reset, it needs to call scsi_done
2255                 * to return the scsi_cmnd to upper layer.
2256                 */
2257                if (sc->scsi_done) {
2258                        /* Set result to let upper SCSI layer retry */
2259                        sc->result = DID_RESET << 16;
2260                        sc->scsi_done(sc);
2261                }
2262        }
2263
2264        schedule_timeout(msecs_to_jiffies(2 * fnic->config.ed_tov));
2265
2266        /* walk again to check, if IOs are still pending in fw */
2267        if (fnic_is_abts_pending(fnic, lr_sc))
2268                ret = FAILED;
2269
2270clean_pending_aborts_end:
2271        return ret;
2272}
2273
2274/**
2275 * fnic_scsi_host_start_tag
2276 * Allocates tagid from host's tag list
2277 **/
2278static inline int
2279fnic_scsi_host_start_tag(struct fnic *fnic, struct scsi_cmnd *sc)
2280{
2281        struct blk_queue_tag *bqt = fnic->lport->host->bqt;
2282        int tag, ret = SCSI_NO_TAG;
2283
2284        BUG_ON(!bqt);
2285        if (!bqt) {
2286                pr_err("Tags are not supported\n");
2287                goto end;
2288        }
2289
2290        do {
2291                tag = find_next_zero_bit(bqt->tag_map, bqt->max_depth, 1);
2292                if (tag >= bqt->max_depth) {
2293                        pr_err("Tag allocation failure\n");
2294                        goto end;
2295                }
2296        } while (test_and_set_bit(tag, bqt->tag_map));
2297
2298        bqt->tag_index[tag] = sc->request;
2299        sc->request->tag = tag;
2300        sc->tag = tag;
2301        if (!sc->request->special)
2302                sc->request->special = sc;
2303
2304        ret = tag;
2305
2306end:
2307        return ret;
2308}
2309
2310/**
2311 * fnic_scsi_host_end_tag
2312 * frees tag allocated by fnic_scsi_host_start_tag.
2313 **/
2314static inline void
2315fnic_scsi_host_end_tag(struct fnic *fnic, struct scsi_cmnd *sc)
2316{
2317        struct blk_queue_tag *bqt = fnic->lport->host->bqt;
2318        int tag = sc->request->tag;
2319
2320        if (tag == SCSI_NO_TAG)
2321                return;
2322
2323        BUG_ON(!bqt || !bqt->tag_index[tag]);
2324        if (!bqt)
2325                return;
2326
2327        bqt->tag_index[tag] = NULL;
2328        clear_bit(tag, bqt->tag_map);
2329
2330        return;
2331}
2332
2333/*
2334 * SCSI Eh thread issues a Lun Reset when one or more commands on a LUN
2335 * fail to get aborted. It calls driver's eh_device_reset with a SCSI command
2336 * on the LUN.
2337 */
2338int fnic_device_reset(struct scsi_cmnd *sc)
2339{
2340        struct fc_lport *lp;
2341        struct fnic *fnic;
2342        struct fnic_io_req *io_req = NULL;
2343        struct fc_rport *rport;
2344        int status;
2345        int ret = FAILED;
2346        spinlock_t *io_lock;
2347        unsigned long flags;
2348        unsigned long start_time = 0;
2349        struct scsi_lun fc_lun;
2350        struct fnic_stats *fnic_stats;
2351        struct reset_stats *reset_stats;
2352        int tag = 0;
2353        DECLARE_COMPLETION_ONSTACK(tm_done);
2354        int tag_gen_flag = 0;   /*to track tags allocated by fnic driver*/
2355        bool new_sc = 0;
2356
2357        /* Wait for rport to unblock */
2358        fc_block_scsi_eh(sc);
2359
2360        /* Get local-port, check ready and link up */
2361        lp = shost_priv(sc->device->host);
2362
2363        fnic = lport_priv(lp);
2364        fnic_stats = &fnic->fnic_stats;
2365        reset_stats = &fnic->fnic_stats.reset_stats;
2366
2367        atomic64_inc(&reset_stats->device_resets);
2368
2369        rport = starget_to_rport(scsi_target(sc->device));
2370        FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
2371                      "Device reset called FCID 0x%x, LUN 0x%x sc 0x%p\n",
2372                      rport->port_id, sc->device->lun, sc);
2373
2374        if (lp->state != LPORT_ST_READY || !(lp->link_up))
2375                goto fnic_device_reset_end;
2376
2377        /* Check if remote port up */
2378        if (fc_remote_port_chkready(rport)) {
2379                atomic64_inc(&fnic_stats->misc_stats.rport_not_ready);
2380                goto fnic_device_reset_end;
2381        }
2382
2383        CMD_FLAGS(sc) = FNIC_DEVICE_RESET;
2384        /* Allocate tag if not present */
2385
2386        tag = sc->request->tag;
2387        if (unlikely(tag < 0)) {
2388                /*
2389                 * XXX(hch): current the midlayer fakes up a struct
2390                 * request for the explicit reset ioctls, and those
2391                 * don't have a tag allocated to them.  The below
2392                 * code pokes into midlayer structures to paper over
2393                 * this design issue, but that won't work for blk-mq.
2394                 *
2395                 * Either someone who can actually test the hardware
2396                 * will have to come up with a similar hack for the
2397                 * blk-mq case, or we'll have to bite the bullet and
2398                 * fix the way the EH ioctls work for real, but until
2399                 * that happens we fail these explicit requests here.
2400                 */
2401
2402                tag = fnic_scsi_host_start_tag(fnic, sc);
2403                if (unlikely(tag == SCSI_NO_TAG))
2404                        goto fnic_device_reset_end;
2405                tag_gen_flag = 1;
2406                new_sc = 1;
2407        }
2408        io_lock = fnic_io_lock_hash(fnic, sc);
2409        spin_lock_irqsave(io_lock, flags);
2410        io_req = (struct fnic_io_req *)CMD_SP(sc);
2411
2412        /*
2413         * If there is a io_req attached to this command, then use it,
2414         * else allocate a new one.
2415         */
2416        if (!io_req) {
2417                io_req = mempool_alloc(fnic->io_req_pool, GFP_ATOMIC);
2418                if (!io_req) {
2419                        spin_unlock_irqrestore(io_lock, flags);
2420                        goto fnic_device_reset_end;
2421                }
2422                memset(io_req, 0, sizeof(*io_req));
2423                io_req->port_id = rport->port_id;
2424                CMD_SP(sc) = (char *)io_req;
2425        }
2426        io_req->dr_done = &tm_done;
2427        CMD_STATE(sc) = FNIC_IOREQ_CMD_PENDING;
2428        CMD_LR_STATUS(sc) = FCPIO_INVALID_CODE;
2429        spin_unlock_irqrestore(io_lock, flags);
2430
2431        FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host, "TAG %x\n", tag);
2432
2433        /*
2434         * issue the device reset, if enqueue failed, clean up the ioreq
2435         * and break assoc with scsi cmd
2436         */
2437        if (fnic_queue_dr_io_req(fnic, sc, io_req)) {
2438                spin_lock_irqsave(io_lock, flags);
2439                io_req = (struct fnic_io_req *)CMD_SP(sc);
2440                if (io_req)
2441                        io_req->dr_done = NULL;
2442                goto fnic_device_reset_clean;
2443        }
2444        spin_lock_irqsave(io_lock, flags);
2445        CMD_FLAGS(sc) |= FNIC_DEV_RST_ISSUED;
2446        spin_unlock_irqrestore(io_lock, flags);
2447
2448        /*
2449         * Wait on the local completion for LUN reset.  The io_req may be
2450         * freed while we wait since we hold no lock.
2451         */
2452        wait_for_completion_timeout(&tm_done,
2453                                    msecs_to_jiffies(FNIC_LUN_RESET_TIMEOUT));
2454
2455        spin_lock_irqsave(io_lock, flags);
2456        io_req = (struct fnic_io_req *)CMD_SP(sc);
2457        if (!io_req) {
2458                spin_unlock_irqrestore(io_lock, flags);
2459                FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
2460                                "io_req is null tag 0x%x sc 0x%p\n", tag, sc);
2461                goto fnic_device_reset_end;
2462        }
2463        io_req->dr_done = NULL;
2464
2465        status = CMD_LR_STATUS(sc);
2466
2467        /*
2468         * If lun reset not completed, bail out with failed. io_req
2469         * gets cleaned up during higher levels of EH
2470         */
2471        if (status == FCPIO_INVALID_CODE) {
2472                atomic64_inc(&reset_stats->device_reset_timeouts);
2473                FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
2474                              "Device reset timed out\n");
2475                CMD_FLAGS(sc) |= FNIC_DEV_RST_TIMED_OUT;
2476                spin_unlock_irqrestore(io_lock, flags);
2477                int_to_scsilun(sc->device->lun, &fc_lun);
2478                /*
2479                 * Issue abort and terminate on device reset request.
2480                 * If q'ing of terminate fails, retry it after a delay.
2481                 */
2482                while (1) {
2483                        spin_lock_irqsave(io_lock, flags);
2484                        if (CMD_FLAGS(sc) & FNIC_DEV_RST_TERM_ISSUED) {
2485                                spin_unlock_irqrestore(io_lock, flags);
2486                                break;
2487                        }
2488                        spin_unlock_irqrestore(io_lock, flags);
2489                        if (fnic_queue_abort_io_req(fnic,
2490                                tag | FNIC_TAG_DEV_RST,
2491                                FCPIO_ITMF_ABT_TASK_TERM,
2492                                fc_lun.scsi_lun, io_req)) {
2493                                wait_for_completion_timeout(&tm_done,
2494                                msecs_to_jiffies(FNIC_ABT_TERM_DELAY_TIMEOUT));
2495                        } else {
2496                                spin_lock_irqsave(io_lock, flags);
2497                                CMD_FLAGS(sc) |= FNIC_DEV_RST_TERM_ISSUED;
2498                                CMD_STATE(sc) = FNIC_IOREQ_ABTS_PENDING;
2499                                io_req->abts_done = &tm_done;
2500                                spin_unlock_irqrestore(io_lock, flags);
2501                                FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
2502                                "Abort and terminate issued on Device reset "
2503                                "tag 0x%x sc 0x%p\n", tag, sc);
2504                                break;
2505                        }
2506                }
2507                while (1) {
2508                        spin_lock_irqsave(io_lock, flags);
2509                        if (!(CMD_FLAGS(sc) & FNIC_DEV_RST_DONE)) {
2510                                spin_unlock_irqrestore(io_lock, flags);
2511                                wait_for_completion_timeout(&tm_done,
2512                                msecs_to_jiffies(FNIC_LUN_RESET_TIMEOUT));
2513                                break;
2514                        } else {
2515                                io_req = (struct fnic_io_req *)CMD_SP(sc);
2516                                io_req->abts_done = NULL;
2517                                goto fnic_device_reset_clean;
2518                        }
2519                }
2520        } else {
2521                spin_unlock_irqrestore(io_lock, flags);
2522        }
2523
2524        /* Completed, but not successful, clean up the io_req, return fail */
2525        if (status != FCPIO_SUCCESS) {
2526                spin_lock_irqsave(io_lock, flags);
2527                FNIC_SCSI_DBG(KERN_DEBUG,
2528                              fnic->lport->host,
2529                              "Device reset completed - failed\n");
2530                io_req = (struct fnic_io_req *)CMD_SP(sc);
2531                goto fnic_device_reset_clean;
2532        }
2533
2534        /*
2535         * Clean up any aborts on this lun that have still not
2536         * completed. If any of these fail, then LUN reset fails.
2537         * clean_pending_aborts cleans all cmds on this lun except
2538         * the lun reset cmd. If all cmds get cleaned, the lun reset
2539         * succeeds
2540         */
2541        if (fnic_clean_pending_aborts(fnic, sc, new_sc)) {
2542                spin_lock_irqsave(io_lock, flags);
2543                io_req = (struct fnic_io_req *)CMD_SP(sc);
2544                FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
2545                              "Device reset failed"
2546                              " since could not abort all IOs\n");
2547                goto fnic_device_reset_clean;
2548        }
2549
2550        /* Clean lun reset command */
2551        spin_lock_irqsave(io_lock, flags);
2552        io_req = (struct fnic_io_req *)CMD_SP(sc);
2553        if (io_req)
2554                /* Completed, and successful */
2555                ret = SUCCESS;
2556
2557fnic_device_reset_clean:
2558        if (io_req)
2559                CMD_SP(sc) = NULL;
2560
2561        spin_unlock_irqrestore(io_lock, flags);
2562
2563        if (io_req) {
2564                start_time = io_req->start_time;
2565                fnic_release_ioreq_buf(fnic, io_req, sc);
2566                mempool_free(io_req, fnic->io_req_pool);
2567        }
2568
2569fnic_device_reset_end:
2570        FNIC_TRACE(fnic_device_reset, sc->device->host->host_no,
2571                  sc->request->tag, sc,
2572                  jiffies_to_msecs(jiffies - start_time),
2573                  0, ((u64)sc->cmnd[0] << 32 |
2574                  (u64)sc->cmnd[2] << 24 | (u64)sc->cmnd[3] << 16 |
2575                  (u64)sc->cmnd[4] << 8 | sc->cmnd[5]),
2576                  (((u64)CMD_FLAGS(sc) << 32) | CMD_STATE(sc)));
2577
2578        /* free tag if it is allocated */
2579        if (unlikely(tag_gen_flag))
2580                fnic_scsi_host_end_tag(fnic, sc);
2581
2582        FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
2583                      "Returning from device reset %s\n",
2584                      (ret == SUCCESS) ?
2585                      "SUCCESS" : "FAILED");
2586
2587        if (ret == FAILED)
2588                atomic64_inc(&reset_stats->device_reset_failures);
2589
2590        return ret;
2591}
2592
2593/* Clean up all IOs, clean up libFC local port */
2594int fnic_reset(struct Scsi_Host *shost)
2595{
2596        struct fc_lport *lp;
2597        struct fnic *fnic;
2598        int ret = 0;
2599        struct reset_stats *reset_stats;
2600
2601        lp = shost_priv(shost);
2602        fnic = lport_priv(lp);
2603        reset_stats = &fnic->fnic_stats.reset_stats;
2604
2605        FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
2606                      "fnic_reset called\n");
2607
2608        atomic64_inc(&reset_stats->fnic_resets);
2609
2610        /*
2611         * Reset local port, this will clean up libFC exchanges,
2612         * reset remote port sessions, and if link is up, begin flogi
2613         */
2614        ret = lp->tt.lport_reset(lp);
2615
2616        FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
2617                      "Returning from fnic reset %s\n",
2618                      (ret == 0) ?
2619                      "SUCCESS" : "FAILED");
2620
2621        if (ret == 0)
2622                atomic64_inc(&reset_stats->fnic_reset_completions);
2623        else
2624                atomic64_inc(&reset_stats->fnic_reset_failures);
2625
2626        return ret;
2627}
2628
2629/*
2630 * SCSI Error handling calls driver's eh_host_reset if all prior
2631 * error handling levels return FAILED. If host reset completes
2632 * successfully, and if link is up, then Fabric login begins.
2633 *
2634 * Host Reset is the highest level of error recovery. If this fails, then
2635 * host is offlined by SCSI.
2636 *
2637 */
2638int fnic_host_reset(struct scsi_cmnd *sc)
2639{
2640        int ret;
2641        unsigned long wait_host_tmo;
2642        struct Scsi_Host *shost = sc->device->host;
2643        struct fc_lport *lp = shost_priv(shost);
2644        struct fnic *fnic = lport_priv(lp);
2645        unsigned long flags;
2646
2647        spin_lock_irqsave(&fnic->fnic_lock, flags);
2648        if (fnic->internal_reset_inprogress == 0) {
2649                fnic->internal_reset_inprogress = 1;
2650        } else {
2651                spin_unlock_irqrestore(&fnic->fnic_lock, flags);
2652                FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
2653                        "host reset in progress skipping another host reset\n");
2654                return SUCCESS;
2655        }
2656        spin_unlock_irqrestore(&fnic->fnic_lock, flags);
2657
2658        /*
2659         * If fnic_reset is successful, wait for fabric login to complete
2660         * scsi-ml tries to send a TUR to every device if host reset is
2661         * successful, so before returning to scsi, fabric should be up
2662         */
2663        ret = (fnic_reset(shost) == 0) ? SUCCESS : FAILED;
2664        if (ret == SUCCESS) {
2665                wait_host_tmo = jiffies + FNIC_HOST_RESET_SETTLE_TIME * HZ;
2666                ret = FAILED;
2667                while (time_before(jiffies, wait_host_tmo)) {
2668                        if ((lp->state == LPORT_ST_READY) &&
2669                            (lp->link_up)) {
2670                                ret = SUCCESS;
2671                                break;
2672                        }
2673                        ssleep(1);
2674                }
2675        }
2676
2677        spin_lock_irqsave(&fnic->fnic_lock, flags);
2678        fnic->internal_reset_inprogress = 0;
2679        spin_unlock_irqrestore(&fnic->fnic_lock, flags);
2680        return ret;
2681}
2682
2683/*
2684 * This fxn is called from libFC when host is removed
2685 */
2686void fnic_scsi_abort_io(struct fc_lport *lp)
2687{
2688        int err = 0;
2689        unsigned long flags;
2690        enum fnic_state old_state;
2691        struct fnic *fnic = lport_priv(lp);
2692        DECLARE_COMPLETION_ONSTACK(remove_wait);
2693
2694        /* Issue firmware reset for fnic, wait for reset to complete */
2695retry_fw_reset:
2696        spin_lock_irqsave(&fnic->fnic_lock, flags);
2697        if (unlikely(fnic->state == FNIC_IN_FC_TRANS_ETH_MODE)) {
2698                /* fw reset is in progress, poll for its completion */
2699                spin_unlock_irqrestore(&fnic->fnic_lock, flags);
2700                schedule_timeout(msecs_to_jiffies(100));
2701                goto retry_fw_reset;
2702        }
2703
2704        fnic->remove_wait = &remove_wait;
2705        old_state = fnic->state;
2706        fnic->state = FNIC_IN_FC_TRANS_ETH_MODE;
2707        fnic_update_mac_locked(fnic, fnic->ctlr.ctl_src_addr);
2708        spin_unlock_irqrestore(&fnic->fnic_lock, flags);
2709
2710        err = fnic_fw_reset_handler(fnic);
2711        if (err) {
2712                spin_lock_irqsave(&fnic->fnic_lock, flags);
2713                if (fnic->state == FNIC_IN_FC_TRANS_ETH_MODE)
2714                        fnic->state = old_state;
2715                fnic->remove_wait = NULL;
2716                spin_unlock_irqrestore(&fnic->fnic_lock, flags);
2717                return;
2718        }
2719
2720        /* Wait for firmware reset to complete */
2721        wait_for_completion_timeout(&remove_wait,
2722                                    msecs_to_jiffies(FNIC_RMDEVICE_TIMEOUT));
2723
2724        spin_lock_irqsave(&fnic->fnic_lock, flags);
2725        fnic->remove_wait = NULL;
2726        FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
2727                      "fnic_scsi_abort_io %s\n",
2728                      (fnic->state == FNIC_IN_ETH_MODE) ?
2729                      "SUCCESS" : "FAILED");
2730        spin_unlock_irqrestore(&fnic->fnic_lock, flags);
2731
2732}
2733
2734/*
2735 * This fxn called from libFC to clean up driver IO state on link down
2736 */
2737void fnic_scsi_cleanup(struct fc_lport *lp)
2738{
2739        unsigned long flags;
2740        enum fnic_state old_state;
2741        struct fnic *fnic = lport_priv(lp);
2742
2743        /* issue fw reset */
2744retry_fw_reset:
2745        spin_lock_irqsave(&fnic->fnic_lock, flags);
2746        if (unlikely(fnic->state == FNIC_IN_FC_TRANS_ETH_MODE)) {
2747                /* fw reset is in progress, poll for its completion */
2748                spin_unlock_irqrestore(&fnic->fnic_lock, flags);
2749                schedule_timeout(msecs_to_jiffies(100));
2750                goto retry_fw_reset;
2751        }
2752        old_state = fnic->state;
2753        fnic->state = FNIC_IN_FC_TRANS_ETH_MODE;
2754        fnic_update_mac_locked(fnic, fnic->ctlr.ctl_src_addr);
2755        spin_unlock_irqrestore(&fnic->fnic_lock, flags);
2756
2757        if (fnic_fw_reset_handler(fnic)) {
2758                spin_lock_irqsave(&fnic->fnic_lock, flags);
2759                if (fnic->state == FNIC_IN_FC_TRANS_ETH_MODE)
2760                        fnic->state = old_state;
2761                spin_unlock_irqrestore(&fnic->fnic_lock, flags);
2762        }
2763
2764}
2765
/*
 * Intentionally a no-op.  libFC expects this hook to exist; fnic does
 * its IO cleanup via fnic_scsi_cleanup()/fnic_scsi_abort_io() instead.
 */
void fnic_empty_scsi_cleanup(struct fc_lport *lp)
{
}
2769
2770void fnic_exch_mgr_reset(struct fc_lport *lp, u32 sid, u32 did)
2771{
2772        struct fnic *fnic = lport_priv(lp);
2773
2774        /* Non-zero sid, nothing to do */
2775        if (sid)
2776                goto call_fc_exch_mgr_reset;
2777
2778        if (did) {
2779                fnic_rport_exch_reset(fnic, did);
2780                goto call_fc_exch_mgr_reset;
2781        }
2782
2783        /*
2784         * sid = 0, did = 0
2785         * link down or device being removed
2786         */
2787        if (!fnic->in_remove)
2788                fnic_scsi_cleanup(lp);
2789        else
2790                fnic_scsi_abort_io(lp);
2791
2792        /* call libFC exch mgr reset to reset its exchanges */
2793call_fc_exch_mgr_reset:
2794        fc_exch_mgr_reset(lp, sid, did);
2795
2796}
2797
2798/*
2799 * fnic_is_abts_pending() is a helper function that
2800 * walks through tag map to check if there is any IOs pending,if there is one,
2801 * then it returns 1 (true), otherwise 0 (false)
2802 * if @lr_sc is non NULL, then it checks IOs specific to particular LUN,
2803 * otherwise, it checks for all IOs.
2804 */
2805int fnic_is_abts_pending(struct fnic *fnic, struct scsi_cmnd *lr_sc)
2806{
2807        int tag;
2808        struct fnic_io_req *io_req;
2809        spinlock_t *io_lock;
2810        unsigned long flags;
2811        int ret = 0;
2812        struct scsi_cmnd *sc;
2813        struct scsi_device *lun_dev = NULL;
2814
2815        if (lr_sc)
2816                lun_dev = lr_sc->device;
2817
2818        /* walk again to check, if IOs are still pending in fw */
2819        for (tag = 0; tag < fnic->fnic_max_tag_id; tag++) {
2820                sc = scsi_host_find_tag(fnic->lport->host, tag);
2821                /*
2822                 * ignore this lun reset cmd or cmds that do not belong to
2823                 * this lun
2824                 */
2825                if (!sc || (lr_sc && (sc->device != lun_dev || sc == lr_sc)))
2826                        continue;
2827
2828                io_lock = fnic_io_lock_hash(fnic, sc);
2829                spin_lock_irqsave(io_lock, flags);
2830
2831                io_req = (struct fnic_io_req *)CMD_SP(sc);
2832
2833                if (!io_req || sc->device != lun_dev) {
2834                        spin_unlock_irqrestore(io_lock, flags);
2835                        continue;
2836                }
2837
2838                /*
2839                 * Found IO that is still pending with firmware and
2840                 * belongs to the LUN that we are resetting
2841                 */
2842                FNIC_SCSI_DBG(KERN_INFO, fnic->lport->host,
2843                              "Found IO in %s on lun\n",
2844                              fnic_ioreq_state_to_str(CMD_STATE(sc)));
2845
2846                if (CMD_STATE(sc) == FNIC_IOREQ_ABTS_PENDING)
2847                        ret = 1;
2848                spin_unlock_irqrestore(io_lock, flags);
2849        }
2850
2851        return ret;
2852}
2853