linux/drivers/scsi/fnic/fnic_scsi.c
/*
 * Copyright 2008 Cisco Systems, Inc.  All rights reserved.
 * Copyright 2007 Nuova Systems, Inc.  All rights reserved.
 *
 * This program is free software; you may redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; version 2 of the License.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include <linux/mempool.h>
#include <linux/errno.h>
#include <linux/init.h>
#include <linux/workqueue.h>
#include <linux/pci.h>
#include <linux/scatterlist.h>
#include <linux/skbuff.h>
#include <linux/spinlock.h>
#include <linux/if_ether.h>
#include <linux/if_vlan.h>
#include <linux/delay.h>
#include <scsi/scsi.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_tcq.h>
#include <scsi/fc/fc_els.h>
#include <scsi/fc/fc_fcoe.h>
#include <scsi/libfc.h>
#include <scsi/fc_frame.h>
#include "fnic_io.h"
#include "fnic.h"

const char *fnic_state_str[] = {
        [FNIC_IN_FC_MODE] =           "FNIC_IN_FC_MODE",
        [FNIC_IN_FC_TRANS_ETH_MODE] = "FNIC_IN_FC_TRANS_ETH_MODE",
        [FNIC_IN_ETH_MODE] =          "FNIC_IN_ETH_MODE",
        [FNIC_IN_ETH_TRANS_FC_MODE] = "FNIC_IN_ETH_TRANS_FC_MODE",
};

static const char *fnic_ioreq_state_str[] = {
        [FNIC_IOREQ_CMD_PENDING] = "FNIC_IOREQ_CMD_PENDING",
        [FNIC_IOREQ_ABTS_PENDING] = "FNIC_IOREQ_ABTS_PENDING",
        [FNIC_IOREQ_ABTS_COMPLETE] = "FNIC_IOREQ_ABTS_COMPLETE",
        [FNIC_IOREQ_CMD_COMPLETE] = "FNIC_IOREQ_CMD_COMPLETE",
};

static const char *fcpio_status_str[] = {
        [FCPIO_SUCCESS] = "FCPIO_SUCCESS", /*0x0*/
        [FCPIO_INVALID_HEADER] = "FCPIO_INVALID_HEADER",
        [FCPIO_OUT_OF_RESOURCE] = "FCPIO_OUT_OF_RESOURCE",
        [FCPIO_INVALID_PARAM] = "FCPIO_INVALID_PARAM",
        [FCPIO_REQ_NOT_SUPPORTED] = "FCPIO_REQ_NOT_SUPPORTED",
        [FCPIO_IO_NOT_FOUND] = "FCPIO_IO_NOT_FOUND",
        [FCPIO_ABORTED] = "FCPIO_ABORTED", /*0x41*/
        [FCPIO_TIMEOUT] = "FCPIO_TIMEOUT",
        [FCPIO_SGL_INVALID] = "FCPIO_SGL_INVALID",
        [FCPIO_MSS_INVALID] = "FCPIO_MSS_INVALID",
        [FCPIO_DATA_CNT_MISMATCH] = "FCPIO_DATA_CNT_MISMATCH",
        [FCPIO_FW_ERR] = "FCPIO_FW_ERR",
        [FCPIO_ITMF_REJECTED] = "FCPIO_ITMF_REJECTED",
        [FCPIO_ITMF_FAILED] = "FCPIO_ITMF_FAILED",
        [FCPIO_ITMF_INCORRECT_LUN] = "FCPIO_ITMF_INCORRECT_LUN",
        [FCPIO_CMND_REJECTED] = "FCPIO_CMND_REJECTED",
        [FCPIO_NO_PATH_AVAIL] = "FCPIO_NO_PATH_AVAIL",
        [FCPIO_PATH_FAILED] = "FCPIO_PATH_FAILED",
        [FCPIO_LUNMAP_CHNG_PEND] = "FCPIO_LUNMAP_CHNG_PEND",
};

const char *fnic_state_to_str(unsigned int state)
{
        if (state >= ARRAY_SIZE(fnic_state_str) || !fnic_state_str[state])
                return "unknown";

        return fnic_state_str[state];
}

static const char *fnic_ioreq_state_to_str(unsigned int state)
{
        if (state >= ARRAY_SIZE(fnic_ioreq_state_str) ||
            !fnic_ioreq_state_str[state])
                return "unknown";

        return fnic_ioreq_state_str[state];
}

static const char *fnic_fcpio_status_to_str(unsigned int status)
{
        if (status >= ARRAY_SIZE(fcpio_status_str) || !fcpio_status_str[status])
                return "unknown";

        return fcpio_status_str[status];
}

static void fnic_cleanup_io(struct fnic *fnic, int exclude_id);

static inline spinlock_t *fnic_io_lock_hash(struct fnic *fnic,
                                            struct scsi_cmnd *sc)
{
        u32 hash = sc->request->tag & (FNIC_IO_LOCKS - 1);

        return &fnic->io_req_lock[hash];
}
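
/*
 * A minimal worked example of the lock hashing above, assuming the
 * driver's FNIC_IO_LOCKS is a power of two (say 64): tag 5 and tag 69
 * both map to io_req_lock[5], since 69 & 63 == 5. Any two commands
 * whose tags differ only in bits above the mask share a lock.
 */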

/*
 * Unmap the data buffer and sense buffer for an io_req,
 * also unmap and free the device-private scatter/gather list.
 */
static void fnic_release_ioreq_buf(struct fnic *fnic,
                                   struct fnic_io_req *io_req,
                                   struct scsi_cmnd *sc)
{
        if (io_req->sgl_list_pa)
                pci_unmap_single(fnic->pdev, io_req->sgl_list_pa,
                                 sizeof(io_req->sgl_list[0]) * io_req->sgl_cnt,
                                 PCI_DMA_TODEVICE);
        scsi_dma_unmap(sc);

        if (io_req->sgl_cnt)
                mempool_free(io_req->sgl_list_alloc,
                             fnic->io_sgl_pool[io_req->sgl_type]);
        if (io_req->sense_buf_pa)
                pci_unmap_single(fnic->pdev, io_req->sense_buf_pa,
                                 SCSI_SENSE_BUFFERSIZE, PCI_DMA_FROMDEVICE);
}

/* Free up Copy Wq descriptors. Called with copy_wq lock held */
static int free_wq_copy_descs(struct fnic *fnic, struct vnic_wq_copy *wq)
{
        /* if no Ack received from firmware, then nothing to clean */
        if (!fnic->fw_ack_recd[0])
                return 1;

        /*
         * Update desc_available count based on number of freed descriptors
         * Account for wraparound
         */
        if (wq->to_clean_index <= fnic->fw_ack_index[0])
                wq->ring.desc_avail += (fnic->fw_ack_index[0]
                                        - wq->to_clean_index + 1);
        else
                wq->ring.desc_avail += (wq->ring.desc_count
                                        - wq->to_clean_index
                                        + fnic->fw_ack_index[0] + 1);

        /*
         * just bump clean index to ack_index+1 accounting for wraparound
         * this will essentially free up all descriptors between
         * to_clean_index and fw_ack_index, both inclusive
         */
        wq->to_clean_index =
                (fnic->fw_ack_index[0] + 1) % wq->ring.desc_count;

        /* we have processed the acks received so far */
        fnic->fw_ack_recd[0] = 0;
        return 0;
}
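
/*
 * Worked example of the wraparound accounting above, with hypothetical
 * numbers: on a ring with desc_count = 64, to_clean_index = 60 and
 * fw_ack_index = 3, the else branch credits 64 - 60 + 3 + 1 = 8
 * descriptors (ring slots 60..63 and 0..3, both inclusive), and
 * to_clean_index then advances to (3 + 1) % 64 = 4.
 */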


/*
 * fnic_fw_reset_handler
 * Routine to send reset msg to fw
 */
int fnic_fw_reset_handler(struct fnic *fnic)
{
        struct vnic_wq_copy *wq = &fnic->wq_copy[0];
        int ret = 0;
        unsigned long flags;

        spin_lock_irqsave(&fnic->wq_copy_lock[0], flags);

        if (vnic_wq_copy_desc_avail(wq) <= fnic->wq_copy_desc_low[0])
                free_wq_copy_descs(fnic, wq);

        if (!vnic_wq_copy_desc_avail(wq))
                ret = -EAGAIN;
        else
                fnic_queue_wq_copy_desc_fw_reset(wq, SCSI_NO_TAG);

        spin_unlock_irqrestore(&fnic->wq_copy_lock[0], flags);

        if (!ret)
                FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
                              "Issued fw reset\n");
        else
                FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
                              "Failed to issue fw reset\n");
        return ret;
}


/*
 * fnic_flogi_reg_handler
 * Routine to send flogi register msg to fw
 */
int fnic_flogi_reg_handler(struct fnic *fnic)
{
        struct vnic_wq_copy *wq = &fnic->wq_copy[0];
        u8 gw_mac[ETH_ALEN];
        int ret = 0;
        unsigned long flags;

        spin_lock_irqsave(&fnic->wq_copy_lock[0], flags);

        if (vnic_wq_copy_desc_avail(wq) <= fnic->wq_copy_desc_low[0])
                free_wq_copy_descs(fnic, wq);

        if (!vnic_wq_copy_desc_avail(wq)) {
                ret = -EAGAIN;
                goto flogi_reg_ioreq_end;
        }

        if (fnic->fcoui_mode)
                memset(gw_mac, 0xff, ETH_ALEN);
        else
                memcpy(gw_mac, fnic->dest_addr, ETH_ALEN);

        fnic_queue_wq_copy_desc_flogi_reg(wq, SCSI_NO_TAG,
                                          FCPIO_FLOGI_REG_GW_DEST,
                                          fnic->s_id,
                                          gw_mac);

flogi_reg_ioreq_end:
        spin_unlock_irqrestore(&fnic->wq_copy_lock[0], flags);

        if (!ret)
                FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
                              "flogi reg issued\n");

        return ret;
}

/*
 * fnic_queue_wq_copy_desc
 * Routine to enqueue a wq copy desc
 */
static inline int fnic_queue_wq_copy_desc(struct fnic *fnic,
                                          struct vnic_wq_copy *wq,
                                          struct fnic_io_req *io_req,
                                          struct scsi_cmnd *sc,
                                          int sg_count)
{
        struct scatterlist *sg;
        struct fc_rport *rport = starget_to_rport(scsi_target(sc->device));
        struct fc_rport_libfc_priv *rp = rport->dd_data;
        struct host_sg_desc *desc;
        u8 pri_tag = 0;
        unsigned int i;
        unsigned long intr_flags;
        int flags;
        u8 exch_flags;
        struct scsi_lun fc_lun;
        char msg[2];

        if (sg_count) {
                /* For each SGE, create a device desc entry */
                desc = io_req->sgl_list;
                for_each_sg(scsi_sglist(sc), sg, sg_count, i) {
                        desc->addr = cpu_to_le64(sg_dma_address(sg));
                        desc->len = cpu_to_le32(sg_dma_len(sg));
                        desc->_resvd = 0;
                        desc++;
                }

                io_req->sgl_list_pa = pci_map_single
                        (fnic->pdev,
                         io_req->sgl_list,
                         sizeof(io_req->sgl_list[0]) * sg_count,
                         PCI_DMA_TODEVICE);
        }

        io_req->sense_buf_pa = pci_map_single(fnic->pdev,
                                              sc->sense_buffer,
                                              SCSI_SENSE_BUFFERSIZE,
                                              PCI_DMA_FROMDEVICE);

        int_to_scsilun(sc->device->lun, &fc_lun);

        pri_tag = FCPIO_ICMND_PTA_SIMPLE;
        msg[0] = MSG_SIMPLE_TAG;
        scsi_populate_tag_msg(sc, msg);
        if (msg[0] == MSG_ORDERED_TAG)
                pri_tag = FCPIO_ICMND_PTA_ORDERED;

        /* Enqueue the descriptor in the Copy WQ */
        spin_lock_irqsave(&fnic->wq_copy_lock[0], intr_flags);

        if (vnic_wq_copy_desc_avail(wq) <= fnic->wq_copy_desc_low[0])
                free_wq_copy_descs(fnic, wq);

        if (unlikely(!vnic_wq_copy_desc_avail(wq))) {
                spin_unlock_irqrestore(&fnic->wq_copy_lock[0], intr_flags);
                return SCSI_MLQUEUE_HOST_BUSY;
        }

        flags = 0;
        if (sc->sc_data_direction == DMA_FROM_DEVICE)
                flags = FCPIO_ICMND_RDDATA;
        else if (sc->sc_data_direction == DMA_TO_DEVICE)
                flags = FCPIO_ICMND_WRDATA;

        exch_flags = 0;
        if ((fnic->config.flags & VFCF_FCP_SEQ_LVL_ERR) &&
            (rp->flags & FC_RP_FLAGS_RETRY))
                exch_flags |= FCPIO_ICMND_SRFLAG_RETRY;

        fnic_queue_wq_copy_desc_icmnd_16(wq, sc->request->tag,
                                         0, exch_flags, io_req->sgl_cnt,
                                         SCSI_SENSE_BUFFERSIZE,
                                         io_req->sgl_list_pa,
                                         io_req->sense_buf_pa,
                                         0, /* scsi cmd ref, always 0 */
                                         pri_tag, /* scsi pri and tag */
                                         flags, /* command flags */
                                         sc->cmnd, scsi_bufflen(sc),
                                         fc_lun.scsi_lun, io_req->port_id,
                                         rport->maxframe_size, rp->r_a_tov,
                                         rp->e_d_tov);

        spin_unlock_irqrestore(&fnic->wq_copy_lock[0], intr_flags);
        return 0;
}

/*
 * fnic_queuecommand
 * Routine to send a scsi cdb
 * Called with host_lock held and interrupts disabled.
 */
int fnic_queuecommand(struct scsi_cmnd *sc, void (*done)(struct scsi_cmnd *))
{
        struct fc_lport *lp;
        struct fc_rport *rport;
        struct fnic_io_req *io_req;
        struct fnic *fnic;
        struct vnic_wq_copy *wq;
        int ret;
        int sg_count;
        unsigned long flags;
        unsigned long ptr;

        rport = starget_to_rport(scsi_target(sc->device));
        ret = fc_remote_port_chkready(rport);
        if (ret) {
                sc->result = ret;
                done(sc);
                return 0;
        }

        lp = shost_priv(sc->device->host);
        if (lp->state != LPORT_ST_READY || !(lp->link_up))
                return SCSI_MLQUEUE_HOST_BUSY;

        /*
         * Release host lock, use driver resource specific locks from here.
         * Don't re-enable interrupts in case they were disabled prior to the
         * caller disabling them.
         */
        spin_unlock(lp->host->host_lock);

        /* Get a new io_req for this SCSI IO */
        fnic = lport_priv(lp);

        io_req = mempool_alloc(fnic->io_req_pool, GFP_ATOMIC);
        if (!io_req) {
                ret = SCSI_MLQUEUE_HOST_BUSY;
                goto out;
        }
        memset(io_req, 0, sizeof(*io_req));

        /* Map the data buffer; a failed map must requeue the command */
        sg_count = scsi_dma_map(sc);
        if (sg_count < 0) {
                ret = SCSI_MLQUEUE_HOST_BUSY;
                mempool_free(io_req, fnic->io_req_pool);
                goto out;
        }

        /* Determine the type of scatter/gather list we need */
        io_req->sgl_cnt = sg_count;
        io_req->sgl_type = FNIC_SGL_CACHE_DFLT;
        if (sg_count > FNIC_DFLT_SG_DESC_CNT)
                io_req->sgl_type = FNIC_SGL_CACHE_MAX;

        if (sg_count) {
                io_req->sgl_list =
                        mempool_alloc(fnic->io_sgl_pool[io_req->sgl_type],
                                      GFP_ATOMIC | GFP_DMA);
                if (!io_req->sgl_list) {
                        ret = SCSI_MLQUEUE_HOST_BUSY;
                        scsi_dma_unmap(sc);
                        mempool_free(io_req, fnic->io_req_pool);
                        goto out;
                }

                /* Cache sgl list allocated address before alignment */
                io_req->sgl_list_alloc = io_req->sgl_list;
                ptr = (unsigned long) io_req->sgl_list;
                if (ptr % FNIC_SG_DESC_ALIGN) {
                        io_req->sgl_list = (struct host_sg_desc *)
                                (((unsigned long) ptr
                                  + FNIC_SG_DESC_ALIGN - 1)
                                 & ~(FNIC_SG_DESC_ALIGN - 1));
                }
        }
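
        /*
         * Sketch of the alignment math above, with hypothetical numbers:
         * if FNIC_SG_DESC_ALIGN is 16 and the mempool returns 0x1009,
         * then 0x1009 + 15 = 0x1018, and 0x1018 & ~15 = 0x1010, the next
         * 16-byte boundary. The unaligned address stays cached in
         * sgl_list_alloc so the free path can hand the pool back the
         * original pointer.
         */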

        /* initialize rest of io_req */
        io_req->port_id = rport->port_id;
        CMD_STATE(sc) = FNIC_IOREQ_CMD_PENDING;
        CMD_SP(sc) = (char *)io_req;
        sc->scsi_done = done;

        /* create copy wq desc and enqueue it */
        wq = &fnic->wq_copy[0];
        ret = fnic_queue_wq_copy_desc(fnic, wq, io_req, sc, sg_count);
        if (ret) {
                /*
                 * In case another thread cancelled the request,
                 * refetch the pointer under the lock.
                 */
                spinlock_t *io_lock = fnic_io_lock_hash(fnic, sc);

                spin_lock_irqsave(io_lock, flags);
                io_req = (struct fnic_io_req *)CMD_SP(sc);
                CMD_SP(sc) = NULL;
                CMD_STATE(sc) = FNIC_IOREQ_CMD_COMPLETE;
                spin_unlock_irqrestore(io_lock, flags);
                if (io_req) {
                        fnic_release_ioreq_buf(fnic, io_req, sc);
                        mempool_free(io_req, fnic->io_req_pool);
                }
        }
out:
        /* acquire host lock before returning to SCSI */
        spin_lock(lp->host->host_lock);
        return ret;
}

/*
 * fnic_fcpio_fw_reset_cmpl_handler
 * Routine to handle fw reset completion
 */
static int fnic_fcpio_fw_reset_cmpl_handler(struct fnic *fnic,
                                            struct fcpio_fw_req *desc)
{
        u8 type;
        u8 hdr_status;
        struct fcpio_tag tag;
        int ret = 0;
        struct fc_frame *flogi;
        unsigned long flags;

        fcpio_header_dec(&desc->hdr, &type, &hdr_status, &tag);

        /* Clean up all outstanding io requests */
        fnic_cleanup_io(fnic, SCSI_NO_TAG);

        spin_lock_irqsave(&fnic->fnic_lock, flags);

        flogi = fnic->flogi;
        fnic->flogi = NULL;

        /* fnic should be in FC_TRANS_ETH_MODE */
        if (fnic->state == FNIC_IN_FC_TRANS_ETH_MODE) {
                /* Check status of reset completion */
                if (!hdr_status) {
                        FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
                                      "reset cmpl success\n");
                        /* Ready to send flogi out */
                        fnic->state = FNIC_IN_ETH_MODE;
                } else {
                        FNIC_SCSI_DBG(KERN_DEBUG,
                                      fnic->lport->host,
                                      "fnic fw_reset : failed %s\n",
                                      fnic_fcpio_status_to_str(hdr_status));

                        /*
                         * Unable to change to eth mode, cannot send out flogi
                         * Change state to fc mode, so that subsequent Flogi
                         * requests from libFC will cause more attempts to
                         * reset the firmware. Free the cached flogi
                         */
                        fnic->state = FNIC_IN_FC_MODE;
                        ret = -1;
                }
        } else {
                FNIC_SCSI_DBG(KERN_DEBUG,
                              fnic->lport->host,
                              "Unexpected state %s while processing"
                              " reset cmpl\n", fnic_state_to_str(fnic->state));
                ret = -1;
        }

        /* Thread removing device blocks till firmware reset is complete */
        if (fnic->remove_wait)
                complete(fnic->remove_wait);

        /*
         * If fnic is being removed, or fw reset failed
         * free the flogi frame. Else, send it out
         */
        if (fnic->remove_wait || ret) {
                fnic->flogi_oxid = FC_XID_UNKNOWN;
                spin_unlock_irqrestore(&fnic->fnic_lock, flags);
                if (flogi)
                        dev_kfree_skb_irq(fp_skb(flogi));
                goto reset_cmpl_handler_end;
        }

        spin_unlock_irqrestore(&fnic->fnic_lock, flags);

        if (flogi)
                ret = fnic_send_frame(fnic, flogi);

 reset_cmpl_handler_end:
        return ret;
}

/*
 * fnic_fcpio_flogi_reg_cmpl_handler
 * Routine to handle flogi register completion
 */
static int fnic_fcpio_flogi_reg_cmpl_handler(struct fnic *fnic,
                                             struct fcpio_fw_req *desc)
{
        u8 type;
        u8 hdr_status;
        struct fcpio_tag tag;
        int ret = 0;
        struct fc_frame *flogi_resp = NULL;
        unsigned long flags;
        struct sk_buff *skb;

        fcpio_header_dec(&desc->hdr, &type, &hdr_status, &tag);

        /* Update fnic state based on status of flogi reg completion */
        spin_lock_irqsave(&fnic->fnic_lock, flags);

        flogi_resp = fnic->flogi_resp;
        fnic->flogi_resp = NULL;

        if (fnic->state == FNIC_IN_ETH_TRANS_FC_MODE) {

                /* Check flogi registration completion status */
                if (!hdr_status) {
                        FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
                                      "flogi reg succeeded\n");
                        fnic->state = FNIC_IN_FC_MODE;
                } else {
                        FNIC_SCSI_DBG(KERN_DEBUG,
                                      fnic->lport->host,
                                      "fnic flogi reg failed: %s\n",
                                      fnic_fcpio_status_to_str(hdr_status));
                        fnic->state = FNIC_IN_ETH_MODE;
                        ret = -1;
                }
        } else {
                FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
                              "Unexpected fnic state %s while"
                              " processing flogi reg completion\n",
                              fnic_state_to_str(fnic->state));
                ret = -1;
        }

        /* Successful flogi reg cmpl, pass frame to LibFC */
        if (!ret && flogi_resp) {
                if (fnic->stop_rx_link_events) {
                        spin_unlock_irqrestore(&fnic->fnic_lock, flags);
                        goto reg_cmpl_handler_end;
                }
                skb = (struct sk_buff *)flogi_resp;
                /* Use fr_flags to indicate whether flogi resp or not */
                fr_flags(flogi_resp) = 1;
                fr_dev(flogi_resp) = fnic->lport;
                spin_unlock_irqrestore(&fnic->fnic_lock, flags);

                skb_queue_tail(&fnic->frame_queue, skb);
                queue_work(fnic_event_queue, &fnic->frame_work);

        } else {
                spin_unlock_irqrestore(&fnic->fnic_lock, flags);
                if (flogi_resp)
                        dev_kfree_skb_irq(fp_skb(flogi_resp));
        }

reg_cmpl_handler_end:
        return ret;
}

static inline int is_ack_index_in_range(struct vnic_wq_copy *wq,
                                        u16 request_out)
{
        if (wq->to_clean_index <= wq->to_use_index) {
                /* out of range, stale request_out index */
                if (request_out < wq->to_clean_index ||
                    request_out >= wq->to_use_index)
                        return 0;
        } else {
                /* out of range, stale request_out index */
                if (request_out < wq->to_clean_index &&
                    request_out >= wq->to_use_index)
                        return 0;
        }
        /* request_out index is in range */
        return 1;
}
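
/*
 * Worked example of the range check above, with hypothetical indices:
 * with to_clean_index = 10 and to_use_index = 14, only request_out
 * values 10..13 are accepted. If the ring has wrapped, say
 * to_clean_index = 60 and to_use_index = 4 on a 64-entry ring, the
 * valid values are 60..63 and 0..3; anything in 4..59 is a stale ack
 * and is ignored.
 */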


/*
 * Mark that ack received and store the Ack index. If there are multiple
 * acks received before Tx thread cleans it up, the latest value will be
 * used which is correct behavior. This state should be in the copy Wq
 * instead of in the fnic
 */
static inline void fnic_fcpio_ack_handler(struct fnic *fnic,
                                          unsigned int cq_index,
                                          struct fcpio_fw_req *desc)
{
        struct vnic_wq_copy *wq;
        u16 request_out = desc->u.ack.request_out;
        unsigned long flags;

        /* mark the ack state */
        wq = &fnic->wq_copy[cq_index - fnic->raw_wq_count - fnic->rq_count];
        spin_lock_irqsave(&fnic->wq_copy_lock[0], flags);

        if (is_ack_index_in_range(wq, request_out)) {
                fnic->fw_ack_index[0] = request_out;
                fnic->fw_ack_recd[0] = 1;
        }
        spin_unlock_irqrestore(&fnic->wq_copy_lock[0], flags);
}

/*
 * fnic_fcpio_icmnd_cmpl_handler
 * Routine to handle icmnd completions
 */
static void fnic_fcpio_icmnd_cmpl_handler(struct fnic *fnic,
                                         struct fcpio_fw_req *desc)
{
        u8 type;
        u8 hdr_status;
        struct fcpio_tag tag;
        u32 id;
        u64 xfer_len = 0;
        struct fcpio_icmnd_cmpl *icmnd_cmpl;
        struct fnic_io_req *io_req;
        struct scsi_cmnd *sc;
        unsigned long flags;
        spinlock_t *io_lock;

        /* Decode the cmpl description to get the io_req id */
        fcpio_header_dec(&desc->hdr, &type, &hdr_status, &tag);
        fcpio_tag_id_dec(&tag, &id);

        if (id >= FNIC_MAX_IO_REQ)
                return;

        sc = scsi_host_find_tag(fnic->lport->host, id);
        WARN_ON_ONCE(!sc);
        if (!sc)
                return;

        io_lock = fnic_io_lock_hash(fnic, sc);
        spin_lock_irqsave(io_lock, flags);
        io_req = (struct fnic_io_req *)CMD_SP(sc);
        WARN_ON_ONCE(!io_req);
        if (!io_req) {
                spin_unlock_irqrestore(io_lock, flags);
                return;
        }

        /* firmware completed the io */
        io_req->io_completed = 1;

        /*
         * if SCSI-ML has already issued abort on this command,
         * ignore completion of the IO. The abts path will clean it up
         */
        if (CMD_STATE(sc) == FNIC_IOREQ_ABTS_PENDING) {
                spin_unlock_irqrestore(io_lock, flags);
                return;
        }

        /* Mark the IO as complete */
        CMD_STATE(sc) = FNIC_IOREQ_CMD_COMPLETE;

        icmnd_cmpl = &desc->u.icmnd_cmpl;

        switch (hdr_status) {
        case FCPIO_SUCCESS:
                sc->result = (DID_OK << 16) | icmnd_cmpl->scsi_status;
                xfer_len = scsi_bufflen(sc);
                scsi_set_resid(sc, icmnd_cmpl->residual);

                if (icmnd_cmpl->flags & FCPIO_ICMND_CMPL_RESID_UNDER)
                        xfer_len -= icmnd_cmpl->residual;

                /*
                 * If queue_full, then try to reduce queue depth for all
                 * LUNS on the target. Todo: this should be accompanied
                 * by a periodic queue_depth rampup based on successful
                 * IO completion.
                 */
                if (icmnd_cmpl->scsi_status == QUEUE_FULL) {
                        struct scsi_device *t_sdev;
                        int qd = 0;

                        shost_for_each_device(t_sdev, sc->device->host) {
                                if (t_sdev->id != sc->device->id)
                                        continue;

                                if (t_sdev->queue_depth > 1) {
                                        qd = scsi_track_queue_full
                                                (t_sdev,
                                                 t_sdev->queue_depth - 1);
                                        if (qd == -1)
                                                qd = t_sdev->host->cmd_per_lun;
                                        shost_printk(KERN_INFO,
                                                     fnic->lport->host,
                                                     "scsi[%d:%d:%d:%d"
                                                     "] queue full detected,"
                                                     " new depth = %d\n",
                                                     t_sdev->host->host_no,
                                                     t_sdev->channel,
                                                     t_sdev->id, t_sdev->lun,
                                                     t_sdev->queue_depth);
                                }
                        }
                }
                break;

        case FCPIO_TIMEOUT:          /* request was timed out */
                sc->result = (DID_TIME_OUT << 16) | icmnd_cmpl->scsi_status;
                break;

        case FCPIO_ABORTED:          /* request was aborted */
                sc->result = (DID_ERROR << 16) | icmnd_cmpl->scsi_status;
                break;

        case FCPIO_DATA_CNT_MISMATCH: /* recv/sent more/less data than exp. */
                scsi_set_resid(sc, icmnd_cmpl->residual);
                sc->result = (DID_ERROR << 16) | icmnd_cmpl->scsi_status;
                break;

        case FCPIO_OUT_OF_RESOURCE:  /* out of resources to complete request */
                sc->result = (DID_REQUEUE << 16) | icmnd_cmpl->scsi_status;
                break;
        case FCPIO_INVALID_HEADER:   /* header contains invalid data */
        case FCPIO_INVALID_PARAM:    /* some parameter in request invalid */
        case FCPIO_REQ_NOT_SUPPORTED:/* request type is not supported */
        case FCPIO_IO_NOT_FOUND:     /* requested I/O was not found */
        case FCPIO_SGL_INVALID:      /* request was aborted due to sgl error */
        case FCPIO_MSS_INVALID:      /* request was aborted due to mss error */
        case FCPIO_FW_ERR:           /* request was terminated due to fw error */
        default:
                shost_printk(KERN_ERR, fnic->lport->host, "hdr status = %s\n",
                             fnic_fcpio_status_to_str(hdr_status));
                sc->result = (DID_ERROR << 16) | icmnd_cmpl->scsi_status;
                break;
        }

        /* Break link with the SCSI command */
        CMD_SP(sc) = NULL;

        spin_unlock_irqrestore(io_lock, flags);

        fnic_release_ioreq_buf(fnic, io_req, sc);

        mempool_free(io_req, fnic->io_req_pool);

        if (sc->sc_data_direction == DMA_FROM_DEVICE) {
                fnic->lport->host_stats.fcp_input_requests++;
                fnic->fcp_input_bytes += xfer_len;
        } else if (sc->sc_data_direction == DMA_TO_DEVICE) {
                fnic->lport->host_stats.fcp_output_requests++;
                fnic->fcp_output_bytes += xfer_len;
        } else
                fnic->lport->host_stats.fcp_control_requests++;

        /* Call SCSI completion function to complete the IO */
        if (sc->scsi_done)
                sc->scsi_done(sc);
}
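
/*
 * Note on the sc->result encodings above: the SCSI midlayer packs the
 * host byte into bits 16-23 and the SCSI status byte into bits 0-7,
 * so, for example, (DID_OK << 16) | SAM_STAT_CHECK_CONDITION reports
 * a transport-level success that carries a check condition from the
 * device.
 */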

/*
 * fnic_fcpio_itmf_cmpl_handler
 * Routine to handle itmf completions
 */
static void fnic_fcpio_itmf_cmpl_handler(struct fnic *fnic,
                                        struct fcpio_fw_req *desc)
{
        u8 type;
        u8 hdr_status;
        struct fcpio_tag tag;
        u32 id;
        struct scsi_cmnd *sc;
        struct fnic_io_req *io_req;
        unsigned long flags;
        spinlock_t *io_lock;

        fcpio_header_dec(&desc->hdr, &type, &hdr_status, &tag);
        fcpio_tag_id_dec(&tag, &id);

        if ((id & FNIC_TAG_MASK) >= FNIC_MAX_IO_REQ)
                return;

        sc = scsi_host_find_tag(fnic->lport->host, id & FNIC_TAG_MASK);
        WARN_ON_ONCE(!sc);
        if (!sc)
                return;

        io_lock = fnic_io_lock_hash(fnic, sc);
        spin_lock_irqsave(io_lock, flags);
        io_req = (struct fnic_io_req *)CMD_SP(sc);
        WARN_ON_ONCE(!io_req);
        if (!io_req) {
                spin_unlock_irqrestore(io_lock, flags);
                return;
        }

        if (id & FNIC_TAG_ABORT) {
                /* Completion of abort cmd */
                if (CMD_STATE(sc) != FNIC_IOREQ_ABTS_PENDING) {
                        /* This is a late completion. Ignore it */
                        spin_unlock_irqrestore(io_lock, flags);
                        return;
                }
                CMD_STATE(sc) = FNIC_IOREQ_ABTS_COMPLETE;
                CMD_ABTS_STATUS(sc) = hdr_status;

                FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
                              "abts cmpl recd. id %d status %s\n",
                              (int)(id & FNIC_TAG_MASK),
                              fnic_fcpio_status_to_str(hdr_status));

                /*
                 * If scsi_eh thread is blocked waiting for abts to complete,
                 * signal completion to it. IO will be cleaned in the thread,
                 * else clean it in this context
                 */
                if (io_req->abts_done) {
                        complete(io_req->abts_done);
                        spin_unlock_irqrestore(io_lock, flags);
                } else {
                        FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
                                      "abts cmpl, completing IO\n");
                        CMD_SP(sc) = NULL;
                        sc->result = (DID_ERROR << 16);

                        spin_unlock_irqrestore(io_lock, flags);

                        fnic_release_ioreq_buf(fnic, io_req, sc);
                        mempool_free(io_req, fnic->io_req_pool);
                        if (sc->scsi_done)
                                sc->scsi_done(sc);
                }

        } else if (id & FNIC_TAG_DEV_RST) {
                /* Completion of device reset */
                CMD_LR_STATUS(sc) = hdr_status;
                CMD_STATE(sc) = FNIC_IOREQ_CMD_COMPLETE;
                FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
                              "dev reset cmpl recd. id %d status %s\n",
                              (int)(id & FNIC_TAG_MASK),
                              fnic_fcpio_status_to_str(hdr_status));
                if (io_req->dr_done)
                        complete(io_req->dr_done);
                spin_unlock_irqrestore(io_lock, flags);

        } else {
                shost_printk(KERN_ERR, fnic->lport->host,
                             "Unexpected itmf io state %s tag %x\n",
                             fnic_ioreq_state_to_str(CMD_STATE(sc)), id);
                spin_unlock_irqrestore(io_lock, flags);
        }
}

/*
 * fnic_fcpio_cmpl_handler
 * Routine to service the cq for wq_copy
 */
static int fnic_fcpio_cmpl_handler(struct vnic_dev *vdev,
                                   unsigned int cq_index,
                                   struct fcpio_fw_req *desc)
{
        struct fnic *fnic = vnic_dev_priv(vdev);
        int ret = 0;

        switch (desc->hdr.type) {
        case FCPIO_ACK: /* fw copied copy wq desc to its queue */
                fnic_fcpio_ack_handler(fnic, cq_index, desc);
                break;

        case FCPIO_ICMND_CMPL: /* fw completed a command */
                fnic_fcpio_icmnd_cmpl_handler(fnic, desc);
                break;

        case FCPIO_ITMF_CMPL: /* fw completed itmf (abort cmd, lun reset)*/
                fnic_fcpio_itmf_cmpl_handler(fnic, desc);
                break;

        case FCPIO_FLOGI_REG_CMPL: /* fw completed flogi_reg */
                ret = fnic_fcpio_flogi_reg_cmpl_handler(fnic, desc);
                break;

        case FCPIO_RESET_CMPL: /* fw completed reset */
                ret = fnic_fcpio_fw_reset_cmpl_handler(fnic, desc);
                break;

        default:
                FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
                              "unknown firmware completion type %d\n",
                              desc->hdr.type);
                break;
        }

        return ret;
}

/*
 * fnic_wq_copy_cmpl_handler
 * Routine to process wq copy
 */
int fnic_wq_copy_cmpl_handler(struct fnic *fnic, int copy_work_to_do)
{
        unsigned int wq_work_done = 0;
        unsigned int i, cq_index;
        unsigned int cur_work_done;

        for (i = 0; i < fnic->wq_copy_count; i++) {
                cq_index = i + fnic->raw_wq_count + fnic->rq_count;
                cur_work_done = vnic_cq_copy_service(&fnic->cq[cq_index],
                                                     fnic_fcpio_cmpl_handler,
                                                     copy_work_to_do);
                wq_work_done += cur_work_done;
        }
        return wq_work_done;
}
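
/*
 * Sketch of the completion-queue index layout assumed above and in
 * fnic_fcpio_ack_handler, with hypothetical counts: if raw_wq_count = 1,
 * rq_count = 2 and wq_copy_count = 1, the copy WQ's completion queue
 * sits at cq[1 + 2 + 0] = cq[3], and the ack handler recovers the WQ
 * as wq_copy[3 - 1 - 2] = wq_copy[0].
 */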

static void fnic_cleanup_io(struct fnic *fnic, int exclude_id)
{
        unsigned int i;
        struct fnic_io_req *io_req;
        unsigned long flags = 0;
        struct scsi_cmnd *sc;
        spinlock_t *io_lock;

        for (i = 0; i < FNIC_MAX_IO_REQ; i++) {
                if (i == exclude_id)
                        continue;

                sc = scsi_host_find_tag(fnic->lport->host, i);
                if (!sc)
                        continue;

                io_lock = fnic_io_lock_hash(fnic, sc);
                spin_lock_irqsave(io_lock, flags);
                io_req = (struct fnic_io_req *)CMD_SP(sc);
                if (!io_req) {
                        spin_unlock_irqrestore(io_lock, flags);
                        goto cleanup_scsi_cmd;
                }

                CMD_SP(sc) = NULL;

                spin_unlock_irqrestore(io_lock, flags);

                /*
                 * If there is a scsi_cmnd associated with this io_req, then
                 * free the corresponding state
                 */
                fnic_release_ioreq_buf(fnic, io_req, sc);
                mempool_free(io_req, fnic->io_req_pool);

cleanup_scsi_cmd:
                sc->result = DID_TRANSPORT_DISRUPTED << 16;
                FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host, "fnic_cleanup_io:"
                              " DID_TRANSPORT_DISRUPTED\n");

                /* Complete the command to SCSI */
                if (sc->scsi_done)
                        sc->scsi_done(sc);
        }
}

void fnic_wq_copy_cleanup_handler(struct vnic_wq_copy *wq,
                                  struct fcpio_host_req *desc)
{
        u32 id;
        struct fnic *fnic = vnic_dev_priv(wq->vdev);
        struct fnic_io_req *io_req;
        struct scsi_cmnd *sc;
        unsigned long flags;
        spinlock_t *io_lock;

        /* get the tag reference */
        fcpio_tag_id_dec(&desc->hdr.tag, &id);
        id &= FNIC_TAG_MASK;

        if (id >= FNIC_MAX_IO_REQ)
                return;

        sc = scsi_host_find_tag(fnic->lport->host, id);
        if (!sc)
                return;

        io_lock = fnic_io_lock_hash(fnic, sc);
        spin_lock_irqsave(io_lock, flags);

        /* Get the IO context which this desc refers to */
        io_req = (struct fnic_io_req *)CMD_SP(sc);

        /* fnic interrupts are turned off by now */

        if (!io_req) {
                spin_unlock_irqrestore(io_lock, flags);
                goto wq_copy_cleanup_scsi_cmd;
        }

        CMD_SP(sc) = NULL;

        spin_unlock_irqrestore(io_lock, flags);

        fnic_release_ioreq_buf(fnic, io_req, sc);
        mempool_free(io_req, fnic->io_req_pool);

wq_copy_cleanup_scsi_cmd:
        sc->result = DID_NO_CONNECT << 16;
        FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host, "wq_copy_cleanup_handler:"
                      " DID_NO_CONNECT\n");

        if (sc->scsi_done)
                sc->scsi_done(sc);
}

static inline int fnic_queue_abort_io_req(struct fnic *fnic, int tag,
                                          u32 task_req, u8 *fc_lun,
                                          struct fnic_io_req *io_req)
{
        struct vnic_wq_copy *wq = &fnic->wq_copy[0];
        unsigned long flags;

        spin_lock_irqsave(&fnic->wq_copy_lock[0], flags);

        if (vnic_wq_copy_desc_avail(wq) <= fnic->wq_copy_desc_low[0])
                free_wq_copy_descs(fnic, wq);

        if (!vnic_wq_copy_desc_avail(wq)) {
                spin_unlock_irqrestore(&fnic->wq_copy_lock[0], flags);
                return 1;
        }
        fnic_queue_wq_copy_desc_itmf(wq, tag | FNIC_TAG_ABORT,
                                     0, task_req, tag, fc_lun, io_req->port_id,
                                     fnic->config.ra_tov, fnic->config.ed_tov);

        spin_unlock_irqrestore(&fnic->wq_copy_lock[0], flags);
        return 0;
}
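
/*
 * The abort is queued with the IO's own tag ORed with FNIC_TAG_ABORT,
 * so the ITMF completion path can tell abort completions apart from
 * device-reset ones and strip the flag with FNIC_TAG_MASK. As a
 * hypothetical example, if FNIC_TAG_ABORT were bit 30, IO tag 5 would
 * produce an abort id of (1 << 30) | 5, and (id & FNIC_TAG_MASK)
 * recovers 5 in fnic_fcpio_itmf_cmpl_handler.
 */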

void fnic_rport_exch_reset(struct fnic *fnic, u32 port_id)
{
        int tag;
        struct fnic_io_req *io_req;
        spinlock_t *io_lock;
        unsigned long flags;
        struct scsi_cmnd *sc;
        struct scsi_lun fc_lun;
        enum fnic_ioreq_state old_ioreq_state;

        FNIC_SCSI_DBG(KERN_DEBUG,
                      fnic->lport->host,
                      "fnic_rport_exch_reset called portid 0x%06x\n",
                      port_id);

        if (fnic->in_remove)
                return;

        for (tag = 0; tag < FNIC_MAX_IO_REQ; tag++) {
                sc = scsi_host_find_tag(fnic->lport->host, tag);
                if (!sc)
                        continue;

                io_lock = fnic_io_lock_hash(fnic, sc);
                spin_lock_irqsave(io_lock, flags);

                io_req = (struct fnic_io_req *)CMD_SP(sc);

                if (!io_req || io_req->port_id != port_id) {
                        spin_unlock_irqrestore(io_lock, flags);
                        continue;
                }

                /*
                 * Found IO that is still pending with firmware and
                 * belongs to rport that went away
                 */
                if (CMD_STATE(sc) == FNIC_IOREQ_ABTS_PENDING) {
                        spin_unlock_irqrestore(io_lock, flags);
                        continue;
                }
                old_ioreq_state = CMD_STATE(sc);
                CMD_STATE(sc) = FNIC_IOREQ_ABTS_PENDING;
                CMD_ABTS_STATUS(sc) = FCPIO_INVALID_CODE;

                BUG_ON(io_req->abts_done);

                FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
                              "fnic_rport_exch_reset: Issuing abts\n");

                spin_unlock_irqrestore(io_lock, flags);

                /* Now queue the abort command to firmware */
                int_to_scsilun(sc->device->lun, &fc_lun);

                if (fnic_queue_abort_io_req(fnic, tag,
                                            FCPIO_ITMF_ABT_TASK_TERM,
                                            fc_lun.scsi_lun, io_req)) {
                        /*
                         * Revert the cmd state back to old state, if
                         * it hasn't changed in between. This cmd will get
                         * aborted later by scsi_eh, or cleaned up during
                         * lun reset
                         */
                        io_lock = fnic_io_lock_hash(fnic, sc);

                        spin_lock_irqsave(io_lock, flags);
                        if (CMD_STATE(sc) == FNIC_IOREQ_ABTS_PENDING)
                                CMD_STATE(sc) = old_ioreq_state;
                        spin_unlock_irqrestore(io_lock, flags);
                }
        }
}

void fnic_terminate_rport_io(struct fc_rport *rport)
{
        int tag;
        struct fnic_io_req *io_req;
        spinlock_t *io_lock;
        unsigned long flags;
        struct scsi_cmnd *sc;
        struct scsi_lun fc_lun;
        struct fc_rport_libfc_priv *rdata = rport->dd_data;
        struct fc_lport *lport = rdata->local_port;
        struct fnic *fnic = lport_priv(lport);
        struct fc_rport *cmd_rport;
        enum fnic_ioreq_state old_ioreq_state;

        FNIC_SCSI_DBG(KERN_DEBUG,
                      fnic->lport->host, "fnic_terminate_rport_io called"
                      " wwpn 0x%llx, wwnn 0x%llx, portid 0x%06x\n",
                      rport->port_name, rport->node_name,
                      rport->port_id);

        if (fnic->in_remove)
                return;

        for (tag = 0; tag < FNIC_MAX_IO_REQ; tag++) {
                sc = scsi_host_find_tag(fnic->lport->host, tag);
                if (!sc)
                        continue;

                cmd_rport = starget_to_rport(scsi_target(sc->device));
                if (rport != cmd_rport)
                        continue;

                io_lock = fnic_io_lock_hash(fnic, sc);
                spin_lock_irqsave(io_lock, flags);

                io_req = (struct fnic_io_req *)CMD_SP(sc);

                if (!io_req || rport != cmd_rport) {
                        spin_unlock_irqrestore(io_lock, flags);
                        continue;
                }

                /*
                 * Found IO that is still pending with firmware and
                 * belongs to rport that went away
                 */
                if (CMD_STATE(sc) == FNIC_IOREQ_ABTS_PENDING) {
                        spin_unlock_irqrestore(io_lock, flags);
                        continue;
                }
                old_ioreq_state = CMD_STATE(sc);
                CMD_STATE(sc) = FNIC_IOREQ_ABTS_PENDING;
                CMD_ABTS_STATUS(sc) = FCPIO_INVALID_CODE;

                BUG_ON(io_req->abts_done);

                FNIC_SCSI_DBG(KERN_DEBUG,
                              fnic->lport->host,
                              "fnic_terminate_rport_io: Issuing abts\n");

                spin_unlock_irqrestore(io_lock, flags);

                /* Now queue the abort command to firmware */
                int_to_scsilun(sc->device->lun, &fc_lun);

                if (fnic_queue_abort_io_req(fnic, tag,
                                            FCPIO_ITMF_ABT_TASK_TERM,
                                            fc_lun.scsi_lun, io_req)) {
                        /*
                         * Revert the cmd state back to old state, if
                         * it hasn't changed in between. This cmd will get
                         * aborted later by scsi_eh, or cleaned up during
                         * lun reset
                         */
                        io_lock = fnic_io_lock_hash(fnic, sc);

                        spin_lock_irqsave(io_lock, flags);
                        if (CMD_STATE(sc) == FNIC_IOREQ_ABTS_PENDING)
                                CMD_STATE(sc) = old_ioreq_state;
                        spin_unlock_irqrestore(io_lock, flags);
                }
        }
}

static void fnic_block_error_handler(struct scsi_cmnd *sc)
{
        struct Scsi_Host *shost = sc->device->host;
        struct fc_rport *rport = starget_to_rport(scsi_target(sc->device));
        unsigned long flags;

        spin_lock_irqsave(shost->host_lock, flags);
        while (rport->port_state == FC_PORTSTATE_BLOCKED) {
                spin_unlock_irqrestore(shost->host_lock, flags);
                msleep(1000);
                spin_lock_irqsave(shost->host_lock, flags);
        }
        spin_unlock_irqrestore(shost->host_lock, flags);
}

/*
 * This function is exported to SCSI for sending abort cmnds.
 * A SCSI IO is represented by an io_req in the driver.
 * The ioreq is linked to the SCSI Cmd, thus a link with the ULP's IO.
 */
int fnic_abort_cmd(struct scsi_cmnd *sc)
{
        struct fc_lport *lp;
        struct fnic *fnic;
        struct fnic_io_req *io_req;
        struct fc_rport *rport;
        spinlock_t *io_lock;
        unsigned long flags;
        int ret = SUCCESS;
        u32 task_req;
        struct scsi_lun fc_lun;
        DECLARE_COMPLETION_ONSTACK(tm_done);

        /* Wait for rport to unblock */
        fnic_block_error_handler(sc);

        /* Get local-port, check ready and link up */
        lp = shost_priv(sc->device->host);

        fnic = lport_priv(lp);
        FNIC_SCSI_DBG(KERN_DEBUG,
                      fnic->lport->host,
                      "Abort Cmd called FCID 0x%x, LUN 0x%x TAG %d\n",
                      (starget_to_rport(scsi_target(sc->device)))->port_id,
                      sc->device->lun, sc->request->tag);

        if (lp->state != LPORT_ST_READY || !(lp->link_up)) {
                ret = FAILED;
                goto fnic_abort_cmd_end;
        }

        /*
         * Avoid a race between SCSI issuing the abort and the device
         * completing the command.
         *
         * If the command is already completed by the fw cmpl code,
         * we just return SUCCESS from here. This means that the abort
         * succeeded. In the SCSI ML, since the timeout for command has
         * happened, the completion won't actually complete the command
         * and it will be considered as an aborted command
         *
         * The CMD_SP will not be cleared except while holding io_req_lock.
         */
        io_lock = fnic_io_lock_hash(fnic, sc);
        spin_lock_irqsave(io_lock, flags);
        io_req = (struct fnic_io_req *)CMD_SP(sc);
        if (!io_req) {
                spin_unlock_irqrestore(io_lock, flags);
                goto fnic_abort_cmd_end;
        }

        io_req->abts_done = &tm_done;

        if (CMD_STATE(sc) == FNIC_IOREQ_ABTS_PENDING) {
                spin_unlock_irqrestore(io_lock, flags);
                goto wait_pending;
        }
        /*
         * Command is still pending, need to abort it
         * If the firmware completes the command after this point,
         * the completion won't be done till mid-layer, since abort
         * has already started.
         */
        CMD_STATE(sc) = FNIC_IOREQ_ABTS_PENDING;
        CMD_ABTS_STATUS(sc) = FCPIO_INVALID_CODE;

        spin_unlock_irqrestore(io_lock, flags);

        /*
         * Check readiness of the remote port. If the path to remote
         * port is up, then send abts to the remote port to terminate
         * the IO. Else, just locally terminate the IO in the firmware
         */
        rport = starget_to_rport(scsi_target(sc->device));
        if (fc_remote_port_chkready(rport) == 0)
                task_req = FCPIO_ITMF_ABT_TASK;
        else
                task_req = FCPIO_ITMF_ABT_TASK_TERM;

        /* Now queue the abort command to firmware */
        int_to_scsilun(sc->device->lun, &fc_lun);

        if (fnic_queue_abort_io_req(fnic, sc->request->tag, task_req,
                                    fc_lun.scsi_lun, io_req)) {
                spin_lock_irqsave(io_lock, flags);
                io_req = (struct fnic_io_req *)CMD_SP(sc);
                if (io_req)
                        io_req->abts_done = NULL;
                spin_unlock_irqrestore(io_lock, flags);
                ret = FAILED;
                goto fnic_abort_cmd_end;
        }

        /*
         * We queued an abort IO, wait for its completion.
         * Once the firmware completes the abort command, it will
         * wake up this thread.
         */
 wait_pending:
        wait_for_completion_timeout(&tm_done,
                                    msecs_to_jiffies
                                    (2 * fnic->config.ra_tov +
                                     fnic->config.ed_tov));

        /* Check the abort status */
        spin_lock_irqsave(io_lock, flags);

        io_req = (struct fnic_io_req *)CMD_SP(sc);
        if (!io_req) {
                spin_unlock_irqrestore(io_lock, flags);
                ret = FAILED;
                goto fnic_abort_cmd_end;
        }
        io_req->abts_done = NULL;

        /* fw did not complete abort, timed out */
        if (CMD_STATE(sc) == FNIC_IOREQ_ABTS_PENDING) {
                spin_unlock_irqrestore(io_lock, flags);
                ret = FAILED;
                goto fnic_abort_cmd_end;
        }

        /*
         * firmware completed the abort, check the status,
         * free the io_req irrespective of failure or success
         */
        if (CMD_ABTS_STATUS(sc) != FCPIO_SUCCESS)
                ret = FAILED;

        CMD_SP(sc) = NULL;

        spin_unlock_irqrestore(io_lock, flags);

        fnic_release_ioreq_buf(fnic, io_req, sc);
        mempool_free(io_req, fnic->io_req_pool);

fnic_abort_cmd_end:
        FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
                      "Returning from abort cmd %s\n",
                      (ret == SUCCESS) ?
                      "SUCCESS" : "FAILED");
        return ret;
}
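
/*
 * A quick worked example of the abort wait above, with hypothetical
 * firmware config values: if ra_tov is 10000 ms and ed_tov is 2000 ms,
 * the scsi_eh thread sleeps for up to 2 * 10000 + 2000 = 22000 ms
 * (22 s) before declaring the abort timed out.
 */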

static inline int fnic_queue_dr_io_req(struct fnic *fnic,
                                       struct scsi_cmnd *sc,
                                       struct fnic_io_req *io_req)
{
        struct vnic_wq_copy *wq = &fnic->wq_copy[0];
        struct scsi_lun fc_lun;
        int ret = 0;
        unsigned long intr_flags;

        spin_lock_irqsave(&fnic->wq_copy_lock[0], intr_flags);

        if (vnic_wq_copy_desc_avail(wq) <= fnic->wq_copy_desc_low[0])
                free_wq_copy_descs(fnic, wq);

        if (!vnic_wq_copy_desc_avail(wq)) {
                ret = -EAGAIN;
                goto lr_io_req_end;
        }

        /* fill in the lun info */
        int_to_scsilun(sc->device->lun, &fc_lun);

        fnic_queue_wq_copy_desc_itmf(wq, sc->request->tag | FNIC_TAG_DEV_RST,
                                     0, FCPIO_ITMF_LUN_RESET, SCSI_NO_TAG,
                                     fc_lun.scsi_lun, io_req->port_id,
                                     fnic->config.ra_tov, fnic->config.ed_tov);

lr_io_req_end:
        spin_unlock_irqrestore(&fnic->wq_copy_lock[0], intr_flags);

        return ret;
}
1424
/*
 * Clean up any pending aborts on the LUN.
 * For each outstanding IO on this LUN whose abort the firmware has not yet
 * completed, issue a local abort and wait for it to complete. Return 0 if
 * all commands were successfully aborted, 1 otherwise.
 */
static int fnic_clean_pending_aborts(struct fnic *fnic,
                                     struct scsi_cmnd *lr_sc)
{
        int tag;
        struct fnic_io_req *io_req;
        spinlock_t *io_lock;
        unsigned long flags;
        int ret = 0;
        struct scsi_cmnd *sc;
        struct fc_rport *rport;
        struct scsi_lun fc_lun;
        struct scsi_device *lun_dev = lr_sc->device;
        DECLARE_COMPLETION_ONSTACK(tm_done);

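        /* walk every possible tag, picking out IOs on the LUN being reset */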
        for (tag = 0; tag < FNIC_MAX_IO_REQ; tag++) {
                sc = scsi_host_find_tag(fnic->lport->host, tag);
                /*
                 * Skip the LUN reset command itself and any commands
                 * that do not belong to this LUN.
                 */
                if (!sc || sc == lr_sc || sc->device != lun_dev)
                        continue;

                io_lock = fnic_io_lock_hash(fnic, sc);
                spin_lock_irqsave(io_lock, flags);

                io_req = (struct fnic_io_req *)CMD_SP(sc);

                if (!io_req || sc->device != lun_dev) {
                        spin_unlock_irqrestore(io_lock, flags);
                        continue;
                }

                /*
                 * Found IO that is still pending with firmware and
                 * belongs to the LUN that we are resetting
                 */
                FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
                              "Found IO in %s on lun\n",
                              fnic_ioreq_state_to_str(CMD_STATE(sc)));

                BUG_ON(CMD_STATE(sc) != FNIC_IOREQ_ABTS_PENDING);

                CMD_ABTS_STATUS(sc) = FCPIO_INVALID_CODE;
                io_req->abts_done = &tm_done;
                spin_unlock_irqrestore(io_lock, flags);

                /* Now queue the abort command to firmware */
                int_to_scsilun(sc->device->lun, &fc_lun);
                rport = starget_to_rport(scsi_target(sc->device));

                if (fnic_queue_abort_io_req(fnic, tag,
                                            FCPIO_ITMF_ABT_TASK_TERM,
                                            fc_lun.scsi_lun, io_req)) {
                        spin_lock_irqsave(io_lock, flags);
                        io_req = (struct fnic_io_req *)CMD_SP(sc);
                        if (io_req)
                                io_req->abts_done = NULL;
                        spin_unlock_irqrestore(io_lock, flags);
                        ret = 1;
                        goto clean_pending_aborts_end;
                }

                wait_for_completion_timeout(&tm_done,
                                            msecs_to_jiffies(fnic->config.ed_tov));

                /* Recheck the command state to see if the abort completed */
                spin_lock_irqsave(io_lock, flags);
                io_req = (struct fnic_io_req *)CMD_SP(sc);
                if (!io_req) {
                        spin_unlock_irqrestore(io_lock, flags);
                        ret = 1;
                        goto clean_pending_aborts_end;
                }

                io_req->abts_done = NULL;

                /* if abort is still pending with fw, fail */
                if (CMD_STATE(sc) == FNIC_IOREQ_ABTS_PENDING) {
                        spin_unlock_irqrestore(io_lock, flags);
                        ret = 1;
                        goto clean_pending_aborts_end;
                }
                CMD_SP(sc) = NULL;
                spin_unlock_irqrestore(io_lock, flags);

                fnic_release_ioreq_buf(fnic, io_req, sc);
                mempool_free(io_req, fnic->io_req_pool);
        }

clean_pending_aborts_end:
        return ret;
}

/*
 * The SCSI EH thread issues a LUN reset when one or more commands on a
 * LUN fail to get aborted. It calls the driver's eh_device_reset handler
 * with a SCSI command on the LUN.
 */
int fnic_device_reset(struct scsi_cmnd *sc)
{
        struct fc_lport *lp;
        struct fnic *fnic;
        struct fnic_io_req *io_req;
        struct fc_rport *rport;
        int status;
        int ret = FAILED;
        spinlock_t *io_lock;
        unsigned long flags;
        DECLARE_COMPLETION_ONSTACK(tm_done);

        /* Wait for rport to unblock */
        fnic_block_error_handler(sc);

        /* Get local-port, check ready and link up */
        lp = shost_priv(sc->device->host);

        fnic = lport_priv(lp);
        FNIC_SCSI_DBG(KERN_DEBUG,
                      fnic->lport->host,
                      "Device reset called FCID 0x%x, LUN 0x%x\n",
                      (starget_to_rport(scsi_target(sc->device)))->port_id,
                      sc->device->lun);

        if (lp->state != LPORT_ST_READY || !(lp->link_up))
                goto fnic_device_reset_end;

        /* Check if remote port up */
        rport = starget_to_rport(scsi_target(sc->device));
        if (fc_remote_port_chkready(rport))
                goto fnic_device_reset_end;

        io_lock = fnic_io_lock_hash(fnic, sc);
        spin_lock_irqsave(io_lock, flags);
        io_req = (struct fnic_io_req *)CMD_SP(sc);

        /*
         * If there is an io_req attached to this command, use it;
         * otherwise allocate a new one.
         */
        if (!io_req) {
                io_req = mempool_alloc(fnic->io_req_pool, GFP_ATOMIC);
                if (!io_req) {
                        spin_unlock_irqrestore(io_lock, flags);
                        goto fnic_device_reset_end;
                }
                memset(io_req, 0, sizeof(*io_req));
                io_req->port_id = rport->port_id;
                CMD_SP(sc) = (char *)io_req;
        }
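        /*
         * Arm the completion and mark the reset as pending; CMD_LR_STATUS
         * stays FCPIO_INVALID_CODE until the firmware responds, which is
         * how the timeout case is detected below.
         */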
        io_req->dr_done = &tm_done;
        CMD_STATE(sc) = FNIC_IOREQ_CMD_PENDING;
        CMD_LR_STATUS(sc) = FCPIO_INVALID_CODE;
        spin_unlock_irqrestore(io_lock, flags);

        FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host, "TAG %d\n",
                      sc->request->tag);

        /*
         * Issue the device reset; if enqueueing fails, clean up the
         * io_req and break its association with the SCSI command.
         */
        if (fnic_queue_dr_io_req(fnic, sc, io_req)) {
                spin_lock_irqsave(io_lock, flags);
                io_req = (struct fnic_io_req *)CMD_SP(sc);
                if (io_req)
                        io_req->dr_done = NULL;
                goto fnic_device_reset_clean;
        }

        /*
         * Wait on the local completion for LUN reset.  The io_req may be
         * freed while we wait since we hold no lock.
         */
        wait_for_completion_timeout(&tm_done,
                                    msecs_to_jiffies(FNIC_LUN_RESET_TIMEOUT));

        spin_lock_irqsave(io_lock, flags);
        io_req = (struct fnic_io_req *)CMD_SP(sc);
        if (!io_req) {
                spin_unlock_irqrestore(io_lock, flags);
                goto fnic_device_reset_end;
        }
        io_req->dr_done = NULL;

        status = CMD_LR_STATUS(sc);
        spin_unlock_irqrestore(io_lock, flags);

        /*
         * If the LUN reset did not complete, bail out with FAILED; the
         * io_req gets cleaned up at a higher level of EH.
         */
        if (status == FCPIO_INVALID_CODE) {
                FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
                              "Device reset timed out\n");
                goto fnic_device_reset_end;
        }

        /* Completed but not successful: clean up the io_req, return FAILED */
        if (status != FCPIO_SUCCESS) {
                spin_lock_irqsave(io_lock, flags);
                FNIC_SCSI_DBG(KERN_DEBUG,
                              fnic->lport->host,
                              "Device reset completed - failed\n");
                io_req = (struct fnic_io_req *)CMD_SP(sc);
                goto fnic_device_reset_clean;
        }

        /*
         * Clean up any aborts on this LUN that have still not completed.
         * If any of these fail, the LUN reset fails.
         * fnic_clean_pending_aborts() cleans all commands on this LUN
         * except the LUN reset command itself; if they all get cleaned,
         * the LUN reset succeeds.
         */
        if (fnic_clean_pending_aborts(fnic, sc)) {
                spin_lock_irqsave(io_lock, flags);
                io_req = (struct fnic_io_req *)CMD_SP(sc);
                FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
                              "Device reset failed"
                              " since could not abort all IOs\n");
                goto fnic_device_reset_clean;
        }

        /* Clean the LUN reset command */
        spin_lock_irqsave(io_lock, flags);
        io_req = (struct fnic_io_req *)CMD_SP(sc);
        if (io_req)
                /* Completed, and successful */
                ret = SUCCESS;

fnic_device_reset_clean:
        if (io_req)
                CMD_SP(sc) = NULL;

        spin_unlock_irqrestore(io_lock, flags);

        if (io_req) {
                fnic_release_ioreq_buf(fnic, io_req, sc);
                mempool_free(io_req, fnic->io_req_pool);
        }

fnic_device_reset_end:
        FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
                      "Returning from device reset %s\n",
                      (ret == SUCCESS) ?
                      "SUCCESS" : "FAILED");
        return ret;
}

/* Clean up all IOs, clean up libFC local port */
int fnic_reset(struct Scsi_Host *shost)
{
        struct fc_lport *lp;
        struct fnic *fnic;
        int ret = SUCCESS;

        lp = shost_priv(shost);
        fnic = lport_priv(lp);

        FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
                      "fnic_reset called\n");

        /*
         * Reset the local port; this cleans up libFC exchanges, resets
         * remote port sessions, and, if the link is up, begins FLOGI.
         */
        if (lp->tt.lport_reset(lp))
                ret = FAILED;

        FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
                      "Returning from fnic reset %s\n",
                      (ret == SUCCESS) ?
                      "SUCCESS" : "FAILED");

        return ret;
}

/*
 * SCSI error handling calls the driver's eh_host_reset if all prior
 * error handling levels return FAILED. If the host reset completes
 * successfully, and the link is up, fabric login begins.
 *
 * Host reset is the highest level of error recovery. If this fails, the
 * host is taken offline by SCSI.
 */
int fnic_host_reset(struct scsi_cmnd *sc)
{
        int ret;
        unsigned long wait_host_tmo;
        struct Scsi_Host *shost = sc->device->host;
        struct fc_lport *lp = shost_priv(shost);

        /*
         * If fnic_reset is successful, wait for fabric login to complete:
         * scsi-ml sends a TUR to every device if the host reset succeeds,
         * so the fabric should be up before we return to SCSI.
         */
        ret = fnic_reset(shost);
        if (ret == SUCCESS) {
                wait_host_tmo = jiffies + FNIC_HOST_RESET_SETTLE_TIME * HZ;
                ret = FAILED;
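                /*
                 * Poll once a second until the local port is ready and
                 * the link is up, or the settle window expires.
                 */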
                while (time_before(jiffies, wait_host_tmo)) {
                        if ((lp->state == LPORT_ST_READY) &&
                            (lp->link_up)) {
                                ret = SUCCESS;
                                break;
                        }
                        ssleep(1);
                }
        }

        return ret;
}

/*
 * This function is called from libFC when the host is being removed.
 */
void fnic_scsi_abort_io(struct fc_lport *lp)
{
        int err = 0;
        unsigned long flags;
        enum fnic_state old_state;
        struct fnic *fnic = lport_priv(lp);
        DECLARE_COMPLETION_ONSTACK(remove_wait);

        /* Issue firmware reset for fnic, wait for reset to complete */
        spin_lock_irqsave(&fnic->fnic_lock, flags);
        fnic->remove_wait = &remove_wait;
        old_state = fnic->state;
        fnic->state = FNIC_IN_FC_TRANS_ETH_MODE;
        vnic_dev_del_addr(fnic->vdev, fnic->data_src_addr);
        spin_unlock_irqrestore(&fnic->fnic_lock, flags);

        err = fnic_fw_reset_handler(fnic);
        if (err) {
                spin_lock_irqsave(&fnic->fnic_lock, flags);
                if (fnic->state == FNIC_IN_FC_TRANS_ETH_MODE)
                        fnic->state = old_state;
                fnic->remove_wait = NULL;
                spin_unlock_irqrestore(&fnic->fnic_lock, flags);
                return;
        }

        /* Wait for firmware reset to complete */
        wait_for_completion_timeout(&remove_wait,
                                    msecs_to_jiffies(FNIC_RMDEVICE_TIMEOUT));

        spin_lock_irqsave(&fnic->fnic_lock, flags);
        fnic->remove_wait = NULL;
        FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
                      "fnic_scsi_abort_io %s\n",
                      (fnic->state == FNIC_IN_ETH_MODE) ?
                      "SUCCESS" : "FAILED");
        spin_unlock_irqrestore(&fnic->fnic_lock, flags);
}

/*
 * This function is called from libFC to clean up driver IO state on
 * link down.
 */
void fnic_scsi_cleanup(struct fc_lport *lp)
{
        unsigned long flags;
        enum fnic_state old_state;
        struct fnic *fnic = lport_priv(lp);

        /* issue fw reset */
        spin_lock_irqsave(&fnic->fnic_lock, flags);
        old_state = fnic->state;
        fnic->state = FNIC_IN_FC_TRANS_ETH_MODE;
        vnic_dev_del_addr(fnic->vdev, fnic->data_src_addr);
        spin_unlock_irqrestore(&fnic->fnic_lock, flags);

        if (fnic_fw_reset_handler(fnic)) {
                spin_lock_irqsave(&fnic->fnic_lock, flags);
                if (fnic->state == FNIC_IN_FC_TRANS_ETH_MODE)
                        fnic->state = old_state;
                spin_unlock_irqrestore(&fnic->fnic_lock, flags);
        }
}

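/*
 * Intentionally empty: fnic has no additional per-IO state to clean up
 * here; the stub is presumably kept to fill the corresponding libFC
 * callback slot.
 */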
void fnic_empty_scsi_cleanup(struct fc_lport *lp)
{
}

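/*
 * Reset exchanges. A non-zero sid needs no fnic-specific handling; a
 * non-zero did resets only that remote port. sid == did == 0 means link
 * down or device removal, so all driver IO state is cleaned up first,
 * then libFC's exchange manager reset runs in every case.
 */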
void fnic_exch_mgr_reset(struct fc_lport *lp, u32 sid, u32 did)
{
        struct fnic *fnic = lport_priv(lp);

        /* Non-zero sid, nothing to do */
        if (sid)
                goto call_fc_exch_mgr_reset;

        if (did) {
                fnic_rport_exch_reset(fnic, did);
                goto call_fc_exch_mgr_reset;
        }

        /*
         * sid = 0, did = 0
         * link down or device being removed
         */
        if (!fnic->in_remove)
                fnic_scsi_cleanup(lp);
        else
                fnic_scsi_abort_io(lp);

        /* call libFC exch mgr reset to reset its exchanges */
call_fc_exch_mgr_reset:
        fc_exch_mgr_reset(lp, sid, did);
}
