linux/drivers/scsi/fnic/fnic_scsi.c
/*
 * Copyright 2008 Cisco Systems, Inc.  All rights reserved.
 * Copyright 2007 Nuova Systems, Inc.  All rights reserved.
 *
 * This program is free software; you may redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; version 2 of the License.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include <linux/mempool.h>
#include <linux/errno.h>
#include <linux/init.h>
#include <linux/workqueue.h>
#include <linux/pci.h>
#include <linux/scatterlist.h>
#include <linux/skbuff.h>
#include <linux/spinlock.h>
#include <linux/if_ether.h>
#include <linux/if_vlan.h>
#include <linux/delay.h>
#include <linux/gfp.h>
#include <scsi/scsi.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_tcq.h>
#include <scsi/fc/fc_els.h>
#include <scsi/fc/fc_fcoe.h>
#include <scsi/libfc.h>
#include <scsi/fc_frame.h>
#include "fnic_io.h"
#include "fnic.h"

const char *fnic_state_str[] = {
        [FNIC_IN_FC_MODE] =           "FNIC_IN_FC_MODE",
        [FNIC_IN_FC_TRANS_ETH_MODE] = "FNIC_IN_FC_TRANS_ETH_MODE",
        [FNIC_IN_ETH_MODE] =          "FNIC_IN_ETH_MODE",
        [FNIC_IN_ETH_TRANS_FC_MODE] = "FNIC_IN_ETH_TRANS_FC_MODE",
};

static const char *fnic_ioreq_state_str[] = {
        [FNIC_IOREQ_NOT_INITED] = "FNIC_IOREQ_NOT_INITED",
        [FNIC_IOREQ_CMD_PENDING] = "FNIC_IOREQ_CMD_PENDING",
        [FNIC_IOREQ_ABTS_PENDING] = "FNIC_IOREQ_ABTS_PENDING",
        [FNIC_IOREQ_ABTS_COMPLETE] = "FNIC_IOREQ_ABTS_COMPLETE",
        [FNIC_IOREQ_CMD_COMPLETE] = "FNIC_IOREQ_CMD_COMPLETE",
};

static const char *fcpio_status_str[] = {
        [FCPIO_SUCCESS] = "FCPIO_SUCCESS", /*0x0*/
        [FCPIO_INVALID_HEADER] = "FCPIO_INVALID_HEADER",
        [FCPIO_OUT_OF_RESOURCE] = "FCPIO_OUT_OF_RESOURCE",
        [FCPIO_INVALID_PARAM] = "FCPIO_INVALID_PARAM",
        [FCPIO_REQ_NOT_SUPPORTED] = "FCPIO_REQ_NOT_SUPPORTED",
        [FCPIO_IO_NOT_FOUND] = "FCPIO_IO_NOT_FOUND",
        [FCPIO_ABORTED] = "FCPIO_ABORTED", /*0x41*/
        [FCPIO_TIMEOUT] = "FCPIO_TIMEOUT",
        [FCPIO_SGL_INVALID] = "FCPIO_SGL_INVALID",
        [FCPIO_MSS_INVALID] = "FCPIO_MSS_INVALID",
        [FCPIO_DATA_CNT_MISMATCH] = "FCPIO_DATA_CNT_MISMATCH",
        [FCPIO_FW_ERR] = "FCPIO_FW_ERR",
        [FCPIO_ITMF_REJECTED] = "FCPIO_ITMF_REJECTED",
        [FCPIO_ITMF_FAILED] = "FCPIO_ITMF_FAILED",
        [FCPIO_ITMF_INCORRECT_LUN] = "FCPIO_ITMF_INCORRECT_LUN",
        [FCPIO_CMND_REJECTED] = "FCPIO_CMND_REJECTED",
        [FCPIO_NO_PATH_AVAIL] = "FCPIO_NO_PATH_AVAIL",
        [FCPIO_PATH_FAILED] = "FCPIO_PATH_FAILED",
        [FCPIO_LUNMAP_CHNG_PEND] = "FCPIO_LUNMAP_CHNG_PEND",
};

const char *fnic_state_to_str(unsigned int state)
{
        if (state >= ARRAY_SIZE(fnic_state_str) || !fnic_state_str[state])
                return "unknown";

        return fnic_state_str[state];
}

static const char *fnic_ioreq_state_to_str(unsigned int state)
{
        if (state >= ARRAY_SIZE(fnic_ioreq_state_str) ||
            !fnic_ioreq_state_str[state])
                return "unknown";

        return fnic_ioreq_state_str[state];
}

static const char *fnic_fcpio_status_to_str(unsigned int status)
{
        if (status >= ARRAY_SIZE(fcpio_status_str) || !fcpio_status_str[status])
                return "unknown";

        return fcpio_status_str[status];
}

static void fnic_cleanup_io(struct fnic *fnic, int exclude_id);

static inline spinlock_t *fnic_io_lock_hash(struct fnic *fnic,
                                            struct scsi_cmnd *sc)
{
        u32 hash = sc->request->tag & (FNIC_IO_LOCKS - 1);

        return &fnic->io_req_lock[hash];
}

static inline spinlock_t *fnic_io_lock_tag(struct fnic *fnic,
                                            int tag)
{
        return &fnic->io_req_lock[tag & (FNIC_IO_LOCKS - 1)];
}
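
/*
 * Both lock helpers above stripe per-I/O state across FNIC_IO_LOCKS
 * spinlocks keyed by the SCSI tag; FNIC_IO_LOCKS must be a power of two
 * for the mask to act as a modulo. Illustrative numbers only: with
 * FNIC_IO_LOCKS = 64, tags 5, 69 and 133 all hash to io_req_lock[5].
 */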

/*
 * Unmap the data buffer and sense buffer for an io_req,
 * also unmap and free the device-private scatter/gather list.
 */
static void fnic_release_ioreq_buf(struct fnic *fnic,
                                   struct fnic_io_req *io_req,
                                   struct scsi_cmnd *sc)
{
        if (io_req->sgl_list_pa)
                pci_unmap_single(fnic->pdev, io_req->sgl_list_pa,
                                 sizeof(io_req->sgl_list[0]) * io_req->sgl_cnt,
                                 PCI_DMA_TODEVICE);
        scsi_dma_unmap(sc);

        if (io_req->sgl_cnt)
                mempool_free(io_req->sgl_list_alloc,
                             fnic->io_sgl_pool[io_req->sgl_type]);
        if (io_req->sense_buf_pa)
                pci_unmap_single(fnic->pdev, io_req->sense_buf_pa,
                                 SCSI_SENSE_BUFFERSIZE, PCI_DMA_FROMDEVICE);
}

/* Free up Copy Wq descriptors. Called with copy_wq lock held */
static int free_wq_copy_descs(struct fnic *fnic, struct vnic_wq_copy *wq)
{
        /* if no Ack received from firmware, then nothing to clean */
        if (!fnic->fw_ack_recd[0])
                return 1;

        /*
         * Update desc_available count based on number of freed descriptors
         * Account for wraparound
         */
        if (wq->to_clean_index <= fnic->fw_ack_index[0])
                wq->ring.desc_avail += (fnic->fw_ack_index[0]
                                        - wq->to_clean_index + 1);
        else
                wq->ring.desc_avail += (wq->ring.desc_count
                                        - wq->to_clean_index
                                        + fnic->fw_ack_index[0] + 1);

        /*
         * just bump clean index to ack_index+1 accounting for wraparound
         * this will essentially free up all descriptors between
         * to_clean_index and fw_ack_index, both inclusive
         */
        wq->to_clean_index =
                (fnic->fw_ack_index[0] + 1) % wq->ring.desc_count;

        /* we have processed the acks received so far */
        fnic->fw_ack_recd[0] = 0;
        return 0;
}
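
/*
 * Worked example of the wraparound arithmetic above (illustrative
 * numbers only): with desc_count = 8, to_clean_index = 6 and
 * fw_ack_index = 1, the firmware has consumed descriptors 6, 7, 0
 * and 1, so desc_avail grows by 8 - 6 + 1 + 1 = 4 and to_clean_index
 * advances to (1 + 1) % 8 = 2.
 */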

/**
 * __fnic_set_state_flags() - set or clear bits in the fnic's state_flags
 * @fnic: fnic instance
 * @st_flags: flag bits to operate on
 * @clearbits: if non-zero, clear @st_flags; otherwise set them
 */
void
__fnic_set_state_flags(struct fnic *fnic, unsigned long st_flags,
                        unsigned long clearbits)
{
        struct Scsi_Host *host = fnic->lport->host;
        int sh_locked = spin_is_locked(host->host_lock);
        unsigned long flags = 0;

        if (!sh_locked)
                spin_lock_irqsave(host->host_lock, flags);

        if (clearbits)
                fnic->state_flags &= ~st_flags;
        else
                fnic->state_flags |= st_flags;

        if (!sh_locked)
                spin_unlock_irqrestore(host->host_lock, flags);
}
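
/*
 * Callers use this through fnic_set_state_flags()/fnic_clear_state_flags(),
 * assumed to be thin wrappers in fnic.h that pass clearbits as 0 or 1,
 * e.g. fnic_set_state_flags(fnic, FNIC_FLAGS_FWRESET).
 */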

/*
 * fnic_fw_reset_handler
 * Routine to send reset msg to fw
 */
int fnic_fw_reset_handler(struct fnic *fnic)
{
        struct vnic_wq_copy *wq = &fnic->wq_copy[0];
        int ret = 0;
        unsigned long flags;

        /* indicate fwreset to io path */
        fnic_set_state_flags(fnic, FNIC_FLAGS_FWRESET);

        skb_queue_purge(&fnic->frame_queue);
        skb_queue_purge(&fnic->tx_queue);

        /* wait for io cmpl */
        while (atomic_read(&fnic->in_flight))
                schedule_timeout(msecs_to_jiffies(1));

        spin_lock_irqsave(&fnic->wq_copy_lock[0], flags);

        if (vnic_wq_copy_desc_avail(wq) <= fnic->wq_copy_desc_low[0])
                free_wq_copy_descs(fnic, wq);

        if (!vnic_wq_copy_desc_avail(wq))
                ret = -EAGAIN;
        else {
                fnic_queue_wq_copy_desc_fw_reset(wq, SCSI_NO_TAG);
                atomic64_inc(&fnic->fnic_stats.fw_stats.active_fw_reqs);
                if (atomic64_read(&fnic->fnic_stats.fw_stats.active_fw_reqs) >
                          atomic64_read(&fnic->fnic_stats.fw_stats.max_fw_reqs))
                        atomic64_set(&fnic->fnic_stats.fw_stats.max_fw_reqs,
                                atomic64_read(
                                  &fnic->fnic_stats.fw_stats.active_fw_reqs));
        }

        spin_unlock_irqrestore(&fnic->wq_copy_lock[0], flags);

        if (!ret) {
                atomic64_inc(&fnic->fnic_stats.reset_stats.fw_resets);
                FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
                              "Issued fw reset\n");
        } else {
                fnic_clear_state_flags(fnic, FNIC_FLAGS_FWRESET);
                FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
                              "Failed to issue fw reset\n");
        }

        return ret;
}

/*
 * fnic_flogi_reg_handler
 * Routine to send flogi register msg to fw
 */
int fnic_flogi_reg_handler(struct fnic *fnic, u32 fc_id)
{
        struct vnic_wq_copy *wq = &fnic->wq_copy[0];
        enum fcpio_flogi_reg_format_type format;
        struct fc_lport *lp = fnic->lport;
        u8 gw_mac[ETH_ALEN];
        int ret = 0;
        unsigned long flags;

        spin_lock_irqsave(&fnic->wq_copy_lock[0], flags);

        if (vnic_wq_copy_desc_avail(wq) <= fnic->wq_copy_desc_low[0])
                free_wq_copy_descs(fnic, wq);

        if (!vnic_wq_copy_desc_avail(wq)) {
                ret = -EAGAIN;
                goto flogi_reg_ioreq_end;
        }

        if (fnic->ctlr.map_dest) {
                memset(gw_mac, 0xff, ETH_ALEN);
                format = FCPIO_FLOGI_REG_DEF_DEST;
        } else {
                memcpy(gw_mac, fnic->ctlr.dest_addr, ETH_ALEN);
                format = FCPIO_FLOGI_REG_GW_DEST;
        }

        if ((fnic->config.flags & VFCF_FIP_CAPABLE) && !fnic->ctlr.map_dest) {
                fnic_queue_wq_copy_desc_fip_reg(wq, SCSI_NO_TAG,
                                                fc_id, gw_mac,
                                                fnic->data_src_addr,
                                                lp->r_a_tov, lp->e_d_tov);
                FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
                              "FLOGI FIP reg issued fcid %x src %pM dest %pM\n",
                              fc_id, fnic->data_src_addr, gw_mac);
        } else {
                fnic_queue_wq_copy_desc_flogi_reg(wq, SCSI_NO_TAG,
                                                  format, fc_id, gw_mac);
                FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
                              "FLOGI reg issued fcid %x map %d dest %pM\n",
                              fc_id, fnic->ctlr.map_dest, gw_mac);
        }

        atomic64_inc(&fnic->fnic_stats.fw_stats.active_fw_reqs);
        if (atomic64_read(&fnic->fnic_stats.fw_stats.active_fw_reqs) >
                  atomic64_read(&fnic->fnic_stats.fw_stats.max_fw_reqs))
                atomic64_set(&fnic->fnic_stats.fw_stats.max_fw_reqs,
                  atomic64_read(&fnic->fnic_stats.fw_stats.active_fw_reqs));

flogi_reg_ioreq_end:
        spin_unlock_irqrestore(&fnic->wq_copy_lock[0], flags);
        return ret;
}

/*
 * fnic_queue_wq_copy_desc
 * Routine to enqueue a wq copy desc
 */
static inline int fnic_queue_wq_copy_desc(struct fnic *fnic,
                                          struct vnic_wq_copy *wq,
                                          struct fnic_io_req *io_req,
                                          struct scsi_cmnd *sc,
                                          int sg_count)
{
        struct scatterlist *sg;
        struct fc_rport *rport = starget_to_rport(scsi_target(sc->device));
        struct fc_rport_libfc_priv *rp = rport->dd_data;
        struct host_sg_desc *desc;
        struct misc_stats *misc_stats = &fnic->fnic_stats.misc_stats;
        u8 pri_tag = 0;
        unsigned int i;
        unsigned long intr_flags;
        int flags;
        u8 exch_flags;
        struct scsi_lun fc_lun;
        char msg[2];

        if (sg_count) {
                /* For each SGE, create a device desc entry */
                desc = io_req->sgl_list;
                for_each_sg(scsi_sglist(sc), sg, sg_count, i) {
                        desc->addr = cpu_to_le64(sg_dma_address(sg));
                        desc->len = cpu_to_le32(sg_dma_len(sg));
                        desc->_resvd = 0;
                        desc++;
                }

                io_req->sgl_list_pa = pci_map_single
                        (fnic->pdev,
                         io_req->sgl_list,
                         sizeof(io_req->sgl_list[0]) * sg_count,
                         PCI_DMA_TODEVICE);
        }

        io_req->sense_buf_pa = pci_map_single(fnic->pdev,
                                              sc->sense_buffer,
                                              SCSI_SENSE_BUFFERSIZE,
                                              PCI_DMA_FROMDEVICE);

        int_to_scsilun(sc->device->lun, &fc_lun);

        pri_tag = FCPIO_ICMND_PTA_SIMPLE;
        msg[0] = MSG_SIMPLE_TAG;
        scsi_populate_tag_msg(sc, msg);
        if (msg[0] == MSG_ORDERED_TAG)
                pri_tag = FCPIO_ICMND_PTA_ORDERED;

        /* Enqueue the descriptor in the Copy WQ */
        spin_lock_irqsave(&fnic->wq_copy_lock[0], intr_flags);

        if (vnic_wq_copy_desc_avail(wq) <= fnic->wq_copy_desc_low[0])
                free_wq_copy_descs(fnic, wq);

        if (unlikely(!vnic_wq_copy_desc_avail(wq))) {
                spin_unlock_irqrestore(&fnic->wq_copy_lock[0], intr_flags);
                FNIC_SCSI_DBG(KERN_INFO, fnic->lport->host,
                          "fnic_queue_wq_copy_desc failure - no descriptors\n");
                atomic64_inc(&misc_stats->io_cpwq_alloc_failures);
                return SCSI_MLQUEUE_HOST_BUSY;
        }

        flags = 0;
        if (sc->sc_data_direction == DMA_FROM_DEVICE)
                flags = FCPIO_ICMND_RDDATA;
        else if (sc->sc_data_direction == DMA_TO_DEVICE)
                flags = FCPIO_ICMND_WRDATA;

        exch_flags = 0;
        if ((fnic->config.flags & VFCF_FCP_SEQ_LVL_ERR) &&
            (rp->flags & FC_RP_FLAGS_RETRY))
                exch_flags |= FCPIO_ICMND_SRFLAG_RETRY;

        fnic_queue_wq_copy_desc_icmnd_16(wq, sc->request->tag,
                                         0, exch_flags, io_req->sgl_cnt,
                                         SCSI_SENSE_BUFFERSIZE,
                                         io_req->sgl_list_pa,
                                         io_req->sense_buf_pa,
                                         0, /* scsi cmd ref, always 0 */
                                         pri_tag, /* scsi pri and tag */
                                         flags, /* command flags */
                                         sc->cmnd, sc->cmd_len,
                                         scsi_bufflen(sc),
                                         fc_lun.scsi_lun, io_req->port_id,
                                         rport->maxframe_size, rp->r_a_tov,
                                         rp->e_d_tov);

        atomic64_inc(&fnic->fnic_stats.fw_stats.active_fw_reqs);
        if (atomic64_read(&fnic->fnic_stats.fw_stats.active_fw_reqs) >
                  atomic64_read(&fnic->fnic_stats.fw_stats.max_fw_reqs))
                atomic64_set(&fnic->fnic_stats.fw_stats.max_fw_reqs,
                  atomic64_read(&fnic->fnic_stats.fw_stats.active_fw_reqs));

        spin_unlock_irqrestore(&fnic->wq_copy_lock[0], intr_flags);
        return 0;
}

/*
 * fnic_queuecommand
 * Routine to send a scsi cdb
 * Called with host_lock held and interrupts disabled.
 */
static int fnic_queuecommand_lck(struct scsi_cmnd *sc, void (*done)(struct scsi_cmnd *))
{
        struct fc_lport *lp = shost_priv(sc->device->host);
        struct fc_rport *rport;
        struct fnic_io_req *io_req = NULL;
        struct fnic *fnic = lport_priv(lp);
        struct fnic_stats *fnic_stats = &fnic->fnic_stats;
        struct vnic_wq_copy *wq;
        int ret;
        u64 cmd_trace;
        int sg_count = 0;
        unsigned long flags;
        unsigned long ptr;

        if (unlikely(fnic_chk_state_flags_locked(fnic, FNIC_FLAGS_IO_BLOCKED)))
                return SCSI_MLQUEUE_HOST_BUSY;

        rport = starget_to_rport(scsi_target(sc->device));
        ret = fc_remote_port_chkready(rport);
        if (ret) {
                atomic64_inc(&fnic_stats->misc_stats.rport_not_ready);
                sc->result = ret;
                done(sc);
                return 0;
        }

        if (lp->state != LPORT_ST_READY || !(lp->link_up))
                return SCSI_MLQUEUE_HOST_BUSY;

        atomic_inc(&fnic->in_flight);

        /*
         * Release host lock, use driver resource specific locks from here.
         * Don't re-enable interrupts in case they were disabled prior to the
         * caller disabling them.
         */
        spin_unlock(lp->host->host_lock);
        CMD_STATE(sc) = FNIC_IOREQ_NOT_INITED;
        CMD_FLAGS(sc) = FNIC_NO_FLAGS;

        /* Get a new io_req for this SCSI IO */
        io_req = mempool_alloc(fnic->io_req_pool, GFP_ATOMIC);
        if (!io_req) {
                atomic64_inc(&fnic_stats->io_stats.alloc_failures);
                ret = SCSI_MLQUEUE_HOST_BUSY;
                goto out;
        }
        memset(io_req, 0, sizeof(*io_req));

        /* Map the data buffer */
        sg_count = scsi_dma_map(sc);
        if (sg_count < 0) {
                FNIC_TRACE(fnic_queuecommand, sc->device->host->host_no,
                          sc->request->tag, sc, 0, sc->cmnd[0],
                          sg_count, CMD_STATE(sc));
                mempool_free(io_req, fnic->io_req_pool);
                goto out;
        }

        /* Determine the type of scatter/gather list we need */
        io_req->sgl_cnt = sg_count;
        io_req->sgl_type = FNIC_SGL_CACHE_DFLT;
        if (sg_count > FNIC_DFLT_SG_DESC_CNT)
                io_req->sgl_type = FNIC_SGL_CACHE_MAX;

        if (sg_count) {
                io_req->sgl_list =
                        mempool_alloc(fnic->io_sgl_pool[io_req->sgl_type],
                                      GFP_ATOMIC);
                if (!io_req->sgl_list) {
                        atomic64_inc(&fnic_stats->io_stats.alloc_failures);
                        ret = SCSI_MLQUEUE_HOST_BUSY;
                        scsi_dma_unmap(sc);
                        mempool_free(io_req, fnic->io_req_pool);
                        goto out;
                }

                /* Cache sgl list allocated address before alignment */
                io_req->sgl_list_alloc = io_req->sgl_list;
                ptr = (unsigned long) io_req->sgl_list;
                if (ptr % FNIC_SG_DESC_ALIGN) {
                        io_req->sgl_list = (struct host_sg_desc *)
                                (((unsigned long) ptr
                                  + FNIC_SG_DESC_ALIGN - 1)
                                 & ~(FNIC_SG_DESC_ALIGN - 1));
                }
        }

        /* initialize rest of io_req */
        io_req->port_id = rport->port_id;
        io_req->start_time = jiffies;
        CMD_STATE(sc) = FNIC_IOREQ_CMD_PENDING;
        CMD_SP(sc) = (char *)io_req;
        CMD_FLAGS(sc) |= FNIC_IO_INITIALIZED;
        sc->scsi_done = done;

        /* create copy wq desc and enqueue it */
        wq = &fnic->wq_copy[0];
        ret = fnic_queue_wq_copy_desc(fnic, wq, io_req, sc, sg_count);
        if (ret) {
                /*
                 * In case another thread cancelled the request,
                 * refetch the pointer under the lock.
                 */
                spinlock_t *io_lock = fnic_io_lock_hash(fnic, sc);
                FNIC_TRACE(fnic_queuecommand, sc->device->host->host_no,
                          sc->request->tag, sc, 0, 0, 0,
                          (((u64)CMD_FLAGS(sc) << 32) | CMD_STATE(sc)));
                spin_lock_irqsave(io_lock, flags);
                io_req = (struct fnic_io_req *)CMD_SP(sc);
                CMD_SP(sc) = NULL;
                CMD_STATE(sc) = FNIC_IOREQ_CMD_COMPLETE;
                spin_unlock_irqrestore(io_lock, flags);
                if (io_req) {
                        fnic_release_ioreq_buf(fnic, io_req, sc);
                        mempool_free(io_req, fnic->io_req_pool);
                }
        } else {
                atomic64_inc(&fnic_stats->io_stats.active_ios);
                atomic64_inc(&fnic_stats->io_stats.num_ios);
                if (atomic64_read(&fnic_stats->io_stats.active_ios) >
                          atomic64_read(&fnic_stats->io_stats.max_active_ios))
                        atomic64_set(&fnic_stats->io_stats.max_active_ios,
                             atomic64_read(&fnic_stats->io_stats.active_ios));

                /* REVISIT: Use per IO lock in the final code */
                CMD_FLAGS(sc) |= FNIC_IO_ISSUED;
        }
out:
        cmd_trace = ((u64)sc->cmnd[0] << 56 | (u64)sc->cmnd[7] << 40 |
                        (u64)sc->cmnd[8] << 32 | (u64)sc->cmnd[2] << 24 |
                        (u64)sc->cmnd[3] << 16 | (u64)sc->cmnd[4] << 8 |
                        sc->cmnd[5]);

        FNIC_TRACE(fnic_queuecommand, sc->device->host->host_no,
                  sc->request->tag, sc, io_req,
                  sg_count, cmd_trace,
                  (((u64)CMD_FLAGS(sc) << 32) | CMD_STATE(sc)));
        atomic_dec(&fnic->in_flight);
        /* acquire host lock before returning to SCSI */
        spin_lock(lp->host->host_lock);
        return ret;
}
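
/*
 * DEF_SCSI_QCMD() (a SCSI midlayer helper from <scsi/scsi_cmnd.h>)
 * generates the actual fnic_queuecommand() entry point: a wrapper that
 * takes the host lock with interrupts disabled and then calls
 * fnic_queuecommand_lck() above.
 */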
DEF_SCSI_QCMD(fnic_queuecommand)

/*
 * fnic_fcpio_fw_reset_cmpl_handler
 * Routine to handle fw reset completion
 */
static int fnic_fcpio_fw_reset_cmpl_handler(struct fnic *fnic,
                                            struct fcpio_fw_req *desc)
{
        u8 type;
        u8 hdr_status;
        struct fcpio_tag tag;
        int ret = 0;
        unsigned long flags;
        struct reset_stats *reset_stats = &fnic->fnic_stats.reset_stats;

        fcpio_header_dec(&desc->hdr, &type, &hdr_status, &tag);

        atomic64_inc(&reset_stats->fw_reset_completions);

        /* Clean up all outstanding io requests */
        fnic_cleanup_io(fnic, SCSI_NO_TAG);

        atomic64_set(&fnic->fnic_stats.fw_stats.active_fw_reqs, 0);
        atomic64_set(&fnic->fnic_stats.io_stats.active_ios, 0);

        spin_lock_irqsave(&fnic->fnic_lock, flags);

        /* fnic should be in FC_TRANS_ETH_MODE */
        if (fnic->state == FNIC_IN_FC_TRANS_ETH_MODE) {
                /* Check status of reset completion */
                if (!hdr_status) {
                        FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
                                      "reset cmpl success\n");
                        /* Ready to send flogi out */
                        fnic->state = FNIC_IN_ETH_MODE;
                } else {
                        FNIC_SCSI_DBG(KERN_DEBUG,
                                      fnic->lport->host,
                                      "fnic fw_reset failed: %s\n",
                                      fnic_fcpio_status_to_str(hdr_status));

                        /*
                         * Unable to change to eth mode, cannot send out flogi
                         * Change state to fc mode, so that subsequent Flogi
                         * requests from libFC will cause more attempts to
                         * reset the firmware. Free the cached flogi
                         */
                        fnic->state = FNIC_IN_FC_MODE;
                        atomic64_inc(&reset_stats->fw_reset_failures);
                        ret = -1;
                }
        } else {
                FNIC_SCSI_DBG(KERN_DEBUG,
                              fnic->lport->host,
                              "Unexpected state %s while processing"
                              " reset cmpl\n", fnic_state_to_str(fnic->state));
                atomic64_inc(&reset_stats->fw_reset_failures);
                ret = -1;
        }

        /* Thread removing device blocks till firmware reset is complete */
        if (fnic->remove_wait)
                complete(fnic->remove_wait);

        /*
         * If fnic is being removed, or fw reset failed
         * free the flogi frame. Else, send it out
         */
        if (fnic->remove_wait || ret) {
                spin_unlock_irqrestore(&fnic->fnic_lock, flags);
                skb_queue_purge(&fnic->tx_queue);
                goto reset_cmpl_handler_end;
        }

        spin_unlock_irqrestore(&fnic->fnic_lock, flags);

        fnic_flush_tx(fnic);

 reset_cmpl_handler_end:
        fnic_clear_state_flags(fnic, FNIC_FLAGS_FWRESET);

        return ret;
}

/*
 * fnic_fcpio_flogi_reg_cmpl_handler
 * Routine to handle flogi register completion
 */
static int fnic_fcpio_flogi_reg_cmpl_handler(struct fnic *fnic,
                                             struct fcpio_fw_req *desc)
{
        u8 type;
        u8 hdr_status;
        struct fcpio_tag tag;
        int ret = 0;
        unsigned long flags;

        fcpio_header_dec(&desc->hdr, &type, &hdr_status, &tag);

        /* Update fnic state based on status of flogi reg completion */
        spin_lock_irqsave(&fnic->fnic_lock, flags);

        if (fnic->state == FNIC_IN_ETH_TRANS_FC_MODE) {

                /* Check flogi registration completion status */
                if (!hdr_status) {
                        FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
                                      "flogi reg succeeded\n");
                        fnic->state = FNIC_IN_FC_MODE;
                } else {
                        FNIC_SCSI_DBG(KERN_DEBUG,
                                      fnic->lport->host,
                                      "fnic flogi reg failed: %s\n",
                                      fnic_fcpio_status_to_str(hdr_status));
                        fnic->state = FNIC_IN_ETH_MODE;
                        ret = -1;
                }
        } else {
                FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
                              "Unexpected fnic state %s while"
                              " processing flogi reg completion\n",
                              fnic_state_to_str(fnic->state));
                ret = -1;
        }

        if (!ret) {
                if (fnic->stop_rx_link_events) {
                        spin_unlock_irqrestore(&fnic->fnic_lock, flags);
                        goto reg_cmpl_handler_end;
                }
                spin_unlock_irqrestore(&fnic->fnic_lock, flags);

                fnic_flush_tx(fnic);
                queue_work(fnic_event_queue, &fnic->frame_work);
        } else {
                spin_unlock_irqrestore(&fnic->fnic_lock, flags);
        }

reg_cmpl_handler_end:
        return ret;
}

static inline int is_ack_index_in_range(struct vnic_wq_copy *wq,
                                        u16 request_out)
{
        if (wq->to_clean_index <= wq->to_use_index) {
                /* out of range, stale request_out index */
                if (request_out < wq->to_clean_index ||
                    request_out >= wq->to_use_index)
                        return 0;
        } else {
                /* out of range, stale request_out index */
                if (request_out < wq->to_clean_index &&
                    request_out >= wq->to_use_index)
                        return 0;
        }
        /* request_out index is in range */
        return 1;
}
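
/*
 * Illustrative example: in a ring of 8 descriptors with
 * to_clean_index = 2 and to_use_index = 6, only acks for request_out
 * in [2, 5] are in range. Once the ring wraps (to_clean_index = 6,
 * to_use_index = 2), the valid window becomes 6, 7, 0, 1 instead.
 */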

/*
 * Mark that ack received and store the Ack index. If there are multiple
 * acks received before Tx thread cleans it up, the latest value will be
 * used which is correct behavior. This state should be in the copy Wq
 * instead of in the fnic
 */
static inline void fnic_fcpio_ack_handler(struct fnic *fnic,
                                          unsigned int cq_index,
                                          struct fcpio_fw_req *desc)
{
        struct vnic_wq_copy *wq;
        u16 request_out = desc->u.ack.request_out;
        unsigned long flags;
        u64 *ox_id_tag = (u64 *)(void *)desc;

        /* mark the ack state */
        wq = &fnic->wq_copy[cq_index - fnic->raw_wq_count - fnic->rq_count];
        spin_lock_irqsave(&fnic->wq_copy_lock[0], flags);

        fnic->fnic_stats.misc_stats.last_ack_time = jiffies;
        if (is_ack_index_in_range(wq, request_out)) {
                fnic->fw_ack_index[0] = request_out;
                fnic->fw_ack_recd[0] = 1;
        } else
                atomic64_inc(
                        &fnic->fnic_stats.misc_stats.ack_index_out_of_range);

        spin_unlock_irqrestore(&fnic->wq_copy_lock[0], flags);
        FNIC_TRACE(fnic_fcpio_ack_handler,
                  fnic->lport->host->host_no, 0, 0, ox_id_tag[2], ox_id_tag[3],
                  ox_id_tag[4], ox_id_tag[5]);
}

/*
 * fnic_fcpio_icmnd_cmpl_handler
 * Routine to handle icmnd completions
 */
static void fnic_fcpio_icmnd_cmpl_handler(struct fnic *fnic,
                                         struct fcpio_fw_req *desc)
{
        u8 type;
        u8 hdr_status;
        struct fcpio_tag tag;
        u32 id;
        u64 xfer_len = 0;
        struct fcpio_icmnd_cmpl *icmnd_cmpl;
        struct fnic_io_req *io_req;
        struct scsi_cmnd *sc;
        struct fnic_stats *fnic_stats = &fnic->fnic_stats;
        unsigned long flags;
        spinlock_t *io_lock;
        u64 cmd_trace;
        unsigned long start_time;

        /* Decode the cmpl description to get the io_req id */
        fcpio_header_dec(&desc->hdr, &type, &hdr_status, &tag);
        fcpio_tag_id_dec(&tag, &id);
        icmnd_cmpl = &desc->u.icmnd_cmpl;

        if (id >= fnic->fnic_max_tag_id) {
                shost_printk(KERN_ERR, fnic->lport->host,
                        "Tag out of range tag %x hdr status = %s\n",
                             id, fnic_fcpio_status_to_str(hdr_status));
                return;
        }

        sc = scsi_host_find_tag(fnic->lport->host, id);
        WARN_ON_ONCE(!sc);
        if (!sc) {
                atomic64_inc(&fnic_stats->io_stats.sc_null);
                shost_printk(KERN_ERR, fnic->lport->host,
                          "icmnd_cmpl sc is null - "
                          "hdr status = %s tag = 0x%x desc = 0x%p\n",
                          fnic_fcpio_status_to_str(hdr_status), id, desc);
                FNIC_TRACE(fnic_fcpio_icmnd_cmpl_handler,
                          fnic->lport->host->host_no, id,
                          ((u64)icmnd_cmpl->_resvd0[1] << 16 |
                          (u64)icmnd_cmpl->_resvd0[0]),
                          ((u64)hdr_status << 16 |
                          (u64)icmnd_cmpl->scsi_status << 8 |
                          (u64)icmnd_cmpl->flags), desc,
                          (u64)icmnd_cmpl->residual, 0);
                return;
        }

        io_lock = fnic_io_lock_hash(fnic, sc);
        spin_lock_irqsave(io_lock, flags);
        io_req = (struct fnic_io_req *)CMD_SP(sc);
        WARN_ON_ONCE(!io_req);
        if (!io_req) {
                atomic64_inc(&fnic_stats->io_stats.ioreq_null);
                CMD_FLAGS(sc) |= FNIC_IO_REQ_NULL;
                spin_unlock_irqrestore(io_lock, flags);
                shost_printk(KERN_ERR, fnic->lport->host,
                          "icmnd_cmpl io_req is null - "
                          "hdr status = %s tag = 0x%x sc 0x%p\n",
                          fnic_fcpio_status_to_str(hdr_status), id, sc);
                return;
        }
        start_time = io_req->start_time;

        /* firmware completed the io */
        io_req->io_completed = 1;

        /*
         * if SCSI-ML has already issued abort on this command,
         * ignore completion of the IO. The abts path will clean it up
         */
        if (CMD_STATE(sc) == FNIC_IOREQ_ABTS_PENDING) {
                spin_unlock_irqrestore(io_lock, flags);
                CMD_FLAGS(sc) |= FNIC_IO_ABTS_PENDING;
                switch (hdr_status) {
                case FCPIO_SUCCESS:
                        CMD_FLAGS(sc) |= FNIC_IO_DONE;
                        FNIC_SCSI_DBG(KERN_INFO, fnic->lport->host,
                                  "icmnd_cmpl ABTS pending hdr status = %s "
                                  "sc  0x%p scsi_status %x  residual %d\n",
                                  fnic_fcpio_status_to_str(hdr_status), sc,
                                  icmnd_cmpl->scsi_status,
                                  icmnd_cmpl->residual);
                        break;
                case FCPIO_ABORTED:
                        CMD_FLAGS(sc) |= FNIC_IO_ABORTED;
                        break;
                default:
                        FNIC_SCSI_DBG(KERN_INFO, fnic->lport->host,
                                          "icmnd_cmpl abts pending "
                                          "hdr status = %s tag = 0x%x sc = 0x%p\n",
                                          fnic_fcpio_status_to_str(hdr_status),
                                          id, sc);
                        break;
                }
                return;
        }

        /* Mark the IO as complete */
        CMD_STATE(sc) = FNIC_IOREQ_CMD_COMPLETE;

        icmnd_cmpl = &desc->u.icmnd_cmpl;

        switch (hdr_status) {
        case FCPIO_SUCCESS:
                sc->result = (DID_OK << 16) | icmnd_cmpl->scsi_status;
                xfer_len = scsi_bufflen(sc);
                scsi_set_resid(sc, icmnd_cmpl->residual);

                if (icmnd_cmpl->flags & FCPIO_ICMND_CMPL_RESID_UNDER)
                        xfer_len -= icmnd_cmpl->residual;

                if (icmnd_cmpl->scsi_status == SAM_STAT_TASK_SET_FULL)
                        atomic64_inc(&fnic_stats->misc_stats.queue_fulls);
                break;

        case FCPIO_TIMEOUT:          /* request was timed out */
                atomic64_inc(&fnic_stats->misc_stats.fcpio_timeout);
                sc->result = (DID_TIME_OUT << 16) | icmnd_cmpl->scsi_status;
                break;

        case FCPIO_ABORTED:          /* request was aborted */
                atomic64_inc(&fnic_stats->misc_stats.fcpio_aborted);
                sc->result = (DID_ERROR << 16) | icmnd_cmpl->scsi_status;
                break;

        case FCPIO_DATA_CNT_MISMATCH: /* recv/sent more/less data than exp. */
                atomic64_inc(&fnic_stats->misc_stats.data_count_mismatch);
                scsi_set_resid(sc, icmnd_cmpl->residual);
                sc->result = (DID_ERROR << 16) | icmnd_cmpl->scsi_status;
                break;

        case FCPIO_OUT_OF_RESOURCE:  /* out of resources to complete request */
                atomic64_inc(&fnic_stats->fw_stats.fw_out_of_resources);
                sc->result = (DID_REQUEUE << 16) | icmnd_cmpl->scsi_status;
                break;

        case FCPIO_IO_NOT_FOUND:     /* requested I/O was not found */
                atomic64_inc(&fnic_stats->io_stats.io_not_found);
                sc->result = (DID_ERROR << 16) | icmnd_cmpl->scsi_status;
                break;

        case FCPIO_SGL_INVALID:      /* request was aborted due to sgl error */
                atomic64_inc(&fnic_stats->misc_stats.sgl_invalid);
                sc->result = (DID_ERROR << 16) | icmnd_cmpl->scsi_status;
                break;

        case FCPIO_FW_ERR:           /* request was terminated due fw error */
                atomic64_inc(&fnic_stats->fw_stats.io_fw_errs);
                sc->result = (DID_ERROR << 16) | icmnd_cmpl->scsi_status;
                break;

        case FCPIO_MSS_INVALID:      /* request was aborted due to mss error */
                atomic64_inc(&fnic_stats->misc_stats.mss_invalid);
                sc->result = (DID_ERROR << 16) | icmnd_cmpl->scsi_status;
                break;

        case FCPIO_INVALID_HEADER:   /* header contains invalid data */
        case FCPIO_INVALID_PARAM:    /* some parameter in request invalid */
        case FCPIO_REQ_NOT_SUPPORTED:/* request type is not supported */
        default:
                shost_printk(KERN_ERR, fnic->lport->host, "hdr status = %s\n",
                             fnic_fcpio_status_to_str(hdr_status));
                sc->result = (DID_ERROR << 16) | icmnd_cmpl->scsi_status;
                break;
        }

        if (hdr_status != FCPIO_SUCCESS) {
                atomic64_inc(&fnic_stats->io_stats.io_failures);
                shost_printk(KERN_ERR, fnic->lport->host, "hdr status = %s\n",
                             fnic_fcpio_status_to_str(hdr_status));
        }
        /* Break link with the SCSI command */
        CMD_SP(sc) = NULL;
        CMD_FLAGS(sc) |= FNIC_IO_DONE;

        spin_unlock_irqrestore(io_lock, flags);

        fnic_release_ioreq_buf(fnic, io_req, sc);

        mempool_free(io_req, fnic->io_req_pool);

        cmd_trace = ((u64)hdr_status << 56) |
                  (u64)icmnd_cmpl->scsi_status << 48 |
                  (u64)icmnd_cmpl->flags << 40 | (u64)sc->cmnd[0] << 32 |
                  (u64)sc->cmnd[2] << 24 | (u64)sc->cmnd[3] << 16 |
                  (u64)sc->cmnd[4] << 8 | sc->cmnd[5];

        FNIC_TRACE(fnic_fcpio_icmnd_cmpl_handler,
                  sc->device->host->host_no, id, sc,
                  ((u64)icmnd_cmpl->_resvd0[1] << 56 |
                  (u64)icmnd_cmpl->_resvd0[0] << 48 |
                  jiffies_to_msecs(jiffies - start_time)),
                  desc, cmd_trace,
                  (((u64)CMD_FLAGS(sc) << 32) | CMD_STATE(sc)));

        if (sc->sc_data_direction == DMA_FROM_DEVICE) {
                fnic->lport->host_stats.fcp_input_requests++;
                fnic->fcp_input_bytes += xfer_len;
        } else if (sc->sc_data_direction == DMA_TO_DEVICE) {
                fnic->lport->host_stats.fcp_output_requests++;
                fnic->fcp_output_bytes += xfer_len;
        } else
                fnic->lport->host_stats.fcp_control_requests++;

        atomic64_dec(&fnic_stats->io_stats.active_ios);
        if (atomic64_read(&fnic->io_cmpl_skip))
                atomic64_dec(&fnic->io_cmpl_skip);
        else
                atomic64_inc(&fnic_stats->io_stats.io_completions);

        /* Call SCSI completion function to complete the IO */
        if (sc->scsi_done)
                sc->scsi_done(sc);
}

/* fnic_fcpio_itmf_cmpl_handler
 * Routine to handle itmf completions
 */
static void fnic_fcpio_itmf_cmpl_handler(struct fnic *fnic,
                                        struct fcpio_fw_req *desc)
{
        u8 type;
        u8 hdr_status;
        struct fcpio_tag tag;
        u32 id;
        struct scsi_cmnd *sc;
        struct fnic_io_req *io_req;
        struct fnic_stats *fnic_stats = &fnic->fnic_stats;
        struct abort_stats *abts_stats = &fnic->fnic_stats.abts_stats;
        struct terminate_stats *term_stats = &fnic->fnic_stats.term_stats;
        struct misc_stats *misc_stats = &fnic->fnic_stats.misc_stats;
        unsigned long flags;
        spinlock_t *io_lock;
        unsigned long start_time;

        fcpio_header_dec(&desc->hdr, &type, &hdr_status, &tag);
        fcpio_tag_id_dec(&tag, &id);

        if ((id & FNIC_TAG_MASK) >= fnic->fnic_max_tag_id) {
                shost_printk(KERN_ERR, fnic->lport->host,
                "Tag out of range tag %x hdr status = %s\n",
                id, fnic_fcpio_status_to_str(hdr_status));
                return;
        }

        sc = scsi_host_find_tag(fnic->lport->host, id & FNIC_TAG_MASK);
        WARN_ON_ONCE(!sc);
        if (!sc) {
                atomic64_inc(&fnic_stats->io_stats.sc_null);
                shost_printk(KERN_ERR, fnic->lport->host,
                          "itmf_cmpl sc is null - hdr status = %s tag = 0x%x\n",
                          fnic_fcpio_status_to_str(hdr_status), id);
                return;
        }
        io_lock = fnic_io_lock_hash(fnic, sc);
        spin_lock_irqsave(io_lock, flags);
        io_req = (struct fnic_io_req *)CMD_SP(sc);
        WARN_ON_ONCE(!io_req);
        if (!io_req) {
                atomic64_inc(&fnic_stats->io_stats.ioreq_null);
                spin_unlock_irqrestore(io_lock, flags);
                CMD_FLAGS(sc) |= FNIC_IO_ABT_TERM_REQ_NULL;
                shost_printk(KERN_ERR, fnic->lport->host,
                          "itmf_cmpl io_req is null - "
                          "hdr status = %s tag = 0x%x sc 0x%p\n",
                          fnic_fcpio_status_to_str(hdr_status), id, sc);
                return;
        }
        start_time = io_req->start_time;

        if ((id & FNIC_TAG_ABORT) && (id & FNIC_TAG_DEV_RST)) {
                /* Abort and terminate completion of device reset req */
                /* REVISIT : Add asserts about various flags */
                FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
                              "dev reset abts cmpl recd. id %x status %s\n",
                              id, fnic_fcpio_status_to_str(hdr_status));
                CMD_STATE(sc) = FNIC_IOREQ_ABTS_COMPLETE;
                CMD_ABTS_STATUS(sc) = hdr_status;
                CMD_FLAGS(sc) |= FNIC_DEV_RST_DONE;
                if (io_req->abts_done)
                        complete(io_req->abts_done);
                spin_unlock_irqrestore(io_lock, flags);
        } else if (id & FNIC_TAG_ABORT) {
                /* Completion of abort cmd */
                switch (hdr_status) {
                case FCPIO_SUCCESS:
                        break;
                case FCPIO_TIMEOUT:
                        if (CMD_FLAGS(sc) & FNIC_IO_ABTS_ISSUED)
                                atomic64_inc(&abts_stats->abort_fw_timeouts);
                        else
                                atomic64_inc(
                                        &term_stats->terminate_fw_timeouts);
                        break;
                case FCPIO_IO_NOT_FOUND:
                        if (CMD_FLAGS(sc) & FNIC_IO_ABTS_ISSUED)
                                atomic64_inc(&abts_stats->abort_io_not_found);
                        else
                                atomic64_inc(
                                        &term_stats->terminate_io_not_found);
                        break;
                default:
                        if (CMD_FLAGS(sc) & FNIC_IO_ABTS_ISSUED)
                                atomic64_inc(&abts_stats->abort_failures);
                        else
                                atomic64_inc(
                                        &term_stats->terminate_failures);
                        break;
                }
                if (CMD_STATE(sc) != FNIC_IOREQ_ABTS_PENDING) {
                        /* This is a late completion. Ignore it */
                        spin_unlock_irqrestore(io_lock, flags);
                        return;
                }
                CMD_ABTS_STATUS(sc) = hdr_status;
                CMD_FLAGS(sc) |= FNIC_IO_ABT_TERM_DONE;

                atomic64_dec(&fnic_stats->io_stats.active_ios);
                if (atomic64_read(&fnic->io_cmpl_skip))
                        atomic64_dec(&fnic->io_cmpl_skip);
                else
                        atomic64_inc(&fnic_stats->io_stats.io_completions);

                if (!(CMD_FLAGS(sc) & (FNIC_IO_ABORTED | FNIC_IO_DONE)))
                        atomic64_inc(&misc_stats->no_icmnd_itmf_cmpls);

                FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
                              "abts cmpl recd. id %d status %s\n",
                              (int)(id & FNIC_TAG_MASK),
                              fnic_fcpio_status_to_str(hdr_status));

                /*
                 * If scsi_eh thread is blocked waiting for abts to complete,
                 * signal completion to it. IO will be cleaned in the thread,
                 * else clean it in this context.
                 */
                if (io_req->abts_done) {
                        complete(io_req->abts_done);
                        spin_unlock_irqrestore(io_lock, flags);
                } else {
                        FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
                                      "abts cmpl, completing IO\n");
                        CMD_SP(sc) = NULL;
                        sc->result = (DID_ERROR << 16);

                        spin_unlock_irqrestore(io_lock, flags);

                        fnic_release_ioreq_buf(fnic, io_req, sc);
                        mempool_free(io_req, fnic->io_req_pool);
                        if (sc->scsi_done) {
                                FNIC_TRACE(fnic_fcpio_itmf_cmpl_handler,
                                        sc->device->host->host_no, id,
                                        sc,
                                        jiffies_to_msecs(jiffies - start_time),
                                        desc,
                                        (((u64)hdr_status << 40) |
                                        (u64)sc->cmnd[0] << 32 |
                                        (u64)sc->cmnd[2] << 24 |
                                        (u64)sc->cmnd[3] << 16 |
                                        (u64)sc->cmnd[4] << 8 | sc->cmnd[5]),
                                        (((u64)CMD_FLAGS(sc) << 32) |
                                        CMD_STATE(sc)));
                                sc->scsi_done(sc);
                        }
                }

        } else if (id & FNIC_TAG_DEV_RST) {
                /* Completion of device reset */
                CMD_LR_STATUS(sc) = hdr_status;
                if (CMD_STATE(sc) == FNIC_IOREQ_ABTS_PENDING) {
                        spin_unlock_irqrestore(io_lock, flags);
                        CMD_FLAGS(sc) |= FNIC_DEV_RST_ABTS_PENDING;
                        FNIC_TRACE(fnic_fcpio_itmf_cmpl_handler,
                                  sc->device->host->host_no, id, sc,
                                  jiffies_to_msecs(jiffies - start_time),
                                  desc, 0,
                                  (((u64)CMD_FLAGS(sc) << 32) | CMD_STATE(sc)));
                        FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
                                "Terminate pending "
                                "dev reset cmpl recd. id %d status %s\n",
                                (int)(id & FNIC_TAG_MASK),
                                fnic_fcpio_status_to_str(hdr_status));
                        return;
                }
                if (CMD_FLAGS(sc) & FNIC_DEV_RST_TIMED_OUT) {
                        /* Need to wait for terminate completion */
                        spin_unlock_irqrestore(io_lock, flags);
                        FNIC_TRACE(fnic_fcpio_itmf_cmpl_handler,
                                  sc->device->host->host_no, id, sc,
                                  jiffies_to_msecs(jiffies - start_time),
                                  desc, 0,
                                  (((u64)CMD_FLAGS(sc) << 32) | CMD_STATE(sc)));
                        FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
                                "dev reset cmpl recd after time out. "
                                "id %d status %s\n",
                                (int)(id & FNIC_TAG_MASK),
                                fnic_fcpio_status_to_str(hdr_status));
                        return;
                }
                CMD_STATE(sc) = FNIC_IOREQ_CMD_COMPLETE;
                CMD_FLAGS(sc) |= FNIC_DEV_RST_DONE;
                FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
                              "dev reset cmpl recd. id %d status %s\n",
                              (int)(id & FNIC_TAG_MASK),
                              fnic_fcpio_status_to_str(hdr_status));
                if (io_req->dr_done)
                        complete(io_req->dr_done);
                spin_unlock_irqrestore(io_lock, flags);

        } else {
                shost_printk(KERN_ERR, fnic->lport->host,
                             "Unexpected itmf io state %s tag %x\n",
                             fnic_ioreq_state_to_str(CMD_STATE(sc)), id);
                spin_unlock_irqrestore(io_lock, flags);
        }
}

/*
 * fnic_fcpio_cmpl_handler
 * Routine to service the cq for wq_copy
 */
static int fnic_fcpio_cmpl_handler(struct vnic_dev *vdev,
                                   unsigned int cq_index,
                                   struct fcpio_fw_req *desc)
{
        struct fnic *fnic = vnic_dev_priv(vdev);

        switch (desc->hdr.type) {
        case FCPIO_ICMND_CMPL: /* fw completed a command */
        case FCPIO_ITMF_CMPL: /* fw completed itmf (abort cmd, lun reset)*/
        case FCPIO_FLOGI_REG_CMPL: /* fw completed flogi_reg */
        case FCPIO_FLOGI_FIP_REG_CMPL: /* fw completed flogi_fip_reg */
        case FCPIO_RESET_CMPL: /* fw completed reset */
                atomic64_dec(&fnic->fnic_stats.fw_stats.active_fw_reqs);
                break;
        default:
                break;
        }

        switch (desc->hdr.type) {
        case FCPIO_ACK: /* fw copied copy wq desc to its queue */
                fnic_fcpio_ack_handler(fnic, cq_index, desc);
                break;

        case FCPIO_ICMND_CMPL: /* fw completed a command */
                fnic_fcpio_icmnd_cmpl_handler(fnic, desc);
                break;

        case FCPIO_ITMF_CMPL: /* fw completed itmf (abort cmd, lun reset)*/
                fnic_fcpio_itmf_cmpl_handler(fnic, desc);
                break;

        case FCPIO_FLOGI_REG_CMPL: /* fw completed flogi_reg */
        case FCPIO_FLOGI_FIP_REG_CMPL: /* fw completed flogi_fip_reg */
                fnic_fcpio_flogi_reg_cmpl_handler(fnic, desc);
                break;

        case FCPIO_RESET_CMPL: /* fw completed reset */
                fnic_fcpio_fw_reset_cmpl_handler(fnic, desc);
                break;

        default:
                FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
                              "firmware completion type %d\n",
                              desc->hdr.type);
                break;
        }

        return 0;
}
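
/*
 * Note the two-pass structure above: the first switch only maintains
 * the active_fw_reqs counter for completion types that retire a
 * firmware request (ACKs do not), while the second dispatches each
 * descriptor to its type-specific handler.
 */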

/*
 * fnic_wq_copy_cmpl_handler
 * Routine to process wq copy
 */
int fnic_wq_copy_cmpl_handler(struct fnic *fnic, int copy_work_to_do)
{
        unsigned int wq_work_done = 0;
        unsigned int i, cq_index;
        unsigned int cur_work_done;

        for (i = 0; i < fnic->wq_copy_count; i++) {
                cq_index = i + fnic->raw_wq_count + fnic->rq_count;
                cur_work_done = vnic_cq_copy_service(&fnic->cq[cq_index],
                                                     fnic_fcpio_cmpl_handler,
                                                     copy_work_to_do);
                wq_work_done += cur_work_done;
        }
        return wq_work_done;
}
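
/*
 * Completion queue layout assumed by the cq_index arithmetic here (and
 * reversed in fnic_fcpio_ack_handler() above): the CQs for the raw WQs
 * come first, then those for the RQs, so the CQ that services copy WQ i
 * lives at index raw_wq_count + rq_count + i.
 */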
1255
1256static void fnic_cleanup_io(struct fnic *fnic, int exclude_id)
1257{
1258        int i;
1259        struct fnic_io_req *io_req;
1260        unsigned long flags = 0;
1261        struct scsi_cmnd *sc;
1262        spinlock_t *io_lock;
1263        unsigned long start_time = 0;
1264        struct fnic_stats *fnic_stats = &fnic->fnic_stats;
1265
1266        for (i = 0; i < fnic->fnic_max_tag_id; i++) {
1267                if (i == exclude_id)
1268                        continue;
1269
1270                io_lock = fnic_io_lock_tag(fnic, i);
1271                spin_lock_irqsave(io_lock, flags);
1272                sc = scsi_host_find_tag(fnic->lport->host, i);
1273                if (!sc) {
1274                        spin_unlock_irqrestore(io_lock, flags);
1275                        continue;
1276                }
1277
1278                io_req = (struct fnic_io_req *)CMD_SP(sc);
1279                if ((CMD_FLAGS(sc) & FNIC_DEVICE_RESET) &&
1280                        !(CMD_FLAGS(sc) & FNIC_DEV_RST_DONE)) {
1281                        /*
1282                         * We will be here only when FW completes reset
1283                         * without sending completions for outstanding ios.
1284                         */
1285                        CMD_FLAGS(sc) |= FNIC_DEV_RST_DONE;
1286                        if (io_req && io_req->dr_done)
1287                                complete(io_req->dr_done);
1288                        else if (io_req && io_req->abts_done)
1289                                complete(io_req->abts_done);
1290                        spin_unlock_irqrestore(io_lock, flags);
1291                        continue;
1292                } else if (CMD_FLAGS(sc) & FNIC_DEVICE_RESET) {
1293                        spin_unlock_irqrestore(io_lock, flags);
1294                        continue;
1295                }
1296                if (!io_req) {
1297                        spin_unlock_irqrestore(io_lock, flags);
1298                        goto cleanup_scsi_cmd;
1299                }
1300
1301                CMD_SP(sc) = NULL;
1302
1303                spin_unlock_irqrestore(io_lock, flags);
1304
1305                /*
1306                 * If there is a scsi_cmnd associated with this io_req, then
1307                 * free the corresponding state
1308                 */
1309                start_time = io_req->start_time;
1310                fnic_release_ioreq_buf(fnic, io_req, sc);
1311                mempool_free(io_req, fnic->io_req_pool);
1312
1313cleanup_scsi_cmd:
1314                sc->result = DID_TRANSPORT_DISRUPTED << 16;
1315                FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
1316                              "%s: sc duration = %lu DID_TRANSPORT_DISRUPTED\n",
1317                              __func__, (jiffies - start_time));
1318
1319                if (atomic64_read(&fnic->io_cmpl_skip))
1320                        atomic64_dec(&fnic->io_cmpl_skip);
1321                else
1322                        atomic64_inc(&fnic_stats->io_stats.io_completions);
1323
1324                /* Complete the command to SCSI */
1325                if (sc->scsi_done) {
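                            /*
                             * Pack CDB byte 0 and bytes 2-5 into a single
                             * u64 for the trace record; byte 1 is not
                             * captured by this format.
                             */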
1326                        FNIC_TRACE(fnic_cleanup_io,
1327                                  sc->device->host->host_no, i, sc,
1328                                  jiffies_to_msecs(jiffies - start_time),
1329                                  0, ((u64)sc->cmnd[0] << 32 |
1330                                  (u64)sc->cmnd[2] << 24 |
1331                                  (u64)sc->cmnd[3] << 16 |
1332                                  (u64)sc->cmnd[4] << 8 | sc->cmnd[5]),
1333                                  (((u64)CMD_FLAGS(sc) << 32) | CMD_STATE(sc)));
1334
1335                        sc->scsi_done(sc);
1336                }
1337        }
1338}
1339
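    /*
     * fnic_wq_copy_cleanup_handler
     * Per-descriptor cleanup callback for the copy WQ. Releases the io_req
     * tied to the descriptor's tag and completes the command to the
     * mid-layer with DID_NO_CONNECT.
     */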
1340void fnic_wq_copy_cleanup_handler(struct vnic_wq_copy *wq,
1341                                  struct fcpio_host_req *desc)
1342{
1343        u32 id;
1344        struct fnic *fnic = vnic_dev_priv(wq->vdev);
1345        struct fnic_io_req *io_req;
1346        struct scsi_cmnd *sc;
1347        unsigned long flags;
1348        spinlock_t *io_lock;
1349        unsigned long start_time = 0;
1350
1351        /* get the tag reference */
1352        fcpio_tag_id_dec(&desc->hdr.tag, &id);
1353        id &= FNIC_TAG_MASK;
1354
1355        if (id >= fnic->fnic_max_tag_id)
1356                return;
1357
1358        sc = scsi_host_find_tag(fnic->lport->host, id);
1359        if (!sc)
1360                return;
1361
1362        io_lock = fnic_io_lock_hash(fnic, sc);
1363        spin_lock_irqsave(io_lock, flags);
1364
1365        /* Get the IO context which this desc refers to */
1366        io_req = (struct fnic_io_req *)CMD_SP(sc);
1367
1368        /* fnic interrupts are turned off by now */
1369
1370        if (!io_req) {
1371                spin_unlock_irqrestore(io_lock, flags);
1372                goto wq_copy_cleanup_scsi_cmd;
1373        }
1374
1375        CMD_SP(sc) = NULL;
1376
1377        spin_unlock_irqrestore(io_lock, flags);
1378
1379        start_time = io_req->start_time;
1380        fnic_release_ioreq_buf(fnic, io_req, sc);
1381        mempool_free(io_req, fnic->io_req_pool);
1382
1383wq_copy_cleanup_scsi_cmd:
1384        sc->result = DID_NO_CONNECT << 16;
1385        FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host, "wq_copy_cleanup_handler:"
1386                      " DID_NO_CONNECT\n");
1387
1388        if (sc->scsi_done) {
1389                FNIC_TRACE(fnic_wq_copy_cleanup_handler,
1390                          sc->device->host->host_no, id, sc,
1391                          jiffies_to_msecs(jiffies - start_time),
1392                          0, ((u64)sc->cmnd[0] << 32 |
1393                          (u64)sc->cmnd[2] << 24 | (u64)sc->cmnd[3] << 16 |
1394                          (u64)sc->cmnd[4] << 8 | sc->cmnd[5]),
1395                          (((u64)CMD_FLAGS(sc) << 32) | CMD_STATE(sc)));
1396
1397                sc->scsi_done(sc);
1398        }
1399}
1400
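    /*
     * fnic_queue_abort_io_req
     * Queues an abort/terminate task management request for the given tag
     * on copy WQ 0. Returns 0 if the descriptor was posted to firmware,
     * 1 if IOs are blocked or no descriptor was available.
     */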
1401static inline int fnic_queue_abort_io_req(struct fnic *fnic, int tag,
1402                                          u32 task_req, u8 *fc_lun,
1403                                          struct fnic_io_req *io_req)
1404{
1405        struct vnic_wq_copy *wq = &fnic->wq_copy[0];
1406        struct Scsi_Host *host = fnic->lport->host;
1407        struct misc_stats *misc_stats = &fnic->fnic_stats.misc_stats;
1408        unsigned long flags;
1409
1410        spin_lock_irqsave(host->host_lock, flags);
1411        if (unlikely(fnic_chk_state_flags_locked(fnic,
1412                                                FNIC_FLAGS_IO_BLOCKED))) {
1413                spin_unlock_irqrestore(host->host_lock, flags);
1414                return 1;
1415        }
1416        atomic_inc(&fnic->in_flight);
1417        spin_unlock_irqrestore(host->host_lock, flags);
1418
1419        spin_lock_irqsave(&fnic->wq_copy_lock[0], flags);
1420
1421        if (vnic_wq_copy_desc_avail(wq) <= fnic->wq_copy_desc_low[0])
1422                free_wq_copy_descs(fnic, wq);
1423
1424        if (!vnic_wq_copy_desc_avail(wq)) {
1425                spin_unlock_irqrestore(&fnic->wq_copy_lock[0], flags);
1426                atomic_dec(&fnic->in_flight);
1427                FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
1428                        "fnic_queue_abort_io_req: failure: no descriptors\n");
1429                atomic64_inc(&misc_stats->abts_cpwq_alloc_failures);
1430                return 1;
1431        }
1432        fnic_queue_wq_copy_desc_itmf(wq, tag | FNIC_TAG_ABORT,
1433                                     0, task_req, tag, fc_lun, io_req->port_id,
1434                                     fnic->config.ra_tov, fnic->config.ed_tov);
1435
1436        atomic64_inc(&fnic->fnic_stats.fw_stats.active_fw_reqs);
1437        if (atomic64_read(&fnic->fnic_stats.fw_stats.active_fw_reqs) >
1438                  atomic64_read(&fnic->fnic_stats.fw_stats.max_fw_reqs))
1439                atomic64_set(&fnic->fnic_stats.fw_stats.max_fw_reqs,
1440                  atomic64_read(&fnic->fnic_stats.fw_stats.active_fw_reqs));
1441
1442        spin_unlock_irqrestore(&fnic->wq_copy_lock[0], flags);
1443        atomic_dec(&fnic->in_flight);
1444
1445        return 0;
1446}
1447
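    /*
     * fnic_rport_exch_reset
     * Walks the tag map and issues a terminate to firmware for every IO
     * still pending on the remote port identified by port_id.
     */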
1448static void fnic_rport_exch_reset(struct fnic *fnic, u32 port_id)
1449{
1450        int tag;
1451        int abt_tag;
1452        int term_cnt = 0;
1453        struct fnic_io_req *io_req;
1454        spinlock_t *io_lock;
1455        unsigned long flags;
1456        struct scsi_cmnd *sc;
1457        struct reset_stats *reset_stats = &fnic->fnic_stats.reset_stats;
1458        struct terminate_stats *term_stats = &fnic->fnic_stats.term_stats;
1459        struct scsi_lun fc_lun;
1460        enum fnic_ioreq_state old_ioreq_state;
1461
1462        FNIC_SCSI_DBG(KERN_DEBUG,
1463                      fnic->lport->host,
1464                      "fnic_rport_exch_reset called portid 0x%06x\n",
1465                      port_id);
1466
1467        if (fnic->in_remove)
1468                return;
1469
1470        for (tag = 0; tag < fnic->fnic_max_tag_id; tag++) {
1471                abt_tag = tag;
1472                io_lock = fnic_io_lock_tag(fnic, tag);
1473                spin_lock_irqsave(io_lock, flags);
1474                sc = scsi_host_find_tag(fnic->lport->host, tag);
1475                if (!sc) {
1476                        spin_unlock_irqrestore(io_lock, flags);
1477                        continue;
1478                }
1479
1480                io_req = (struct fnic_io_req *)CMD_SP(sc);
1481
1482                if (!io_req || io_req->port_id != port_id) {
1483                        spin_unlock_irqrestore(io_lock, flags);
1484                        continue;
1485                }
1486
1487                if ((CMD_FLAGS(sc) & FNIC_DEVICE_RESET) &&
1488                        (!(CMD_FLAGS(sc) & FNIC_DEV_RST_ISSUED))) {
1489                        FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
1490                        "fnic_rport_exch_reset dev rst not pending sc 0x%p\n",
1491                        sc);
1492                        spin_unlock_irqrestore(io_lock, flags);
1493                        continue;
1494                }
1495
1496                /*
1497                 * Found IO that is still pending with firmware and
1498                 * belongs to rport that went away
1499                 */
1500                if (CMD_STATE(sc) == FNIC_IOREQ_ABTS_PENDING) {
1501                        spin_unlock_irqrestore(io_lock, flags);
1502                        continue;
1503                }
1504                if (io_req->abts_done) {
1505                        shost_printk(KERN_ERR, fnic->lport->host,
1506                        "fnic_rport_exch_reset: io_req->abts_done is set "
1507                        "state is %s\n",
1508                        fnic_ioreq_state_to_str(CMD_STATE(sc)));
1509                }
1510
1511                if (!(CMD_FLAGS(sc) & FNIC_IO_ISSUED)) {
1512                        shost_printk(KERN_ERR, fnic->lport->host,
1513                                  "rport_exch_reset "
1514                                  "IO not yet issued %p tag 0x%x flags "
1515                                  "%x state %d\n",
1516                                  sc, tag, CMD_FLAGS(sc), CMD_STATE(sc));
1517                }
1518                old_ioreq_state = CMD_STATE(sc);
1519                CMD_STATE(sc) = FNIC_IOREQ_ABTS_PENDING;
1520                CMD_ABTS_STATUS(sc) = FCPIO_INVALID_CODE;
1521                if (CMD_FLAGS(sc) & FNIC_DEVICE_RESET) {
1522                        atomic64_inc(&reset_stats->device_reset_terminates);
1523                        abt_tag = (tag | FNIC_TAG_DEV_RST);
1524                        FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
1525                        "fnic_rport_exch_reset dev rst sc 0x%p\n",
1526                        sc);
1527                }
1528
1529                BUG_ON(io_req->abts_done);
1530
1531                FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
1532                              "fnic_rport_exch_reset: Issuing abts\n");
1533
1534                spin_unlock_irqrestore(io_lock, flags);
1535
1536                /* Now queue the abort command to firmware */
1537                int_to_scsilun(sc->device->lun, &fc_lun);
1538
1539                if (fnic_queue_abort_io_req(fnic, abt_tag,
1540                                            FCPIO_ITMF_ABT_TASK_TERM,
1541                                            fc_lun.scsi_lun, io_req)) {
1542                        /*
1543                         * Revert the cmd state back to old state, if
1544                         * it hasn't changed in between. This cmd will get
1545                         * aborted later by scsi_eh, or cleaned up during
1546                         * lun reset
1547                         */
1548                        spin_lock_irqsave(io_lock, flags);
1549                        if (CMD_STATE(sc) == FNIC_IOREQ_ABTS_PENDING)
1550                                CMD_STATE(sc) = old_ioreq_state;
1551                        spin_unlock_irqrestore(io_lock, flags);
1552                } else {
1553                        spin_lock_irqsave(io_lock, flags);
1554                        if (CMD_FLAGS(sc) & FNIC_DEVICE_RESET)
1555                                CMD_FLAGS(sc) |= FNIC_DEV_RST_TERM_ISSUED;
1556                        else
1557                                CMD_FLAGS(sc) |= FNIC_IO_INTERNAL_TERM_ISSUED;
1558                        spin_unlock_irqrestore(io_lock, flags);
1559                        atomic64_inc(&term_stats->terminates);
1560                        term_cnt++;
1561                }
1562        }
1563        if (term_cnt > atomic64_read(&term_stats->max_terminates))
1564                atomic64_set(&term_stats->max_terminates, term_cnt);
1565
1566}
1567
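    /*
     * fnic_terminate_rport_io
     * Invoked via the FC transport's terminate_rport_io callback when a
     * remote port goes away; issues a terminate to firmware for every IO
     * still pending on that rport.
     */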
1568void fnic_terminate_rport_io(struct fc_rport *rport)
1569{
1570        int tag;
1571        int abt_tag;
1572        int term_cnt = 0;
1573        struct fnic_io_req *io_req;
1574        spinlock_t *io_lock;
1575        unsigned long flags;
1576        struct scsi_cmnd *sc;
1577        struct scsi_lun fc_lun;
1578        struct fc_rport_libfc_priv *rdata;
1579        struct fc_lport *lport;
1580        struct fnic *fnic;
1581        struct fc_rport *cmd_rport;
1582        struct reset_stats *reset_stats;
1583        struct terminate_stats *term_stats;
1584        enum fnic_ioreq_state old_ioreq_state;
1585
1586        if (!rport) {
1587                printk(KERN_ERR "fnic_terminate_rport_io: rport is NULL\n");
1588                return;
1589        }
1590        rdata = rport->dd_data;
1591
1592        if (!rdata) {
1593                printk(KERN_ERR "fnic_terminate_rport_io: rdata is NULL\n");
1594                return;
1595        }
1596        lport = rdata->local_port;
1597
1598        if (!lport) {
1599                printk(KERN_ERR "fnic_terminate_rport_io: lport is NULL\n");
1600                return;
1601        }
1602        fnic = lport_priv(lport);
1603        FNIC_SCSI_DBG(KERN_DEBUG,
1604                      fnic->lport->host, "fnic_terminate_rport_io called"
1605                      " wwpn 0x%llx, wwnn 0x%llx, rport 0x%p, portid 0x%06x\n",
1606                      rport->port_name, rport->node_name, rport,
1607                      rport->port_id);
1608
1609        if (fnic->in_remove)
1610                return;
1611
1612        reset_stats = &fnic->fnic_stats.reset_stats;
1613        term_stats = &fnic->fnic_stats.term_stats;
1614
1615        for (tag = 0; tag < fnic->fnic_max_tag_id; tag++) {
1616                abt_tag = tag;
1617                io_lock = fnic_io_lock_tag(fnic, tag);
1618                spin_lock_irqsave(io_lock, flags);
1619                sc = scsi_host_find_tag(fnic->lport->host, tag);
1620                if (!sc) {
1621                        spin_unlock_irqrestore(io_lock, flags);
1622                        continue;
1623                }
1624
1625                cmd_rport = starget_to_rport(scsi_target(sc->device));
1626                if (rport != cmd_rport) {
1627                        spin_unlock_irqrestore(io_lock, flags);
1628                        continue;
1629                }
1630
1631                io_req = (struct fnic_io_req *)CMD_SP(sc);
1632
1633                if (!io_req) {
1634                        spin_unlock_irqrestore(io_lock, flags);
1635                        continue;
1636                }
1637
1638                if ((CMD_FLAGS(sc) & FNIC_DEVICE_RESET) &&
1639                        (!(CMD_FLAGS(sc) & FNIC_DEV_RST_ISSUED))) {
1640                        FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
1641                        "fnic_terminate_rport_io dev rst not pending sc 0x%p\n",
1642                        sc);
1643                        spin_unlock_irqrestore(io_lock, flags);
1644                        continue;
1645                }
1646                /*
1647                 * Found IO that is still pending with firmware and
1648                 * belongs to rport that went away
1649                 */
1650                if (CMD_STATE(sc) == FNIC_IOREQ_ABTS_PENDING) {
1651                        spin_unlock_irqrestore(io_lock, flags);
1652                        continue;
1653                }
1654                if (io_req->abts_done) {
1655                        shost_printk(KERN_ERR, fnic->lport->host,
1656                        "fnic_terminate_rport_io: io_req->abts_done is set "
1657                        "state is %s\n",
1658                        fnic_ioreq_state_to_str(CMD_STATE(sc)));
1659                }
1660                if (!(CMD_FLAGS(sc) & FNIC_IO_ISSUED)) {
1661                        FNIC_SCSI_DBG(KERN_INFO, fnic->lport->host,
1662                                  "fnic_terminate_rport_io "
1663                                  "IO not yet issued %p tag 0x%x flags "
1664                                  "%x state %d\n",
1665                                  sc, tag, CMD_FLAGS(sc), CMD_STATE(sc));
1666                }
1667                old_ioreq_state = CMD_STATE(sc);
1668                CMD_STATE(sc) = FNIC_IOREQ_ABTS_PENDING;
1669                CMD_ABTS_STATUS(sc) = FCPIO_INVALID_CODE;
1670                if (CMD_FLAGS(sc) & FNIC_DEVICE_RESET) {
1671                        atomic64_inc(&reset_stats->device_reset_terminates);
1672                        abt_tag = (tag | FNIC_TAG_DEV_RST);
1673                        FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
1674                        "fnic_terminate_rport_io dev rst sc 0x%p\n", sc);
1675                }
1676
1677                BUG_ON(io_req->abts_done);
1678
1679                FNIC_SCSI_DBG(KERN_DEBUG,
1680                              fnic->lport->host,
1681                              "fnic_terminate_rport_io: Issuing abts\n");
1682
1683                spin_unlock_irqrestore(io_lock, flags);
1684
1685                /* Now queue the abort command to firmware */
1686                int_to_scsilun(sc->device->lun, &fc_lun);
1687
1688                if (fnic_queue_abort_io_req(fnic, abt_tag,
1689                                            FCPIO_ITMF_ABT_TASK_TERM,
1690                                            fc_lun.scsi_lun, io_req)) {
1691                        /*
1692                         * Revert the cmd state back to old state, if
1693                         * it hasn't changed in between. This cmd will get
1694                         * aborted later by scsi_eh, or cleaned up during
1695                         * lun reset
1696                         */
1697                        spin_lock_irqsave(io_lock, flags);
1698                        if (CMD_STATE(sc) == FNIC_IOREQ_ABTS_PENDING)
1699                                CMD_STATE(sc) = old_ioreq_state;
1700                        spin_unlock_irqrestore(io_lock, flags);
1701                } else {
1702                        spin_lock_irqsave(io_lock, flags);
1703                        if (CMD_FLAGS(sc) & FNIC_DEVICE_RESET)
1704                                CMD_FLAGS(sc) |= FNIC_DEV_RST_TERM_ISSUED;
1705                        else
1706                                CMD_FLAGS(sc) |= FNIC_IO_INTERNAL_TERM_ISSUED;
1707                        spin_unlock_irqrestore(io_lock, flags);
1708                        atomic64_inc(&term_stats->terminates);
1709                        term_cnt++;
1710                }
1711        }
1712        if (term_cnt > atomic64_read(&term_stats->max_terminates))
1713                atomic64_set(&term_stats->max_terminates, term_cnt);
1714
1715}
1716
1717/*
1718 * This function is exported to the SCSI mid-layer for sending abort commands.
1719 * A SCSI IO is represented by an io_req in the driver.
1720 * The io_req is linked to the SCSI command, and thus to the ULP's IO.
1721 */
1722int fnic_abort_cmd(struct scsi_cmnd *sc)
1723{
1724        struct fc_lport *lp;
1725        struct fnic *fnic;
1726        struct fnic_io_req *io_req = NULL;
1727        struct fc_rport *rport;
1728        spinlock_t *io_lock;
1729        unsigned long flags;
1730        unsigned long start_time = 0;
1731        int ret = SUCCESS;
1732        u32 task_req = 0;
1733        struct scsi_lun fc_lun;
1734        struct fnic_stats *fnic_stats;
1735        struct abort_stats *abts_stats;
1736        struct terminate_stats *term_stats;
1737        enum fnic_ioreq_state old_ioreq_state;
1738        int tag;
1739        DECLARE_COMPLETION_ONSTACK(tm_done);
1740
1741        /* Wait for rport to unblock */
1742        fc_block_scsi_eh(sc);
1743
1744        /* Get local-port, check ready and link up */
1745        lp = shost_priv(sc->device->host);
1746
1747        fnic = lport_priv(lp);
1748        fnic_stats = &fnic->fnic_stats;
1749        abts_stats = &fnic->fnic_stats.abts_stats;
1750        term_stats = &fnic->fnic_stats.term_stats;
1751
1752        rport = starget_to_rport(scsi_target(sc->device));
1753        tag = sc->request->tag;
1754        FNIC_SCSI_DBG(KERN_DEBUG,
1755                fnic->lport->host,
1756                "Abort Cmd called FCID 0x%x, LUN 0x%x TAG %x flags %x\n",
1757                rport->port_id, sc->device->lun, tag, CMD_FLAGS(sc));
1758
1759        CMD_FLAGS(sc) = FNIC_NO_FLAGS;
1760
1761        if (lp->state != LPORT_ST_READY || !(lp->link_up)) {
1762                ret = FAILED;
1763                goto fnic_abort_cmd_end;
1764        }
1765
1766        /*
1767         * Avoid a race between SCSI issuing the abort and the device
1768         * completing the command.
1769         *
1770         * If the command is already completed by the fw cmpl code,
1771         * we just return SUCCESS from here. This means that the abort
1772         * succeeded. In the SCSI ML, since the timeout for the command has
1773         * happened, the completion won't actually complete the command
1774         * and it will be considered an aborted command.
1775         *
1776         * The CMD_SP will not be cleared except while holding io_req_lock.
1777         */
1778        io_lock = fnic_io_lock_hash(fnic, sc);
1779        spin_lock_irqsave(io_lock, flags);
1780        io_req = (struct fnic_io_req *)CMD_SP(sc);
1781        if (!io_req) {
1782                spin_unlock_irqrestore(io_lock, flags);
1783                goto fnic_abort_cmd_end;
1784        }
1785
1786        io_req->abts_done = &tm_done;
1787
1788        if (CMD_STATE(sc) == FNIC_IOREQ_ABTS_PENDING) {
1789                spin_unlock_irqrestore(io_lock, flags);
1790                goto wait_pending;
1791        }
1792        /*
1793         * Command is still pending, need to abort it.
1794         * If the firmware completes the command after this point,
1795         * the completion won't be passed up to the mid-layer, since the
1796         * abort has already started.
1797         */
1798        old_ioreq_state = CMD_STATE(sc);
1799        CMD_STATE(sc) = FNIC_IOREQ_ABTS_PENDING;
1800        CMD_ABTS_STATUS(sc) = FCPIO_INVALID_CODE;
1801
1802        spin_unlock_irqrestore(io_lock, flags);
1803
1804        /*
1805         * Check readiness of the remote port. If the path to remote
1806         * port is up, then send abts to the remote port to terminate
1807         * the IO. Else, just locally terminate the IO in the firmware
1808         */
1809        if (fc_remote_port_chkready(rport) == 0)
1810                task_req = FCPIO_ITMF_ABT_TASK;
1811        else {
1812                atomic64_inc(&fnic_stats->misc_stats.rport_not_ready);
1813                task_req = FCPIO_ITMF_ABT_TASK_TERM;
1814        }
1815
1816        /* Now queue the abort command to firmware */
1817        int_to_scsilun(sc->device->lun, &fc_lun);
1818
1819        if (fnic_queue_abort_io_req(fnic, sc->request->tag, task_req,
1820                                    fc_lun.scsi_lun, io_req)) {
1821                spin_lock_irqsave(io_lock, flags);
1822                if (CMD_STATE(sc) == FNIC_IOREQ_ABTS_PENDING)
1823                        CMD_STATE(sc) = old_ioreq_state;
1824                io_req = (struct fnic_io_req *)CMD_SP(sc);
1825                if (io_req)
1826                        io_req->abts_done = NULL;
1827                spin_unlock_irqrestore(io_lock, flags);
1828                ret = FAILED;
1829                goto fnic_abort_cmd_end;
1830        }
1831        if (task_req == FCPIO_ITMF_ABT_TASK) {
1832                CMD_FLAGS(sc) |= FNIC_IO_ABTS_ISSUED;
1833                atomic64_inc(&fnic_stats->abts_stats.aborts);
1834        } else {
1835                CMD_FLAGS(sc) |= FNIC_IO_TERM_ISSUED;
1836                atomic64_inc(&fnic_stats->term_stats.terminates);
1837        }
1838
1839        /*
1840         * We queued an abort IO, wait for its completion.
1841         * Once the firmware completes the abort command, it will
1842         * wake up this thread.
1843         */
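        /* The wait below is bounded by 2 * RA_TOV + ED_TOV milliseconds. */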
1844 wait_pending:
1845        wait_for_completion_timeout(&tm_done,
1846                                    msecs_to_jiffies
1847                                    (2 * fnic->config.ra_tov +
1848                                     fnic->config.ed_tov));
1849
1850        /* Check the abort status */
1851        spin_lock_irqsave(io_lock, flags);
1852
1853        io_req = (struct fnic_io_req *)CMD_SP(sc);
1854        if (!io_req) {
1855                atomic64_inc(&fnic_stats->io_stats.ioreq_null);
1856                spin_unlock_irqrestore(io_lock, flags);
1857                CMD_FLAGS(sc) |= FNIC_IO_ABT_TERM_REQ_NULL;
1858                ret = FAILED;
1859                goto fnic_abort_cmd_end;
1860        }
1861        io_req->abts_done = NULL;
1862
1863        /* fw did not complete abort, timed out */
1864        if (CMD_ABTS_STATUS(sc) == FCPIO_INVALID_CODE) {
1865                spin_unlock_irqrestore(io_lock, flags);
1866                if (task_req == FCPIO_ITMF_ABT_TASK) {
1867                        atomic64_inc(&abts_stats->abort_drv_timeouts);
1868                } else {
1869                        atomic64_inc(&term_stats->terminate_drv_timeouts);
1870                }
1871                CMD_FLAGS(sc) |= FNIC_IO_ABT_TERM_TIMED_OUT;
1872                ret = FAILED;
1873                goto fnic_abort_cmd_end;
1874        }
1875
1876        CMD_STATE(sc) = FNIC_IOREQ_ABTS_COMPLETE;
1877
1878        /*
1879         * firmware completed the abort, check the status,
1880         * free the io_req irrespective of failure or success
1881         */
1882        if (CMD_ABTS_STATUS(sc) != FCPIO_SUCCESS)
1883                ret = FAILED;
1884
1885        CMD_SP(sc) = NULL;
1886
1887        spin_unlock_irqrestore(io_lock, flags);
1888
1889        start_time = io_req->start_time;
1890        fnic_release_ioreq_buf(fnic, io_req, sc);
1891        mempool_free(io_req, fnic->io_req_pool);
1892
1893fnic_abort_cmd_end:
1894        FNIC_TRACE(fnic_abort_cmd, sc->device->host->host_no,
1895                  sc->request->tag, sc,
1896                  jiffies_to_msecs(jiffies - start_time),
1897                  0, ((u64)sc->cmnd[0] << 32 |
1898                  (u64)sc->cmnd[2] << 24 | (u64)sc->cmnd[3] << 16 |
1899                  (u64)sc->cmnd[4] << 8 | sc->cmnd[5]),
1900                  (((u64)CMD_FLAGS(sc) << 32) | CMD_STATE(sc)));
1901
1902        FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
1903                      "Returning from abort cmd type %x %s\n", task_req,
1904                      (ret == SUCCESS) ?
1905                      "SUCCESS" : "FAILED");
1906        return ret;
1907}
1908
1909static inline int fnic_queue_dr_io_req(struct fnic *fnic,
1910                                       struct scsi_cmnd *sc,
1911                                       struct fnic_io_req *io_req)
1912{
1913        struct vnic_wq_copy *wq = &fnic->wq_copy[0];
1914        struct Scsi_Host *host = fnic->lport->host;
1915        struct misc_stats *misc_stats = &fnic->fnic_stats.misc_stats;
1916        struct scsi_lun fc_lun;
1917        int ret = 0;
1918        unsigned long intr_flags;
1919
1920        spin_lock_irqsave(host->host_lock, intr_flags);
1921        if (unlikely(fnic_chk_state_flags_locked(fnic,
1922                                                FNIC_FLAGS_IO_BLOCKED))) {
1923                spin_unlock_irqrestore(host->host_lock, intr_flags);
1924                return FAILED;
1925        }
1926        atomic_inc(&fnic->in_flight);
1927        spin_unlock_irqrestore(host->host_lock, intr_flags);
1928
1929        spin_lock_irqsave(&fnic->wq_copy_lock[0], intr_flags);
1930
1931        if (vnic_wq_copy_desc_avail(wq) <= fnic->wq_copy_desc_low[0])
1932                free_wq_copy_descs(fnic, wq);
1933
1934        if (!vnic_wq_copy_desc_avail(wq)) {
1935                FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
1936                          "queue_dr_io_req failure - no descriptors\n");
1937                atomic64_inc(&misc_stats->devrst_cpwq_alloc_failures);
1938                ret = -EAGAIN;
1939                goto lr_io_req_end;
1940        }
1941
1942        /* fill in the lun info */
1943        int_to_scsilun(sc->device->lun, &fc_lun);
1944
1945        fnic_queue_wq_copy_desc_itmf(wq, sc->request->tag | FNIC_TAG_DEV_RST,
1946                                     0, FCPIO_ITMF_LUN_RESET, SCSI_NO_TAG,
1947                                     fc_lun.scsi_lun, io_req->port_id,
1948                                     fnic->config.ra_tov, fnic->config.ed_tov);
1949
1950        atomic64_inc(&fnic->fnic_stats.fw_stats.active_fw_reqs);
1951        if (atomic64_read(&fnic->fnic_stats.fw_stats.active_fw_reqs) >
1952                  atomic64_read(&fnic->fnic_stats.fw_stats.max_fw_reqs))
1953                atomic64_set(&fnic->fnic_stats.fw_stats.max_fw_reqs,
1954                  atomic64_read(&fnic->fnic_stats.fw_stats.active_fw_reqs));
1955
1956lr_io_req_end:
1957        spin_unlock_irqrestore(&fnic->wq_copy_lock[0], intr_flags);
1958        atomic_dec(&fnic->in_flight);
1959
1960        return ret;
1961}
1962
1963/*
1964 * Clean up any pending aborts on the lun
1965 * For each outstanding IO on this lun, whose abort is not completed by fw,
1966 * issue a local abort. Wait for abort to complete. Return 0 if all commands
1967 * successfully aborted, 1 otherwise
1968 */
1969static int fnic_clean_pending_aborts(struct fnic *fnic,
1970                                     struct scsi_cmnd *lr_sc)
1971{
1972        int tag, abt_tag;
1973        struct fnic_io_req *io_req;
1974        spinlock_t *io_lock;
1975        unsigned long flags;
1976        int ret = 0;
1977        struct scsi_cmnd *sc;
1978        struct scsi_lun fc_lun;
1979        struct scsi_device *lun_dev = lr_sc->device;
1980        DECLARE_COMPLETION_ONSTACK(tm_done);
1981        enum fnic_ioreq_state old_ioreq_state;
1982
1983        for (tag = 0; tag < fnic->fnic_max_tag_id; tag++) {
1984                io_lock = fnic_io_lock_tag(fnic, tag);
1985                spin_lock_irqsave(io_lock, flags);
1986                sc = scsi_host_find_tag(fnic->lport->host, tag);
1987                /*
1988                 * ignore this lun reset cmd or cmds that do not belong to
1989                 * this lun
1990                 */
1991                if (!sc || sc == lr_sc || sc->device != lun_dev) {
1992                        spin_unlock_irqrestore(io_lock, flags);
1993                        continue;
1994                }
1995
1996                io_req = (struct fnic_io_req *)CMD_SP(sc);
1997
1998                if (!io_req) {
1999                        spin_unlock_irqrestore(io_lock, flags);
2000                        continue;
2001                }
2002
2003                /*
2004                 * Found IO that is still pending with firmware and
2005                 * belongs to the LUN that we are resetting
2006                 */
2007                FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
2008                              "Found IO in %s on lun\n",
2009                              fnic_ioreq_state_to_str(CMD_STATE(sc)));
2010
2011                if (CMD_STATE(sc) == FNIC_IOREQ_ABTS_PENDING) {
2012                        spin_unlock_irqrestore(io_lock, flags);
2013                        continue;
2014                }
2015                if ((CMD_FLAGS(sc) & FNIC_DEVICE_RESET) &&
2016                        (!(CMD_FLAGS(sc) & FNIC_DEV_RST_ISSUED))) {
2017                        FNIC_SCSI_DBG(KERN_INFO, fnic->lport->host,
2018                                "%s dev rst not pending sc 0x%p\n", __func__,
2019                                sc);
2020                        spin_unlock_irqrestore(io_lock, flags);
2021                        continue;
2022                }
2023
2024                if (io_req->abts_done)
2025                        shost_printk(KERN_ERR, fnic->lport->host,
2026                          "%s: io_req->abts_done is set state is %s\n",
2027                          __func__, fnic_ioreq_state_to_str(CMD_STATE(sc)));
2028                old_ioreq_state = CMD_STATE(sc);
2029                /*
2030                 * Any pending IO issued prior to reset is expected to be
2031                 * in abts pending state, if not we need to set
2032                 * FNIC_IOREQ_ABTS_PENDING to indicate the IO is abort pending.
2033                 * When IO is completed, the IO will be handed over and
2034                 * handled in this function.
2035                 */
2036                CMD_STATE(sc) = FNIC_IOREQ_ABTS_PENDING;
2037
2038                BUG_ON(io_req->abts_done);
2039
2040                abt_tag = tag;
2041                if (CMD_FLAGS(sc) & FNIC_DEVICE_RESET) {
2042                        abt_tag |= FNIC_TAG_DEV_RST;
2043                        FNIC_SCSI_DBG(KERN_INFO, fnic->lport->host,
2044                                  "%s: dev rst sc 0x%p\n", __func__, sc);
2045                }
2046
2047                CMD_ABTS_STATUS(sc) = FCPIO_INVALID_CODE;
2048                io_req->abts_done = &tm_done;
2049                spin_unlock_irqrestore(io_lock, flags);
2050
2051                /* Now queue the abort command to firmware */
2052                int_to_scsilun(sc->device->lun, &fc_lun);
2053
2054                if (fnic_queue_abort_io_req(fnic, abt_tag,
2055                                            FCPIO_ITMF_ABT_TASK_TERM,
2056                                            fc_lun.scsi_lun, io_req)) {
2057                        spin_lock_irqsave(io_lock, flags);
2058                        io_req = (struct fnic_io_req *)CMD_SP(sc);
2059                        if (io_req)
2060                                io_req->abts_done = NULL;
2061                        if (CMD_STATE(sc) == FNIC_IOREQ_ABTS_PENDING)
2062                                CMD_STATE(sc) = old_ioreq_state;
2063                        spin_unlock_irqrestore(io_lock, flags);
2064                        ret = 1;
2065                        goto clean_pending_aborts_end;
2066                } else {
2067                        spin_lock_irqsave(io_lock, flags);
2068                        if (CMD_FLAGS(sc) & FNIC_DEVICE_RESET)
2069                                CMD_FLAGS(sc) |= FNIC_DEV_RST_TERM_ISSUED;
2070                        spin_unlock_irqrestore(io_lock, flags);
2071                }
2072                CMD_FLAGS(sc) |= FNIC_IO_INTERNAL_TERM_ISSUED;
2073
2074                wait_for_completion_timeout(&tm_done,
2075                                            msecs_to_jiffies
2076                                            (fnic->config.ed_tov));
2077
2078                /* Recheck cmd state to check if it is now aborted */
2079                spin_lock_irqsave(io_lock, flags);
2080                io_req = (struct fnic_io_req *)CMD_SP(sc);
2081                if (!io_req) {
2082                        spin_unlock_irqrestore(io_lock, flags);
2083                        CMD_FLAGS(sc) |= FNIC_IO_ABT_TERM_REQ_NULL;
2084                        continue;
2085                }
2086
2087                io_req->abts_done = NULL;
2088
2089                /* if abort is still pending with fw, fail */
2090                if (CMD_ABTS_STATUS(sc) == FCPIO_INVALID_CODE) {
2091                        spin_unlock_irqrestore(io_lock, flags);
2092                        CMD_FLAGS(sc) |= FNIC_IO_ABT_TERM_DONE;
2093                        ret = 1;
2094                        goto clean_pending_aborts_end;
2095                }
2096                CMD_STATE(sc) = FNIC_IOREQ_ABTS_COMPLETE;
2097                CMD_SP(sc) = NULL;
2098                spin_unlock_irqrestore(io_lock, flags);
2099
2100                fnic_release_ioreq_buf(fnic, io_req, sc);
2101                mempool_free(io_req, fnic->io_req_pool);
2102        }
2103
2104        msleep(2 * fnic->config.ed_tov);
2105
2106        /* walk again to check, if IOs are still pending in fw */
2107        if (fnic_is_abts_pending(fnic, lr_sc))
2108                ret = 1;
2109
2110clean_pending_aborts_end:
2111        return ret;
2112}
2113
2114/**
2115 * fnic_scsi_host_start_tag
2116 * Allocates a tag from the host's block-layer tag map
2117 */
2118static inline int
2119fnic_scsi_host_start_tag(struct fnic *fnic, struct scsi_cmnd *sc)
2120{
2121        struct blk_queue_tag *bqt = fnic->lport->host->bqt;
2122        int tag, ret = SCSI_NO_TAG;
2123
2124        BUG_ON(!bqt);
2125        if (!bqt) {
2126                pr_err("Tags are not supported\n");
2127                goto end;
2128        }
2129
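            /*
             * Claim a free bit in the block-layer tag bitmap. The search
             * starts at 1, and test_and_set_bit() makes the claim atomic,
             * so the loop simply retries if another context wins the race.
             */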
2130        do {
2131                tag = find_next_zero_bit(bqt->tag_map, bqt->max_depth, 1);
2132                if (tag >= bqt->max_depth) {
2133                        pr_err("Tag allocation failure\n");
2134                        goto end;
2135                }
2136        } while (test_and_set_bit(tag, bqt->tag_map));
2137
2138        bqt->tag_index[tag] = sc->request;
2139        sc->request->tag = tag;
2140        sc->tag = tag;
2141        if (!sc->request->special)
2142                sc->request->special = sc;
2143
2144        ret = tag;
2145
2146end:
2147        return ret;
2148}
2149
2150/**
2151 * fnic_scsi_host_end_tag
2152 * Frees the tag allocated by fnic_scsi_host_start_tag.
2153 */
2154static inline void
2155fnic_scsi_host_end_tag(struct fnic *fnic, struct scsi_cmnd *sc)
2156{
2157        struct blk_queue_tag *bqt = fnic->lport->host->bqt;
2158        int tag = sc->request->tag;
2159
2160        if (tag == SCSI_NO_TAG)
2161                return;
2162
2163        BUG_ON(!bqt || !bqt->tag_index[tag]);
2164        if (!bqt)
2165                return;
2166
2167        bqt->tag_index[tag] = NULL;
2168        clear_bit(tag, bqt->tag_map);
2169
2170        return;
2171}
2172
2173/*
2174 * The SCSI EH thread issues a LUN reset when one or more commands on a LUN
2175 * fail to get aborted. It calls the driver's eh_device_reset with a SCSI
2176 * command on the LUN.
2177 */
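    /*
     * Rough sketch of the flow below (not a strict call graph):
     *
     *   fnic_device_reset()
     *     -> fnic_queue_dr_io_req()      queue LUN reset ITMF to firmware
     *     -> wait on tm_done             bounded by FNIC_LUN_RESET_TIMEOUT
     *     -> fnic_clean_pending_aborts() terminate remaining IOs on the LUN
     *     -> free the io_req and return SUCCESS/FAILED to the EH thread
     */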
2178int fnic_device_reset(struct scsi_cmnd *sc)
2179{
2180        struct fc_lport *lp;
2181        struct fnic *fnic;
2182        struct fnic_io_req *io_req = NULL;
2183        struct fc_rport *rport;
2184        int status;
2185        int ret = FAILED;
2186        spinlock_t *io_lock;
2187        unsigned long flags;
2188        unsigned long start_time = 0;
2189        struct scsi_lun fc_lun;
2190        struct fnic_stats *fnic_stats;
2191        struct reset_stats *reset_stats;
2192        int tag = 0;
2193        DECLARE_COMPLETION_ONSTACK(tm_done);
2194        int tag_gen_flag = 0;   /* to track tags allocated by the fnic driver */
2195
2196        /* Wait for rport to unblock */
2197        fc_block_scsi_eh(sc);
2198
2199        /* Get local-port, check ready and link up */
2200        lp = shost_priv(sc->device->host);
2201
2202        fnic = lport_priv(lp);
2203        fnic_stats = &fnic->fnic_stats;
2204        reset_stats = &fnic->fnic_stats.reset_stats;
2205
2206        atomic64_inc(&reset_stats->device_resets);
2207
2208        rport = starget_to_rport(scsi_target(sc->device));
2209        FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
2210                      "Device reset called FCID 0x%x, LUN 0x%x sc 0x%p\n",
2211                      rport->port_id, sc->device->lun, sc);
2212
2213        if (lp->state != LPORT_ST_READY || !(lp->link_up))
2214                goto fnic_device_reset_end;
2215
2216        /* Check if remote port up */
2217        if (fc_remote_port_chkready(rport)) {
2218                atomic64_inc(&fnic_stats->misc_stats.rport_not_ready);
2219                goto fnic_device_reset_end;
2220        }
2221
2222        CMD_FLAGS(sc) = FNIC_DEVICE_RESET;
2223
2224        /* Allocate tag if not present */
2225        tag = sc->request->tag;
2226        if (unlikely(tag < 0)) {
2227                tag = fnic_scsi_host_start_tag(fnic, sc);
2228                if (unlikely(tag == SCSI_NO_TAG))
2229                        goto fnic_device_reset_end;
2230                tag_gen_flag = 1;
2231        }
2232        io_lock = fnic_io_lock_hash(fnic, sc);
2233        spin_lock_irqsave(io_lock, flags);
2234        io_req = (struct fnic_io_req *)CMD_SP(sc);
2235
2236        /*
2237         * If there is a io_req attached to this command, then use it,
2238         * else allocate a new one.
2239         */
2240        if (!io_req) {
2241                io_req = mempool_alloc(fnic->io_req_pool, GFP_ATOMIC);
2242                if (!io_req) {
2243                        spin_unlock_irqrestore(io_lock, flags);
2244                        goto fnic_device_reset_end;
2245                }
2246                memset(io_req, 0, sizeof(*io_req));
2247                io_req->port_id = rport->port_id;
2248                CMD_SP(sc) = (char *)io_req;
2249        }
2250        io_req->dr_done = &tm_done;
2251        CMD_STATE(sc) = FNIC_IOREQ_CMD_PENDING;
2252        CMD_LR_STATUS(sc) = FCPIO_INVALID_CODE;
2253        spin_unlock_irqrestore(io_lock, flags);
2254
2255        FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host, "TAG %x\n", tag);
2256
2257        /*
2258         * issue the device reset, if enqueue failed, clean up the ioreq
2259         * and break assoc with scsi cmd
2260         */
2261        if (fnic_queue_dr_io_req(fnic, sc, io_req)) {
2262                spin_lock_irqsave(io_lock, flags);
2263                io_req = (struct fnic_io_req *)CMD_SP(sc);
2264                if (io_req)
2265                        io_req->dr_done = NULL;
2266                goto fnic_device_reset_clean;
2267        }
2268        spin_lock_irqsave(io_lock, flags);
2269        CMD_FLAGS(sc) |= FNIC_DEV_RST_ISSUED;
2270        spin_unlock_irqrestore(io_lock, flags);
2271
2272        /*
2273         * Wait on the local completion for LUN reset.  The io_req may be
2274         * freed while we wait since we hold no lock.
2275         */
2276        wait_for_completion_timeout(&tm_done,
2277                                    msecs_to_jiffies(FNIC_LUN_RESET_TIMEOUT));
2278
2279        spin_lock_irqsave(io_lock, flags);
2280        io_req = (struct fnic_io_req *)CMD_SP(sc);
2281        if (!io_req) {
2282                spin_unlock_irqrestore(io_lock, flags);
2283                FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
2284                                "io_req is null tag 0x%x sc 0x%p\n", tag, sc);
2285                goto fnic_device_reset_end;
2286        }
2287        io_req->dr_done = NULL;
2288
2289        status = CMD_LR_STATUS(sc);
2290
2291        /*
2292         * If lun reset not completed, bail out with failed. io_req
2293         * gets cleaned up during higher levels of EH
2294         */
2295        if (status == FCPIO_INVALID_CODE) {
2296                atomic64_inc(&reset_stats->device_reset_timeouts);
2297                FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
2298                              "Device reset timed out\n");
2299                CMD_FLAGS(sc) |= FNIC_DEV_RST_TIMED_OUT;
2300                spin_unlock_irqrestore(io_lock, flags);
2301                int_to_scsilun(sc->device->lun, &fc_lun);
2302                /*
2303                 * Issue abort and terminate on device reset request.
2304                 * If q'ing of terminate fails, retry it after a delay.
2305                 */
2306                while (1) {
2307                        spin_lock_irqsave(io_lock, flags);
2308                        if (CMD_FLAGS(sc) & FNIC_DEV_RST_TERM_ISSUED) {
2309                                spin_unlock_irqrestore(io_lock, flags);
2310                                break;
2311                        }
2312                        spin_unlock_irqrestore(io_lock, flags);
2313                        if (fnic_queue_abort_io_req(fnic,
2314                                tag | FNIC_TAG_DEV_RST,
2315                                FCPIO_ITMF_ABT_TASK_TERM,
2316                                fc_lun.scsi_lun, io_req)) {
2317                                wait_for_completion_timeout(&tm_done,
2318                                msecs_to_jiffies(FNIC_ABT_TERM_DELAY_TIMEOUT));
2319                        } else {
2320                                spin_lock_irqsave(io_lock, flags);
2321                                CMD_FLAGS(sc) |= FNIC_DEV_RST_TERM_ISSUED;
2322                                CMD_STATE(sc) = FNIC_IOREQ_ABTS_PENDING;
2323                                io_req->abts_done = &tm_done;
2324                                spin_unlock_irqrestore(io_lock, flags);
2325                                FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
2326                                "Abort and terminate issued on Device reset "
2327                                "tag 0x%x sc 0x%p\n", tag, sc);
2328                                break;
2329                        }
2330                }
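                    /*
                     * Now wait for the terminate to complete, unless the
                     * firmware has already flagged the device reset done.
                     */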
2331                while (1) {
2332                        spin_lock_irqsave(io_lock, flags);
2333                        if (!(CMD_FLAGS(sc) & FNIC_DEV_RST_DONE)) {
2334                                spin_unlock_irqrestore(io_lock, flags);
2335                                wait_for_completion_timeout(&tm_done,
2336                                msecs_to_jiffies(FNIC_LUN_RESET_TIMEOUT));
2337                                break;
2338                        } else {
2339                                io_req = (struct fnic_io_req *)CMD_SP(sc);
2340                                io_req->abts_done = NULL;
2341                                goto fnic_device_reset_clean;
2342                        }
2343                }
2344        } else {
2345                spin_unlock_irqrestore(io_lock, flags);
2346        }
2347
2348        /* Completed, but not successful, clean up the io_req, return fail */
2349        if (status != FCPIO_SUCCESS) {
2350                spin_lock_irqsave(io_lock, flags);
2351                FNIC_SCSI_DBG(KERN_DEBUG,
2352                              fnic->lport->host,
2353                              "Device reset completed - failed\n");
2354                io_req = (struct fnic_io_req *)CMD_SP(sc);
2355                goto fnic_device_reset_clean;
2356        }
2357
2358        /*
2359         * Clean up any aborts on this lun that have still not
2360         * completed. If any of these fail, then LUN reset fails.
2361         * clean_pending_aborts cleans all cmds on this lun except
2362         * the lun reset cmd. If all cmds get cleaned, the lun reset
2363         * succeeds
2364         */
2365        if (fnic_clean_pending_aborts(fnic, sc)) {
2366                spin_lock_irqsave(io_lock, flags);
2367                io_req = (struct fnic_io_req *)CMD_SP(sc);
2368                FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
2369                              "Device reset failed"
2370                              " since could not abort all IOs\n");
2371                goto fnic_device_reset_clean;
2372        }
2373
2374        /* Clean lun reset command */
2375        spin_lock_irqsave(io_lock, flags);
2376        io_req = (struct fnic_io_req *)CMD_SP(sc);
2377        if (io_req)
2378                /* Completed, and successful */
2379                ret = SUCCESS;
2380
2381fnic_device_reset_clean:
2382        if (io_req)
2383                CMD_SP(sc) = NULL;
2384
2385        spin_unlock_irqrestore(io_lock, flags);
2386
2387        if (io_req) {
2388                start_time = io_req->start_time;
2389                fnic_release_ioreq_buf(fnic, io_req, sc);
2390                mempool_free(io_req, fnic->io_req_pool);
2391        }
2392
2393fnic_device_reset_end:
2394        FNIC_TRACE(fnic_device_reset, sc->device->host->host_no,
2395                  sc->request->tag, sc,
2396                  jiffies_to_msecs(jiffies - start_time),
2397                  0, ((u64)sc->cmnd[0] << 32 |
2398                  (u64)sc->cmnd[2] << 24 | (u64)sc->cmnd[3] << 16 |
2399                  (u64)sc->cmnd[4] << 8 | sc->cmnd[5]),
2400                  (((u64)CMD_FLAGS(sc) << 32) | CMD_STATE(sc)));
2401
2402        /* free tag if it is allocated */
2403        if (unlikely(tag_gen_flag))
2404                fnic_scsi_host_end_tag(fnic, sc);
2405
2406        FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
2407                      "Returning from device reset %s\n",
2408                      (ret == SUCCESS) ?
2409                      "SUCCESS" : "FAILED");
2410
2411        if (ret == FAILED)
2412                atomic64_inc(&reset_stats->device_reset_failures);
2413
2414        return ret;
2415}
2416
2417/* Clean up all IOs, clean up libFC local port */
2418int fnic_reset(struct Scsi_Host *shost)
2419{
2420        struct fc_lport *lp;
2421        struct fnic *fnic;
2422        int ret = 0;
2423        struct reset_stats *reset_stats;
2424
2425        lp = shost_priv(shost);
2426        fnic = lport_priv(lp);
2427        reset_stats = &fnic->fnic_stats.reset_stats;
2428
2429        FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
2430                      "fnic_reset called\n");
2431
2432        atomic64_inc(&reset_stats->fnic_resets);
2433
2434        /*
2435         * Reset local port, this will clean up libFC exchanges,
2436         * reset remote port sessions, and if link is up, begin flogi
2437         */
2438        ret = lp->tt.lport_reset(lp);
2439
2440        FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
2441                      "Returning from fnic reset %s\n",
2442                      (ret == 0) ?
2443                      "SUCCESS" : "FAILED");
2444
2445        if (ret == 0)
2446                atomic64_inc(&reset_stats->fnic_reset_completions);
2447        else
2448                atomic64_inc(&reset_stats->fnic_reset_failures);
2449
2450        return ret;
2451}
2452
2453/*
2454 * SCSI Error handling calls driver's eh_host_reset if all prior
2455 * error handling levels return FAILED. If host reset completes
2456 * successfully, and if link is up, then Fabric login begins.
2457 *
2458 * Host Reset is the highest level of error recovery. If this fails, then
2459 * host is offlined by SCSI.
2460 *
2461 */
2462int fnic_host_reset(struct scsi_cmnd *sc)
2463{
2464        int ret;
2465        unsigned long wait_host_tmo;
2466        struct Scsi_Host *shost = sc->device->host;
2467        struct fc_lport *lp = shost_priv(shost);
2468
2469        /*
2470         * If fnic_reset is successful, wait for fabric login to complete.
2471         * scsi-ml tries to send a TUR to every device if host reset is
2472         * successful, so before returning to scsi-ml the fabric should be up.
2473         */
2474        ret = (fnic_reset(shost) == 0) ? SUCCESS : FAILED;
2475        if (ret == SUCCESS) {
2476                wait_host_tmo = jiffies + FNIC_HOST_RESET_SETTLE_TIME * HZ;
2477                ret = FAILED;
2478                while (time_before(jiffies, wait_host_tmo)) {
2479                        if ((lp->state == LPORT_ST_READY) &&
2480                            (lp->link_up)) {
2481                                ret = SUCCESS;
2482                                break;
2483                        }
2484                        ssleep(1);
2485                }
2486        }
2487
2488        return ret;
2489}
2490
2491/*
2492 * This function is called from libFC when the host is being removed
2493 */
2494void fnic_scsi_abort_io(struct fc_lport *lp)
2495{
2496        int err = 0;
2497        unsigned long flags;
2498        enum fnic_state old_state;
2499        struct fnic *fnic = lport_priv(lp);
2500        DECLARE_COMPLETION_ONSTACK(remove_wait);
2501
2502        /* Issue firmware reset for fnic, wait for reset to complete */
2503retry_fw_reset:
2504        spin_lock_irqsave(&fnic->fnic_lock, flags);
2505        if (unlikely(fnic->state == FNIC_IN_FC_TRANS_ETH_MODE)) {
2506                /* fw reset is in progress, poll for its completion */
2507                spin_unlock_irqrestore(&fnic->fnic_lock, flags);
2508                msleep(100);
2509                goto retry_fw_reset;
2510        }
2511
2512        fnic->remove_wait = &remove_wait;
2513        old_state = fnic->state;
2514        fnic->state = FNIC_IN_FC_TRANS_ETH_MODE;
2515        fnic_update_mac_locked(fnic, fnic->ctlr.ctl_src_addr);
2516        spin_unlock_irqrestore(&fnic->fnic_lock, flags);
2517
2518        err = fnic_fw_reset_handler(fnic);
2519        if (err) {
2520                spin_lock_irqsave(&fnic->fnic_lock, flags);
2521                if (fnic->state == FNIC_IN_FC_TRANS_ETH_MODE)
2522                        fnic->state = old_state;
2523                fnic->remove_wait = NULL;
2524                spin_unlock_irqrestore(&fnic->fnic_lock, flags);
2525                return;
2526        }
2527
2528        /* Wait for firmware reset to complete */
2529        wait_for_completion_timeout(&remove_wait,
2530                                    msecs_to_jiffies(FNIC_RMDEVICE_TIMEOUT));
2531
2532        spin_lock_irqsave(&fnic->fnic_lock, flags);
2533        fnic->remove_wait = NULL;
2534        FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
2535                      "fnic_scsi_abort_io %s\n",
2536                      (fnic->state == FNIC_IN_ETH_MODE) ?
2537                      "SUCCESS" : "FAILED");
2538        spin_unlock_irqrestore(&fnic->fnic_lock, flags);
2539
2540}
2541
2542/*
2543 * This function is called from libFC to clean up driver IO state on link down
2544 */
2545void fnic_scsi_cleanup(struct fc_lport *lp)
2546{
2547        unsigned long flags;
2548        enum fnic_state old_state;
2549        struct fnic *fnic = lport_priv(lp);
2550
2551        /* issue fw reset */
2552retry_fw_reset:
2553        spin_lock_irqsave(&fnic->fnic_lock, flags);
2554        if (unlikely(fnic->state == FNIC_IN_FC_TRANS_ETH_MODE)) {
2555                /* fw reset is in progress, poll for its completion */
2556                spin_unlock_irqrestore(&fnic->fnic_lock, flags);
2557                msleep(100);
2558                goto retry_fw_reset;
2559        }
2560        old_state = fnic->state;
2561        fnic->state = FNIC_IN_FC_TRANS_ETH_MODE;
2562        fnic_update_mac_locked(fnic, fnic->ctlr.ctl_src_addr);
2563        spin_unlock_irqrestore(&fnic->fnic_lock, flags);
2564
2565        if (fnic_fw_reset_handler(fnic)) {
2566                spin_lock_irqsave(&fnic->fnic_lock, flags);
2567                if (fnic->state == FNIC_IN_FC_TRANS_ETH_MODE)
2568                        fnic->state = old_state;
2569                spin_unlock_irqrestore(&fnic->fnic_lock, flags);
2570        }
2571
2572}
2573
2574void fnic_empty_scsi_cleanup(struct fc_lport *lp)
2575{
2576}
2577
2578void fnic_exch_mgr_reset(struct fc_lport *lp, u32 sid, u32 did)
2579{
2580        struct fnic *fnic = lport_priv(lp);
2581
2582        /* Non-zero sid, nothing to do */
2583        if (sid)
2584                goto call_fc_exch_mgr_reset;
2585
2586        if (did) {
2587                fnic_rport_exch_reset(fnic, did);
2588                goto call_fc_exch_mgr_reset;
2589        }
2590
2591        /*
2592         * sid = 0, did = 0
2593         * link down or device being removed
2594         */
2595        if (!fnic->in_remove)
2596                fnic_scsi_cleanup(lp);
2597        else
2598                fnic_scsi_abort_io(lp);
2599
2600        /* call libFC exch mgr reset to reset its exchanges */
2601call_fc_exch_mgr_reset:
2602        fc_exch_mgr_reset(lp, sid, did);
2603
2604}
2605
2606/*
2607 * fnic_is_abts_pending() is a helper function that
2608 * walks the tag map to check if any IOs are pending; if there is one,
2609 * it returns 1 (true), otherwise 0 (false).
2610 * If @lr_sc is non-NULL, it checks only IOs on that particular LUN;
2611 * otherwise, it checks all IOs.
2612 */
2613int fnic_is_abts_pending(struct fnic *fnic, struct scsi_cmnd *lr_sc)
2614{
2615        int tag;
2616        struct fnic_io_req *io_req;
2617        spinlock_t *io_lock;
2618        unsigned long flags;
2619        int ret = 0;
2620        struct scsi_cmnd *sc;
2621        struct scsi_device *lun_dev = NULL;
2622
2623        if (lr_sc)
2624                lun_dev = lr_sc->device;
2625
2626        /* walk the tag map to check if IOs are still pending in fw */
2627        for (tag = 0; tag < fnic->fnic_max_tag_id; tag++) {
2628                sc = scsi_host_find_tag(fnic->lport->host, tag);
2629                /*
2630                 * ignore this lun reset cmd or cmds that do not belong to
2631                 * this lun
2632                 */
2633                if (!sc || (lr_sc && (sc->device != lun_dev || sc == lr_sc)))
2634                        continue;
2635
2636                io_lock = fnic_io_lock_hash(fnic, sc);
2637                spin_lock_irqsave(io_lock, flags);
2638
2639                io_req = (struct fnic_io_req *)CMD_SP(sc);
2640
2641                if (!io_req || (lr_sc && sc->device != lun_dev)) {
2642                        spin_unlock_irqrestore(io_lock, flags);
2643                        continue;
2644                }
2645
2646                /*
2647                 * Found IO that is still pending with firmware and
2648                 * belongs to the LUN that we are resetting
2649                 */
2650                FNIC_SCSI_DBG(KERN_INFO, fnic->lport->host,
2651                              "Found IO in %s on lun\n",
2652                              fnic_ioreq_state_to_str(CMD_STATE(sc)));
2653
2654                if (CMD_STATE(sc) == FNIC_IOREQ_ABTS_PENDING)
2655                        ret = 1;
2656                spin_unlock_irqrestore(io_lock, flags);
2657        }
2658
2659        return ret;
2660}
2661