linux/drivers/scsi/lpfc/lpfc_scsi.c
<<
>>
Prefs
   1/*******************************************************************
   2 * This file is part of the Emulex Linux Device Driver for         *
   3 * Fibre Channel Host Bus Adapters.                                *
   4 * Copyright (C) 2017-2021 Broadcom. All Rights Reserved. The term *
   5 * “Broadcom” refers to Broadcom Inc. and/or its subsidiaries.     *
   6 * Copyright (C) 2004-2016 Emulex.  All rights reserved.           *
   7 * EMULEX and SLI are trademarks of Emulex.                        *
   8 * www.broadcom.com                                                *
   9 * Portions Copyright (C) 2004-2005 Christoph Hellwig              *
  10 *                                                                 *
  11 * This program is free software; you can redistribute it and/or   *
  12 * modify it under the terms of version 2 of the GNU General       *
  13 * Public License as published by the Free Software Foundation.    *
  14 * This program is distributed in the hope that it will be useful. *
  15 * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND          *
  16 * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY,  *
  17 * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE      *
  18 * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD *
  19 * TO BE LEGALLY INVALID.  See the GNU General Public License for  *
  20 * more details, a copy of which can be found in the file COPYING  *
  21 * included with this package.                                     *
  22 *******************************************************************/
  23#include <linux/pci.h>
  24#include <linux/slab.h>
  25#include <linux/interrupt.h>
  26#include <linux/export.h>
  27#include <linux/delay.h>
  28#include <asm/unaligned.h>
  29#include <linux/t10-pi.h>
  30#include <linux/crc-t10dif.h>
  31#include <linux/blk-cgroup.h>
  32#include <net/checksum.h>
  33
  34#include <scsi/scsi.h>
  35#include <scsi/scsi_device.h>
  36#include <scsi/scsi_eh.h>
  37#include <scsi/scsi_host.h>
  38#include <scsi/scsi_tcq.h>
  39#include <scsi/scsi_transport_fc.h>
  40
  41#include "lpfc_version.h"
  42#include "lpfc_hw4.h"
  43#include "lpfc_hw.h"
  44#include "lpfc_sli.h"
  45#include "lpfc_sli4.h"
  46#include "lpfc_nl.h"
  47#include "lpfc_disc.h"
  48#include "lpfc.h"
  49#include "lpfc_scsi.h"
  50#include "lpfc_logmsg.h"
  51#include "lpfc_crtn.h"
  52#include "lpfc_vport.h"
  53
  54#define LPFC_RESET_WAIT  2
  55#define LPFC_ABORT_WAIT  2
  56
  57static char *dif_op_str[] = {
  58        "PROT_NORMAL",
  59        "PROT_READ_INSERT",
  60        "PROT_WRITE_STRIP",
  61        "PROT_READ_STRIP",
  62        "PROT_WRITE_INSERT",
  63        "PROT_READ_PASS",
  64        "PROT_WRITE_PASS",
  65};
  66
  67struct scsi_dif_tuple {
  68        __be16 guard_tag;       /* Checksum */
  69        __be16 app_tag;         /* Opaque storage */
  70        __be32 ref_tag;         /* Target LBA or indirect LBA */
  71};
  72
  73static struct lpfc_rport_data *
  74lpfc_rport_data_from_scsi_device(struct scsi_device *sdev)
  75{
  76        struct lpfc_vport *vport = (struct lpfc_vport *)sdev->host->hostdata;
  77
  78        if (vport->phba->cfg_fof)
  79                return ((struct lpfc_device_data *)sdev->hostdata)->rport_data;
  80        else
  81                return (struct lpfc_rport_data *)sdev->hostdata;
  82}
  83
  84static void
  85lpfc_release_scsi_buf_s4(struct lpfc_hba *phba, struct lpfc_io_buf *psb);
  86static void
  87lpfc_release_scsi_buf_s3(struct lpfc_hba *phba, struct lpfc_io_buf *psb);
  88static int
  89lpfc_prot_group_type(struct lpfc_hba *phba, struct scsi_cmnd *sc);
  90static void
  91lpfc_put_vmid_in_hashtable(struct lpfc_vport *vport, u32 hash,
  92                           struct lpfc_vmid *vmp);
  93static void lpfc_vmid_update_entry(struct lpfc_vport *vport, struct scsi_cmnd
  94                                   *cmd, struct lpfc_vmid *vmp,
  95                                   union lpfc_vmid_io_tag *tag);
  96static void lpfc_vmid_assign_cs_ctl(struct lpfc_vport *vport,
  97                                    struct lpfc_vmid *vmid);
  98
  99/**
 100 * lpfc_sli4_set_rsp_sgl_last - Set the last bit in the response sge.
 101 * @phba: Pointer to HBA object.
 102 * @lpfc_cmd: lpfc scsi command object pointer.
 103 *
 104 * This function is called from the lpfc_prep_task_mgmt_cmd function to
 105 * set the last bit in the response sge entry.
 106 **/
 107static void
 108lpfc_sli4_set_rsp_sgl_last(struct lpfc_hba *phba,
 109                                struct lpfc_io_buf *lpfc_cmd)
 110{
 111        struct sli4_sge *sgl = (struct sli4_sge *)lpfc_cmd->dma_sgl;
 112        if (sgl) {
 113                sgl += 1;
 114                sgl->word2 = le32_to_cpu(sgl->word2);
 115                bf_set(lpfc_sli4_sge_last, sgl, 1);
 116                sgl->word2 = cpu_to_le32(sgl->word2);
 117        }
 118}
 119
 120#define LPFC_INVALID_REFTAG ((u32)-1)
 121
 122/**
 123 * lpfc_update_stats - Update statistical data for the command completion
 124 * @vport: The virtual port on which this call is executing.
 125 * @lpfc_cmd: lpfc scsi command object pointer.
 126 *
 127 * This function is called when there is a command completion and this
 128 * function updates the statistical data for the command completion.
 129 **/
 130static void
 131lpfc_update_stats(struct lpfc_vport *vport, struct lpfc_io_buf *lpfc_cmd)
 132{
 133        struct lpfc_hba *phba = vport->phba;
 134        struct lpfc_rport_data *rdata;
 135        struct lpfc_nodelist *pnode;
 136        struct scsi_cmnd *cmd = lpfc_cmd->pCmd;
 137        unsigned long flags;
 138        struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
 139        unsigned long latency;
 140        int i;
 141
 142        if (!vport->stat_data_enabled ||
 143            vport->stat_data_blocked ||
 144            (cmd->result))
 145                return;
 146
 147        latency = jiffies_to_msecs((long)jiffies - (long)lpfc_cmd->start_time);
 148        rdata = lpfc_cmd->rdata;
 149        pnode = rdata->pnode;
 150
 151        spin_lock_irqsave(shost->host_lock, flags);
 152        if (!pnode ||
 153            !pnode->lat_data ||
 154            (phba->bucket_type == LPFC_NO_BUCKET)) {
 155                spin_unlock_irqrestore(shost->host_lock, flags);
 156                return;
 157        }
 158
 159        if (phba->bucket_type == LPFC_LINEAR_BUCKET) {
 160                i = (latency + phba->bucket_step - 1 - phba->bucket_base)/
 161                        phba->bucket_step;
 162                /* check array subscript bounds */
 163                if (i < 0)
 164                        i = 0;
 165                else if (i >= LPFC_MAX_BUCKET_COUNT)
 166                        i = LPFC_MAX_BUCKET_COUNT - 1;
 167        } else {
 168                for (i = 0; i < LPFC_MAX_BUCKET_COUNT-1; i++)
 169                        if (latency <= (phba->bucket_base +
 170                                ((1<<i)*phba->bucket_step)))
 171                                break;
 172        }
 173
 174        pnode->lat_data[i].cmd_count++;
 175        spin_unlock_irqrestore(shost->host_lock, flags);
 176}
 177
 178/**
 179 * lpfc_rampdown_queue_depth - Post RAMP_DOWN_QUEUE event to worker thread
 180 * @phba: The Hba for which this call is being executed.
 181 *
 182 * This routine is called when there is resource error in driver or firmware.
 183 * This routine posts WORKER_RAMP_DOWN_QUEUE event for @phba. This routine
 184 * posts at most 1 event each second. This routine wakes up worker thread of
 185 * @phba to process WORKER_RAM_DOWN_EVENT event.
 186 *
 187 * This routine should be called with no lock held.
 188 **/
 189void
 190lpfc_rampdown_queue_depth(struct lpfc_hba *phba)
 191{
 192        unsigned long flags;
 193        uint32_t evt_posted;
 194        unsigned long expires;
 195
 196        spin_lock_irqsave(&phba->hbalock, flags);
 197        atomic_inc(&phba->num_rsrc_err);
 198        phba->last_rsrc_error_time = jiffies;
 199
 200        expires = phba->last_ramp_down_time + QUEUE_RAMP_DOWN_INTERVAL;
 201        if (time_after(expires, jiffies)) {
 202                spin_unlock_irqrestore(&phba->hbalock, flags);
 203                return;
 204        }
 205
 206        phba->last_ramp_down_time = jiffies;
 207
 208        spin_unlock_irqrestore(&phba->hbalock, flags);
 209
 210        spin_lock_irqsave(&phba->pport->work_port_lock, flags);
 211        evt_posted = phba->pport->work_port_events & WORKER_RAMP_DOWN_QUEUE;
 212        if (!evt_posted)
 213                phba->pport->work_port_events |= WORKER_RAMP_DOWN_QUEUE;
 214        spin_unlock_irqrestore(&phba->pport->work_port_lock, flags);
 215
 216        if (!evt_posted)
 217                lpfc_worker_wake_up(phba);
 218        return;
 219}
 220
 221/**
 222 * lpfc_ramp_down_queue_handler - WORKER_RAMP_DOWN_QUEUE event handler
 223 * @phba: The Hba for which this call is being executed.
 224 *
 225 * This routine is called to  process WORKER_RAMP_DOWN_QUEUE event for worker
 226 * thread.This routine reduces queue depth for all scsi device on each vport
 227 * associated with @phba.
 228 **/
 229void
 230lpfc_ramp_down_queue_handler(struct lpfc_hba *phba)
 231{
 232        struct lpfc_vport **vports;
 233        struct Scsi_Host  *shost;
 234        struct scsi_device *sdev;
 235        unsigned long new_queue_depth;
 236        unsigned long num_rsrc_err, num_cmd_success;
 237        int i;
 238
 239        num_rsrc_err = atomic_read(&phba->num_rsrc_err);
 240        num_cmd_success = atomic_read(&phba->num_cmd_success);
 241
 242        /*
 243         * The error and success command counters are global per
 244         * driver instance.  If another handler has already
 245         * operated on this error event, just exit.
 246         */
 247        if (num_rsrc_err == 0)
 248                return;
 249
 250        vports = lpfc_create_vport_work_array(phba);
 251        if (vports != NULL)
 252                for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {
 253                        shost = lpfc_shost_from_vport(vports[i]);
 254                        shost_for_each_device(sdev, shost) {
 255                                new_queue_depth =
 256                                        sdev->queue_depth * num_rsrc_err /
 257                                        (num_rsrc_err + num_cmd_success);
 258                                if (!new_queue_depth)
 259                                        new_queue_depth = sdev->queue_depth - 1;
 260                                else
 261                                        new_queue_depth = sdev->queue_depth -
 262                                                                new_queue_depth;
 263                                scsi_change_queue_depth(sdev, new_queue_depth);
 264                        }
 265                }
 266        lpfc_destroy_vport_work_array(phba, vports);
 267        atomic_set(&phba->num_rsrc_err, 0);
 268        atomic_set(&phba->num_cmd_success, 0);
 269}
 270
 271/**
 272 * lpfc_scsi_dev_block - set all scsi hosts to block state
 273 * @phba: Pointer to HBA context object.
 274 *
 275 * This function walks vport list and set each SCSI host to block state
 276 * by invoking fc_remote_port_delete() routine. This function is invoked
 277 * with EEH when device's PCI slot has been permanently disabled.
 278 **/
 279void
 280lpfc_scsi_dev_block(struct lpfc_hba *phba)
 281{
 282        struct lpfc_vport **vports;
 283        struct Scsi_Host  *shost;
 284        struct scsi_device *sdev;
 285        struct fc_rport *rport;
 286        int i;
 287
 288        vports = lpfc_create_vport_work_array(phba);
 289        if (vports != NULL)
 290                for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {
 291                        shost = lpfc_shost_from_vport(vports[i]);
 292                        shost_for_each_device(sdev, shost) {
 293                                rport = starget_to_rport(scsi_target(sdev));
 294                                fc_remote_port_delete(rport);
 295                        }
 296                }
 297        lpfc_destroy_vport_work_array(phba, vports);
 298}
 299
 300/**
 301 * lpfc_new_scsi_buf_s3 - Scsi buffer allocator for HBA with SLI3 IF spec
 302 * @vport: The virtual port for which this call being executed.
 303 * @num_to_alloc: The requested number of buffers to allocate.
 304 *
 305 * This routine allocates a scsi buffer for device with SLI-3 interface spec,
 306 * the scsi buffer contains all the necessary information needed to initiate
 307 * a SCSI I/O. The non-DMAable buffer region contains information to build
 308 * the IOCB. The DMAable region contains memory for the FCP CMND, FCP RSP,
 309 * and the initial BPL. In addition to allocating memory, the FCP CMND and
 310 * FCP RSP BDEs are setup in the BPL and the BPL BDE is setup in the IOCB.
 311 *
 312 * Return codes:
 313 *   int - number of scsi buffers that were allocated.
 314 *   0 = failure, less than num_to_alloc is a partial failure.
 315 **/
 316static int
 317lpfc_new_scsi_buf_s3(struct lpfc_vport *vport, int num_to_alloc)
 318{
 319        struct lpfc_hba *phba = vport->phba;
 320        struct lpfc_io_buf *psb;
 321        struct ulp_bde64 *bpl;
 322        IOCB_t *iocb;
 323        dma_addr_t pdma_phys_fcp_cmd;
 324        dma_addr_t pdma_phys_fcp_rsp;
 325        dma_addr_t pdma_phys_sgl;
 326        uint16_t iotag;
 327        int bcnt, bpl_size;
 328
 329        bpl_size = phba->cfg_sg_dma_buf_size -
 330                (sizeof(struct fcp_cmnd) + sizeof(struct fcp_rsp));
 331
 332        lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP,
 333                         "9067 ALLOC %d scsi_bufs: %d (%d + %d + %d)\n",
 334                         num_to_alloc, phba->cfg_sg_dma_buf_size,
 335                         (int)sizeof(struct fcp_cmnd),
 336                         (int)sizeof(struct fcp_rsp), bpl_size);
 337
 338        for (bcnt = 0; bcnt < num_to_alloc; bcnt++) {
 339                psb = kzalloc(sizeof(struct lpfc_io_buf), GFP_KERNEL);
 340                if (!psb)
 341                        break;
 342
 343                /*
 344                 * Get memory from the pci pool to map the virt space to pci
 345                 * bus space for an I/O.  The DMA buffer includes space for the
 346                 * struct fcp_cmnd, struct fcp_rsp and the number of bde's
 347                 * necessary to support the sg_tablesize.
 348                 */
 349                psb->data = dma_pool_zalloc(phba->lpfc_sg_dma_buf_pool,
 350                                        GFP_KERNEL, &psb->dma_handle);
 351                if (!psb->data) {
 352                        kfree(psb);
 353                        break;
 354                }
 355
 356
 357                /* Allocate iotag for psb->cur_iocbq. */
 358                iotag = lpfc_sli_next_iotag(phba, &psb->cur_iocbq);
 359                if (iotag == 0) {
 360                        dma_pool_free(phba->lpfc_sg_dma_buf_pool,
 361                                      psb->data, psb->dma_handle);
 362                        kfree(psb);
 363                        break;
 364                }
 365                psb->cur_iocbq.iocb_flag |= LPFC_IO_FCP;
 366
 367                psb->fcp_cmnd = psb->data;
 368                psb->fcp_rsp = psb->data + sizeof(struct fcp_cmnd);
 369                psb->dma_sgl = psb->data + sizeof(struct fcp_cmnd) +
 370                        sizeof(struct fcp_rsp);
 371
 372                /* Initialize local short-hand pointers. */
 373                bpl = (struct ulp_bde64 *)psb->dma_sgl;
 374                pdma_phys_fcp_cmd = psb->dma_handle;
 375                pdma_phys_fcp_rsp = psb->dma_handle + sizeof(struct fcp_cmnd);
 376                pdma_phys_sgl = psb->dma_handle + sizeof(struct fcp_cmnd) +
 377                        sizeof(struct fcp_rsp);
 378
 379                /*
 380                 * The first two bdes are the FCP_CMD and FCP_RSP. The balance
 381                 * are sg list bdes.  Initialize the first two and leave the
 382                 * rest for queuecommand.
 383                 */
 384                bpl[0].addrHigh = le32_to_cpu(putPaddrHigh(pdma_phys_fcp_cmd));
 385                bpl[0].addrLow = le32_to_cpu(putPaddrLow(pdma_phys_fcp_cmd));
 386                bpl[0].tus.f.bdeSize = sizeof(struct fcp_cmnd);
 387                bpl[0].tus.f.bdeFlags = BUFF_TYPE_BDE_64;
 388                bpl[0].tus.w = le32_to_cpu(bpl[0].tus.w);
 389
 390                /* Setup the physical region for the FCP RSP */
 391                bpl[1].addrHigh = le32_to_cpu(putPaddrHigh(pdma_phys_fcp_rsp));
 392                bpl[1].addrLow = le32_to_cpu(putPaddrLow(pdma_phys_fcp_rsp));
 393                bpl[1].tus.f.bdeSize = sizeof(struct fcp_rsp);
 394                bpl[1].tus.f.bdeFlags = BUFF_TYPE_BDE_64;
 395                bpl[1].tus.w = le32_to_cpu(bpl[1].tus.w);
 396
 397                /*
 398                 * Since the IOCB for the FCP I/O is built into this
 399                 * lpfc_scsi_buf, initialize it with all known data now.
 400                 */
 401                iocb = &psb->cur_iocbq.iocb;
 402                iocb->un.fcpi64.bdl.ulpIoTag32 = 0;
 403                if ((phba->sli_rev == 3) &&
 404                                !(phba->sli3_options & LPFC_SLI3_BG_ENABLED)) {
 405                        /* fill in immediate fcp command BDE */
 406                        iocb->un.fcpi64.bdl.bdeFlags = BUFF_TYPE_BDE_IMMED;
 407                        iocb->un.fcpi64.bdl.bdeSize = sizeof(struct fcp_cmnd);
 408                        iocb->un.fcpi64.bdl.addrLow = offsetof(IOCB_t,
 409                                        unsli3.fcp_ext.icd);
 410                        iocb->un.fcpi64.bdl.addrHigh = 0;
 411                        iocb->ulpBdeCount = 0;
 412                        iocb->ulpLe = 0;
 413                        /* fill in response BDE */
 414                        iocb->unsli3.fcp_ext.rbde.tus.f.bdeFlags =
 415                                                        BUFF_TYPE_BDE_64;
 416                        iocb->unsli3.fcp_ext.rbde.tus.f.bdeSize =
 417                                sizeof(struct fcp_rsp);
 418                        iocb->unsli3.fcp_ext.rbde.addrLow =
 419                                putPaddrLow(pdma_phys_fcp_rsp);
 420                        iocb->unsli3.fcp_ext.rbde.addrHigh =
 421                                putPaddrHigh(pdma_phys_fcp_rsp);
 422                } else {
 423                        iocb->un.fcpi64.bdl.bdeFlags = BUFF_TYPE_BLP_64;
 424                        iocb->un.fcpi64.bdl.bdeSize =
 425                                        (2 * sizeof(struct ulp_bde64));
 426                        iocb->un.fcpi64.bdl.addrLow =
 427                                        putPaddrLow(pdma_phys_sgl);
 428                        iocb->un.fcpi64.bdl.addrHigh =
 429                                        putPaddrHigh(pdma_phys_sgl);
 430                        iocb->ulpBdeCount = 1;
 431                        iocb->ulpLe = 1;
 432                }
 433                iocb->ulpClass = CLASS3;
 434                psb->status = IOSTAT_SUCCESS;
 435                /* Put it back into the SCSI buffer list */
 436                psb->cur_iocbq.context1  = psb;
 437                spin_lock_init(&psb->buf_lock);
 438                lpfc_release_scsi_buf_s3(phba, psb);
 439
 440        }
 441
 442        return bcnt;
 443}
 444
 445/**
 446 * lpfc_sli4_vport_delete_fcp_xri_aborted -Remove all ndlp references for vport
 447 * @vport: pointer to lpfc vport data structure.
 448 *
 449 * This routine is invoked by the vport cleanup for deletions and the cleanup
 450 * for an ndlp on removal.
 451 **/
 452void
 453lpfc_sli4_vport_delete_fcp_xri_aborted(struct lpfc_vport *vport)
 454{
 455        struct lpfc_hba *phba = vport->phba;
 456        struct lpfc_io_buf *psb, *next_psb;
 457        struct lpfc_sli4_hdw_queue *qp;
 458        unsigned long iflag = 0;
 459        int idx;
 460
 461        if (!(vport->cfg_enable_fc4_type & LPFC_ENABLE_FCP))
 462                return;
 463
 464        spin_lock_irqsave(&phba->hbalock, iflag);
 465        for (idx = 0; idx < phba->cfg_hdw_queue; idx++) {
 466                qp = &phba->sli4_hba.hdwq[idx];
 467
 468                spin_lock(&qp->abts_io_buf_list_lock);
 469                list_for_each_entry_safe(psb, next_psb,
 470                                         &qp->lpfc_abts_io_buf_list, list) {
 471                        if (psb->cur_iocbq.iocb_flag & LPFC_IO_NVME)
 472                                continue;
 473
 474                        if (psb->rdata && psb->rdata->pnode &&
 475                            psb->rdata->pnode->vport == vport)
 476                                psb->rdata = NULL;
 477                }
 478                spin_unlock(&qp->abts_io_buf_list_lock);
 479        }
 480        spin_unlock_irqrestore(&phba->hbalock, iflag);
 481}
 482
 483/**
 484 * lpfc_sli4_io_xri_aborted - Fast-path process of fcp xri abort
 485 * @phba: pointer to lpfc hba data structure.
 486 * @axri: pointer to the fcp xri abort wcqe structure.
 487 * @idx: index into hdwq
 488 *
 489 * This routine is invoked by the worker thread to process a SLI4 fast-path
 490 * FCP or NVME aborted xri.
 491 **/
 492void
 493lpfc_sli4_io_xri_aborted(struct lpfc_hba *phba,
 494                         struct sli4_wcqe_xri_aborted *axri, int idx)
 495{
 496        u16 xri = 0;
 497        u16 rxid = 0;
 498        struct lpfc_io_buf *psb, *next_psb;
 499        struct lpfc_sli4_hdw_queue *qp;
 500        unsigned long iflag = 0;
 501        struct lpfc_iocbq *iocbq;
 502        int i;
 503        struct lpfc_nodelist *ndlp;
 504        int rrq_empty = 0;
 505        struct lpfc_sli_ring *pring = phba->sli4_hba.els_wq->pring;
 506        struct scsi_cmnd *cmd;
 507        int offline = 0;
 508
 509        if (!(phba->cfg_enable_fc4_type & LPFC_ENABLE_FCP))
 510                return;
 511        offline = pci_channel_offline(phba->pcidev);
 512        if (!offline) {
 513                xri = bf_get(lpfc_wcqe_xa_xri, axri);
 514                rxid = bf_get(lpfc_wcqe_xa_remote_xid, axri);
 515        }
 516        qp = &phba->sli4_hba.hdwq[idx];
 517        spin_lock_irqsave(&phba->hbalock, iflag);
 518        spin_lock(&qp->abts_io_buf_list_lock);
 519        list_for_each_entry_safe(psb, next_psb,
 520                &qp->lpfc_abts_io_buf_list, list) {
 521                if (offline)
 522                        xri = psb->cur_iocbq.sli4_xritag;
 523                if (psb->cur_iocbq.sli4_xritag == xri) {
 524                        list_del_init(&psb->list);
 525                        psb->flags &= ~LPFC_SBUF_XBUSY;
 526                        psb->status = IOSTAT_SUCCESS;
 527                        if (psb->cur_iocbq.iocb_flag & LPFC_IO_NVME) {
 528                                qp->abts_nvme_io_bufs--;
 529                                spin_unlock(&qp->abts_io_buf_list_lock);
 530                                spin_unlock_irqrestore(&phba->hbalock, iflag);
 531                                if (!offline) {
 532                                        lpfc_sli4_nvme_xri_aborted(phba, axri,
 533                                                                   psb);
 534                                        return;
 535                                }
 536                                lpfc_sli4_nvme_pci_offline_aborted(phba, psb);
 537                                spin_lock_irqsave(&phba->hbalock, iflag);
 538                                spin_lock(&qp->abts_io_buf_list_lock);
 539                                continue;
 540                        }
 541                        qp->abts_scsi_io_bufs--;
 542                        spin_unlock(&qp->abts_io_buf_list_lock);
 543
 544                        if (psb->rdata && psb->rdata->pnode)
 545                                ndlp = psb->rdata->pnode;
 546                        else
 547                                ndlp = NULL;
 548
 549                        rrq_empty = list_empty(&phba->active_rrq_list);
 550                        spin_unlock_irqrestore(&phba->hbalock, iflag);
 551                        if (ndlp && !offline) {
 552                                lpfc_set_rrq_active(phba, ndlp,
 553                                        psb->cur_iocbq.sli4_lxritag, rxid, 1);
 554                                lpfc_sli4_abts_err_handler(phba, ndlp, axri);
 555                        }
 556
 557                        if (phba->cfg_fcp_wait_abts_rsp || offline) {
 558                                spin_lock_irqsave(&psb->buf_lock, iflag);
 559                                cmd = psb->pCmd;
 560                                psb->pCmd = NULL;
 561                                spin_unlock_irqrestore(&psb->buf_lock, iflag);
 562
 563                                /* The sdev is not guaranteed to be valid post
 564                                 * scsi_done upcall.
 565                                 */
 566                                if (cmd)
 567                                        scsi_done(cmd);
 568
 569                                /*
 570                                 * We expect there is an abort thread waiting
 571                                 * for command completion wake up the thread.
 572                                 */
 573                                spin_lock_irqsave(&psb->buf_lock, iflag);
 574                                psb->cur_iocbq.iocb_flag &=
 575                                        ~LPFC_DRIVER_ABORTED;
 576                                if (psb->waitq)
 577                                        wake_up(psb->waitq);
 578                                spin_unlock_irqrestore(&psb->buf_lock, iflag);
 579                        }
 580
 581                        lpfc_release_scsi_buf_s4(phba, psb);
 582                        if (rrq_empty)
 583                                lpfc_worker_wake_up(phba);
 584                        if (!offline)
 585                                return;
 586                        spin_lock_irqsave(&phba->hbalock, iflag);
 587                        spin_lock(&qp->abts_io_buf_list_lock);
 588                        continue;
 589                }
 590        }
 591        spin_unlock(&qp->abts_io_buf_list_lock);
 592        if (!offline) {
 593                for (i = 1; i <= phba->sli.last_iotag; i++) {
 594                        iocbq = phba->sli.iocbq_lookup[i];
 595
 596                        if (!(iocbq->iocb_flag & LPFC_IO_FCP) ||
 597                            (iocbq->iocb_flag & LPFC_IO_LIBDFC))
 598                                continue;
 599                        if (iocbq->sli4_xritag != xri)
 600                                continue;
 601                        psb = container_of(iocbq, struct lpfc_io_buf, cur_iocbq);
 602                        psb->flags &= ~LPFC_SBUF_XBUSY;
 603                        spin_unlock_irqrestore(&phba->hbalock, iflag);
 604                        if (!list_empty(&pring->txq))
 605                                lpfc_worker_wake_up(phba);
 606                        return;
 607                }
 608        }
 609        spin_unlock_irqrestore(&phba->hbalock, iflag);
 610}
 611
 612/**
 613 * lpfc_get_scsi_buf_s3 - Get a scsi buffer from lpfc_scsi_buf_list of the HBA
 614 * @phba: The HBA for which this call is being executed.
 615 * @ndlp: pointer to a node-list data structure.
 616 * @cmnd: Pointer to scsi_cmnd data structure.
 617 *
 618 * This routine removes a scsi buffer from head of @phba lpfc_scsi_buf_list list
 619 * and returns to caller.
 620 *
 621 * Return codes:
 622 *   NULL - Error
 623 *   Pointer to lpfc_scsi_buf - Success
 624 **/
 625static struct lpfc_io_buf *
 626lpfc_get_scsi_buf_s3(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp,
 627                     struct scsi_cmnd *cmnd)
 628{
 629        struct lpfc_io_buf *lpfc_cmd = NULL;
 630        struct list_head *scsi_buf_list_get = &phba->lpfc_scsi_buf_list_get;
 631        unsigned long iflag = 0;
 632
 633        spin_lock_irqsave(&phba->scsi_buf_list_get_lock, iflag);
 634        list_remove_head(scsi_buf_list_get, lpfc_cmd, struct lpfc_io_buf,
 635                         list);
 636        if (!lpfc_cmd) {
 637                spin_lock(&phba->scsi_buf_list_put_lock);
 638                list_splice(&phba->lpfc_scsi_buf_list_put,
 639                            &phba->lpfc_scsi_buf_list_get);
 640                INIT_LIST_HEAD(&phba->lpfc_scsi_buf_list_put);
 641                list_remove_head(scsi_buf_list_get, lpfc_cmd,
 642                                 struct lpfc_io_buf, list);
 643                spin_unlock(&phba->scsi_buf_list_put_lock);
 644        }
 645        spin_unlock_irqrestore(&phba->scsi_buf_list_get_lock, iflag);
 646
 647        if (lpfc_ndlp_check_qdepth(phba, ndlp) && lpfc_cmd) {
 648                atomic_inc(&ndlp->cmd_pending);
 649                lpfc_cmd->flags |= LPFC_SBUF_BUMP_QDEPTH;
 650        }
 651        return  lpfc_cmd;
 652}
 653/**
 654 * lpfc_get_scsi_buf_s4 - Get a scsi buffer from io_buf_list of the HBA
 655 * @phba: The HBA for which this call is being executed.
 656 * @ndlp: pointer to a node-list data structure.
 657 * @cmnd: Pointer to scsi_cmnd data structure.
 658 *
 659 * This routine removes a scsi buffer from head of @hdwq io_buf_list
 660 * and returns to caller.
 661 *
 662 * Return codes:
 663 *   NULL - Error
 664 *   Pointer to lpfc_scsi_buf - Success
 665 **/
 666static struct lpfc_io_buf *
 667lpfc_get_scsi_buf_s4(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp,
 668                     struct scsi_cmnd *cmnd)
 669{
 670        struct lpfc_io_buf *lpfc_cmd;
 671        struct lpfc_sli4_hdw_queue *qp;
 672        struct sli4_sge *sgl;
 673        dma_addr_t pdma_phys_fcp_rsp;
 674        dma_addr_t pdma_phys_fcp_cmd;
 675        uint32_t cpu, idx;
 676        int tag;
 677        struct fcp_cmd_rsp_buf *tmp = NULL;
 678
 679        cpu = raw_smp_processor_id();
 680        if (cmnd && phba->cfg_fcp_io_sched == LPFC_FCP_SCHED_BY_HDWQ) {
 681                tag = blk_mq_unique_tag(scsi_cmd_to_rq(cmnd));
 682                idx = blk_mq_unique_tag_to_hwq(tag);
 683        } else {
 684                idx = phba->sli4_hba.cpu_map[cpu].hdwq;
 685        }
 686
 687        lpfc_cmd = lpfc_get_io_buf(phba, ndlp, idx,
 688                                   !phba->cfg_xri_rebalancing);
 689        if (!lpfc_cmd) {
 690                qp = &phba->sli4_hba.hdwq[idx];
 691                qp->empty_io_bufs++;
 692                return NULL;
 693        }
 694
 695        /* Setup key fields in buffer that may have been changed
 696         * if other protocols used this buffer.
 697         */
 698        lpfc_cmd->cur_iocbq.iocb_flag = LPFC_IO_FCP;
 699        lpfc_cmd->prot_seg_cnt = 0;
 700        lpfc_cmd->seg_cnt = 0;
 701        lpfc_cmd->timeout = 0;
 702        lpfc_cmd->flags = 0;
 703        lpfc_cmd->start_time = jiffies;
 704        lpfc_cmd->waitq = NULL;
 705        lpfc_cmd->cpu = cpu;
 706#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
 707        lpfc_cmd->prot_data_type = 0;
 708#endif
 709        tmp = lpfc_get_cmd_rsp_buf_per_hdwq(phba, lpfc_cmd);
 710        if (!tmp) {
 711                lpfc_release_io_buf(phba, lpfc_cmd, lpfc_cmd->hdwq);
 712                return NULL;
 713        }
 714
 715        lpfc_cmd->fcp_cmnd = tmp->fcp_cmnd;
 716        lpfc_cmd->fcp_rsp = tmp->fcp_rsp;
 717
 718        /*
 719         * The first two SGEs are the FCP_CMD and FCP_RSP.
 720         * The balance are sg list bdes. Initialize the
 721         * first two and leave the rest for queuecommand.
 722         */
 723        sgl = (struct sli4_sge *)lpfc_cmd->dma_sgl;
 724        pdma_phys_fcp_cmd = tmp->fcp_cmd_rsp_dma_handle;
 725        sgl->addr_hi = cpu_to_le32(putPaddrHigh(pdma_phys_fcp_cmd));
 726        sgl->addr_lo = cpu_to_le32(putPaddrLow(pdma_phys_fcp_cmd));
 727        sgl->word2 = le32_to_cpu(sgl->word2);
 728        bf_set(lpfc_sli4_sge_last, sgl, 0);
 729        sgl->word2 = cpu_to_le32(sgl->word2);
 730        sgl->sge_len = cpu_to_le32(sizeof(struct fcp_cmnd));
 731        sgl++;
 732
 733        /* Setup the physical region for the FCP RSP */
 734        pdma_phys_fcp_rsp = pdma_phys_fcp_cmd + sizeof(struct fcp_cmnd);
 735        sgl->addr_hi = cpu_to_le32(putPaddrHigh(pdma_phys_fcp_rsp));
 736        sgl->addr_lo = cpu_to_le32(putPaddrLow(pdma_phys_fcp_rsp));
 737        sgl->word2 = le32_to_cpu(sgl->word2);
 738        bf_set(lpfc_sli4_sge_last, sgl, 1);
 739        sgl->word2 = cpu_to_le32(sgl->word2);
 740        sgl->sge_len = cpu_to_le32(sizeof(struct fcp_rsp));
 741
 742        if (lpfc_ndlp_check_qdepth(phba, ndlp)) {
 743                atomic_inc(&ndlp->cmd_pending);
 744                lpfc_cmd->flags |= LPFC_SBUF_BUMP_QDEPTH;
 745        }
 746        return  lpfc_cmd;
 747}
 748/**
 749 * lpfc_get_scsi_buf - Get a scsi buffer from lpfc_scsi_buf_list of the HBA
 750 * @phba: The HBA for which this call is being executed.
 751 * @ndlp: pointer to a node-list data structure.
 752 * @cmnd: Pointer to scsi_cmnd data structure.
 753 *
 754 * This routine removes a scsi buffer from head of @phba lpfc_scsi_buf_list list
 755 * and returns to caller.
 756 *
 757 * Return codes:
 758 *   NULL - Error
 759 *   Pointer to lpfc_scsi_buf - Success
 760 **/
 761static struct lpfc_io_buf*
 762lpfc_get_scsi_buf(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp,
 763                  struct scsi_cmnd *cmnd)
 764{
 765        return  phba->lpfc_get_scsi_buf(phba, ndlp, cmnd);
 766}
 767
 768/**
 769 * lpfc_release_scsi_buf_s3 - Return a scsi buffer back to hba scsi buf list
 770 * @phba: The Hba for which this call is being executed.
 771 * @psb: The scsi buffer which is being released.
 772 *
 773 * This routine releases @psb scsi buffer by adding it to tail of @phba
 774 * lpfc_scsi_buf_list list.
 775 **/
 776static void
 777lpfc_release_scsi_buf_s3(struct lpfc_hba *phba, struct lpfc_io_buf *psb)
 778{
 779        unsigned long iflag = 0;
 780
 781        psb->seg_cnt = 0;
 782        psb->prot_seg_cnt = 0;
 783
 784        spin_lock_irqsave(&phba->scsi_buf_list_put_lock, iflag);
 785        psb->pCmd = NULL;
 786        psb->cur_iocbq.iocb_flag = LPFC_IO_FCP;
 787        list_add_tail(&psb->list, &phba->lpfc_scsi_buf_list_put);
 788        spin_unlock_irqrestore(&phba->scsi_buf_list_put_lock, iflag);
 789}
 790
 791/**
 792 * lpfc_release_scsi_buf_s4: Return a scsi buffer back to hba scsi buf list.
 793 * @phba: The Hba for which this call is being executed.
 794 * @psb: The scsi buffer which is being released.
 795 *
 796 * This routine releases @psb scsi buffer by adding it to tail of @hdwq
 797 * io_buf_list list. For SLI4 XRI's are tied to the scsi buffer
 798 * and cannot be reused for at least RA_TOV amount of time if it was
 799 * aborted.
 800 **/
 801static void
 802lpfc_release_scsi_buf_s4(struct lpfc_hba *phba, struct lpfc_io_buf *psb)
 803{
 804        struct lpfc_sli4_hdw_queue *qp;
 805        unsigned long iflag = 0;
 806
 807        psb->seg_cnt = 0;
 808        psb->prot_seg_cnt = 0;
 809
 810        qp = psb->hdwq;
 811        if (psb->flags & LPFC_SBUF_XBUSY) {
 812                spin_lock_irqsave(&qp->abts_io_buf_list_lock, iflag);
 813                if (!phba->cfg_fcp_wait_abts_rsp)
 814                        psb->pCmd = NULL;
 815                list_add_tail(&psb->list, &qp->lpfc_abts_io_buf_list);
 816                qp->abts_scsi_io_bufs++;
 817                spin_unlock_irqrestore(&qp->abts_io_buf_list_lock, iflag);
 818        } else {
 819                lpfc_release_io_buf(phba, (struct lpfc_io_buf *)psb, qp);
 820        }
 821}
 822
 823/**
 824 * lpfc_release_scsi_buf: Return a scsi buffer back to hba scsi buf list.
 825 * @phba: The Hba for which this call is being executed.
 826 * @psb: The scsi buffer which is being released.
 827 *
 828 * This routine releases @psb scsi buffer by adding it to tail of @phba
 829 * lpfc_scsi_buf_list list.
 830 **/
 831static void
 832lpfc_release_scsi_buf(struct lpfc_hba *phba, struct lpfc_io_buf *psb)
 833{
 834        if ((psb->flags & LPFC_SBUF_BUMP_QDEPTH) && psb->ndlp)
 835                atomic_dec(&psb->ndlp->cmd_pending);
 836
 837        psb->flags &= ~LPFC_SBUF_BUMP_QDEPTH;
 838        phba->lpfc_release_scsi_buf(phba, psb);
 839}
 840
 841/**
 842 * lpfc_fcpcmd_to_iocb - copy the fcp_cmd data into the IOCB
 843 * @data: A pointer to the immediate command data portion of the IOCB.
 844 * @fcp_cmnd: The FCP Command that is provided by the SCSI layer.
 845 *
 846 * The routine copies the entire FCP command from @fcp_cmnd to @data while
 847 * byte swapping the data to big endian format for transmission on the wire.
 848 **/
 849static void
 850lpfc_fcpcmd_to_iocb(u8 *data, struct fcp_cmnd *fcp_cmnd)
 851{
 852        int i, j;
 853
 854        for (i = 0, j = 0; i < sizeof(struct fcp_cmnd);
 855             i += sizeof(uint32_t), j++) {
 856                ((uint32_t *)data)[j] = cpu_to_be32(((uint32_t *)fcp_cmnd)[j]);
 857        }
 858}
 859
 860/**
 861 * lpfc_scsi_prep_dma_buf_s3 - DMA mapping for scsi buffer to SLI3 IF spec
 862 * @phba: The Hba for which this call is being executed.
 863 * @lpfc_cmd: The scsi buffer which is going to be mapped.
 864 *
 865 * This routine does the pci dma mapping for scatter-gather list of scsi cmnd
 866 * field of @lpfc_cmd for device with SLI-3 interface spec. This routine scans
 867 * through sg elements and format the bde. This routine also initializes all
 868 * IOCB fields which are dependent on scsi command request buffer.
 869 *
 870 * Return codes:
 871 *   1 - Error
 872 *   0 - Success
 873 **/
 874static int
 875lpfc_scsi_prep_dma_buf_s3(struct lpfc_hba *phba, struct lpfc_io_buf *lpfc_cmd)
 876{
 877        struct scsi_cmnd *scsi_cmnd = lpfc_cmd->pCmd;
 878        struct scatterlist *sgel = NULL;
 879        struct fcp_cmnd *fcp_cmnd = lpfc_cmd->fcp_cmnd;
 880        struct ulp_bde64 *bpl = (struct ulp_bde64 *)lpfc_cmd->dma_sgl;
 881        struct lpfc_iocbq *iocbq = &lpfc_cmd->cur_iocbq;
 882        IOCB_t *iocb_cmd = &lpfc_cmd->cur_iocbq.iocb;
 883        struct ulp_bde64 *data_bde = iocb_cmd->unsli3.fcp_ext.dbde;
 884        dma_addr_t physaddr;
 885        uint32_t num_bde = 0;
 886        int nseg, datadir = scsi_cmnd->sc_data_direction;
 887
 888        /*
 889         * There are three possibilities here - use scatter-gather segment, use
 890         * the single mapping, or neither.  Start the lpfc command prep by
 891         * bumping the bpl beyond the fcp_cmnd and fcp_rsp regions to the first
 892         * data bde entry.
 893         */
 894        bpl += 2;
 895        if (scsi_sg_count(scsi_cmnd)) {
 896                /*
 897                 * The driver stores the segment count returned from dma_map_sg
 898                 * because this a count of dma-mappings used to map the use_sg
 899                 * pages.  They are not guaranteed to be the same for those
 900                 * architectures that implement an IOMMU.
 901                 */
 902
 903                nseg = dma_map_sg(&phba->pcidev->dev, scsi_sglist(scsi_cmnd),
 904                                  scsi_sg_count(scsi_cmnd), datadir);
 905                if (unlikely(!nseg))
 906                        return 1;
 907
 908                lpfc_cmd->seg_cnt = nseg;
 909                if (lpfc_cmd->seg_cnt > phba->cfg_sg_seg_cnt) {
 910                        lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
 911                                        "9064 BLKGRD: %s: Too many sg segments"
 912                                        " from dma_map_sg.  Config %d, seg_cnt"
 913                                        " %d\n", __func__, phba->cfg_sg_seg_cnt,
 914                                        lpfc_cmd->seg_cnt);
 915                        WARN_ON_ONCE(lpfc_cmd->seg_cnt > phba->cfg_sg_seg_cnt);
 916                        lpfc_cmd->seg_cnt = 0;
 917                        scsi_dma_unmap(scsi_cmnd);
 918                        return 2;
 919                }
 920
 921                /*
 922                 * The driver established a maximum scatter-gather segment count
 923                 * during probe that limits the number of sg elements in any
 924                 * single scsi command.  Just run through the seg_cnt and format
 925                 * the bde's.
 926                 * When using SLI-3 the driver will try to fit all the BDEs into
 927                 * the IOCB. If it can't then the BDEs get added to a BPL as it
 928                 * does for SLI-2 mode.
 929                 */
 930                scsi_for_each_sg(scsi_cmnd, sgel, nseg, num_bde) {
 931                        physaddr = sg_dma_address(sgel);
 932                        if (phba->sli_rev == 3 &&
 933                            !(phba->sli3_options & LPFC_SLI3_BG_ENABLED) &&
 934                            !(iocbq->iocb_flag & DSS_SECURITY_OP) &&
 935                            nseg <= LPFC_EXT_DATA_BDE_COUNT) {
 936                                data_bde->tus.f.bdeFlags = BUFF_TYPE_BDE_64;
 937                                data_bde->tus.f.bdeSize = sg_dma_len(sgel);
 938                                data_bde->addrLow = putPaddrLow(physaddr);
 939                                data_bde->addrHigh = putPaddrHigh(physaddr);
 940                                data_bde++;
 941                        } else {
 942                                bpl->tus.f.bdeFlags = BUFF_TYPE_BDE_64;
 943                                bpl->tus.f.bdeSize = sg_dma_len(sgel);
 944                                bpl->tus.w = le32_to_cpu(bpl->tus.w);
 945                                bpl->addrLow =
 946                                        le32_to_cpu(putPaddrLow(physaddr));
 947                                bpl->addrHigh =
 948                                        le32_to_cpu(putPaddrHigh(physaddr));
 949                                bpl++;
 950                        }
 951                }
 952        }
 953
 954        /*
 955         * Finish initializing those IOCB fields that are dependent on the
 956         * scsi_cmnd request_buffer.  Note that for SLI-2 the bdeSize is
 957         * explicitly reinitialized and for SLI-3 the extended bde count is
 958         * explicitly reinitialized since all iocb memory resources are reused.
 959         */
 960        if (phba->sli_rev == 3 &&
 961            !(phba->sli3_options & LPFC_SLI3_BG_ENABLED) &&
 962            !(iocbq->iocb_flag & DSS_SECURITY_OP)) {
 963                if (num_bde > LPFC_EXT_DATA_BDE_COUNT) {
 964                        /*
 965                         * The extended IOCB format can only fit 3 BDE or a BPL.
 966                         * This I/O has more than 3 BDE so the 1st data bde will
 967                         * be a BPL that is filled in here.
 968                         */
 969                        physaddr = lpfc_cmd->dma_handle;
 970                        data_bde->tus.f.bdeFlags = BUFF_TYPE_BLP_64;
 971                        data_bde->tus.f.bdeSize = (num_bde *
 972                                                   sizeof(struct ulp_bde64));
 973                        physaddr += (sizeof(struct fcp_cmnd) +
 974                                     sizeof(struct fcp_rsp) +
 975                                     (2 * sizeof(struct ulp_bde64)));
 976                        data_bde->addrHigh = putPaddrHigh(physaddr);
 977                        data_bde->addrLow = putPaddrLow(physaddr);
 978                        /* ebde count includes the response bde and data bpl */
 979                        iocb_cmd->unsli3.fcp_ext.ebde_count = 2;
 980                } else {
 981                        /* ebde count includes the response bde and data bdes */
 982                        iocb_cmd->unsli3.fcp_ext.ebde_count = (num_bde + 1);
 983                }
 984        } else {
 985                iocb_cmd->un.fcpi64.bdl.bdeSize =
 986                        ((num_bde + 2) * sizeof(struct ulp_bde64));
 987                iocb_cmd->unsli3.fcp_ext.ebde_count = (num_bde + 1);
 988        }
 989        fcp_cmnd->fcpDl = cpu_to_be32(scsi_bufflen(scsi_cmnd));
 990
 991        /*
 992         * Due to difference in data length between DIF/non-DIF paths,
 993         * we need to set word 4 of IOCB here
 994         */
 995        iocb_cmd->un.fcpi.fcpi_parm = scsi_bufflen(scsi_cmnd);
 996        lpfc_fcpcmd_to_iocb(iocb_cmd->unsli3.fcp_ext.icd, fcp_cmnd);
 997        return 0;
 998}
 999
1000#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
1001
1002/* Return BG_ERR_INIT if error injection is detected by Initiator */
1003#define BG_ERR_INIT     0x1
1004/* Return BG_ERR_TGT if error injection is detected by Target */
1005#define BG_ERR_TGT      0x2
1006/* Return BG_ERR_SWAP if swapping CSUM<-->CRC is required for error injection */
1007#define BG_ERR_SWAP     0x10
1008/*
1009 * Return BG_ERR_CHECK if disabling Guard/Ref/App checking is required for
1010 * error injection
1011 */
1012#define BG_ERR_CHECK    0x20
1013
1014/**
1015 * lpfc_bg_err_inject - Determine if we should inject an error
1016 * @phba: The Hba for which this call is being executed.
1017 * @sc: The SCSI command to examine
1018 * @reftag: (out) BlockGuard reference tag for transmitted data
1019 * @apptag: (out) BlockGuard application tag for transmitted data
1020 * @new_guard: (in) Value to replace CRC with if needed
1021 *
1022 * Returns BG_ERR_* bit mask or 0 if request ignored
1023 **/
1024static int
1025lpfc_bg_err_inject(struct lpfc_hba *phba, struct scsi_cmnd *sc,
1026                uint32_t *reftag, uint16_t *apptag, uint32_t new_guard)
1027{
1028        struct scatterlist *sgpe; /* s/g prot entry */
1029        struct lpfc_io_buf *lpfc_cmd = NULL;
1030        struct scsi_dif_tuple *src = NULL;
1031        struct lpfc_nodelist *ndlp;
1032        struct lpfc_rport_data *rdata;
1033        uint32_t op = scsi_get_prot_op(sc);
1034        uint32_t blksize;
1035        uint32_t numblks;
1036        u32 lba;
1037        int rc = 0;
1038        int blockoff = 0;
1039
1040        if (op == SCSI_PROT_NORMAL)
1041                return 0;
1042
1043        sgpe = scsi_prot_sglist(sc);
1044        lba = scsi_prot_ref_tag(sc);
1045        if (lba == LPFC_INVALID_REFTAG)
1046                return 0;
1047
1048        /* First check if we need to match the LBA */
1049        if (phba->lpfc_injerr_lba != LPFC_INJERR_LBA_OFF) {
1050                blksize = scsi_prot_interval(sc);
1051                numblks = (scsi_bufflen(sc) + blksize - 1) / blksize;
1052
1053                /* Make sure we have the right LBA if one is specified */
1054                if (phba->lpfc_injerr_lba < (u64)lba ||
1055                    (phba->lpfc_injerr_lba >= (u64)(lba + numblks)))
1056                        return 0;
1057                if (sgpe) {
1058                        blockoff = phba->lpfc_injerr_lba - (u64)lba;
1059                        numblks = sg_dma_len(sgpe) /
1060                                sizeof(struct scsi_dif_tuple);
1061                        if (numblks < blockoff)
1062                                blockoff = numblks;
1063                }
1064        }
1065
1066        /* Next check if we need to match the remote NPortID or WWPN */
1067        rdata = lpfc_rport_data_from_scsi_device(sc->device);
1068        if (rdata && rdata->pnode) {
1069                ndlp = rdata->pnode;
1070
1071                /* Make sure we have the right NPortID if one is specified */
1072                if (phba->lpfc_injerr_nportid  &&
1073                        (phba->lpfc_injerr_nportid != ndlp->nlp_DID))
1074                        return 0;
1075
1076                /*
1077                 * Make sure we have the right WWPN if one is specified.
1078                 * wwn[0] should be a non-zero NAA in a good WWPN.
1079                 */
1080                if (phba->lpfc_injerr_wwpn.u.wwn[0]  &&
1081                        (memcmp(&ndlp->nlp_portname, &phba->lpfc_injerr_wwpn,
1082                                sizeof(struct lpfc_name)) != 0))
1083                        return 0;
1084        }
1085
1086        /* Setup a ptr to the protection data if the SCSI host provides it */
1087        if (sgpe) {
1088                src = (struct scsi_dif_tuple *)sg_virt(sgpe);
1089                src += blockoff;
1090                lpfc_cmd = (struct lpfc_io_buf *)sc->host_scribble;
1091        }
1092
1093        /* Should we change the Reference Tag */
1094        if (reftag) {
1095                if (phba->lpfc_injerr_wref_cnt) {
1096                        switch (op) {
1097                        case SCSI_PROT_WRITE_PASS:
1098                                if (src) {
1099                                        /*
1100                                         * For WRITE_PASS, force the error
1101                                         * to be sent on the wire. It should
1102                                         * be detected by the Target.
1103                                         * If blockoff != 0 error will be
1104                                         * inserted in middle of the IO.
1105                                         */
1106
1107                                        lpfc_printf_log(phba, KERN_ERR,
1108                                                        LOG_TRACE_EVENT,
1109                                        "9076 BLKGRD: Injecting reftag error: "
1110                                        "write lba x%lx + x%x oldrefTag x%x\n",
1111                                        (unsigned long)lba, blockoff,
1112                                        be32_to_cpu(src->ref_tag));
1113
1114                                        /*
1115                                         * Save the old ref_tag so we can
1116                                         * restore it on completion.
1117                                         */
1118                                        if (lpfc_cmd) {
1119                                                lpfc_cmd->prot_data_type =
1120                                                        LPFC_INJERR_REFTAG;
1121                                                lpfc_cmd->prot_data_segment =
1122                                                        src;
1123                                                lpfc_cmd->prot_data =
1124                                                        src->ref_tag;
1125                                        }
1126                                        src->ref_tag = cpu_to_be32(0xDEADBEEF);
1127                                        phba->lpfc_injerr_wref_cnt--;
1128                                        if (phba->lpfc_injerr_wref_cnt == 0) {
1129                                                phba->lpfc_injerr_nportid = 0;
1130                                                phba->lpfc_injerr_lba =
1131                                                        LPFC_INJERR_LBA_OFF;
1132                                                memset(&phba->lpfc_injerr_wwpn,
1133                                                  0, sizeof(struct lpfc_name));
1134                                        }
1135                                        rc = BG_ERR_TGT | BG_ERR_CHECK;
1136
1137                                        break;
1138                                }
1139                                fallthrough;
1140                        case SCSI_PROT_WRITE_INSERT:
1141                                /*
1142                                 * For WRITE_INSERT, force the error
1143                                 * to be sent on the wire. It should be
1144                                 * detected by the Target.
1145                                 */
1146                                /* DEADBEEF will be the reftag on the wire */
1147                                *reftag = 0xDEADBEEF;
1148                                phba->lpfc_injerr_wref_cnt--;
1149                                if (phba->lpfc_injerr_wref_cnt == 0) {
1150                                        phba->lpfc_injerr_nportid = 0;
1151                                        phba->lpfc_injerr_lba =
1152                                        LPFC_INJERR_LBA_OFF;
1153                                        memset(&phba->lpfc_injerr_wwpn,
1154                                                0, sizeof(struct lpfc_name));
1155                                }
1156                                rc = BG_ERR_TGT | BG_ERR_CHECK;
1157
1158                                lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
1159                                        "9078 BLKGRD: Injecting reftag error: "
1160                                        "write lba x%lx\n", (unsigned long)lba);
1161                                break;
1162                        case SCSI_PROT_WRITE_STRIP:
1163                                /*
1164                                 * For WRITE_STRIP and WRITE_PASS,
1165                                 * force the error on data
1166                                 * being copied from SLI-Host to SLI-Port.
1167                                 */
1168                                *reftag = 0xDEADBEEF;
1169                                phba->lpfc_injerr_wref_cnt--;
1170                                if (phba->lpfc_injerr_wref_cnt == 0) {
1171                                        phba->lpfc_injerr_nportid = 0;
1172                                        phba->lpfc_injerr_lba =
1173                                                LPFC_INJERR_LBA_OFF;
1174                                        memset(&phba->lpfc_injerr_wwpn,
1175                                                0, sizeof(struct lpfc_name));
1176                                }
1177                                rc = BG_ERR_INIT;
1178
1179                                lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
1180                                        "9077 BLKGRD: Injecting reftag error: "
1181                                        "write lba x%lx\n", (unsigned long)lba);
1182                                break;
1183                        }
1184                }
1185                if (phba->lpfc_injerr_rref_cnt) {
1186                        switch (op) {
1187                        case SCSI_PROT_READ_INSERT:
1188                        case SCSI_PROT_READ_STRIP:
1189                        case SCSI_PROT_READ_PASS:
1190                                /*
1191                                 * For READ_STRIP and READ_PASS, force the
1192                                 * error on data being read off the wire. It
1193                                 * should force an IO error to the driver.
1194                                 */
1195                                *reftag = 0xDEADBEEF;
1196                                phba->lpfc_injerr_rref_cnt--;
1197                                if (phba->lpfc_injerr_rref_cnt == 0) {
1198                                        phba->lpfc_injerr_nportid = 0;
1199                                        phba->lpfc_injerr_lba =
1200                                                LPFC_INJERR_LBA_OFF;
1201                                        memset(&phba->lpfc_injerr_wwpn,
1202                                                0, sizeof(struct lpfc_name));
1203                                }
1204                                rc = BG_ERR_INIT;
1205
1206                                lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
1207                                        "9079 BLKGRD: Injecting reftag error: "
1208                                        "read lba x%lx\n", (unsigned long)lba);
1209                                break;
1210                        }
1211                }
1212        }
1213
1214        /* Should we change the Application Tag */
1215        if (apptag) {
1216                if (phba->lpfc_injerr_wapp_cnt) {
1217                        switch (op) {
1218                        case SCSI_PROT_WRITE_PASS:
1219                                if (src) {
1220                                        /*
1221                                         * For WRITE_PASS, force the error
1222                                         * to be sent on the wire. It should
1223                                         * be detected by the Target.
1224                                         * If blockoff != 0 error will be
1225                                         * inserted in middle of the IO.
1226                                         */
1227
1228                                        lpfc_printf_log(phba, KERN_ERR,
1229                                                        LOG_TRACE_EVENT,
1230                                        "9080 BLKGRD: Injecting apptag error: "
1231                                        "write lba x%lx + x%x oldappTag x%x\n",
1232                                        (unsigned long)lba, blockoff,
1233                                        be16_to_cpu(src->app_tag));
1234
1235                                        /*
1236                                         * Save the old app_tag so we can
1237                                         * restore it on completion.
1238                                         */
1239                                        if (lpfc_cmd) {
1240                                                lpfc_cmd->prot_data_type =
1241                                                        LPFC_INJERR_APPTAG;
1242                                                lpfc_cmd->prot_data_segment =
1243                                                        src;
1244                                                lpfc_cmd->prot_data =
1245                                                        src->app_tag;
1246                                        }
1247                                        src->app_tag = cpu_to_be16(0xDEAD);
1248                                        phba->lpfc_injerr_wapp_cnt--;
1249                                        if (phba->lpfc_injerr_wapp_cnt == 0) {
1250                                                phba->lpfc_injerr_nportid = 0;
1251                                                phba->lpfc_injerr_lba =
1252                                                        LPFC_INJERR_LBA_OFF;
1253                                                memset(&phba->lpfc_injerr_wwpn,
1254                                                  0, sizeof(struct lpfc_name));
1255                                        }
1256                                        rc = BG_ERR_TGT | BG_ERR_CHECK;
1257                                        break;
1258                                }
1259                                fallthrough;
1260                        case SCSI_PROT_WRITE_INSERT:
1261                                /*
1262                                 * For WRITE_INSERT, force the
1263                                 * error to be sent on the wire. It should be
1264                                 * detected by the Target.
1265                                 */
1266                                /* DEAD will be the apptag on the wire */
1267                                *apptag = 0xDEAD;
1268                                phba->lpfc_injerr_wapp_cnt--;
1269                                if (phba->lpfc_injerr_wapp_cnt == 0) {
1270                                        phba->lpfc_injerr_nportid = 0;
1271                                        phba->lpfc_injerr_lba =
1272                                                LPFC_INJERR_LBA_OFF;
1273                                        memset(&phba->lpfc_injerr_wwpn,
1274                                                0, sizeof(struct lpfc_name));
1275                                }
1276                                rc = BG_ERR_TGT | BG_ERR_CHECK;
1277
1278                                lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
1279                                        "0813 BLKGRD: Injecting apptag error: "
1280                                        "write lba x%lx\n", (unsigned long)lba);
1281                                break;
1282                        case SCSI_PROT_WRITE_STRIP:
1283                                /*
1284                                 * For WRITE_STRIP and WRITE_PASS,
1285                                 * force the error on data
1286                                 * being copied from SLI-Host to SLI-Port.
1287                                 */
1288                                *apptag = 0xDEAD;
1289                                phba->lpfc_injerr_wapp_cnt--;
1290                                if (phba->lpfc_injerr_wapp_cnt == 0) {
1291                                        phba->lpfc_injerr_nportid = 0;
1292                                        phba->lpfc_injerr_lba =
1293                                                LPFC_INJERR_LBA_OFF;
1294                                        memset(&phba->lpfc_injerr_wwpn,
1295                                                0, sizeof(struct lpfc_name));
1296                                }
1297                                rc = BG_ERR_INIT;
1298
1299                                lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
1300                                        "0812 BLKGRD: Injecting apptag error: "
1301                                        "write lba x%lx\n", (unsigned long)lba);
1302                                break;
1303                        }
1304                }
1305                if (phba->lpfc_injerr_rapp_cnt) {
1306                        switch (op) {
1307                        case SCSI_PROT_READ_INSERT:
1308                        case SCSI_PROT_READ_STRIP:
1309                        case SCSI_PROT_READ_PASS:
1310                                /*
1311                                 * For READ_STRIP and READ_PASS, force the
1312                                 * error on data being read off the wire. It
1313                                 * should force an IO error to the driver.
1314                                 */
1315                                *apptag = 0xDEAD;
1316                                phba->lpfc_injerr_rapp_cnt--;
1317                                if (phba->lpfc_injerr_rapp_cnt == 0) {
1318                                        phba->lpfc_injerr_nportid = 0;
1319                                        phba->lpfc_injerr_lba =
1320                                                LPFC_INJERR_LBA_OFF;
1321                                        memset(&phba->lpfc_injerr_wwpn,
1322                                                0, sizeof(struct lpfc_name));
1323                                }
1324                                rc = BG_ERR_INIT;
1325
1326                                lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
1327                                        "0814 BLKGRD: Injecting apptag error: "
1328                                        "read lba x%lx\n", (unsigned long)lba);
1329                                break;
1330                        }
1331                }
1332        }
1333
1334
1335        /* Should we change the Guard Tag */
1336        if (new_guard) {
1337                if (phba->lpfc_injerr_wgrd_cnt) {
1338                        switch (op) {
1339                        case SCSI_PROT_WRITE_PASS:
1340                                rc = BG_ERR_CHECK;
1341                                fallthrough;
1342
1343                        case SCSI_PROT_WRITE_INSERT:
1344                                /*
1345                                 * For WRITE_INSERT, force the
1346                                 * error to be sent on the wire. It should be
1347                                 * detected by the Target.
1348                                 */
1349                                phba->lpfc_injerr_wgrd_cnt--;
1350                                if (phba->lpfc_injerr_wgrd_cnt == 0) {
1351                                        phba->lpfc_injerr_nportid = 0;
1352                                        phba->lpfc_injerr_lba =
1353                                                LPFC_INJERR_LBA_OFF;
1354                                        memset(&phba->lpfc_injerr_wwpn,
1355                                                0, sizeof(struct lpfc_name));
1356                                }
1357
1358                                rc |= BG_ERR_TGT | BG_ERR_SWAP;
1359                                /* Signals the caller to swap CRC->CSUM */
1360
1361                                lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
1362                                        "0817 BLKGRD: Injecting guard error: "
1363                                        "write lba x%lx\n", (unsigned long)lba);
1364                                break;
1365                        case SCSI_PROT_WRITE_STRIP:
1366                                /*
1367                                 * For WRITE_STRIP and WRITE_PASS,
1368                                 * force the error on data
1369                                 * being copied from SLI-Host to SLI-Port.
1370                                 */
1371                                phba->lpfc_injerr_wgrd_cnt--;
1372                                if (phba->lpfc_injerr_wgrd_cnt == 0) {
1373                                        phba->lpfc_injerr_nportid = 0;
1374                                        phba->lpfc_injerr_lba =
1375                                                LPFC_INJERR_LBA_OFF;
1376                                        memset(&phba->lpfc_injerr_wwpn,
1377                                                0, sizeof(struct lpfc_name));
1378                                }
1379
1380                                rc = BG_ERR_INIT | BG_ERR_SWAP;
1381                                /* Signals the caller to swap CRC->CSUM */
1382
1383                                lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
1384                                        "0816 BLKGRD: Injecting guard error: "
1385                                        "write lba x%lx\n", (unsigned long)lba);
1386                                break;
1387                        }
1388                }
1389                if (phba->lpfc_injerr_rgrd_cnt) {
1390                        switch (op) {
1391                        case SCSI_PROT_READ_INSERT:
1392                        case SCSI_PROT_READ_STRIP:
1393                        case SCSI_PROT_READ_PASS:
1394                                /*
1395                                 * For READ_STRIP and READ_PASS, force the
1396                                 * error on data being read off the wire. It
1397                                 * should force an IO error to the driver.
1398                                 */
1399                                phba->lpfc_injerr_rgrd_cnt--;
1400                                if (phba->lpfc_injerr_rgrd_cnt == 0) {
1401                                        phba->lpfc_injerr_nportid = 0;
1402                                        phba->lpfc_injerr_lba =
1403                                                LPFC_INJERR_LBA_OFF;
1404                                        memset(&phba->lpfc_injerr_wwpn,
1405                                                0, sizeof(struct lpfc_name));
1406                                }
1407
1408                                rc = BG_ERR_INIT | BG_ERR_SWAP;
1409                                /* Signals the caller to swap CRC->CSUM */
1410
1411                                lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
1412                                        "0818 BLKGRD: Injecting guard error: "
1413                                        "read lba x%lx\n", (unsigned long)lba);
1414                        }
1415                }
1416        }
1417
1418        return rc;
1419}
1420#endif
1421
1422/**
1423 * lpfc_sc_to_bg_opcodes - Determine the BlockGuard opcodes to be used with
1424 * the specified SCSI command.
1425 * @phba: The Hba for which this call is being executed.
1426 * @sc: The SCSI command to examine
1427 * @txop: (out) BlockGuard operation for transmitted data
1428 * @rxop: (out) BlockGuard operation for received data
1429 *
1430 * Returns: zero on success; non-zero if tx and/or rx op cannot be determined
1431 *
1432 **/
1433static int
1434lpfc_sc_to_bg_opcodes(struct lpfc_hba *phba, struct scsi_cmnd *sc,
1435                uint8_t *txop, uint8_t *rxop)
1436{
1437        uint8_t ret = 0;
1438
1439        if (sc->prot_flags & SCSI_PROT_IP_CHECKSUM) {
1440                switch (scsi_get_prot_op(sc)) {
1441                case SCSI_PROT_READ_INSERT:
1442                case SCSI_PROT_WRITE_STRIP:
1443                        *rxop = BG_OP_IN_NODIF_OUT_CSUM;
1444                        *txop = BG_OP_IN_CSUM_OUT_NODIF;
1445                        break;
1446
1447                case SCSI_PROT_READ_STRIP:
1448                case SCSI_PROT_WRITE_INSERT:
1449                        *rxop = BG_OP_IN_CRC_OUT_NODIF;
1450                        *txop = BG_OP_IN_NODIF_OUT_CRC;
1451                        break;
1452
1453                case SCSI_PROT_READ_PASS:
1454                case SCSI_PROT_WRITE_PASS:
1455                        *rxop = BG_OP_IN_CRC_OUT_CSUM;
1456                        *txop = BG_OP_IN_CSUM_OUT_CRC;
1457                        break;
1458
1459                case SCSI_PROT_NORMAL:
1460                default:
1461                        lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
1462                                "9063 BLKGRD: Bad op/guard:%d/IP combination\n",
1463                                        scsi_get_prot_op(sc));
1464                        ret = 1;
1465                        break;
1466
1467                }
1468        } else {
1469                switch (scsi_get_prot_op(sc)) {
1470                case SCSI_PROT_READ_STRIP:
1471                case SCSI_PROT_WRITE_INSERT:
1472                        *rxop = BG_OP_IN_CRC_OUT_NODIF;
1473                        *txop = BG_OP_IN_NODIF_OUT_CRC;
1474                        break;
1475
1476                case SCSI_PROT_READ_PASS:
1477                case SCSI_PROT_WRITE_PASS:
1478                        *rxop = BG_OP_IN_CRC_OUT_CRC;
1479                        *txop = BG_OP_IN_CRC_OUT_CRC;
1480                        break;
1481
1482                case SCSI_PROT_READ_INSERT:
1483                case SCSI_PROT_WRITE_STRIP:
1484                        *rxop = BG_OP_IN_NODIF_OUT_CRC;
1485                        *txop = BG_OP_IN_CRC_OUT_NODIF;
1486                        break;
1487
1488                case SCSI_PROT_NORMAL:
1489                default:
1490                        lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
1491                                "9075 BLKGRD: Bad op/guard:%d/CRC combination\n",
1492                                        scsi_get_prot_op(sc));
1493                        ret = 1;
1494                        break;
1495                }
1496        }
1497
1498        return ret;
1499}
1500
1501#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
1502/**
1503 * lpfc_bg_err_opcodes - reDetermine the BlockGuard opcodes to be used with
1504 * the specified SCSI command in order to force a guard tag error.
1505 * @phba: The Hba for which this call is being executed.
1506 * @sc: The SCSI command to examine
1507 * @txop: (out) BlockGuard operation for transmitted data
1508 * @rxop: (out) BlockGuard operation for received data
1509 *
1510 * Returns: zero on success; non-zero if tx and/or rx op cannot be determined
1511 *
1512 **/
1513static int
1514lpfc_bg_err_opcodes(struct lpfc_hba *phba, struct scsi_cmnd *sc,
1515                uint8_t *txop, uint8_t *rxop)
1516{
1517
1518        if (sc->prot_flags & SCSI_PROT_IP_CHECKSUM) {
1519                switch (scsi_get_prot_op(sc)) {
1520                case SCSI_PROT_READ_INSERT:
1521                case SCSI_PROT_WRITE_STRIP:
1522                        *rxop = BG_OP_IN_NODIF_OUT_CRC;
1523                        *txop = BG_OP_IN_CRC_OUT_NODIF;
1524                        break;
1525
1526                case SCSI_PROT_READ_STRIP:
1527                case SCSI_PROT_WRITE_INSERT:
1528                        *rxop = BG_OP_IN_CSUM_OUT_NODIF;
1529                        *txop = BG_OP_IN_NODIF_OUT_CSUM;
1530                        break;
1531
1532                case SCSI_PROT_READ_PASS:
1533                case SCSI_PROT_WRITE_PASS:
1534                        *rxop = BG_OP_IN_CSUM_OUT_CRC;
1535                        *txop = BG_OP_IN_CRC_OUT_CSUM;
1536                        break;
1537
1538                case SCSI_PROT_NORMAL:
1539                default:
1540                        break;
1541
1542                }
1543        } else {
1544                switch (scsi_get_prot_op(sc)) {
1545                case SCSI_PROT_READ_STRIP:
1546                case SCSI_PROT_WRITE_INSERT:
1547                        *rxop = BG_OP_IN_CSUM_OUT_NODIF;
1548                        *txop = BG_OP_IN_NODIF_OUT_CSUM;
1549                        break;
1550
1551                case SCSI_PROT_READ_PASS:
1552                case SCSI_PROT_WRITE_PASS:
1553                        *rxop = BG_OP_IN_CSUM_OUT_CSUM;
1554                        *txop = BG_OP_IN_CSUM_OUT_CSUM;
1555                        break;
1556
1557                case SCSI_PROT_READ_INSERT:
1558                case SCSI_PROT_WRITE_STRIP:
1559                        *rxop = BG_OP_IN_NODIF_OUT_CSUM;
1560                        *txop = BG_OP_IN_CSUM_OUT_NODIF;
1561                        break;
1562
1563                case SCSI_PROT_NORMAL:
1564                default:
1565                        break;
1566                }
1567        }
1568
1569        return 0;
1570}
1571#endif
1572
1573/**
1574 * lpfc_bg_setup_bpl - Setup BlockGuard BPL with no protection data
1575 * @phba: The Hba for which this call is being executed.
1576 * @sc: pointer to scsi command we're working on
1577 * @bpl: pointer to buffer list for protection groups
1578 * @datasegcnt: number of segments of data that have been dma mapped
1579 *
1580 * This function sets up BPL buffer list for protection groups of
1581 * type LPFC_PG_TYPE_NO_DIF
1582 *
1583 * This is usually used when the HBA is instructed to generate
1584 * DIFs and insert them into data stream (or strip DIF from
1585 * incoming data stream)
1586 *
1587 * The buffer list consists of just one protection group described
1588 * below:
1589 *                                +-------------------------+
1590 *   start of prot group  -->     |          PDE_5          |
1591 *                                +-------------------------+
1592 *                                |          PDE_6          |
1593 *                                +-------------------------+
1594 *                                |         Data BDE        |
1595 *                                +-------------------------+
1596 *                                |more Data BDE's ... (opt)|
1597 *                                +-------------------------+
1598 *
1599 *
1600 * Note: Data s/g buffers have been dma mapped
1601 *
1602 * Returns the number of BDEs added to the BPL.
1603 **/
1604static int
1605lpfc_bg_setup_bpl(struct lpfc_hba *phba, struct scsi_cmnd *sc,
1606                struct ulp_bde64 *bpl, int datasegcnt)
1607{
1608        struct scatterlist *sgde = NULL; /* s/g data entry */
1609        struct lpfc_pde5 *pde5 = NULL;
1610        struct lpfc_pde6 *pde6 = NULL;
1611        dma_addr_t physaddr;
1612        int i = 0, num_bde = 0, status;
1613        int datadir = sc->sc_data_direction;
1614#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
1615        uint32_t rc;
1616#endif
1617        uint32_t checking = 1;
1618        uint32_t reftag;
1619        uint8_t txop, rxop;
1620
1621        status  = lpfc_sc_to_bg_opcodes(phba, sc, &txop, &rxop);
1622        if (status)
1623                goto out;
1624
1625        /* extract some info from the scsi command for pde*/
1626        reftag = scsi_prot_ref_tag(sc);
1627        if (reftag == LPFC_INVALID_REFTAG)
1628                goto out;
1629
1630#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
1631        rc = lpfc_bg_err_inject(phba, sc, &reftag, NULL, 1);
1632        if (rc) {
1633                if (rc & BG_ERR_SWAP)
1634                        lpfc_bg_err_opcodes(phba, sc, &txop, &rxop);
1635                if (rc & BG_ERR_CHECK)
1636                        checking = 0;
1637        }
1638#endif
1639
1640        /* setup PDE5 with what we have */
1641        pde5 = (struct lpfc_pde5 *) bpl;
1642        memset(pde5, 0, sizeof(struct lpfc_pde5));
1643        bf_set(pde5_type, pde5, LPFC_PDE5_DESCRIPTOR);
1644
1645        /* Endianness conversion if necessary for PDE5 */
1646        pde5->word0 = cpu_to_le32(pde5->word0);
1647        pde5->reftag = cpu_to_le32(reftag);
1648
1649        /* advance bpl and increment bde count */
1650        num_bde++;
1651        bpl++;
1652        pde6 = (struct lpfc_pde6 *) bpl;
1653
1654        /* setup PDE6 with the rest of the info */
1655        memset(pde6, 0, sizeof(struct lpfc_pde6));
1656        bf_set(pde6_type, pde6, LPFC_PDE6_DESCRIPTOR);
1657        bf_set(pde6_optx, pde6, txop);
1658        bf_set(pde6_oprx, pde6, rxop);
1659
1660        /*
1661         * We only need to check the data on READs, for WRITEs
1662         * protection data is automatically generated, not checked.
1663         */
1664        if (datadir == DMA_FROM_DEVICE) {
1665                if (sc->prot_flags & SCSI_PROT_GUARD_CHECK)
1666                        bf_set(pde6_ce, pde6, checking);
1667                else
1668                        bf_set(pde6_ce, pde6, 0);
1669
1670                if (sc->prot_flags & SCSI_PROT_REF_CHECK)
1671                        bf_set(pde6_re, pde6, checking);
1672                else
1673                        bf_set(pde6_re, pde6, 0);
1674        }
1675        bf_set(pde6_ai, pde6, 1);
1676        bf_set(pde6_ae, pde6, 0);
1677        bf_set(pde6_apptagval, pde6, 0);
1678
1679        /* Endianness conversion if necessary for PDE6 */
1680        pde6->word0 = cpu_to_le32(pde6->word0);
1681        pde6->word1 = cpu_to_le32(pde6->word1);
1682        pde6->word2 = cpu_to_le32(pde6->word2);
1683
1684        /* advance bpl and increment bde count */
1685        num_bde++;
1686        bpl++;
1687
1688        /* assumption: caller has already run dma_map_sg on command data */
1689        scsi_for_each_sg(sc, sgde, datasegcnt, i) {
1690                physaddr = sg_dma_address(sgde);
1691                bpl->addrLow = le32_to_cpu(putPaddrLow(physaddr));
1692                bpl->addrHigh = le32_to_cpu(putPaddrHigh(physaddr));
1693                bpl->tus.f.bdeSize = sg_dma_len(sgde);
1694                if (datadir == DMA_TO_DEVICE)
1695                        bpl->tus.f.bdeFlags = BUFF_TYPE_BDE_64;
1696                else
1697                        bpl->tus.f.bdeFlags = BUFF_TYPE_BDE_64I;
1698                bpl->tus.w = le32_to_cpu(bpl->tus.w);
1699                bpl++;
1700                num_bde++;
1701        }
1702
1703out:
1704        return num_bde;
1705}
1706
1707/**
1708 * lpfc_bg_setup_bpl_prot - Setup BlockGuard BPL with protection data
1709 * @phba: The Hba for which this call is being executed.
1710 * @sc: pointer to scsi command we're working on
1711 * @bpl: pointer to buffer list for protection groups
1712 * @datacnt: number of segments of data that have been dma mapped
1713 * @protcnt: number of segment of protection data that have been dma mapped
1714 *
1715 * This function sets up BPL buffer list for protection groups of
1716 * type LPFC_PG_TYPE_DIF
1717 *
1718 * This is usually used when DIFs are in their own buffers,
1719 * separate from the data. The HBA can then by instructed
1720 * to place the DIFs in the outgoing stream.  For read operations,
1721 * The HBA could extract the DIFs and place it in DIF buffers.
1722 *
1723 * The buffer list for this type consists of one or more of the
1724 * protection groups described below:
1725 *                                    +-------------------------+
1726 *   start of first prot group  -->   |          PDE_5          |
1727 *                                    +-------------------------+
1728 *                                    |          PDE_6          |
1729 *                                    +-------------------------+
1730 *                                    |      PDE_7 (Prot BDE)   |
1731 *                                    +-------------------------+
1732 *                                    |        Data BDE         |
1733 *                                    +-------------------------+
1734 *                                    |more Data BDE's ... (opt)|
1735 *                                    +-------------------------+
1736 *   start of new  prot group  -->    |          PDE_5          |
1737 *                                    +-------------------------+
1738 *                                    |          ...            |
1739 *                                    +-------------------------+
1740 *
1741 * Note: It is assumed that both data and protection s/g buffers have been
1742 *       mapped for DMA
1743 *
1744 * Returns the number of BDEs added to the BPL.
1745 **/
1746static int
1747lpfc_bg_setup_bpl_prot(struct lpfc_hba *phba, struct scsi_cmnd *sc,
1748                struct ulp_bde64 *bpl, int datacnt, int protcnt)
1749{
1750        struct scatterlist *sgde = NULL; /* s/g data entry */
1751        struct scatterlist *sgpe = NULL; /* s/g prot entry */
1752        struct lpfc_pde5 *pde5 = NULL;
1753        struct lpfc_pde6 *pde6 = NULL;
1754        struct lpfc_pde7 *pde7 = NULL;
1755        dma_addr_t dataphysaddr, protphysaddr;
1756        unsigned short curr_data = 0, curr_prot = 0;
1757        unsigned int split_offset;
1758        unsigned int protgroup_len, protgroup_offset = 0, protgroup_remainder;
1759        unsigned int protgrp_blks, protgrp_bytes;
1760        unsigned int remainder, subtotal;
1761        int status;
1762        int datadir = sc->sc_data_direction;
1763        unsigned char pgdone = 0, alldone = 0;
1764        unsigned blksize;
1765#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
1766        uint32_t rc;
1767#endif
1768        uint32_t checking = 1;
1769        uint32_t reftag;
1770        uint8_t txop, rxop;
1771        int num_bde = 0;
1772
1773        sgpe = scsi_prot_sglist(sc);
1774        sgde = scsi_sglist(sc);
1775
1776        if (!sgpe || !sgde) {
1777                lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
1778                                "9020 Invalid s/g entry: data=x%px prot=x%px\n",
1779                                sgpe, sgde);
1780                return 0;
1781        }
1782
1783        status = lpfc_sc_to_bg_opcodes(phba, sc, &txop, &rxop);
1784        if (status)
1785                goto out;
1786
1787        /* extract some info from the scsi command */
1788        blksize = scsi_prot_interval(sc);
1789        reftag = scsi_prot_ref_tag(sc);
1790        if (reftag == LPFC_INVALID_REFTAG)
1791                goto out;
1792
1793#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
1794        rc = lpfc_bg_err_inject(phba, sc, &reftag, NULL, 1);
1795        if (rc) {
1796                if (rc & BG_ERR_SWAP)
1797                        lpfc_bg_err_opcodes(phba, sc, &txop, &rxop);
1798                if (rc & BG_ERR_CHECK)
1799                        checking = 0;
1800        }
1801#endif
1802
1803        split_offset = 0;
1804        do {
1805                /* Check to see if we ran out of space */
1806                if (num_bde >= (phba->cfg_total_seg_cnt - 2))
1807                        return num_bde + 3;
1808
1809                /* setup PDE5 with what we have */
1810                pde5 = (struct lpfc_pde5 *) bpl;
1811                memset(pde5, 0, sizeof(struct lpfc_pde5));
1812                bf_set(pde5_type, pde5, LPFC_PDE5_DESCRIPTOR);
1813
1814                /* Endianness conversion if necessary for PDE5 */
1815                pde5->word0 = cpu_to_le32(pde5->word0);
1816                pde5->reftag = cpu_to_le32(reftag);
1817
1818                /* advance bpl and increment bde count */
1819                num_bde++;
1820                bpl++;
1821                pde6 = (struct lpfc_pde6 *) bpl;
1822
1823                /* setup PDE6 with the rest of the info */
1824                memset(pde6, 0, sizeof(struct lpfc_pde6));
1825                bf_set(pde6_type, pde6, LPFC_PDE6_DESCRIPTOR);
1826                bf_set(pde6_optx, pde6, txop);
1827                bf_set(pde6_oprx, pde6, rxop);
1828
1829                if (sc->prot_flags & SCSI_PROT_GUARD_CHECK)
1830                        bf_set(pde6_ce, pde6, checking);
1831                else
1832                        bf_set(pde6_ce, pde6, 0);
1833
1834                if (sc->prot_flags & SCSI_PROT_REF_CHECK)
1835                        bf_set(pde6_re, pde6, checking);
1836                else
1837                        bf_set(pde6_re, pde6, 0);
1838
1839                bf_set(pde6_ai, pde6, 1);
1840                bf_set(pde6_ae, pde6, 0);
1841                bf_set(pde6_apptagval, pde6, 0);
1842
1843                /* Endianness conversion if necessary for PDE6 */
1844                pde6->word0 = cpu_to_le32(pde6->word0);
1845                pde6->word1 = cpu_to_le32(pde6->word1);
1846                pde6->word2 = cpu_to_le32(pde6->word2);
1847
1848                /* advance bpl and increment bde count */
1849                num_bde++;
1850                bpl++;
1851
1852                /* setup the first BDE that points to protection buffer */
1853                protphysaddr = sg_dma_address(sgpe) + protgroup_offset;
1854                protgroup_len = sg_dma_len(sgpe) - protgroup_offset;
1855
1856                /* must be integer multiple of the DIF block length */
1857                BUG_ON(protgroup_len % 8);
1858
1859                pde7 = (struct lpfc_pde7 *) bpl;
1860                memset(pde7, 0, sizeof(struct lpfc_pde7));
1861                bf_set(pde7_type, pde7, LPFC_PDE7_DESCRIPTOR);
1862
1863                pde7->addrHigh = le32_to_cpu(putPaddrHigh(protphysaddr));
1864                pde7->addrLow = le32_to_cpu(putPaddrLow(protphysaddr));
1865
1866                protgrp_blks = protgroup_len / 8;
1867                protgrp_bytes = protgrp_blks * blksize;
1868
1869                /* check if this pde is crossing the 4K boundary; if so split */
1870                if ((pde7->addrLow & 0xfff) + protgroup_len > 0x1000) {
1871                        protgroup_remainder = 0x1000 - (pde7->addrLow & 0xfff);
1872                        protgroup_offset += protgroup_remainder;
1873                        protgrp_blks = protgroup_remainder / 8;
1874                        protgrp_bytes = protgrp_blks * blksize;
1875                } else {
1876                        protgroup_offset = 0;
1877                        curr_prot++;
1878                }
1879
1880                num_bde++;
1881
1882                /* setup BDE's for data blocks associated with DIF data */
1883                pgdone = 0;
1884                subtotal = 0; /* total bytes processed for current prot grp */
1885                while (!pgdone) {
1886                        /* Check to see if we ran out of space */
1887                        if (num_bde >= phba->cfg_total_seg_cnt)
1888                                return num_bde + 1;
1889
1890                        if (!sgde) {
1891                                lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
1892                                        "9065 BLKGRD:%s Invalid data segment\n",
1893                                                __func__);
1894                                return 0;
1895                        }
1896                        bpl++;
1897                        dataphysaddr = sg_dma_address(sgde) + split_offset;
1898                        bpl->addrLow = le32_to_cpu(putPaddrLow(dataphysaddr));
1899                        bpl->addrHigh = le32_to_cpu(putPaddrHigh(dataphysaddr));
1900
1901                        remainder = sg_dma_len(sgde) - split_offset;
1902
1903                        if ((subtotal + remainder) <= protgrp_bytes) {
1904                                /* we can use this whole buffer */
1905                                bpl->tus.f.bdeSize = remainder;
1906                                split_offset = 0;
1907
1908                                if ((subtotal + remainder) == protgrp_bytes)
1909                                        pgdone = 1;
1910                        } else {
1911                                /* must split this buffer with next prot grp */
1912                                bpl->tus.f.bdeSize = protgrp_bytes - subtotal;
1913                                split_offset += bpl->tus.f.bdeSize;
1914                        }
1915
1916                        subtotal += bpl->tus.f.bdeSize;
1917
1918                        if (datadir == DMA_TO_DEVICE)
1919                                bpl->tus.f.bdeFlags = BUFF_TYPE_BDE_64;
1920                        else
1921                                bpl->tus.f.bdeFlags = BUFF_TYPE_BDE_64I;
1922                        bpl->tus.w = le32_to_cpu(bpl->tus.w);
1923
1924                        num_bde++;
1925                        curr_data++;
1926
1927                        if (split_offset)
1928                                break;
1929
1930                        /* Move to the next s/g segment if possible */
1931                        sgde = sg_next(sgde);
1932
1933                }
1934
1935                if (protgroup_offset) {
1936                        /* update the reference tag */
1937                        reftag += protgrp_blks;
1938                        bpl++;
1939                        continue;
1940                }
1941
1942                /* are we done ? */
1943                if (curr_prot == protcnt) {
1944                        alldone = 1;
1945                } else if (curr_prot < protcnt) {
1946                        /* advance to next prot buffer */
1947                        sgpe = sg_next(sgpe);
1948                        bpl++;
1949
1950                        /* update the reference tag */
1951                        reftag += protgrp_blks;
1952                } else {
1953                        /* if we're here, we have a bug */
1954                        lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
1955                                        "9054 BLKGRD: bug in %s\n", __func__);
1956                }
1957
1958        } while (!alldone);
1959out:
1960
1961        return num_bde;
1962}
1963
1964/**
1965 * lpfc_bg_setup_sgl - Setup BlockGuard SGL with no protection data
1966 * @phba: The Hba for which this call is being executed.
1967 * @sc: pointer to scsi command we're working on
1968 * @sgl: pointer to buffer list for protection groups
1969 * @datasegcnt: number of segments of data that have been dma mapped
1970 * @lpfc_cmd: lpfc scsi command object pointer.
1971 *
1972 * This function sets up SGL buffer list for protection groups of
1973 * type LPFC_PG_TYPE_NO_DIF
1974 *
1975 * This is usually used when the HBA is instructed to generate
1976 * DIFs and insert them into data stream (or strip DIF from
1977 * incoming data stream)
1978 *
1979 * The buffer list consists of just one protection group described
1980 * below:
1981 *                                +-------------------------+
1982 *   start of prot group  -->     |         DI_SEED         |
1983 *                                +-------------------------+
1984 *                                |         Data SGE        |
1985 *                                +-------------------------+
1986 *                                |more Data SGE's ... (opt)|
1987 *                                +-------------------------+
1988 *
1989 *
1990 * Note: Data s/g buffers have been dma mapped
1991 *
1992 * Returns the number of SGEs added to the SGL.
1993 **/
1994static int
1995lpfc_bg_setup_sgl(struct lpfc_hba *phba, struct scsi_cmnd *sc,
1996                struct sli4_sge *sgl, int datasegcnt,
1997                struct lpfc_io_buf *lpfc_cmd)
1998{
1999        struct scatterlist *sgde = NULL; /* s/g data entry */
2000        struct sli4_sge_diseed *diseed = NULL;
2001        dma_addr_t physaddr;
2002        int i = 0, num_sge = 0, status;
2003        uint32_t reftag;
2004        uint8_t txop, rxop;
2005#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
2006        uint32_t rc;
2007#endif
2008        uint32_t checking = 1;
2009        uint32_t dma_len;
2010        uint32_t dma_offset = 0;
2011        struct sli4_hybrid_sgl *sgl_xtra = NULL;
2012        int j;
2013        bool lsp_just_set = false;
2014
2015        status  = lpfc_sc_to_bg_opcodes(phba, sc, &txop, &rxop);
2016        if (status)
2017                goto out;
2018
2019        /* extract some info from the scsi command for pde*/
2020        reftag = scsi_prot_ref_tag(sc);
2021        if (reftag == LPFC_INVALID_REFTAG)
2022                goto out;
2023
2024#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
2025        rc = lpfc_bg_err_inject(phba, sc, &reftag, NULL, 1);
2026        if (rc) {
2027                if (rc & BG_ERR_SWAP)
2028                        lpfc_bg_err_opcodes(phba, sc, &txop, &rxop);
2029                if (rc & BG_ERR_CHECK)
2030                        checking = 0;
2031        }
2032#endif
2033
2034        /* setup DISEED with what we have */
2035        diseed = (struct sli4_sge_diseed *) sgl;
2036        memset(diseed, 0, sizeof(struct sli4_sge_diseed));
2037        bf_set(lpfc_sli4_sge_type, sgl, LPFC_SGE_TYPE_DISEED);
2038
2039        /* Endianness conversion if necessary */
2040        diseed->ref_tag = cpu_to_le32(reftag);
2041        diseed->ref_tag_tran = diseed->ref_tag;
2042
2043        /*
2044         * We only need to check the data on READs, for WRITEs
2045         * protection data is automatically generated, not checked.
2046         */
2047        if (sc->sc_data_direction == DMA_FROM_DEVICE) {
2048                if (sc->prot_flags & SCSI_PROT_GUARD_CHECK)
2049                        bf_set(lpfc_sli4_sge_dif_ce, diseed, checking);
2050                else
2051                        bf_set(lpfc_sli4_sge_dif_ce, diseed, 0);
2052
2053                if (sc->prot_flags & SCSI_PROT_REF_CHECK)
2054                        bf_set(lpfc_sli4_sge_dif_re, diseed, checking);
2055                else
2056                        bf_set(lpfc_sli4_sge_dif_re, diseed, 0);
2057        }
2058
2059        /* setup DISEED with the rest of the info */
2060        bf_set(lpfc_sli4_sge_dif_optx, diseed, txop);
2061        bf_set(lpfc_sli4_sge_dif_oprx, diseed, rxop);
2062
2063        bf_set(lpfc_sli4_sge_dif_ai, diseed, 1);
2064        bf_set(lpfc_sli4_sge_dif_me, diseed, 0);
2065
2066        /* Endianness conversion if necessary for DISEED */
2067        diseed->word2 = cpu_to_le32(diseed->word2);
2068        diseed->word3 = cpu_to_le32(diseed->word3);
2069
2070        /* advance bpl and increment sge count */
2071        num_sge++;
2072        sgl++;
2073
2074        /* assumption: caller has already run dma_map_sg on command data */
2075        sgde = scsi_sglist(sc);
2076        j = 3;
2077        for (i = 0; i < datasegcnt; i++) {
2078                /* clear it */
2079                sgl->word2 = 0;
2080
2081                /* do we need to expand the segment */
2082                if (!lsp_just_set && !((j + 1) % phba->border_sge_num) &&
2083                    ((datasegcnt - 1) != i)) {
2084                        /* set LSP type */
2085                        bf_set(lpfc_sli4_sge_type, sgl, LPFC_SGE_TYPE_LSP);
2086
2087                        sgl_xtra = lpfc_get_sgl_per_hdwq(phba, lpfc_cmd);
2088
2089                        if (unlikely(!sgl_xtra)) {
2090                                lpfc_cmd->seg_cnt = 0;
2091                                return 0;
2092                        }
2093                        sgl->addr_lo = cpu_to_le32(putPaddrLow(
2094                                                sgl_xtra->dma_phys_sgl));
2095                        sgl->addr_hi = cpu_to_le32(putPaddrHigh(
2096                                                sgl_xtra->dma_phys_sgl));
2097
2098                } else {
2099                        bf_set(lpfc_sli4_sge_type, sgl, LPFC_SGE_TYPE_DATA);
2100                }
2101
2102                if (!(bf_get(lpfc_sli4_sge_type, sgl) & LPFC_SGE_TYPE_LSP)) {
2103                        if ((datasegcnt - 1) == i)
2104                                bf_set(lpfc_sli4_sge_last, sgl, 1);
2105                        physaddr = sg_dma_address(sgde);
2106                        dma_len = sg_dma_len(sgde);
2107                        sgl->addr_lo = cpu_to_le32(putPaddrLow(physaddr));
2108                        sgl->addr_hi = cpu_to_le32(putPaddrHigh(physaddr));
2109
2110                        bf_set(lpfc_sli4_sge_offset, sgl, dma_offset);
2111                        sgl->word2 = cpu_to_le32(sgl->word2);
2112                        sgl->sge_len = cpu_to_le32(dma_len);
2113
2114                        dma_offset += dma_len;
2115                        sgde = sg_next(sgde);
2116
2117                        sgl++;
2118                        num_sge++;
2119                        lsp_just_set = false;
2120
2121                } else {
2122                        sgl->word2 = cpu_to_le32(sgl->word2);
2123                        sgl->sge_len = cpu_to_le32(phba->cfg_sg_dma_buf_size);
2124
2125                        sgl = (struct sli4_sge *)sgl_xtra->dma_sgl;
2126                        i = i - 1;
2127
2128                        lsp_just_set = true;
2129                }
2130
2131                j++;
2132
2133        }
2134
2135out:
2136        return num_sge;
2137}
2138
2139/**
2140 * lpfc_bg_setup_sgl_prot - Setup BlockGuard SGL with protection data
2141 * @phba: The Hba for which this call is being executed.
2142 * @sc: pointer to scsi command we're working on
2143 * @sgl: pointer to buffer list for protection groups
2144 * @datacnt: number of segments of data that have been dma mapped
2145 * @protcnt: number of segment of protection data that have been dma mapped
2146 * @lpfc_cmd: lpfc scsi command object pointer.
2147 *
2148 * This function sets up SGL buffer list for protection groups of
2149 * type LPFC_PG_TYPE_DIF
2150 *
2151 * This is usually used when DIFs are in their own buffers,
2152 * separate from the data. The HBA can then by instructed
2153 * to place the DIFs in the outgoing stream.  For read operations,
2154 * The HBA could extract the DIFs and place it in DIF buffers.
2155 *
2156 * The buffer list for this type consists of one or more of the
2157 * protection groups described below:
2158 *                                    +-------------------------+
2159 *   start of first prot group  -->   |         DISEED          |
2160 *                                    +-------------------------+
2161 *                                    |      DIF (Prot SGE)     |
2162 *                                    +-------------------------+
2163 *                                    |        Data SGE         |
2164 *                                    +-------------------------+
2165 *                                    |more Data SGE's ... (opt)|
2166 *                                    +-------------------------+
2167 *   start of new  prot group  -->    |         DISEED          |
2168 *                                    +-------------------------+
2169 *                                    |          ...            |
2170 *                                    +-------------------------+
2171 *
2172 * Note: It is assumed that both data and protection s/g buffers have been
2173 *       mapped for DMA
2174 *
2175 * Returns the number of SGEs added to the SGL.
2176 **/
2177static int
2178lpfc_bg_setup_sgl_prot(struct lpfc_hba *phba, struct scsi_cmnd *sc,
2179                struct sli4_sge *sgl, int datacnt, int protcnt,
2180                struct lpfc_io_buf *lpfc_cmd)
2181{
2182        struct scatterlist *sgde = NULL; /* s/g data entry */
2183        struct scatterlist *sgpe = NULL; /* s/g prot entry */
2184        struct sli4_sge_diseed *diseed = NULL;
2185        dma_addr_t dataphysaddr, protphysaddr;
2186        unsigned short curr_data = 0, curr_prot = 0;
2187        unsigned int split_offset;
2188        unsigned int protgroup_len, protgroup_offset = 0, protgroup_remainder;
2189        unsigned int protgrp_blks, protgrp_bytes;
2190        unsigned int remainder, subtotal;
2191        int status;
2192        unsigned char pgdone = 0, alldone = 0;
2193        unsigned blksize;
2194        uint32_t reftag;
2195        uint8_t txop, rxop;
2196        uint32_t dma_len;
2197#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
2198        uint32_t rc;
2199#endif
2200        uint32_t checking = 1;
2201        uint32_t dma_offset = 0;
2202        int num_sge = 0, j = 2;
2203        struct sli4_hybrid_sgl *sgl_xtra = NULL;
2204
2205        sgpe = scsi_prot_sglist(sc);
2206        sgde = scsi_sglist(sc);
2207
2208        if (!sgpe || !sgde) {
2209                lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
2210                                "9082 Invalid s/g entry: data=x%px prot=x%px\n",
2211                                sgpe, sgde);
2212                return 0;
2213        }
2214
2215        status = lpfc_sc_to_bg_opcodes(phba, sc, &txop, &rxop);
2216        if (status)
2217                goto out;
2218
2219        /* extract some info from the scsi command */
2220        blksize = scsi_prot_interval(sc);
2221        reftag = scsi_prot_ref_tag(sc);
2222        if (reftag == LPFC_INVALID_REFTAG)
2223                goto out;
2224
2225#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
2226        rc = lpfc_bg_err_inject(phba, sc, &reftag, NULL, 1);
2227        if (rc) {
2228                if (rc & BG_ERR_SWAP)
2229                        lpfc_bg_err_opcodes(phba, sc, &txop, &rxop);
2230                if (rc & BG_ERR_CHECK)
2231                        checking = 0;
2232        }
2233#endif
2234
2235        split_offset = 0;
2236        do {
2237                /* Check to see if we ran out of space */
2238                if ((num_sge >= (phba->cfg_total_seg_cnt - 2)) &&
2239                    !(phba->cfg_xpsgl))
2240                        return num_sge + 3;
2241
2242                /* DISEED and DIF have to be together */
2243                if (!((j + 1) % phba->border_sge_num) ||
2244                    !((j + 2) % phba->border_sge_num) ||
2245                    !((j + 3) % phba->border_sge_num)) {
2246                        sgl->word2 = 0;
2247
2248                        /* set LSP type */
2249                        bf_set(lpfc_sli4_sge_type, sgl, LPFC_SGE_TYPE_LSP);
2250
2251                        sgl_xtra = lpfc_get_sgl_per_hdwq(phba, lpfc_cmd);
2252
2253                        if (unlikely(!sgl_xtra)) {
2254                                goto out;
2255                        } else {
2256                                sgl->addr_lo = cpu_to_le32(putPaddrLow(
2257                                                sgl_xtra->dma_phys_sgl));
2258                                sgl->addr_hi = cpu_to_le32(putPaddrHigh(
2259                                                       sgl_xtra->dma_phys_sgl));
2260                        }
2261
2262                        sgl->word2 = cpu_to_le32(sgl->word2);
2263                        sgl->sge_len = cpu_to_le32(phba->cfg_sg_dma_buf_size);
2264
2265                        sgl = (struct sli4_sge *)sgl_xtra->dma_sgl;
2266                        j = 0;
2267                }
2268
2269                /* setup DISEED with what we have */
2270                diseed = (struct sli4_sge_diseed *) sgl;
2271                memset(diseed, 0, sizeof(struct sli4_sge_diseed));
2272                bf_set(lpfc_sli4_sge_type, sgl, LPFC_SGE_TYPE_DISEED);
2273
2274                /* Endianness conversion if necessary */
2275                diseed->ref_tag = cpu_to_le32(reftag);
2276                diseed->ref_tag_tran = diseed->ref_tag;
2277
2278                if (sc->prot_flags & SCSI_PROT_GUARD_CHECK) {
2279                        bf_set(lpfc_sli4_sge_dif_ce, diseed, checking);
2280                } else {
2281                        bf_set(lpfc_sli4_sge_dif_ce, diseed, 0);
2282                        /*
2283                         * When in this mode, the hardware will replace
2284                         * the guard tag from the host with a
2285                         * newly generated good CRC for the wire.
2286                         * Switch to raw mode here to avoid this
2287                         * behavior. What the host sends gets put on the wire.
2288                         */
2289                        if (txop == BG_OP_IN_CRC_OUT_CRC) {
2290                                txop = BG_OP_RAW_MODE;
2291                                rxop = BG_OP_RAW_MODE;
2292                        }
2293                }
2294
2295
2296                if (sc->prot_flags & SCSI_PROT_REF_CHECK)
2297                        bf_set(lpfc_sli4_sge_dif_re, diseed, checking);
2298                else
2299                        bf_set(lpfc_sli4_sge_dif_re, diseed, 0);
2300
2301                /* setup DISEED with the rest of the info */
2302                bf_set(lpfc_sli4_sge_dif_optx, diseed, txop);
2303                bf_set(lpfc_sli4_sge_dif_oprx, diseed, rxop);
2304
2305                bf_set(lpfc_sli4_sge_dif_ai, diseed, 1);
2306                bf_set(lpfc_sli4_sge_dif_me, diseed, 0);
2307
2308                /* Endianness conversion if necessary for DISEED */
2309                diseed->word2 = cpu_to_le32(diseed->word2);
2310                diseed->word3 = cpu_to_le32(diseed->word3);
2311
2312                /* advance sgl and increment bde count */
2313                num_sge++;
2314
2315                sgl++;
2316                j++;
2317
2318                /* setup the first BDE that points to protection buffer */
2319                protphysaddr = sg_dma_address(sgpe) + protgroup_offset;
2320                protgroup_len = sg_dma_len(sgpe) - protgroup_offset;
2321
2322                /* must be integer multiple of the DIF block length */
2323                BUG_ON(protgroup_len % 8);
2324
2325                /* Now setup DIF SGE */
2326                sgl->word2 = 0;
2327                bf_set(lpfc_sli4_sge_type, sgl, LPFC_SGE_TYPE_DIF);
2328                sgl->addr_hi = le32_to_cpu(putPaddrHigh(protphysaddr));
2329                sgl->addr_lo = le32_to_cpu(putPaddrLow(protphysaddr));
2330                sgl->word2 = cpu_to_le32(sgl->word2);
2331                sgl->sge_len = 0;
2332
2333                protgrp_blks = protgroup_len / 8;
2334                protgrp_bytes = protgrp_blks * blksize;
2335
2336                /* check if DIF SGE is crossing the 4K boundary; if so split */
2337                if ((sgl->addr_lo & 0xfff) + protgroup_len > 0x1000) {
2338                        protgroup_remainder = 0x1000 - (sgl->addr_lo & 0xfff);
2339                        protgroup_offset += protgroup_remainder;
2340                        protgrp_blks = protgroup_remainder / 8;
2341                        protgrp_bytes = protgrp_blks * blksize;
2342                } else {
2343                        protgroup_offset = 0;
2344                        curr_prot++;
2345                }
2346
2347                num_sge++;
2348
2349                /* setup SGE's for data blocks associated with DIF data */
2350                pgdone = 0;
2351                subtotal = 0; /* total bytes processed for current prot grp */
2352
2353                sgl++;
2354                j++;
2355
2356                while (!pgdone) {
2357                        /* Check to see if we ran out of space */
2358                        if ((num_sge >= phba->cfg_total_seg_cnt) &&
2359                            !phba->cfg_xpsgl)
2360                                return num_sge + 1;
2361
2362                        if (!sgde) {
2363                                lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
2364                                        "9086 BLKGRD:%s Invalid data segment\n",
2365                                                __func__);
2366                                return 0;
2367                        }
2368
2369                        if (!((j + 1) % phba->border_sge_num)) {
2370                                sgl->word2 = 0;
2371
2372                                /* set LSP type */
2373                                bf_set(lpfc_sli4_sge_type, sgl,
2374                                       LPFC_SGE_TYPE_LSP);
2375
2376                                sgl_xtra = lpfc_get_sgl_per_hdwq(phba,
2377                                                                 lpfc_cmd);
2378
2379                                if (unlikely(!sgl_xtra)) {
2380                                        goto out;
2381                                } else {
2382                                        sgl->addr_lo = cpu_to_le32(
2383                                          putPaddrLow(sgl_xtra->dma_phys_sgl));
2384                                        sgl->addr_hi = cpu_to_le32(
2385                                          putPaddrHigh(sgl_xtra->dma_phys_sgl));
2386                                }
2387
2388                                sgl->word2 = cpu_to_le32(sgl->word2);
2389                                sgl->sge_len = cpu_to_le32(
2390                                                     phba->cfg_sg_dma_buf_size);
2391
2392                                sgl = (struct sli4_sge *)sgl_xtra->dma_sgl;
2393                        } else {
2394                                dataphysaddr = sg_dma_address(sgde) +
2395                                                                   split_offset;
2396
2397                                remainder = sg_dma_len(sgde) - split_offset;
2398
2399                                if ((subtotal + remainder) <= protgrp_bytes) {
2400                                        /* we can use this whole buffer */
2401                                        dma_len = remainder;
2402                                        split_offset = 0;
2403
2404                                        if ((subtotal + remainder) ==
2405                                                                  protgrp_bytes)
2406                                                pgdone = 1;
2407                                } else {
2408                                        /* must split this buffer with next
2409                                         * prot grp
2410                                         */
2411                                        dma_len = protgrp_bytes - subtotal;
2412                                        split_offset += dma_len;
2413                                }
2414
2415                                subtotal += dma_len;
2416
2417                                sgl->word2 = 0;
2418                                sgl->addr_lo = cpu_to_le32(putPaddrLow(
2419                                                                 dataphysaddr));
2420                                sgl->addr_hi = cpu_to_le32(putPaddrHigh(
2421                                                                 dataphysaddr));
2422                                bf_set(lpfc_sli4_sge_last, sgl, 0);
2423                                bf_set(lpfc_sli4_sge_offset, sgl, dma_offset);
2424                                bf_set(lpfc_sli4_sge_type, sgl,
2425                                       LPFC_SGE_TYPE_DATA);
2426
2427                                sgl->sge_len = cpu_to_le32(dma_len);
2428                                dma_offset += dma_len;
2429
2430                                num_sge++;
2431                                curr_data++;
2432
2433                                if (split_offset) {
2434                                        sgl++;
2435                                        j++;
2436                                        break;
2437                                }
2438
2439                                /* Move to the next s/g segment if possible */
2440                                sgde = sg_next(sgde);
2441
2442                                sgl++;
2443                        }
2444
2445                        j++;
2446                }
2447
2448                if (protgroup_offset) {
2449                        /* update the reference tag */
2450                        reftag += protgrp_blks;
2451                        continue;
2452                }
2453
2454                /* are we done ? */
2455                if (curr_prot == protcnt) {
2456                        /* mark the last SGL */
2457                        sgl--;
2458                        bf_set(lpfc_sli4_sge_last, sgl, 1);
2459                        alldone = 1;
2460                } else if (curr_prot < protcnt) {
2461                        /* advance to next prot buffer */
2462                        sgpe = sg_next(sgpe);
2463
2464                        /* update the reference tag */
2465                        reftag += protgrp_blks;
2466                } else {
2467                        /* if we're here, we have a bug */
2468                        lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
2469                                        "9085 BLKGRD: bug in %s\n", __func__);
2470                }
2471
2472        } while (!alldone);
2473
2474out:
2475
2476        return num_sge;
2477}
2478
2479/**
2480 * lpfc_prot_group_type - Get prtotection group type of SCSI command
2481 * @phba: The Hba for which this call is being executed.
2482 * @sc: pointer to scsi command we're working on
2483 *
2484 * Given a SCSI command that supports DIF, determine composition of protection
2485 * groups involved in setting up buffer lists
2486 *
2487 * Returns: Protection group type (with or without DIF)
2488 *
2489 **/
2490static int
2491lpfc_prot_group_type(struct lpfc_hba *phba, struct scsi_cmnd *sc)
2492{
2493        int ret = LPFC_PG_TYPE_INVALID;
2494        unsigned char op = scsi_get_prot_op(sc);
2495
2496        switch (op) {
2497        case SCSI_PROT_READ_STRIP:
2498        case SCSI_PROT_WRITE_INSERT:
2499                ret = LPFC_PG_TYPE_NO_DIF;
2500                break;
2501        case SCSI_PROT_READ_INSERT:
2502        case SCSI_PROT_WRITE_STRIP:
2503        case SCSI_PROT_READ_PASS:
2504        case SCSI_PROT_WRITE_PASS:
2505                ret = LPFC_PG_TYPE_DIF_BUF;
2506                break;
2507        default:
2508                if (phba)
2509                        lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
2510                                        "9021 Unsupported protection op:%d\n",
2511                                        op);
2512                break;
2513        }
2514        return ret;
2515}
2516
2517/**
2518 * lpfc_bg_scsi_adjust_dl - Adjust SCSI data length for BlockGuard
2519 * @phba: The Hba for which this call is being executed.
2520 * @lpfc_cmd: The scsi buffer which is going to be adjusted.
2521 *
2522 * Adjust the data length to account for how much data
2523 * is actually on the wire.
2524 *
2525 * returns the adjusted data length
2526 **/
2527static int
2528lpfc_bg_scsi_adjust_dl(struct lpfc_hba *phba,
2529                       struct lpfc_io_buf *lpfc_cmd)
2530{
2531        struct scsi_cmnd *sc = lpfc_cmd->pCmd;
2532        int fcpdl;
2533
2534        fcpdl = scsi_bufflen(sc);
2535
2536        /* Check if there is protection data on the wire */
2537        if (sc->sc_data_direction == DMA_FROM_DEVICE) {
2538                /* Read check for protection data */
2539                if (scsi_get_prot_op(sc) ==  SCSI_PROT_READ_INSERT)
2540                        return fcpdl;
2541
2542        } else {
2543                /* Write check for protection data */
2544                if (scsi_get_prot_op(sc) ==  SCSI_PROT_WRITE_STRIP)
2545                        return fcpdl;
2546        }
2547
2548        /*
2549         * If we are in DIF Type 1 mode every data block has a 8 byte
2550         * DIF (trailer) attached to it. Must ajust FCP data length
2551         * to account for the protection data.
2552         */
2553        fcpdl += (fcpdl / scsi_prot_interval(sc)) * 8;
2554
2555        return fcpdl;
2556}
2557
2558/**
2559 * lpfc_bg_scsi_prep_dma_buf_s3 - DMA mapping for scsi buffer to SLI3 IF spec
2560 * @phba: The Hba for which this call is being executed.
2561 * @lpfc_cmd: The scsi buffer which is going to be prep'ed.
2562 *
2563 * This is the protection/DIF aware version of
2564 * lpfc_scsi_prep_dma_buf(). It may be a good idea to combine the
2565 * two functions eventually, but for now, it's here.
2566 * RETURNS 0 - SUCCESS,
2567 *         1 - Failed DMA map, retry.
2568 *         2 - Invalid scsi cmd or prot-type. Do not rety.
2569 **/
2570static int
2571lpfc_bg_scsi_prep_dma_buf_s3(struct lpfc_hba *phba,
2572                struct lpfc_io_buf *lpfc_cmd)
2573{
2574        struct scsi_cmnd *scsi_cmnd = lpfc_cmd->pCmd;
2575        struct fcp_cmnd *fcp_cmnd = lpfc_cmd->fcp_cmnd;
2576        struct ulp_bde64 *bpl = (struct ulp_bde64 *)lpfc_cmd->dma_sgl;
2577        IOCB_t *iocb_cmd = &lpfc_cmd->cur_iocbq.iocb;
2578        uint32_t num_bde = 0;
2579        int datasegcnt, protsegcnt, datadir = scsi_cmnd->sc_data_direction;
2580        int prot_group_type = 0;
2581        int fcpdl;
2582        int ret = 1;
2583        struct lpfc_vport *vport = phba->pport;
2584
2585        /*
2586         * Start the lpfc command prep by bumping the bpl beyond fcp_cmnd
2587         *  fcp_rsp regions to the first data bde entry
2588         */
2589        bpl += 2;
2590        if (scsi_sg_count(scsi_cmnd)) {
2591                /*
2592                 * The driver stores the segment count returned from dma_map_sg
2593                 * because this a count of dma-mappings used to map the use_sg
2594                 * pages.  They are not guaranteed to be the same for those
2595                 * architectures that implement an IOMMU.
2596                 */
2597                datasegcnt = dma_map_sg(&phba->pcidev->dev,
2598                                        scsi_sglist(scsi_cmnd),
2599                                        scsi_sg_count(scsi_cmnd), datadir);
2600                if (unlikely(!datasegcnt))
2601                        return 1;
2602
2603                lpfc_cmd->seg_cnt = datasegcnt;
2604
2605                /* First check if data segment count from SCSI Layer is good */
2606                if (lpfc_cmd->seg_cnt > phba->cfg_sg_seg_cnt) {
2607                        WARN_ON_ONCE(lpfc_cmd->seg_cnt > phba->cfg_sg_seg_cnt);
2608                        ret = 2;
2609                        goto err;
2610                }
2611
2612                prot_group_type = lpfc_prot_group_type(phba, scsi_cmnd);
2613
2614                switch (prot_group_type) {
2615                case LPFC_PG_TYPE_NO_DIF:
2616
2617                        /* Here we need to add a PDE5 and PDE6 to the count */
2618                        if ((lpfc_cmd->seg_cnt + 2) > phba->cfg_total_seg_cnt) {
2619                                ret = 2;
2620                                goto err;
2621                        }
2622
2623                        num_bde = lpfc_bg_setup_bpl(phba, scsi_cmnd, bpl,
2624                                        datasegcnt);
2625                        /* we should have 2 or more entries in buffer list */
2626                        if (num_bde < 2) {
2627                                ret = 2;
2628                                goto err;
2629                        }
2630                        break;
2631
2632                case LPFC_PG_TYPE_DIF_BUF:
2633                        /*
2634                         * This type indicates that protection buffers are
2635                         * passed to the driver, so that needs to be prepared
2636                         * for DMA
2637                         */
2638                        protsegcnt = dma_map_sg(&phba->pcidev->dev,
2639                                        scsi_prot_sglist(scsi_cmnd),
2640                                        scsi_prot_sg_count(scsi_cmnd), datadir);
2641                        if (unlikely(!protsegcnt)) {
2642                                scsi_dma_unmap(scsi_cmnd);
2643                                return 1;
2644                        }
2645
2646                        lpfc_cmd->prot_seg_cnt = protsegcnt;
2647
2648                        /*
2649                         * There is a minimun of 4 BPLs used for every
2650                         * protection data segment.
2651                         */
2652                        if ((lpfc_cmd->prot_seg_cnt * 4) >
2653                            (phba->cfg_total_seg_cnt - 2)) {
2654                                ret = 2;
2655                                goto err;
2656                        }
2657
2658                        num_bde = lpfc_bg_setup_bpl_prot(phba, scsi_cmnd, bpl,
2659                                        datasegcnt, protsegcnt);
2660                        /* we should have 3 or more entries in buffer list */
2661                        if ((num_bde < 3) ||
2662                            (num_bde > phba->cfg_total_seg_cnt)) {
2663                                ret = 2;
2664                                goto err;
2665                        }
2666                        break;
2667
2668                case LPFC_PG_TYPE_INVALID:
2669                default:
2670                        scsi_dma_unmap(scsi_cmnd);
2671                        lpfc_cmd->seg_cnt = 0;
2672
2673                        lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
2674                                        "9022 Unexpected protection group %i\n",
2675                                        prot_group_type);
2676                        return 2;
2677                }
2678        }
2679
2680        /*
2681         * Finish initializing those IOCB fields that are dependent on the
2682         * scsi_cmnd request_buffer.  Note that the bdeSize is explicitly
2683         * reinitialized since all iocb memory resources are used many times
2684         * for transmit, receive, and continuation bpl's.
2685         */
2686        iocb_cmd->un.fcpi64.bdl.bdeSize = (2 * sizeof(struct ulp_bde64));
2687        iocb_cmd->un.fcpi64.bdl.bdeSize += (num_bde * sizeof(struct ulp_bde64));
2688        iocb_cmd->ulpBdeCount = 1;
2689        iocb_cmd->ulpLe = 1;
2690
2691        fcpdl = lpfc_bg_scsi_adjust_dl(phba, lpfc_cmd);
2692        fcp_cmnd->fcpDl = be32_to_cpu(fcpdl);
2693
2694        /*
2695         * Due to difference in data length between DIF/non-DIF paths,
2696         * we need to set word 4 of IOCB here
2697         */
2698        iocb_cmd->un.fcpi.fcpi_parm = fcpdl;
2699
2700        /*
2701         * For First burst, we may need to adjust the initial transfer
2702         * length for DIF
2703         */
2704        if (iocb_cmd->un.fcpi.fcpi_XRdy &&
2705            (fcpdl < vport->cfg_first_burst_size))
2706                iocb_cmd->un.fcpi.fcpi_XRdy = fcpdl;
2707
2708        return 0;
2709err:
2710        if (lpfc_cmd->seg_cnt)
2711                scsi_dma_unmap(scsi_cmnd);
2712        if (lpfc_cmd->prot_seg_cnt)
2713                dma_unmap_sg(&phba->pcidev->dev, scsi_prot_sglist(scsi_cmnd),
2714                             scsi_prot_sg_count(scsi_cmnd),
2715                             scsi_cmnd->sc_data_direction);
2716
2717        lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
2718                        "9023 Cannot setup S/G List for HBA"
2719                        "IO segs %d/%d BPL %d SCSI %d: %d %d\n",
2720                        lpfc_cmd->seg_cnt, lpfc_cmd->prot_seg_cnt,
2721                        phba->cfg_total_seg_cnt, phba->cfg_sg_seg_cnt,
2722                        prot_group_type, num_bde);
2723
2724        lpfc_cmd->seg_cnt = 0;
2725        lpfc_cmd->prot_seg_cnt = 0;
2726        return ret;
2727}
2728
2729/*
2730 * This function calcuates the T10 DIF guard tag
2731 * on the specified data using a CRC algorithmn
2732 * using crc_t10dif.
2733 */
2734static uint16_t
2735lpfc_bg_crc(uint8_t *data, int count)
2736{
2737        uint16_t crc = 0;
2738        uint16_t x;
2739
2740        crc = crc_t10dif(data, count);
2741        x = cpu_to_be16(crc);
2742        return x;
2743}
2744
2745/*
2746 * This function calcuates the T10 DIF guard tag
2747 * on the specified data using a CSUM algorithmn
2748 * using ip_compute_csum.
2749 */
2750static uint16_t
2751lpfc_bg_csum(uint8_t *data, int count)
2752{
2753        uint16_t ret;
2754
2755        ret = ip_compute_csum(data, count);
2756        return ret;
2757}
2758
2759/*
2760 * This function examines the protection data to try to determine
2761 * what type of T10-DIF error occurred.
2762 */
2763static void
2764lpfc_calc_bg_err(struct lpfc_hba *phba, struct lpfc_io_buf *lpfc_cmd)
2765{
2766        struct scatterlist *sgpe; /* s/g prot entry */
2767        struct scatterlist *sgde; /* s/g data entry */
2768        struct scsi_cmnd *cmd = lpfc_cmd->pCmd;
2769        struct scsi_dif_tuple *src = NULL;
2770        uint8_t *data_src = NULL;
2771        uint16_t guard_tag;
2772        uint16_t start_app_tag, app_tag;
2773        uint32_t start_ref_tag, ref_tag;
2774        int prot, protsegcnt;
2775        int err_type, len, data_len;
2776        int chk_ref, chk_app, chk_guard;
2777        uint16_t sum;
2778        unsigned blksize;
2779
2780        err_type = BGS_GUARD_ERR_MASK;
2781        sum = 0;
2782        guard_tag = 0;
2783
2784        /* First check to see if there is protection data to examine */
2785        prot = scsi_get_prot_op(cmd);
2786        if ((prot == SCSI_PROT_READ_STRIP) ||
2787            (prot == SCSI_PROT_WRITE_INSERT) ||
2788            (prot == SCSI_PROT_NORMAL))
2789                goto out;
2790
2791        /* Currently the driver just supports ref_tag and guard_tag checking */
2792        chk_ref = 1;
2793        chk_app = 0;
2794        chk_guard = 0;
2795
2796        /* Setup a ptr to the protection data provided by the SCSI host */
2797        sgpe = scsi_prot_sglist(cmd);
2798        protsegcnt = lpfc_cmd->prot_seg_cnt;
2799
2800        if (sgpe && protsegcnt) {
2801
2802                /*
2803                 * We will only try to verify guard tag if the segment
2804                 * data length is a multiple of the blksize.
2805                 */
2806                sgde = scsi_sglist(cmd);
2807                blksize = scsi_prot_interval(cmd);
2808                data_src = (uint8_t *)sg_virt(sgde);
2809                data_len = sgde->length;
2810                if ((data_len & (blksize - 1)) == 0)
2811                        chk_guard = 1;
2812
2813                src = (struct scsi_dif_tuple *)sg_virt(sgpe);
2814                start_ref_tag = scsi_prot_ref_tag(cmd);
2815                if (start_ref_tag == LPFC_INVALID_REFTAG)
2816                        goto out;
2817                start_app_tag = src->app_tag;
2818                len = sgpe->length;
2819                while (src && protsegcnt) {
2820                        while (len) {
2821
2822                                /*
2823                                 * First check to see if a protection data
2824                                 * check is valid
2825                                 */
2826                                if ((src->ref_tag == T10_PI_REF_ESCAPE) ||
2827                                    (src->app_tag == T10_PI_APP_ESCAPE)) {
2828                                        start_ref_tag++;
2829                                        goto skipit;
2830                                }
2831
2832                                /* First Guard Tag checking */
2833                                if (chk_guard) {
2834                                        guard_tag = src->guard_tag;
2835                                        if (cmd->prot_flags
2836                                            & SCSI_PROT_IP_CHECKSUM)
2837                                                sum = lpfc_bg_csum(data_src,
2838                                                                   blksize);
2839                                        else
2840                                                sum = lpfc_bg_crc(data_src,
2841                                                                  blksize);
2842                                        if ((guard_tag != sum)) {
2843                                                err_type = BGS_GUARD_ERR_MASK;
2844                                                goto out;
2845                                        }
2846                                }
2847
2848                                /* Reference Tag checking */
2849                                ref_tag = be32_to_cpu(src->ref_tag);
2850                                if (chk_ref && (ref_tag != start_ref_tag)) {
2851                                        err_type = BGS_REFTAG_ERR_MASK;
2852                                        goto out;
2853                                }
2854                                start_ref_tag++;
2855
2856                                /* App Tag checking */
2857                                app_tag = src->app_tag;
2858                                if (chk_app && (app_tag != start_app_tag)) {
2859                                        err_type = BGS_APPTAG_ERR_MASK;
2860                                        goto out;
2861                                }
2862skipit:
2863                                len -= sizeof(struct scsi_dif_tuple);
2864                                if (len < 0)
2865                                        len = 0;
2866                                src++;
2867
2868                                data_src += blksize;
2869                                data_len -= blksize;
2870
2871                                /*
2872                                 * Are we at the end of the Data segment?
2873                                 * The data segment is only used for Guard
2874                                 * tag checking.
2875                                 */
2876                                if (chk_guard && (data_len == 0)) {
2877                                        chk_guard = 0;
2878                                        sgde = sg_next(sgde);
2879                                        if (!sgde)
2880                                                goto out;
2881
2882                                        data_src = (uint8_t *)sg_virt(sgde);
2883                                        data_len = sgde->length;
2884                                        if ((data_len & (blksize - 1)) == 0)
2885                                                chk_guard = 1;
2886                                }
2887                        }
2888
2889                        /* Goto the next Protection data segment */
2890                        sgpe = sg_next(sgpe);
2891                        if (sgpe) {
2892                                src = (struct scsi_dif_tuple *)sg_virt(sgpe);
2893                                len = sgpe->length;
2894                        } else {
2895                                src = NULL;
2896                        }
2897                        protsegcnt--;
2898                }
2899        }
2900out:
2901        if (err_type == BGS_GUARD_ERR_MASK) {
2902                scsi_build_sense(cmd, 1, ILLEGAL_REQUEST, 0x10, 0x1);
2903                set_host_byte(cmd, DID_ABORT);
2904                phba->bg_guard_err_cnt++;
2905                lpfc_printf_log(phba, KERN_WARNING, LOG_FCP | LOG_BG,
2906                                "9069 BLKGRD: reftag %x grd_tag err %x != %x\n",
2907                                scsi_prot_ref_tag(cmd),
2908                                sum, guard_tag);
2909
2910        } else if (err_type == BGS_REFTAG_ERR_MASK) {
2911                scsi_build_sense(cmd, 1, ILLEGAL_REQUEST, 0x10, 0x3);
2912                set_host_byte(cmd, DID_ABORT);
2913
2914                phba->bg_reftag_err_cnt++;
2915                lpfc_printf_log(phba, KERN_WARNING, LOG_FCP | LOG_BG,
2916                                "9066 BLKGRD: reftag %x ref_tag err %x != %x\n",
2917                                scsi_prot_ref_tag(cmd),
2918                                ref_tag, start_ref_tag);
2919
2920        } else if (err_type == BGS_APPTAG_ERR_MASK) {
2921                scsi_build_sense(cmd, 1, ILLEGAL_REQUEST, 0x10, 0x2);
2922                set_host_byte(cmd, DID_ABORT);
2923
2924                phba->bg_apptag_err_cnt++;
2925                lpfc_printf_log(phba, KERN_WARNING, LOG_FCP | LOG_BG,
2926                                "9041 BLKGRD: reftag %x app_tag err %x != %x\n",
2927                                scsi_prot_ref_tag(cmd),
2928                                app_tag, start_app_tag);
2929        }
2930}
2931
2932/*
2933 * This function checks for BlockGuard errors detected by
2934 * the HBA.  In case of errors, the ASC/ASCQ fields in the
2935 * sense buffer will be set accordingly, paired with
2936 * ILLEGAL_REQUEST to signal to the kernel that the HBA
2937 * detected corruption.
2938 *
2939 * Returns:
2940 *  0 - No error found
2941 *  1 - BlockGuard error found
2942 * -1 - Internal error (bad profile, ...etc)
2943 */
2944static int
2945lpfc_sli4_parse_bg_err(struct lpfc_hba *phba, struct lpfc_io_buf *lpfc_cmd,
2946                       struct lpfc_wcqe_complete *wcqe)
2947{
2948        struct scsi_cmnd *cmd = lpfc_cmd->pCmd;
2949        int ret = 0;
2950        u32 status = bf_get(lpfc_wcqe_c_status, wcqe);
2951        u32 bghm = 0;
2952        u32 bgstat = 0;
2953        u64 failing_sector = 0;
2954
2955        if (status == CQE_STATUS_DI_ERROR) {
2956                if (bf_get(lpfc_wcqe_c_bg_ge, wcqe)) /* Guard Check failed */
2957                        bgstat |= BGS_GUARD_ERR_MASK;
2958                if (bf_get(lpfc_wcqe_c_bg_ae, wcqe)) /* AppTag Check failed */
2959                        bgstat |= BGS_APPTAG_ERR_MASK;
2960                if (bf_get(lpfc_wcqe_c_bg_re, wcqe)) /* RefTag Check failed */
2961                        bgstat |= BGS_REFTAG_ERR_MASK;
2962
2963                /* Check to see if there was any good data before the error */
2964                if (bf_get(lpfc_wcqe_c_bg_tdpv, wcqe)) {
2965                        bgstat |= BGS_HI_WATER_MARK_PRESENT_MASK;
2966                        bghm = wcqe->total_data_placed;
2967                }
2968
2969                /*
2970                 * Set ALL the error bits to indicate we don't know what
2971                 * type of error it is.
2972                 */
2973                if (!bgstat)
2974                        bgstat |= (BGS_REFTAG_ERR_MASK | BGS_APPTAG_ERR_MASK |
2975                                BGS_GUARD_ERR_MASK);
2976        }
2977
2978        if (lpfc_bgs_get_guard_err(bgstat)) {
2979                ret = 1;
2980
2981                scsi_build_sense(cmd, 1, ILLEGAL_REQUEST, 0x10, 0x1);
2982                set_host_byte(cmd, DID_ABORT);
2983                phba->bg_guard_err_cnt++;
2984                lpfc_printf_log(phba, KERN_WARNING, LOG_FCP | LOG_BG,
2985                                "9059 BLKGRD: Guard Tag error in cmd"
2986                                " 0x%x lba 0x%llx blk cnt 0x%x "
2987                                "bgstat=x%x bghm=x%x\n", cmd->cmnd[0],
2988                                (unsigned long long)scsi_get_lba(cmd),
2989                                scsi_logical_block_count(cmd), bgstat, bghm);
2990        }
2991
2992        if (lpfc_bgs_get_reftag_err(bgstat)) {
2993                ret = 1;
2994
2995                scsi_build_sense(cmd, 1, ILLEGAL_REQUEST, 0x10, 0x3);
2996                set_host_byte(cmd, DID_ABORT);
2997
2998                phba->bg_reftag_err_cnt++;
2999                lpfc_printf_log(phba, KERN_WARNING, LOG_FCP | LOG_BG,
3000                                "9060 BLKGRD: Ref Tag error in cmd"
3001                                " 0x%x lba 0x%llx blk cnt 0x%x "
3002                                "bgstat=x%x bghm=x%x\n", cmd->cmnd[0],
3003                                (unsigned long long)scsi_get_lba(cmd),
3004                                scsi_logical_block_count(cmd), bgstat, bghm);
3005        }
3006
3007        if (lpfc_bgs_get_apptag_err(bgstat)) {
3008                ret = 1;
3009
3010                scsi_build_sense(cmd, 1, ILLEGAL_REQUEST, 0x10, 0x2);
3011                set_host_byte(cmd, DID_ABORT);
3012
3013                phba->bg_apptag_err_cnt++;
3014                lpfc_printf_log(phba, KERN_WARNING, LOG_FCP | LOG_BG,
3015                                "9062 BLKGRD: App Tag error in cmd"
3016                                " 0x%x lba 0x%llx blk cnt 0x%x "
3017                                "bgstat=x%x bghm=x%x\n", cmd->cmnd[0],
3018                                (unsigned long long)scsi_get_lba(cmd),
3019                                scsi_logical_block_count(cmd), bgstat, bghm);
3020        }
3021
3022        if (lpfc_bgs_get_hi_water_mark_present(bgstat)) {
3023                /*
3024                 * setup sense data descriptor 0 per SPC-4 as an information
3025                 * field, and put the failing LBA in it.
3026                 * This code assumes there was also a guard/app/ref tag error
3027                 * indication.
3028                 */
3029                cmd->sense_buffer[7] = 0xc;   /* Additional sense length */
3030                cmd->sense_buffer[8] = 0;     /* Information descriptor type */
3031                cmd->sense_buffer[9] = 0xa;   /* Additional descriptor length */
3032                cmd->sense_buffer[10] = 0x80; /* Validity bit */
3033
3034                /* bghm is a "on the wire" FC frame based count */
3035                switch (scsi_get_prot_op(cmd)) {
3036                case SCSI_PROT_READ_INSERT:
3037                case SCSI_PROT_WRITE_STRIP:
3038                        bghm /= cmd->device->sector_size;
3039                        break;
3040                case SCSI_PROT_READ_STRIP:
3041                case SCSI_PROT_WRITE_INSERT:
3042                case SCSI_PROT_READ_PASS:
3043                case SCSI_PROT_WRITE_PASS:
3044                        bghm /= (cmd->device->sector_size +
3045                                sizeof(struct scsi_dif_tuple));
3046                        break;
3047                }
3048
3049                failing_sector = scsi_get_lba(cmd);
3050                failing_sector += bghm;
3051
3052                /* Descriptor Information */
3053                put_unaligned_be64(failing_sector, &cmd->sense_buffer[12]);
3054        }
3055
3056        if (!ret) {
3057                /* No error was reported - problem in FW? */
3058                lpfc_printf_log(phba, KERN_WARNING, LOG_FCP | LOG_BG,
3059                                "9068 BLKGRD: Unknown error in cmd"
3060                                " 0x%x lba 0x%llx blk cnt 0x%x "
3061                                "bgstat=x%x bghm=x%x\n", cmd->cmnd[0],
3062                                (unsigned long long)scsi_get_lba(cmd),
3063                                scsi_logical_block_count(cmd), bgstat, bghm);
3064
3065                /* Calculate what type of error it was */
3066                lpfc_calc_bg_err(phba, lpfc_cmd);
3067        }
3068        return ret;
3069}
3070
3071/*
3072 * This function checks for BlockGuard errors detected by
3073 * the HBA.  In case of errors, the ASC/ASCQ fields in the
3074 * sense buffer will be set accordingly, paired with
3075 * ILLEGAL_REQUEST to signal to the kernel that the HBA
3076 * detected corruption.
3077 *
3078 * Returns:
3079 *  0 - No error found
3080 *  1 - BlockGuard error found
3081 * -1 - Internal error (bad profile, ...etc)
3082 */
3083static int
3084lpfc_parse_bg_err(struct lpfc_hba *phba, struct lpfc_io_buf *lpfc_cmd,
3085                  struct lpfc_iocbq *pIocbOut)
3086{
3087        struct scsi_cmnd *cmd = lpfc_cmd->pCmd;
3088        struct sli3_bg_fields *bgf = &pIocbOut->iocb.unsli3.sli3_bg;
3089        int ret = 0;
3090        uint32_t bghm = bgf->bghm;
3091        uint32_t bgstat = bgf->bgstat;
3092        uint64_t failing_sector = 0;
3093
3094        if (lpfc_bgs_get_invalid_prof(bgstat)) {
3095                cmd->result = DID_ERROR << 16;
3096                lpfc_printf_log(phba, KERN_WARNING, LOG_FCP | LOG_BG,
3097                                "9072 BLKGRD: Invalid BG Profile in cmd "
3098                                "0x%x reftag 0x%x blk cnt 0x%x "
3099                                "bgstat=x%x bghm=x%x\n", cmd->cmnd[0],
3100                                scsi_prot_ref_tag(cmd),
3101                                scsi_logical_block_count(cmd), bgstat, bghm);
3102                ret = (-1);
3103                goto out;
3104        }
3105
3106        if (lpfc_bgs_get_uninit_dif_block(bgstat)) {
3107                cmd->result = DID_ERROR << 16;
3108                lpfc_printf_log(phba, KERN_WARNING, LOG_FCP | LOG_BG,
3109                                "9073 BLKGRD: Invalid BG PDIF Block in cmd "
3110                                "0x%x reftag 0x%x blk cnt 0x%x "
3111                                "bgstat=x%x bghm=x%x\n", cmd->cmnd[0],
3112                                scsi_prot_ref_tag(cmd),
3113                                scsi_logical_block_count(cmd), bgstat, bghm);
3114                ret = (-1);
3115                goto out;
3116        }
3117
3118        if (lpfc_bgs_get_guard_err(bgstat)) {
3119                ret = 1;
3120
3121                scsi_build_sense(cmd, 1, ILLEGAL_REQUEST, 0x10, 0x1);
3122                set_host_byte(cmd, DID_ABORT);
3123                phba->bg_guard_err_cnt++;
3124                lpfc_printf_log(phba, KERN_WARNING, LOG_FCP | LOG_BG,
3125                                "9055 BLKGRD: Guard Tag error in cmd "
3126                                "0x%x reftag 0x%x blk cnt 0x%x "
3127                                "bgstat=x%x bghm=x%x\n", cmd->cmnd[0],
3128                                scsi_prot_ref_tag(cmd),
3129                                scsi_logical_block_count(cmd), bgstat, bghm);
3130        }
3131
3132        if (lpfc_bgs_get_reftag_err(bgstat)) {
3133                ret = 1;
3134
3135                scsi_build_sense(cmd, 1, ILLEGAL_REQUEST, 0x10, 0x3);
3136                set_host_byte(cmd, DID_ABORT);
3137
3138                phba->bg_reftag_err_cnt++;
3139                lpfc_printf_log(phba, KERN_WARNING, LOG_FCP | LOG_BG,
3140                                "9056 BLKGRD: Ref Tag error in cmd "
3141                                "0x%x reftag 0x%x blk cnt 0x%x "
3142                                "bgstat=x%x bghm=x%x\n", cmd->cmnd[0],
3143                                scsi_prot_ref_tag(cmd),
3144                                scsi_logical_block_count(cmd), bgstat, bghm);
3145        }
3146
3147        if (lpfc_bgs_get_apptag_err(bgstat)) {
3148                ret = 1;
3149
3150                scsi_build_sense(cmd, 1, ILLEGAL_REQUEST, 0x10, 0x2);
3151                set_host_byte(cmd, DID_ABORT);
3152
3153                phba->bg_apptag_err_cnt++;
3154                lpfc_printf_log(phba, KERN_WARNING, LOG_FCP | LOG_BG,
3155                                "9061 BLKGRD: App Tag error in cmd "
3156                                "0x%x reftag 0x%x blk cnt 0x%x "
3157                                "bgstat=x%x bghm=x%x\n", cmd->cmnd[0],
3158                                scsi_prot_ref_tag(cmd),
3159                                scsi_logical_block_count(cmd), bgstat, bghm);
3160        }
3161
3162        if (lpfc_bgs_get_hi_water_mark_present(bgstat)) {
3163                /*
3164                 * setup sense data descriptor 0 per SPC-4 as an information
3165                 * field, and put the failing LBA in it.
3166                 * This code assumes there was also a guard/app/ref tag error
3167                 * indication.
3168                 */
3169                cmd->sense_buffer[7] = 0xc;   /* Additional sense length */
3170                cmd->sense_buffer[8] = 0;     /* Information descriptor type */
3171                cmd->sense_buffer[9] = 0xa;   /* Additional descriptor length */
3172                cmd->sense_buffer[10] = 0x80; /* Validity bit */
3173
3174                /* bghm is a "on the wire" FC frame based count */
3175                switch (scsi_get_prot_op(cmd)) {
3176                case SCSI_PROT_READ_INSERT:
3177                case SCSI_PROT_WRITE_STRIP:
3178                        bghm /= cmd->device->sector_size;
3179                        break;
3180                case SCSI_PROT_READ_STRIP:
3181                case SCSI_PROT_WRITE_INSERT:
3182                case SCSI_PROT_READ_PASS:
3183                case SCSI_PROT_WRITE_PASS:
3184                        bghm /= (cmd->device->sector_size +
3185                                sizeof(struct scsi_dif_tuple));
3186                        break;
3187                }
3188
3189                failing_sector = scsi_get_lba(cmd);
3190                failing_sector += bghm;
3191
3192                /* Descriptor Information */
3193                put_unaligned_be64(failing_sector, &cmd->sense_buffer[12]);
3194        }
3195
3196        if (!ret) {
3197                /* No error was reported - problem in FW? */
3198                lpfc_printf_log(phba, KERN_WARNING, LOG_FCP | LOG_BG,
3199                                "9057 BLKGRD: Unknown error in cmd "
3200                                "0x%x reftag 0x%x blk cnt 0x%x "
3201                                "bgstat=x%x bghm=x%x\n", cmd->cmnd[0],
3202                                scsi_prot_ref_tag(cmd),
3203                                scsi_logical_block_count(cmd), bgstat, bghm);
3204
3205                /* Calculate what type of error it was */
3206                lpfc_calc_bg_err(phba, lpfc_cmd);
3207        }
3208out:
3209        return ret;
3210}
3211
3212/**
3213 * lpfc_scsi_prep_dma_buf_s4 - DMA mapping for scsi buffer to SLI4 IF spec
3214 * @phba: The Hba for which this call is being executed.
3215 * @lpfc_cmd: The scsi buffer which is going to be mapped.
3216 *
3217 * This routine does the pci dma mapping for scatter-gather list of scsi cmnd
3218 * field of @lpfc_cmd for device with SLI-4 interface spec.
3219 *
3220 * Return codes:
3221 *      2 - Error - Do not retry
3222 *      1 - Error - Retry
3223 *      0 - Success
3224 **/
3225static int
3226lpfc_scsi_prep_dma_buf_s4(struct lpfc_hba *phba, struct lpfc_io_buf *lpfc_cmd)
3227{
3228        struct scsi_cmnd *scsi_cmnd = lpfc_cmd->pCmd;
3229        struct scatterlist *sgel = NULL;
3230        struct fcp_cmnd *fcp_cmnd = lpfc_cmd->fcp_cmnd;
3231        struct sli4_sge *sgl = (struct sli4_sge *)lpfc_cmd->dma_sgl;
3232        struct sli4_sge *first_data_sgl;
3233        struct lpfc_iocbq *pwqeq = &lpfc_cmd->cur_iocbq;
3234        struct lpfc_vport *vport = phba->pport;
3235        union lpfc_wqe128 *wqe = &pwqeq->wqe;
3236        dma_addr_t physaddr;
3237        uint32_t dma_len;
3238        uint32_t dma_offset = 0;
3239        int nseg, i, j;
3240        struct ulp_bde64 *bde;
3241        bool lsp_just_set = false;
3242        struct sli4_hybrid_sgl *sgl_xtra = NULL;
3243
3244        /*
3245         * There are three possibilities here - use scatter-gather segment, use
3246         * the single mapping, or neither.  Start the lpfc command prep by
3247         * bumping the bpl beyond the fcp_cmnd and fcp_rsp regions to the first
3248         * data bde entry.
3249         */
3250        if (scsi_sg_count(scsi_cmnd)) {
3251                /*
3252                 * The driver stores the segment count returned from dma_map_sg
3253                 * because this a count of dma-mappings used to map the use_sg
3254                 * pages.  They are not guaranteed to be the same for those
3255                 * architectures that implement an IOMMU.
3256                 */
3257
3258                nseg = scsi_dma_map(scsi_cmnd);
3259                if (unlikely(nseg <= 0))
3260                        return 1;
3261                sgl += 1;
3262                /* clear the last flag in the fcp_rsp map entry */
3263                sgl->word2 = le32_to_cpu(sgl->word2);
3264                bf_set(lpfc_sli4_sge_last, sgl, 0);
3265                sgl->word2 = cpu_to_le32(sgl->word2);
3266                sgl += 1;
3267                first_data_sgl = sgl;
3268                lpfc_cmd->seg_cnt = nseg;
3269                if (!phba->cfg_xpsgl &&
3270                    lpfc_cmd->seg_cnt > phba->cfg_sg_seg_cnt) {
3271                        lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
3272                                        "9074 BLKGRD:"
3273                                        " %s: Too many sg segments from "
3274                                        "dma_map_sg.  Config %d, seg_cnt %d\n",
3275                                        __func__, phba->cfg_sg_seg_cnt,
3276                                        lpfc_cmd->seg_cnt);
3277                        WARN_ON_ONCE(lpfc_cmd->seg_cnt > phba->cfg_sg_seg_cnt);
3278                        lpfc_cmd->seg_cnt = 0;
3279                        scsi_dma_unmap(scsi_cmnd);
3280                        return 2;
3281                }
3282
3283                /*
3284                 * The driver established a maximum scatter-gather segment count
3285                 * during probe that limits the number of sg elements in any
3286                 * single scsi command.  Just run through the seg_cnt and format
3287                 * the sge's.
3288                 * When using SLI-3 the driver will try to fit all the BDEs into
3289                 * the IOCB. If it can't then the BDEs get added to a BPL as it
3290                 * does for SLI-2 mode.
3291                 */
3292
3293                /* for tracking segment boundaries */
3294                sgel = scsi_sglist(scsi_cmnd);
3295                j = 2;
3296                for (i = 0; i < nseg; i++) {
3297                        sgl->word2 = 0;
3298                        if (nseg == 1) {
3299                                bf_set(lpfc_sli4_sge_last, sgl, 1);
3300                                bf_set(lpfc_sli4_sge_type, sgl,
3301                                       LPFC_SGE_TYPE_DATA);
3302                        } else {
3303                                bf_set(lpfc_sli4_sge_last, sgl, 0);
3304
3305                                /* do we need to expand the segment */
3306                                if (!lsp_just_set &&
3307                                    !((j + 1) % phba->border_sge_num) &&
3308                                    ((nseg - 1) != i)) {
3309                                        /* set LSP type */
3310                                        bf_set(lpfc_sli4_sge_type, sgl,
3311                                               LPFC_SGE_TYPE_LSP);
3312
3313                                        sgl_xtra = lpfc_get_sgl_per_hdwq(
3314                                                        phba, lpfc_cmd);
3315
3316                                        if (unlikely(!sgl_xtra)) {
3317                                                lpfc_cmd->seg_cnt = 0;
3318                                                scsi_dma_unmap(scsi_cmnd);
3319                                                return 1;
3320                                        }
3321                                        sgl->addr_lo = cpu_to_le32(putPaddrLow(
3322                                                       sgl_xtra->dma_phys_sgl));
3323                                        sgl->addr_hi = cpu_to_le32(putPaddrHigh(
3324                                                       sgl_xtra->dma_phys_sgl));
3325
3326                                } else {
3327                                        bf_set(lpfc_sli4_sge_type, sgl,
3328                                               LPFC_SGE_TYPE_DATA);
3329                                }
3330                        }
3331
3332                        if (!(bf_get(lpfc_sli4_sge_type, sgl) &
3333                                     LPFC_SGE_TYPE_LSP)) {
3334                                if ((nseg - 1) == i)
3335                                        bf_set(lpfc_sli4_sge_last, sgl, 1);
3336
3337                                physaddr = sg_dma_address(sgel);
3338                                dma_len = sg_dma_len(sgel);
3339                                sgl->addr_lo = cpu_to_le32(putPaddrLow(
3340                                                           physaddr));
3341                                sgl->addr_hi = cpu_to_le32(putPaddrHigh(
3342                                                           physaddr));
3343
3344                                bf_set(lpfc_sli4_sge_offset, sgl, dma_offset);
3345                                sgl->word2 = cpu_to_le32(sgl->word2);
3346                                sgl->sge_len = cpu_to_le32(dma_len);
3347
3348                                dma_offset += dma_len;
3349                                sgel = sg_next(sgel);
3350
3351                                sgl++;
3352                                lsp_just_set = false;
3353
3354                        } else {
3355                                sgl->word2 = cpu_to_le32(sgl->word2);
3356                                sgl->sge_len = cpu_to_le32(
3357                                                     phba->cfg_sg_dma_buf_size);
3358
3359                                sgl = (struct sli4_sge *)sgl_xtra->dma_sgl;
3360                                i = i - 1;
3361
3362                                lsp_just_set = true;
3363                        }
3364
3365                        j++;
3366                }
3367
3368                /* PBDE support for first data SGE only.
3369                 * For FCoE, we key off Performance Hints.
3370                 * For FC, we key off lpfc_enable_pbde.
3371                 */
3372                if (nseg == 1 &&
3373                    ((phba->sli3_options & LPFC_SLI4_PERFH_ENABLED) ||
3374                     phba->cfg_enable_pbde)) {
3375                        /* Words 13-15 */
3376                        bde = (struct ulp_bde64 *)
3377                                &wqe->words[13];
3378                        bde->addrLow = first_data_sgl->addr_lo;
3379                        bde->addrHigh = first_data_sgl->addr_hi;
3380                        bde->tus.f.bdeSize =
3381                                        le32_to_cpu(first_data_sgl->sge_len);
3382                        bde->tus.f.bdeFlags = BUFF_TYPE_BDE_64;
3383                        bde->tus.w = cpu_to_le32(bde->tus.w);
3384
3385                        /* Word 11 - set PBDE bit */
3386                        bf_set(wqe_pbde, &wqe->generic.wqe_com, 1);
3387                } else {
3388                        memset(&wqe->words[13], 0, (sizeof(uint32_t) * 3));
3389                        /* Word 11 - PBDE bit disabled by default template */
3390                }
3391        } else {
3392                sgl += 1;
3393                /* set the last flag in the fcp_rsp map entry */
3394                sgl->word2 = le32_to_cpu(sgl->word2);
3395                bf_set(lpfc_sli4_sge_last, sgl, 1);
3396                sgl->word2 = cpu_to_le32(sgl->word2);
3397
3398                if ((phba->sli3_options & LPFC_SLI4_PERFH_ENABLED) ||
3399                    phba->cfg_enable_pbde) {
3400                        bde = (struct ulp_bde64 *)
3401                                &wqe->words[13];
3402                        memset(bde, 0, (sizeof(uint32_t) * 3));
3403                }
3404        }
3405
3406        /*
3407         * Finish initializing those IOCB fields that are dependent on the
3408         * scsi_cmnd request_buffer.  Note that for SLI-2 the bdeSize is
3409         * explicitly reinitialized.
3410         * all iocb memory resources are reused.
3411         */
3412        fcp_cmnd->fcpDl = cpu_to_be32(scsi_bufflen(scsi_cmnd));
3413        /* Set first-burst provided it was successfully negotiated */
3414        if (!(phba->hba_flag & HBA_FCOE_MODE) &&
3415            vport->cfg_first_burst_size &&
3416            scsi_cmnd->sc_data_direction == DMA_TO_DEVICE) {
3417                u32 init_len, total_len;
3418
3419                total_len = be32_to_cpu(fcp_cmnd->fcpDl);
3420                init_len = min(total_len, vport->cfg_first_burst_size);
3421
3422                /* Word 4 & 5 */
3423                wqe->fcp_iwrite.initial_xfer_len = init_len;
3424                wqe->fcp_iwrite.total_xfer_len = total_len;
3425        } else {
3426                /* Word 4 */
3427                wqe->fcp_iwrite.total_xfer_len =
3428                        be32_to_cpu(fcp_cmnd->fcpDl);
3429        }
3430
3431        /*
3432         * If the OAS driver feature is enabled and the lun is enabled for
3433         * OAS, set the oas iocb related flags.
3434         */
3435        if ((phba->cfg_fof) && ((struct lpfc_device_data *)
3436                scsi_cmnd->device->hostdata)->oas_enabled) {
3437                lpfc_cmd->cur_iocbq.iocb_flag |= (LPFC_IO_OAS | LPFC_IO_FOF);
3438                lpfc_cmd->cur_iocbq.priority = ((struct lpfc_device_data *)
3439                        scsi_cmnd->device->hostdata)->priority;
3440
3441                /* Word 10 */
3442                bf_set(wqe_oas, &wqe->generic.wqe_com, 1);
3443                bf_set(wqe_ccpe, &wqe->generic.wqe_com, 1);
3444
3445                if (lpfc_cmd->cur_iocbq.priority)
3446                        bf_set(wqe_ccp, &wqe->generic.wqe_com,
3447                               (lpfc_cmd->cur_iocbq.priority << 1));
3448                else
3449                        bf_set(wqe_ccp, &wqe->generic.wqe_com,
3450                               (phba->cfg_XLanePriority << 1));
3451        }
3452
3453        return 0;
3454}
3455
3456/**
3457 * lpfc_bg_scsi_prep_dma_buf_s4 - DMA mapping for scsi buffer to SLI4 IF spec
3458 * @phba: The Hba for which this call is being executed.
3459 * @lpfc_cmd: The scsi buffer which is going to be mapped.
3460 *
3461 * This is the protection/DIF aware version of
3462 * lpfc_scsi_prep_dma_buf(). It may be a good idea to combine the
3463 * two functions eventually, but for now, it's here
3464 * Return codes:
3465 *      2 - Error - Do not retry
3466 *      1 - Error - Retry
3467 *      0 - Success
3468 **/
3469static int
3470lpfc_bg_scsi_prep_dma_buf_s4(struct lpfc_hba *phba,
3471                struct lpfc_io_buf *lpfc_cmd)
3472{
3473        struct scsi_cmnd *scsi_cmnd = lpfc_cmd->pCmd;
3474        struct fcp_cmnd *fcp_cmnd = lpfc_cmd->fcp_cmnd;
3475        struct sli4_sge *sgl = (struct sli4_sge *)(lpfc_cmd->dma_sgl);
3476        struct lpfc_iocbq *pwqeq = &lpfc_cmd->cur_iocbq;
3477        union lpfc_wqe128 *wqe = &pwqeq->wqe;
3478        uint32_t num_sge = 0;
3479        int datasegcnt, protsegcnt, datadir = scsi_cmnd->sc_data_direction;
3480        int prot_group_type = 0;
3481        int fcpdl;
3482        int ret = 1;
3483        struct lpfc_vport *vport = phba->pport;
3484
3485        /*
3486         * Start the lpfc command prep by bumping the sgl beyond fcp_cmnd
3487         *  fcp_rsp regions to the first data sge entry
3488         */
3489        if (scsi_sg_count(scsi_cmnd)) {
3490                /*
3491                 * The driver stores the segment count returned from dma_map_sg
3492                 * because this a count of dma-mappings used to map the use_sg
3493                 * pages.  They are not guaranteed to be the same for those
3494                 * architectures that implement an IOMMU.
3495                 */
3496                datasegcnt = dma_map_sg(&phba->pcidev->dev,
3497                                        scsi_sglist(scsi_cmnd),
3498                                        scsi_sg_count(scsi_cmnd), datadir);
3499                if (unlikely(!datasegcnt))
3500                        return 1;
3501
3502                sgl += 1;
3503                /* clear the last flag in the fcp_rsp map entry */
3504                sgl->word2 = le32_to_cpu(sgl->word2);
3505                bf_set(lpfc_sli4_sge_last, sgl, 0);
3506                sgl->word2 = cpu_to_le32(sgl->word2);
3507
3508                sgl += 1;
3509                lpfc_cmd->seg_cnt = datasegcnt;
3510
3511                /* First check if data segment count from SCSI Layer is good */
3512                if (lpfc_cmd->seg_cnt > phba->cfg_sg_seg_cnt &&
3513                    !phba->cfg_xpsgl) {
3514                        WARN_ON_ONCE(lpfc_cmd->seg_cnt > phba->cfg_sg_seg_cnt);
3515                        ret = 2;
3516                        goto err;
3517                }
3518
3519                prot_group_type = lpfc_prot_group_type(phba, scsi_cmnd);
3520
3521                switch (prot_group_type) {
3522                case LPFC_PG_TYPE_NO_DIF:
3523                        /* Here we need to add a DISEED to the count */
3524                        if (((lpfc_cmd->seg_cnt + 1) >
3525                                        phba->cfg_total_seg_cnt) &&
3526                            !phba->cfg_xpsgl) {
3527                                ret = 2;
3528                                goto err;
3529                        }
3530
3531                        num_sge = lpfc_bg_setup_sgl(phba, scsi_cmnd, sgl,
3532                                        datasegcnt, lpfc_cmd);
3533
3534                        /* we should have 2 or more entries in buffer list */
3535                        if (num_sge < 2) {
3536                                ret = 2;
3537                                goto err;
3538                        }
3539                        break;
3540
3541                case LPFC_PG_TYPE_DIF_BUF:
3542                        /*
3543                         * This type indicates that protection buffers are
3544                         * passed to the driver, so that needs to be prepared
3545                         * for DMA
3546                         */
3547                        protsegcnt = dma_map_sg(&phba->pcidev->dev,
3548                                        scsi_prot_sglist(scsi_cmnd),
3549                                        scsi_prot_sg_count(scsi_cmnd), datadir);
3550                        if (unlikely(!protsegcnt)) {
3551                                scsi_dma_unmap(scsi_cmnd);
3552                                return 1;
3553                        }
3554
3555                        lpfc_cmd->prot_seg_cnt = protsegcnt;
3556                        /*
3557                         * There is a minimun of 3 SGEs used for every
3558                         * protection data segment.
3559                         */
3560                        if (((lpfc_cmd->prot_seg_cnt * 3) >
3561                                        (phba->cfg_total_seg_cnt - 2)) &&
3562                            !phba->cfg_xpsgl) {
3563                                ret = 2;
3564                                goto err;
3565                        }
3566
3567                        num_sge = lpfc_bg_setup_sgl_prot(phba, scsi_cmnd, sgl,
3568                                        datasegcnt, protsegcnt, lpfc_cmd);
3569
3570                        /* we should have 3 or more entries in buffer list */
3571                        if (num_sge < 3 ||
3572                            (num_sge > phba->cfg_total_seg_cnt &&
3573                             !phba->cfg_xpsgl)) {
3574                                ret = 2;
3575                                goto err;
3576                        }
3577                        break;
3578
3579                case LPFC_PG_TYPE_INVALID:
3580                default:
3581                        scsi_dma_unmap(scsi_cmnd);
3582                        lpfc_cmd->seg_cnt = 0;
3583
3584                        lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
3585                                        "9083 Unexpected protection group %i\n",
3586                                        prot_group_type);
3587                        return 2;
3588                }
3589        }
3590
3591        switch (scsi_get_prot_op(scsi_cmnd)) {
3592        case SCSI_PROT_WRITE_STRIP:
3593        case SCSI_PROT_READ_STRIP:
3594                lpfc_cmd->cur_iocbq.iocb_flag |= LPFC_IO_DIF_STRIP;
3595                break;
3596        case SCSI_PROT_WRITE_INSERT:
3597        case SCSI_PROT_READ_INSERT:
3598                lpfc_cmd->cur_iocbq.iocb_flag |= LPFC_IO_DIF_INSERT;
3599                break;
3600        case SCSI_PROT_WRITE_PASS:
3601        case SCSI_PROT_READ_PASS:
3602                lpfc_cmd->cur_iocbq.iocb_flag |= LPFC_IO_DIF_PASS;
3603                break;
3604        }
3605
3606        fcpdl = lpfc_bg_scsi_adjust_dl(phba, lpfc_cmd);
3607        fcp_cmnd->fcpDl = be32_to_cpu(fcpdl);
3608
3609        /* Set first-burst provided it was successfully negotiated */
3610        if (!(phba->hba_flag & HBA_FCOE_MODE) &&
3611            vport->cfg_first_burst_size &&
3612            scsi_cmnd->sc_data_direction == DMA_TO_DEVICE) {
3613                u32 init_len, total_len;
3614
3615                total_len = be32_to_cpu(fcp_cmnd->fcpDl);
3616                init_len = min(total_len, vport->cfg_first_burst_size);
3617
3618                /* Word 4 & 5 */
3619                wqe->fcp_iwrite.initial_xfer_len = init_len;
3620                wqe->fcp_iwrite.total_xfer_len = total_len;
3621        } else {
3622                /* Word 4 */
3623                wqe->fcp_iwrite.total_xfer_len =
3624                        be32_to_cpu(fcp_cmnd->fcpDl);
3625        }
3626
3627        /*
3628         * If the OAS driver feature is enabled and the lun is enabled for
3629         * OAS, set the oas iocb related flags.
3630         */
3631        if ((phba->cfg_fof) && ((struct lpfc_device_data *)
3632                scsi_cmnd->device->hostdata)->oas_enabled) {
3633                lpfc_cmd->cur_iocbq.iocb_flag |= (LPFC_IO_OAS | LPFC_IO_FOF);
3634
3635                /* Word 10 */
3636                bf_set(wqe_oas, &wqe->generic.wqe_com, 1);
3637                bf_set(wqe_ccpe, &wqe->generic.wqe_com, 1);
3638                bf_set(wqe_ccp, &wqe->generic.wqe_com,
3639                       (phba->cfg_XLanePriority << 1));
3640        }
3641
3642        /* Word 7. DIF Flags */
3643        if (lpfc_cmd->cur_iocbq.iocb_flag & LPFC_IO_DIF_PASS)
3644                bf_set(wqe_dif, &wqe->generic.wqe_com, LPFC_WQE_DIF_PASSTHRU);
3645        else if (lpfc_cmd->cur_iocbq.iocb_flag & LPFC_IO_DIF_STRIP)
3646                bf_set(wqe_dif, &wqe->generic.wqe_com, LPFC_WQE_DIF_STRIP);
3647        else if (lpfc_cmd->cur_iocbq.iocb_flag & LPFC_IO_DIF_INSERT)
3648                bf_set(wqe_dif, &wqe->generic.wqe_com, LPFC_WQE_DIF_INSERT);
3649
3650        lpfc_cmd->cur_iocbq.iocb_flag &= ~(LPFC_IO_DIF_PASS |
3651                                 LPFC_IO_DIF_STRIP | LPFC_IO_DIF_INSERT);
3652
3653        return 0;
3654err:
3655        if (lpfc_cmd->seg_cnt)
3656                scsi_dma_unmap(scsi_cmnd);
3657        if (lpfc_cmd->prot_seg_cnt)
3658                dma_unmap_sg(&phba->pcidev->dev, scsi_prot_sglist(scsi_cmnd),
3659                             scsi_prot_sg_count(scsi_cmnd),
3660                             scsi_cmnd->sc_data_direction);
3661
3662        lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
3663                        "9084 Cannot setup S/G List for HBA"
3664                        "IO segs %d/%d SGL %d SCSI %d: %d %d\n",
3665                        lpfc_cmd->seg_cnt, lpfc_cmd->prot_seg_cnt,
3666                        phba->cfg_total_seg_cnt, phba->cfg_sg_seg_cnt,
3667                        prot_group_type, num_sge);
3668
3669        lpfc_cmd->seg_cnt = 0;
3670        lpfc_cmd->prot_seg_cnt = 0;
3671        return ret;
3672}
3673
3674/**
3675 * lpfc_scsi_prep_dma_buf - Wrapper function for DMA mapping of scsi buffer
3676 * @phba: The Hba for which this call is being executed.
3677 * @lpfc_cmd: The scsi buffer which is going to be mapped.
3678 *
3679 * This routine wraps the actual DMA mapping function pointer from the
3680 * lpfc_hba struct.
3681 *
3682 * Return codes:
3683 *      1 - Error
3684 *      0 - Success
3685 **/
3686static inline int
3687lpfc_scsi_prep_dma_buf(struct lpfc_hba *phba, struct lpfc_io_buf *lpfc_cmd)
3688{
3689        return phba->lpfc_scsi_prep_dma_buf(phba, lpfc_cmd);
3690}
3691
3692/**
3693 * lpfc_bg_scsi_prep_dma_buf - Wrapper function for DMA mapping of scsi buffer
3694 * using BlockGuard.
3695 * @phba: The Hba for which this call is being executed.
3696 * @lpfc_cmd: The scsi buffer which is going to be mapped.
3697 *
3698 * This routine wraps the actual DMA mapping function pointer from the
3699 * lpfc_hba struct.
3700 *
3701 * Return codes:
3702 *      1 - Error
3703 *      0 - Success
3704 **/
3705static inline int
3706lpfc_bg_scsi_prep_dma_buf(struct lpfc_hba *phba, struct lpfc_io_buf *lpfc_cmd)
3707{
3708        return phba->lpfc_bg_scsi_prep_dma_buf(phba, lpfc_cmd);
3709}
3710
3711/**
3712 * lpfc_scsi_prep_cmnd_buf - Wrapper function for IOCB/WQE mapping of scsi
3713 * buffer
3714 * @vport: Pointer to vport object.
3715 * @lpfc_cmd: The scsi buffer which is going to be mapped.
3716 * @tmo: Timeout value for IO
3717 *
3718 * This routine initializes IOCB/WQE data structure from scsi command
3719 *
3720 * Return codes:
3721 *      1 - Error
3722 *      0 - Success
3723 **/
3724static inline int
3725lpfc_scsi_prep_cmnd_buf(struct lpfc_vport *vport, struct lpfc_io_buf *lpfc_cmd,
3726                        uint8_t tmo)
3727{
3728        return vport->phba->lpfc_scsi_prep_cmnd_buf(vport, lpfc_cmd, tmo);
3729}
3730
3731/**
3732 * lpfc_send_scsi_error_event - Posts an event when there is SCSI error
3733 * @phba: Pointer to hba context object.
3734 * @vport: Pointer to vport object.
3735 * @lpfc_cmd: Pointer to lpfc scsi command which reported the error.
3736 * @fcpi_parm: FCP Initiator parameter.
3737 *
3738 * This function posts an event when there is a SCSI command reporting
3739 * error from the scsi device.
3740 **/
3741static void
3742lpfc_send_scsi_error_event(struct lpfc_hba *phba, struct lpfc_vport *vport,
3743                struct lpfc_io_buf *lpfc_cmd, uint32_t fcpi_parm) {
3744        struct scsi_cmnd *cmnd = lpfc_cmd->pCmd;
3745        struct fcp_rsp *fcprsp = lpfc_cmd->fcp_rsp;
3746        uint32_t resp_info = fcprsp->rspStatus2;
3747        uint32_t scsi_status = fcprsp->rspStatus3;
3748        struct lpfc_fast_path_event *fast_path_evt = NULL;
3749        struct lpfc_nodelist *pnode = lpfc_cmd->rdata->pnode;
3750        unsigned long flags;
3751
3752        if (!pnode)
3753                return;
3754
3755        /* If there is queuefull or busy condition send a scsi event */
3756        if ((cmnd->result == SAM_STAT_TASK_SET_FULL) ||
3757                (cmnd->result == SAM_STAT_BUSY)) {
3758                fast_path_evt = lpfc_alloc_fast_evt(phba);
3759                if (!fast_path_evt)
3760                        return;
3761                fast_path_evt->un.scsi_evt.event_type =
3762                        FC_REG_SCSI_EVENT;
3763                fast_path_evt->un.scsi_evt.subcategory =
3764                (cmnd->result == SAM_STAT_TASK_SET_FULL) ?
3765                LPFC_EVENT_QFULL : LPFC_EVENT_DEVBSY;
3766                fast_path_evt->un.scsi_evt.lun = cmnd->device->lun;
3767                memcpy(&fast_path_evt->un.scsi_evt.wwpn,
3768                        &pnode->nlp_portname, sizeof(struct lpfc_name));
3769                memcpy(&fast_path_evt->un.scsi_evt.wwnn,
3770                        &pnode->nlp_nodename, sizeof(struct lpfc_name));
3771        } else if ((resp_info & SNS_LEN_VALID) && fcprsp->rspSnsLen &&
3772                ((cmnd->cmnd[0] == READ_10) || (cmnd->cmnd[0] == WRITE_10))) {
3773                fast_path_evt = lpfc_alloc_fast_evt(phba);
3774                if (!fast_path_evt)
3775                        return;
3776                fast_path_evt->un.check_cond_evt.scsi_event.event_type =
3777                        FC_REG_SCSI_EVENT;
3778                fast_path_evt->un.check_cond_evt.scsi_event.subcategory =
3779                        LPFC_EVENT_CHECK_COND;
3780                fast_path_evt->un.check_cond_evt.scsi_event.lun =
3781                        cmnd->device->lun;
3782                memcpy(&fast_path_evt->un.check_cond_evt.scsi_event.wwpn,
3783                        &pnode->nlp_portname, sizeof(struct lpfc_name));
3784                memcpy(&fast_path_evt->un.check_cond_evt.scsi_event.wwnn,
3785                        &pnode->nlp_nodename, sizeof(struct lpfc_name));
3786                fast_path_evt->un.check_cond_evt.sense_key =
3787                        cmnd->sense_buffer[2] & 0xf;
3788                fast_path_evt->un.check_cond_evt.asc = cmnd->sense_buffer[12];
3789                fast_path_evt->un.check_cond_evt.ascq = cmnd->sense_buffer[13];
3790        } else if ((cmnd->sc_data_direction == DMA_FROM_DEVICE) &&
3791                     fcpi_parm &&
3792                     ((be32_to_cpu(fcprsp->rspResId) != fcpi_parm) ||
3793                        ((scsi_status == SAM_STAT_GOOD) &&
3794                        !(resp_info & (RESID_UNDER | RESID_OVER))))) {
3795                /*
3796                 * If status is good or resid does not match with fcp_param and
3797                 * there is valid fcpi_parm, then there is a read_check error
3798                 */
3799                fast_path_evt = lpfc_alloc_fast_evt(phba);
3800                if (!fast_path_evt)
3801                        return;
3802                fast_path_evt->un.read_check_error.header.event_type =
3803                        FC_REG_FABRIC_EVENT;
3804                fast_path_evt->un.read_check_error.header.subcategory =
3805                        LPFC_EVENT_FCPRDCHKERR;
3806                memcpy(&fast_path_evt->un.read_check_error.header.wwpn,
3807                        &pnode->nlp_portname, sizeof(struct lpfc_name));
3808                memcpy(&fast_path_evt->un.read_check_error.header.wwnn,
3809                        &pnode->nlp_nodename, sizeof(struct lpfc_name));
3810                fast_path_evt->un.read_check_error.lun = cmnd->device->lun;
3811                fast_path_evt->un.read_check_error.opcode = cmnd->cmnd[0];
3812                fast_path_evt->un.read_check_error.fcpiparam =
3813                        fcpi_parm;
3814        } else
3815                return;
3816
3817        fast_path_evt->vport = vport;
3818        spin_lock_irqsave(&phba->hbalock, flags);
3819        list_add_tail(&fast_path_evt->work_evt.evt_listp, &phba->work_list);
3820        spin_unlock_irqrestore(&phba->hbalock, flags);
3821        lpfc_worker_wake_up(phba);
3822        return;
3823}
3824
3825/**
3826 * lpfc_scsi_unprep_dma_buf - Un-map DMA mapping of SG-list for dev
3827 * @phba: The HBA for which this call is being executed.
3828 * @psb: The scsi buffer which is going to be un-mapped.
3829 *
3830 * This routine does DMA un-mapping of scatter gather list of scsi command
3831 * field of @lpfc_cmd for device with SLI-3 interface spec.
3832 **/
3833static void
3834lpfc_scsi_unprep_dma_buf(struct lpfc_hba *phba, struct lpfc_io_buf *psb)
3835{
3836        /*
3837         * There are only two special cases to consider.  (1) the scsi command
3838         * requested scatter-gather usage or (2) the scsi command allocated
3839         * a request buffer, but did not request use_sg.  There is a third
3840         * case, but it does not require resource deallocation.
3841         */
3842        if (psb->seg_cnt > 0)
3843                scsi_dma_unmap(psb->pCmd);
3844        if (psb->prot_seg_cnt > 0)
3845                dma_unmap_sg(&phba->pcidev->dev, scsi_prot_sglist(psb->pCmd),
3846                                scsi_prot_sg_count(psb->pCmd),
3847                                psb->pCmd->sc_data_direction);
3848}
3849
3850/**
3851 * lpfc_unblock_requests - allow further commands to be queued.
3852 * @phba: pointer to phba object
3853 *
3854 * For single vport, just call scsi_unblock_requests on physical port.
3855 * For multiple vports, send scsi_unblock_requests for all the vports.
3856 */
3857void
3858lpfc_unblock_requests(struct lpfc_hba *phba)
3859{
3860        struct lpfc_vport **vports;
3861        struct Scsi_Host  *shost;
3862        int i;
3863
3864        if (phba->sli_rev == LPFC_SLI_REV4 &&
3865            !phba->sli4_hba.max_cfg_param.vpi_used) {
3866                shost = lpfc_shost_from_vport(phba->pport);
3867                scsi_unblock_requests(shost);
3868                return;
3869        }
3870
3871        vports = lpfc_create_vport_work_array(phba);
3872        if (vports != NULL)
3873                for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {
3874                        shost = lpfc_shost_from_vport(vports[i]);
3875                        scsi_unblock_requests(shost);
3876                }
3877        lpfc_destroy_vport_work_array(phba, vports);
3878}
3879
3880/**
3881 * lpfc_block_requests - prevent further commands from being queued.
3882 * @phba: pointer to phba object
3883 *
3884 * For single vport, just call scsi_block_requests on physical port.
3885 * For multiple vports, send scsi_block_requests for all the vports.
3886 */
3887void
3888lpfc_block_requests(struct lpfc_hba *phba)
3889{
3890        struct lpfc_vport **vports;
3891        struct Scsi_Host  *shost;
3892        int i;
3893
3894        if (atomic_read(&phba->cmf_stop_io))
3895                return;
3896
3897        if (phba->sli_rev == LPFC_SLI_REV4 &&
3898            !phba->sli4_hba.max_cfg_param.vpi_used) {
3899                shost = lpfc_shost_from_vport(phba->pport);
3900                scsi_block_requests(shost);
3901                return;
3902        }
3903
3904        vports = lpfc_create_vport_work_array(phba);
3905        if (vports != NULL)
3906                for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {
3907                        shost = lpfc_shost_from_vport(vports[i]);
3908                        scsi_block_requests(shost);
3909                }
3910        lpfc_destroy_vport_work_array(phba, vports);
3911}
3912
3913/**
3914 * lpfc_update_cmf_cmpl - Adjust CMF counters for IO completion
3915 * @phba: The HBA for which this call is being executed.
3916 * @time: The latency of the IO that completed (in ns)
3917 * @size: The size of the IO that completed
3918 * @shost: SCSI host the IO completed on (NULL for a NVME IO)
3919 *
3920 * The routine adjusts the various Burst and Bandwidth counters used in
3921 * Congestion management and E2E. If time is set to LPFC_CGN_NOT_SENT,
3922 * that means the IO was never issued to the HBA, so this routine is
3923 * just being called to cleanup the counter from a previous
3924 * lpfc_update_cmf_cmd call.
3925 */
3926int
3927lpfc_update_cmf_cmpl(struct lpfc_hba *phba,
3928                     uint64_t time, uint32_t size, struct Scsi_Host *shost)
3929{
3930        struct lpfc_cgn_stat *cgs;
3931
3932        if (time != LPFC_CGN_NOT_SENT) {
3933                /* lat is ns coming in, save latency in us */
3934                if (time < 1000)
3935                        time = 1;
3936                else
3937                        time = div_u64(time + 500, 1000); /* round it */
3938
3939                cgs = this_cpu_ptr(phba->cmf_stat);
3940                atomic64_add(size, &cgs->rcv_bytes);
3941                atomic64_add(time, &cgs->rx_latency);
3942                atomic_inc(&cgs->rx_io_cnt);
3943        }
3944        return 0;
3945}
3946
3947/**
3948 * lpfc_update_cmf_cmd - Adjust CMF counters for IO submission
3949 * @phba: The HBA for which this call is being executed.
3950 * @size: The size of the IO that will be issued
3951 *
3952 * The routine adjusts the various Burst and Bandwidth counters used in
3953 * Congestion management and E2E.
3954 */
3955int
3956lpfc_update_cmf_cmd(struct lpfc_hba *phba, uint32_t size)
3957{
3958        uint64_t total;
3959        struct lpfc_cgn_stat *cgs;
3960        int cpu;
3961
3962        /* At this point we are either LPFC_CFG_MANAGED or LPFC_CFG_MONITOR */
3963        if (phba->cmf_active_mode == LPFC_CFG_MANAGED &&
3964            phba->cmf_max_bytes_per_interval) {
3965                total = 0;
3966                for_each_present_cpu(cpu) {
3967                        cgs = per_cpu_ptr(phba->cmf_stat, cpu);
3968                        total += atomic64_read(&cgs->total_bytes);
3969                }
3970                if (total >= phba->cmf_max_bytes_per_interval) {
3971                        if (!atomic_xchg(&phba->cmf_bw_wait, 1)) {
3972                                lpfc_block_requests(phba);
3973                                phba->cmf_last_ts =
3974                                        lpfc_calc_cmf_latency(phba);
3975                        }
3976                        atomic_inc(&phba->cmf_busy);
3977                        return -EBUSY;
3978                }
3979                if (size > atomic_read(&phba->rx_max_read_cnt))
3980                        atomic_set(&phba->rx_max_read_cnt, size);
3981        }
3982
3983        cgs = this_cpu_ptr(phba->cmf_stat);
3984        atomic64_add(size, &cgs->total_bytes);
3985        return 0;
3986}
3987
3988/**
3989 * lpfc_handle_fcp_err - FCP response handler
3990 * @vport: The virtual port for which this call is being executed.
3991 * @lpfc_cmd: Pointer to lpfc_io_buf data structure.
3992 * @fcpi_parm: FCP Initiator parameter.
3993 *
3994 * This routine is called to process response IOCB with status field
3995 * IOSTAT_FCP_RSP_ERROR. This routine sets result field of scsi command
3996 * based upon SCSI and FCP error.
3997 **/
3998static void
3999lpfc_handle_fcp_err(struct lpfc_vport *vport, struct lpfc_io_buf *lpfc_cmd,
4000                    uint32_t fcpi_parm)
4001{
4002        struct scsi_cmnd *cmnd = lpfc_cmd->pCmd;
4003        struct fcp_cmnd *fcpcmd = lpfc_cmd->fcp_cmnd;
4004        struct fcp_rsp *fcprsp = lpfc_cmd->fcp_rsp;
4005        uint32_t resp_info = fcprsp->rspStatus2;
4006        uint32_t scsi_status = fcprsp->rspStatus3;
4007        uint32_t *lp;
4008        uint32_t host_status = DID_OK;
4009        uint32_t rsplen = 0;
4010        uint32_t fcpDl;
4011        uint32_t logit = LOG_FCP | LOG_FCP_ERROR;
4012
4013
4014        /*
4015         *  If this is a task management command, there is no
4016         *  scsi packet associated with this lpfc_cmd.  The driver
4017         *  consumes it.
4018         */
4019        if (fcpcmd->fcpCntl2) {
4020                scsi_status = 0;
4021                goto out;
4022        }
4023
4024        if (resp_info & RSP_LEN_VALID) {
4025                rsplen = be32_to_cpu(fcprsp->rspRspLen);
4026                if (rsplen != 0 && rsplen != 4 && rsplen != 8) {
4027                        lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
4028                                         "2719 Invalid response length: "
4029                                         "tgt x%x lun x%llx cmnd x%x rsplen "
4030                                         "x%x\n", cmnd->device->id,
4031                                         cmnd->device->lun, cmnd->cmnd[0],
4032                                         rsplen);
4033                        host_status = DID_ERROR;
4034                        goto out;
4035                }
4036                if (fcprsp->rspInfo3 != RSP_NO_FAILURE) {
4037                        lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
4038                                 "2757 Protocol failure detected during "
4039                                 "processing of FCP I/O op: "
4040                                 "tgt x%x lun x%llx cmnd x%x rspInfo3 x%x\n",
4041                                 cmnd->device->id,
4042                                 cmnd->device->lun, cmnd->cmnd[0],
4043                                 fcprsp->rspInfo3);
4044                        host_status = DID_ERROR;
4045                        goto out;
4046                }
4047        }
4048
4049        if ((resp_info & SNS_LEN_VALID) && fcprsp->rspSnsLen) {
4050                uint32_t snslen = be32_to_cpu(fcprsp->rspSnsLen);
4051                if (snslen > SCSI_SENSE_BUFFERSIZE)
4052                        snslen = SCSI_SENSE_BUFFERSIZE;
4053
4054                if (resp_info & RSP_LEN_VALID)
4055                  rsplen = be32_to_cpu(fcprsp->rspRspLen);
4056                memcpy(cmnd->sense_buffer, &fcprsp->rspInfo0 + rsplen, snslen);
4057        }
4058        lp = (uint32_t *)cmnd->sense_buffer;
4059
4060        /* special handling for under run conditions */
4061        if (!scsi_status && (resp_info & RESID_UNDER)) {
4062                /* don't log under runs if fcp set... */
4063                if (vport->cfg_log_verbose & LOG_FCP)
4064                        logit = LOG_FCP_ERROR;
4065                /* unless operator says so */
4066                if (vport->cfg_log_verbose & LOG_FCP_UNDER)
4067                        logit = LOG_FCP_UNDER;
4068        }
4069
4070        lpfc_printf_vlog(vport, KERN_WARNING, logit,
4071                         "9024 FCP command x%x failed: x%x SNS x%x x%x "
4072                         "Data: x%x x%x x%x x%x x%x\n",
4073                         cmnd->cmnd[0], scsi_status,
4074                         be32_to_cpu(*lp), be32_to_cpu(*(lp + 3)), resp_info,
4075                         be32_to_cpu(fcprsp->rspResId),
4076                         be32_to_cpu(fcprsp->rspSnsLen),
4077                         be32_to_cpu(fcprsp->rspRspLen),
4078                         fcprsp->rspInfo3);
4079
4080        scsi_set_resid(cmnd, 0);
4081        fcpDl = be32_to_cpu(fcpcmd->fcpDl);
4082        if (resp_info & RESID_UNDER) {
4083                scsi_set_resid(cmnd, be32_to_cpu(fcprsp->rspResId));
4084
4085                lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP_UNDER,
4086                                 "9025 FCP Underrun, expected %d, "
4087                                 "residual %d Data: x%x x%x x%x\n",
4088                                 fcpDl,
4089                                 scsi_get_resid(cmnd), fcpi_parm, cmnd->cmnd[0],
4090                                 cmnd->underflow);
4091
4092                /*
4093                 * If there is an under run, check if under run reported by
4094                 * storage array is same as the under run reported by HBA.
4095                 * If this is not same, there is a dropped frame.
4096                 */
4097                if (fcpi_parm && (scsi_get_resid(cmnd) != fcpi_parm)) {
4098                        lpfc_printf_vlog(vport, KERN_WARNING,
4099                                         LOG_FCP | LOG_FCP_ERROR,
4100                                         "9026 FCP Read Check Error "
4101                                         "and Underrun Data: x%x x%x x%x x%x\n",
4102                                         fcpDl,
4103                                         scsi_get_resid(cmnd), fcpi_parm,
4104                                         cmnd->cmnd[0]);
4105                        scsi_set_resid(cmnd, scsi_bufflen(cmnd));
4106                        host_status = DID_ERROR;
4107                }
4108                /*
4109                 * The cmnd->underflow is the minimum number of bytes that must
4110                 * be transferred for this command.  Provided a sense condition
4111                 * is not present, make sure the actual amount transferred is at
4112                 * least the underflow value or fail.
4113                 */
4114                if (!(resp_info & SNS_LEN_VALID) &&
4115                    (scsi_status == SAM_STAT_GOOD) &&
4116                    (scsi_bufflen(cmnd) - scsi_get_resid(cmnd)
4117                     < cmnd->underflow)) {
4118                        lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP,
4119                                         "9027 FCP command x%x residual "
4120                                         "underrun converted to error "
4121                                         "Data: x%x x%x x%x\n",
4122                                         cmnd->cmnd[0], scsi_bufflen(cmnd),
4123                                         scsi_get_resid(cmnd), cmnd->underflow);
4124                        host_status = DID_ERROR;
4125                }
4126        } else if (resp_info & RESID_OVER) {
4127                lpfc_printf_vlog(vport, KERN_WARNING, LOG_FCP,
4128                                 "9028 FCP command x%x residual overrun error. "
4129                                 "Data: x%x x%x\n", cmnd->cmnd[0],
4130                                 scsi_bufflen(cmnd), scsi_get_resid(cmnd));
4131                host_status = DID_ERROR;
4132
4133        /*
4134         * Check SLI validation that all the transfer was actually done
4135         * (fcpi_parm should be zero). Apply check only to reads.
4136         */
4137        } else if (fcpi_parm) {
4138                lpfc_printf_vlog(vport, KERN_WARNING, LOG_FCP | LOG_FCP_ERROR,
4139                                 "9029 FCP %s Check Error Data: "
4140                                 "x%x x%x x%x x%x x%x\n",
4141                                 ((cmnd->sc_data_direction == DMA_FROM_DEVICE) ?
4142                                 "Read" : "Write"),
4143                                 fcpDl, be32_to_cpu(fcprsp->rspResId),
4144                                 fcpi_parm, cmnd->cmnd[0], scsi_status);
4145
4146                /* There is some issue with the LPe12000 that causes it
4147                 * to miscalculate the fcpi_parm and falsely trip this
4148                 * recovery logic.  Detect this case and don't error when true.
4149                 */
4150                if (fcpi_parm > fcpDl)
4151                        goto out;
4152
4153                switch (scsi_status) {
4154                case SAM_STAT_GOOD:
4155                case SAM_STAT_CHECK_CONDITION:
4156                        /* Fabric dropped a data frame. Fail any successful
4157                         * command in which we detected dropped frames.
4158                         * A status of good or some check conditions could
4159                         * be considered a successful command.
4160                         */
4161                        host_status = DID_ERROR;
4162                        break;
4163                }
4164                scsi_set_resid(cmnd, scsi_bufflen(cmnd));
4165        }
4166
4167 out:
4168        cmnd->result = host_status << 16 | scsi_status;
4169        lpfc_send_scsi_error_event(vport->phba, vport, lpfc_cmd, fcpi_parm);
4170}
4171
4172/**
4173 * lpfc_fcp_io_cmd_wqe_cmpl - Complete a FCP IO
4174 * @phba: The hba for which this call is being executed.
4175 * @pwqeIn: The command WQE for the scsi cmnd.
4176 * @wcqe: Pointer to driver response CQE object.
4177 *
4178 * This routine assigns scsi command result by looking into response WQE
4179 * status field appropriately. This routine handles QUEUE FULL condition as
4180 * well by ramping down device queue depth.
4181 **/
4182static void
4183lpfc_fcp_io_cmd_wqe_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *pwqeIn,
4184                         struct lpfc_wcqe_complete *wcqe)
4185{
4186        struct lpfc_io_buf *lpfc_cmd =
4187                (struct lpfc_io_buf *)pwqeIn->context1;
4188        struct lpfc_vport *vport = pwqeIn->vport;
4189        struct lpfc_rport_data *rdata;
4190        struct lpfc_nodelist *ndlp;
4191        struct scsi_cmnd *cmd;
4192        unsigned long flags;
4193        struct lpfc_fast_path_event *fast_path_evt;
4194        struct Scsi_Host *shost;
4195        u32 logit = LOG_FCP;
4196        u32 status, idx;
4197        unsigned long iflags = 0;
4198        u32 lat;
4199        u8 wait_xb_clr = 0;
4200
4201        /* Sanity check on return of outstanding command */
4202        if (!lpfc_cmd) {
4203                lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
4204                                 "9032 Null lpfc_cmd pointer. No "
4205                                 "release, skip completion\n");
4206                return;
4207        }
4208
4209        rdata = lpfc_cmd->rdata;
4210        ndlp = rdata->pnode;
4211
4212        if (bf_get(lpfc_wcqe_c_xb, wcqe)) {
4213                /* TOREMOVE - currently this flag is checked during
4214                 * the release of lpfc_iocbq. Remove once we move
4215                 * to lpfc_wqe_job construct.
4216                 *
4217                 * This needs to be done outside buf_lock
4218                 */
4219                spin_lock_irqsave(&phba->hbalock, iflags);
4220                lpfc_cmd->cur_iocbq.iocb_flag |= LPFC_EXCHANGE_BUSY;
4221                spin_unlock_irqrestore(&phba->hbalock, iflags);
4222        }
4223
4224        /* Guard against abort handler being called at same time */
4225        spin_lock(&lpfc_cmd->buf_lock);
4226
4227        /* Sanity check on return of outstanding command */
4228        cmd = lpfc_cmd->pCmd;
4229        if (!cmd) {
4230                lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
4231                                 "9042 I/O completion: Not an active IO\n");
4232                spin_unlock(&lpfc_cmd->buf_lock);
4233                lpfc_release_scsi_buf(phba, lpfc_cmd);
4234                return;
4235        }
4236        idx = lpfc_cmd->cur_iocbq.hba_wqidx;
4237        if (phba->sli4_hba.hdwq)
4238                phba->sli4_hba.hdwq[idx].scsi_cstat.io_cmpls++;
4239
4240#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
4241        if (unlikely(phba->hdwqstat_on & LPFC_CHECK_SCSI_IO))
4242                this_cpu_inc(phba->sli4_hba.c_stat->cmpl_io);
4243#endif
4244        shost = cmd->device->host;
4245
4246        status = bf_get(lpfc_wcqe_c_status, wcqe);
4247        lpfc_cmd->status = (status & LPFC_IOCB_STATUS_MASK);
4248        lpfc_cmd->result = (wcqe->parameter & IOERR_PARAM_MASK);
4249
4250        lpfc_cmd->flags &= ~LPFC_SBUF_XBUSY;
4251        if (bf_get(lpfc_wcqe_c_xb, wcqe)) {
4252                lpfc_cmd->flags |= LPFC_SBUF_XBUSY;
4253                if (phba->cfg_fcp_wait_abts_rsp)
4254                        wait_xb_clr = 1;
4255        }
4256
4257#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
4258        if (lpfc_cmd->prot_data_type) {
4259                struct scsi_dif_tuple *src = NULL;
4260
4261                src =  (struct scsi_dif_tuple *)lpfc_cmd->prot_data_segment;
4262                /*
4263                 * Used to restore any changes to protection
4264                 * data for error injection.
4265                 */
4266                switch (lpfc_cmd->prot_data_type) {
4267                case LPFC_INJERR_REFTAG:
4268                        src->ref_tag =
4269                                lpfc_cmd->prot_data;
4270                        break;
4271                case LPFC_INJERR_APPTAG:
4272                        src->app_tag =
4273                                (uint16_t)lpfc_cmd->prot_data;
4274                        break;
4275                case LPFC_INJERR_GUARD:
4276                        src->guard_tag =
4277                                (uint16_t)lpfc_cmd->prot_data;
4278                        break;
4279                default:
4280                        break;
4281                }
4282
4283                lpfc_cmd->prot_data = 0;
4284                lpfc_cmd->prot_data_type = 0;
4285                lpfc_cmd->prot_data_segment = NULL;
4286        }
4287#endif
4288        if (unlikely(lpfc_cmd->status)) {
4289                if (lpfc_cmd->status == IOSTAT_LOCAL_REJECT &&
4290                    (lpfc_cmd->result & IOERR_DRVR_MASK))
4291                        lpfc_cmd->status = IOSTAT_DRIVER_REJECT;
4292                else if (lpfc_cmd->status >= IOSTAT_CNT)
4293                        lpfc_cmd->status = IOSTAT_DEFAULT;
4294                if (lpfc_cmd->status == IOSTAT_FCP_RSP_ERROR &&
4295                    !lpfc_cmd->fcp_rsp->rspStatus3 &&
4296                    (lpfc_cmd->fcp_rsp->rspStatus2 & RESID_UNDER) &&
4297                    !(vport->cfg_log_verbose & LOG_FCP_UNDER))
4298                        logit = 0;
4299                else
4300                        logit = LOG_FCP | LOG_FCP_UNDER;
4301                lpfc_printf_vlog(vport, KERN_WARNING, logit,
4302                                 "9034 FCP cmd x%x failed <%d/%lld> "
4303                                 "status: x%x result: x%x "
4304                                 "sid: x%x did: x%x oxid: x%x "
4305                                 "Data: x%x x%x x%x\n",
4306                                 cmd->cmnd[0],
4307                                 cmd->device ? cmd->device->id : 0xffff,
4308                                 cmd->device ? cmd->device->lun : 0xffff,
4309                                 lpfc_cmd->status, lpfc_cmd->result,
4310                                 vport->fc_myDID,
4311                                 (ndlp) ? ndlp->nlp_DID : 0,
4312                                 lpfc_cmd->cur_iocbq.sli4_xritag,
4313                                 wcqe->parameter, wcqe->total_data_placed,
4314                                 lpfc_cmd->cur_iocbq.iotag);
4315        }
4316
4317        switch (lpfc_cmd->status) {
4318        case IOSTAT_SUCCESS:
4319                cmd->result = DID_OK << 16;
4320                break;
4321        case IOSTAT_FCP_RSP_ERROR:
4322                lpfc_handle_fcp_err(vport, lpfc_cmd,
4323                                    pwqeIn->wqe.fcp_iread.total_xfer_len -
4324                                    wcqe->total_data_placed);
4325                break;
4326        case IOSTAT_NPORT_BSY:
4327        case IOSTAT_FABRIC_BSY:
4328                cmd->result = DID_TRANSPORT_DISRUPTED << 16;
4329                fast_path_evt = lpfc_alloc_fast_evt(phba);
4330                if (!fast_path_evt)
4331                        break;
4332                fast_path_evt->un.fabric_evt.event_type =
4333                        FC_REG_FABRIC_EVENT;
4334                fast_path_evt->un.fabric_evt.subcategory =
4335                        (lpfc_cmd->status == IOSTAT_NPORT_BSY) ?
4336                        LPFC_EVENT_PORT_BUSY : LPFC_EVENT_FABRIC_BUSY;
4337                if (ndlp) {
4338                        memcpy(&fast_path_evt->un.fabric_evt.wwpn,
4339                               &ndlp->nlp_portname,
4340                                sizeof(struct lpfc_name));
4341                        memcpy(&fast_path_evt->un.fabric_evt.wwnn,
4342                               &ndlp->nlp_nodename,
4343                                sizeof(struct lpfc_name));
4344                }
4345                fast_path_evt->vport = vport;
4346                fast_path_evt->work_evt.evt =
4347                        LPFC_EVT_FASTPATH_MGMT_EVT;
4348                spin_lock_irqsave(&phba->hbalock, flags);
4349                list_add_tail(&fast_path_evt->work_evt.evt_listp,
4350                              &phba->work_list);
4351                spin_unlock_irqrestore(&phba->hbalock, flags);
4352                lpfc_worker_wake_up(phba);
4353                lpfc_printf_vlog(vport, KERN_WARNING, logit,
4354                                 "9035 Fabric/Node busy FCP cmd x%x failed"
4355                                 " <%d/%lld> "
4356                                 "status: x%x result: x%x "
4357                                 "sid: x%x did: x%x oxid: x%x "
4358                                 "Data: x%x x%x x%x\n",
4359                                 cmd->cmnd[0],
4360                                 cmd->device ? cmd->device->id : 0xffff,
4361                                 cmd->device ? cmd->device->lun : 0xffff,
4362                                 lpfc_cmd->status, lpfc_cmd->result,
4363                                 vport->fc_myDID,
4364                                 (ndlp) ? ndlp->nlp_DID : 0,
4365                                 lpfc_cmd->cur_iocbq.sli4_xritag,
4366                                 wcqe->parameter,
4367                                 wcqe->total_data_placed,
4368                                 lpfc_cmd->cur_iocbq.iocb.ulpIoTag);
4369                break;
4370        case IOSTAT_REMOTE_STOP:
4371                if (ndlp) {
4372                        /* This I/O was aborted by the target, we don't
4373                         * know the rxid and because we did not send the
4374                         * ABTS we cannot generate and RRQ.
4375                         */
4376                        lpfc_set_rrq_active(phba, ndlp,
4377                                            lpfc_cmd->cur_iocbq.sli4_lxritag,
4378                                            0, 0);
4379                }
4380                fallthrough;
4381        case IOSTAT_LOCAL_REJECT:
4382                if (lpfc_cmd->result & IOERR_DRVR_MASK)
4383                        lpfc_cmd->status = IOSTAT_DRIVER_REJECT;
4384                if (lpfc_cmd->result == IOERR_ELXSEC_KEY_UNWRAP_ERROR ||
4385                    lpfc_cmd->result ==
4386                    IOERR_ELXSEC_KEY_UNWRAP_COMPARE_ERROR ||
4387                    lpfc_cmd->result == IOERR_ELXSEC_CRYPTO_ERROR ||
4388                    lpfc_cmd->result ==
4389                    IOERR_ELXSEC_CRYPTO_COMPARE_ERROR) {
4390                        cmd->result = DID_NO_CONNECT << 16;
4391                        break;
4392                }
4393                if (lpfc_cmd->result == IOERR_INVALID_RPI ||
4394                    lpfc_cmd->result == IOERR_NO_RESOURCES ||
4395                    lpfc_cmd->result == IOERR_ABORT_REQUESTED ||
4396                    lpfc_cmd->result == IOERR_RPI_SUSPENDED ||
4397                    lpfc_cmd->result == IOERR_SLER_CMD_RCV_FAILURE) {
4398                        cmd->result = DID_REQUEUE << 16;
4399                        break;
4400                }
4401                if ((lpfc_cmd->result == IOERR_RX_DMA_FAILED ||
4402                     lpfc_cmd->result == IOERR_TX_DMA_FAILED) &&
4403                     status == CQE_STATUS_DI_ERROR) {
4404                        if (scsi_get_prot_op(cmd) !=
4405                            SCSI_PROT_NORMAL) {
4406                                /*
4407                                 * This is a response for a BG enabled
4408                                 * cmd. Parse BG error
4409                                 */
4410                                lpfc_sli4_parse_bg_err(phba, lpfc_cmd,
4411                                                       wcqe);
4412                                break;
4413                        }
4414                        lpfc_printf_vlog(vport, KERN_WARNING, LOG_BG,
4415                                 "9040 non-zero BGSTAT on unprotected cmd\n");
4416                }
4417                lpfc_printf_vlog(vport, KERN_WARNING, logit,
4418                                 "9036 Local Reject FCP cmd x%x failed"
4419                                 " <%d/%lld> "
4420                                 "status: x%x result: x%x "
4421                                 "sid: x%x did: x%x oxid: x%x "
4422                                 "Data: x%x x%x x%x\n",
4423                                 cmd->cmnd[0],
4424                                 cmd->device ? cmd->device->id : 0xffff,
4425                                 cmd->device ? cmd->device->lun : 0xffff,
4426                                 lpfc_cmd->status, lpfc_cmd->result,
4427                                 vport->fc_myDID,
4428                                 (ndlp) ? ndlp->nlp_DID : 0,
4429                                 lpfc_cmd->cur_iocbq.sli4_xritag,
4430                                 wcqe->parameter,
4431                                 wcqe->total_data_placed,
4432                                 lpfc_cmd->cur_iocbq.iocb.ulpIoTag);
4433                fallthrough;
4434        default:
4435                if (lpfc_cmd->status >= IOSTAT_CNT)
4436                        lpfc_cmd->status = IOSTAT_DEFAULT;
4437                cmd->result = DID_ERROR << 16;
4438                lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME_IOERR,
4439                                 "9037 FCP Completion Error: xri %x "
4440                                 "status x%x result x%x [x%x] "
4441                                 "placed x%x\n",
4442                                 lpfc_cmd->cur_iocbq.sli4_xritag,
4443                                 lpfc_cmd->status, lpfc_cmd->result,
4444                                 wcqe->parameter,
4445                                 wcqe->total_data_placed);
4446        }
4447        if (cmd->result || lpfc_cmd->fcp_rsp->rspSnsLen) {
4448                u32 *lp = (u32 *)cmd->sense_buffer;
4449
4450                lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP,
4451                                 "9039 Iodone <%d/%llu> cmd x%px, error "
4452                                 "x%x SNS x%x x%x LBA x%llx Data: x%x x%x\n",
4453                                 cmd->device->id, cmd->device->lun, cmd,
4454                                 cmd->result, *lp, *(lp + 3),
4455                                 (u64)scsi_get_lba(cmd),
4456                                 cmd->retries, scsi_get_resid(cmd));
4457        }
4458
4459        lpfc_update_stats(vport, lpfc_cmd);
4460
4461        if (vport->cfg_max_scsicmpl_time &&
4462            time_after(jiffies, lpfc_cmd->start_time +
4463            msecs_to_jiffies(vport->cfg_max_scsicmpl_time))) {
4464                spin_lock_irqsave(shost->host_lock, flags);
4465                if (ndlp) {
4466                        if (ndlp->cmd_qdepth >
4467                                atomic_read(&ndlp->cmd_pending) &&
4468                                (atomic_read(&ndlp->cmd_pending) >
4469                                LPFC_MIN_TGT_QDEPTH) &&
4470                                (cmd->cmnd[0] == READ_10 ||
4471                                cmd->cmnd[0] == WRITE_10))
4472                                ndlp->cmd_qdepth =
4473                                        atomic_read(&ndlp->cmd_pending);
4474
4475                        ndlp->last_change_time = jiffies;
4476                }
4477                spin_unlock_irqrestore(shost->host_lock, flags);
4478        }
4479        lpfc_scsi_unprep_dma_buf(phba, lpfc_cmd);
4480
4481#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
4482        if (lpfc_cmd->ts_cmd_start) {
4483                lpfc_cmd->ts_isr_cmpl = lpfc_cmd->cur_iocbq.isr_timestamp;
4484                lpfc_cmd->ts_data_io = ktime_get_ns();
4485                phba->ktime_last_cmd = lpfc_cmd->ts_data_io;
4486                lpfc_io_ktime(phba, lpfc_cmd);
4487        }
4488#endif
4489        if (likely(!wait_xb_clr))
4490                lpfc_cmd->pCmd = NULL;
4491        spin_unlock(&lpfc_cmd->buf_lock);
4492
4493        /* Check if IO qualified for CMF */
4494        if (phba->cmf_active_mode != LPFC_CFG_OFF &&
4495            cmd->sc_data_direction == DMA_FROM_DEVICE &&
4496            (scsi_sg_count(cmd))) {
4497                /* Used when calculating average latency */
4498                lat = ktime_get_ns() - lpfc_cmd->rx_cmd_start;
4499                lpfc_update_cmf_cmpl(phba, lat, scsi_bufflen(cmd), shost);
4500        }
4501
4502        if (wait_xb_clr)
4503                goto out;
4504
4505        /* The sdev is not guaranteed to be valid post scsi_done upcall. */
4506        scsi_done(cmd);
4507
4508        /*
4509         * If there is an abort thread waiting for command completion
4510         * wake up the thread.
4511         */
4512        spin_lock(&lpfc_cmd->buf_lock);
4513        lpfc_cmd->cur_iocbq.iocb_flag &= ~LPFC_DRIVER_ABORTED;
4514        if (lpfc_cmd->waitq)
4515                wake_up(lpfc_cmd->waitq);
4516        spin_unlock(&lpfc_cmd->buf_lock);
4517out:
4518        lpfc_release_scsi_buf(phba, lpfc_cmd);
4519}
4520
4521/**
4522 * lpfc_scsi_cmd_iocb_cmpl - Scsi cmnd IOCB completion routine
4523 * @phba: The Hba for which this call is being executed.
4524 * @pIocbIn: The command IOCBQ for the scsi cmnd.
4525 * @pIocbOut: The response IOCBQ for the scsi cmnd.
4526 *
4527 * This routine assigns scsi command result by looking into response IOCB
4528 * status field appropriately. This routine handles QUEUE FULL condition as
4529 * well by ramping down device queue depth.
4530 **/
4531static void
4532lpfc_scsi_cmd_iocb_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *pIocbIn,
4533                        struct lpfc_iocbq *pIocbOut)
4534{
4535        struct lpfc_io_buf *lpfc_cmd =
4536                (struct lpfc_io_buf *) pIocbIn->context1;
4537        struct lpfc_vport      *vport = pIocbIn->vport;
4538        struct lpfc_rport_data *rdata = lpfc_cmd->rdata;
4539        struct lpfc_nodelist *pnode = rdata->pnode;
4540        struct scsi_cmnd *cmd;
4541        unsigned long flags;
4542        struct lpfc_fast_path_event *fast_path_evt;
4543        struct Scsi_Host *shost;
4544        int idx;
4545        uint32_t logit = LOG_FCP;
4546
4547        /* Guard against abort handler being called at same time */
4548        spin_lock(&lpfc_cmd->buf_lock);
4549
4550        /* Sanity check on return of outstanding command */
4551        cmd = lpfc_cmd->pCmd;
4552        if (!cmd || !phba) {
4553                lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
4554                                 "2621 IO completion: Not an active IO\n");
4555                spin_unlock(&lpfc_cmd->buf_lock);
4556                return;
4557        }
4558
4559        idx = lpfc_cmd->cur_iocbq.hba_wqidx;
4560        if (phba->sli4_hba.hdwq)
4561                phba->sli4_hba.hdwq[idx].scsi_cstat.io_cmpls++;
4562
4563#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
4564        if (unlikely(phba->hdwqstat_on & LPFC_CHECK_SCSI_IO))
4565                this_cpu_inc(phba->sli4_hba.c_stat->cmpl_io);
4566#endif
4567        shost = cmd->device->host;
4568
4569        lpfc_cmd->result = (pIocbOut->iocb.un.ulpWord[4] & IOERR_PARAM_MASK);
4570        lpfc_cmd->status = pIocbOut->iocb.ulpStatus;
4571        /* pick up SLI4 exchange busy status from HBA */
4572        lpfc_cmd->flags &= ~LPFC_SBUF_XBUSY;
4573        if (pIocbOut->iocb_flag & LPFC_EXCHANGE_BUSY)
4574                lpfc_cmd->flags |= LPFC_SBUF_XBUSY;
4575
4576#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
4577        if (lpfc_cmd->prot_data_type) {
4578                struct scsi_dif_tuple *src = NULL;
4579
4580                src =  (struct scsi_dif_tuple *)lpfc_cmd->prot_data_segment;
4581                /*
4582                 * Used to restore any changes to protection
4583                 * data for error injection.
4584                 */
4585                switch (lpfc_cmd->prot_data_type) {
4586                case LPFC_INJERR_REFTAG:
4587                        src->ref_tag =
4588                                lpfc_cmd->prot_data;
4589                        break;
4590                case LPFC_INJERR_APPTAG:
4591                        src->app_tag =
4592                                (uint16_t)lpfc_cmd->prot_data;
4593                        break;
4594                case LPFC_INJERR_GUARD:
4595                        src->guard_tag =
4596                                (uint16_t)lpfc_cmd->prot_data;
4597                        break;
4598                default:
4599                        break;
4600                }
4601
4602                lpfc_cmd->prot_data = 0;
4603                lpfc_cmd->prot_data_type = 0;
4604                lpfc_cmd->prot_data_segment = NULL;
4605        }
4606#endif
4607
4608        if (unlikely(lpfc_cmd->status)) {
4609                if (lpfc_cmd->status == IOSTAT_LOCAL_REJECT &&
4610                    (lpfc_cmd->result & IOERR_DRVR_MASK))
4611                        lpfc_cmd->status = IOSTAT_DRIVER_REJECT;
4612                else if (lpfc_cmd->status >= IOSTAT_CNT)
4613                        lpfc_cmd->status = IOSTAT_DEFAULT;
4614                if (lpfc_cmd->status == IOSTAT_FCP_RSP_ERROR &&
4615                    !lpfc_cmd->fcp_rsp->rspStatus3 &&
4616                    (lpfc_cmd->fcp_rsp->rspStatus2 & RESID_UNDER) &&
4617                    !(vport->cfg_log_verbose & LOG_FCP_UNDER))
4618                        logit = 0;
4619                else
4620                        logit = LOG_FCP | LOG_FCP_UNDER;
4621                lpfc_printf_vlog(vport, KERN_WARNING, logit,
4622                         "9030 FCP cmd x%x failed <%d/%lld> "
4623                         "status: x%x result: x%x "
4624                         "sid: x%x did: x%x oxid: x%x "
4625                         "Data: x%x x%x\n",
4626                         cmd->cmnd[0],
4627                         cmd->device ? cmd->device->id : 0xffff,
4628                         cmd->device ? cmd->device->lun : 0xffff,
4629                         lpfc_cmd->status, lpfc_cmd->result,
4630                         vport->fc_myDID,
4631                         (pnode) ? pnode->nlp_DID : 0,
4632                         phba->sli_rev == LPFC_SLI_REV4 ?
4633                             lpfc_cmd->cur_iocbq.sli4_xritag : 0xffff,
4634                         pIocbOut->iocb.ulpContext,
4635                         lpfc_cmd->cur_iocbq.iocb.ulpIoTag);
4636
4637                switch (lpfc_cmd->status) {
4638                case IOSTAT_FCP_RSP_ERROR:
4639                        /* Call FCP RSP handler to determine result */
4640                        lpfc_handle_fcp_err(vport, lpfc_cmd,
4641                                            pIocbOut->iocb.un.fcpi.fcpi_parm);
4642                        break;
4643                case IOSTAT_NPORT_BSY:
4644                case IOSTAT_FABRIC_BSY:
4645                        cmd->result = DID_TRANSPORT_DISRUPTED << 16;
4646                        fast_path_evt = lpfc_alloc_fast_evt(phba);
4647                        if (!fast_path_evt)
4648                                break;
4649                        fast_path_evt->un.fabric_evt.event_type =
4650                                FC_REG_FABRIC_EVENT;
4651                        fast_path_evt->un.fabric_evt.subcategory =
4652                                (lpfc_cmd->status == IOSTAT_NPORT_BSY) ?
4653                                LPFC_EVENT_PORT_BUSY : LPFC_EVENT_FABRIC_BUSY;
4654                        if (pnode) {
4655                                memcpy(&fast_path_evt->un.fabric_evt.wwpn,
4656                                        &pnode->nlp_portname,
4657                                        sizeof(struct lpfc_name));
4658                                memcpy(&fast_path_evt->un.fabric_evt.wwnn,
4659                                        &pnode->nlp_nodename,
4660                                        sizeof(struct lpfc_name));
4661                        }
4662                        fast_path_evt->vport = vport;
4663                        fast_path_evt->work_evt.evt =
4664                                LPFC_EVT_FASTPATH_MGMT_EVT;
4665                        spin_lock_irqsave(&phba->hbalock, flags);
4666                        list_add_tail(&fast_path_evt->work_evt.evt_listp,
4667                                &phba->work_list);
4668                        spin_unlock_irqrestore(&phba->hbalock, flags);
4669                        lpfc_worker_wake_up(phba);
4670                        break;
4671                case IOSTAT_LOCAL_REJECT:
4672                case IOSTAT_REMOTE_STOP:
4673                        if (lpfc_cmd->result == IOERR_ELXSEC_KEY_UNWRAP_ERROR ||
4674                            lpfc_cmd->result ==
4675                                        IOERR_ELXSEC_KEY_UNWRAP_COMPARE_ERROR ||
4676                            lpfc_cmd->result == IOERR_ELXSEC_CRYPTO_ERROR ||
4677                            lpfc_cmd->result ==
4678                                        IOERR_ELXSEC_CRYPTO_COMPARE_ERROR) {
4679                                cmd->result = DID_NO_CONNECT << 16;
4680                                break;
4681                        }
4682                        if (lpfc_cmd->result == IOERR_INVALID_RPI ||
4683                            lpfc_cmd->result == IOERR_NO_RESOURCES ||
4684                            lpfc_cmd->result == IOERR_ABORT_REQUESTED ||
4685                            lpfc_cmd->result == IOERR_SLER_CMD_RCV_FAILURE) {
4686                                cmd->result = DID_REQUEUE << 16;
4687                                break;
4688                        }
4689                        if ((lpfc_cmd->result == IOERR_RX_DMA_FAILED ||
4690                             lpfc_cmd->result == IOERR_TX_DMA_FAILED) &&
4691                             pIocbOut->iocb.unsli3.sli3_bg.bgstat) {
4692                                if (scsi_get_prot_op(cmd) != SCSI_PROT_NORMAL) {
4693                                        /*
4694                                         * This is a response for a BG enabled
4695                                         * cmd. Parse BG error
4696                                         */
4697                                        lpfc_parse_bg_err(phba, lpfc_cmd,
4698                                                        pIocbOut);
4699                                        break;
4700                                } else {
4701                                        lpfc_printf_vlog(vport, KERN_WARNING,
4702                                                        LOG_BG,
4703                                                        "9031 non-zero BGSTAT "
4704                                                        "on unprotected cmd\n");
4705                                }
4706                        }
4707                        if ((lpfc_cmd->status == IOSTAT_REMOTE_STOP)
4708                                && (phba->sli_rev == LPFC_SLI_REV4)
4709                                && pnode) {
4710                                /* This IO was aborted by the target, we don't
4711                                 * know the rxid and because we did not send the
4712                                 * ABTS we cannot generate and RRQ.
4713                                 */
4714                                lpfc_set_rrq_active(phba, pnode,
4715                                        lpfc_cmd->cur_iocbq.sli4_lxritag,
4716                                        0, 0);
4717                        }
4718                        fallthrough;
4719                default:
4720                        cmd->result = DID_ERROR << 16;
4721                        break;
4722                }
4723
4724                if (!pnode || (pnode->nlp_state != NLP_STE_MAPPED_NODE))
4725                        cmd->result = DID_TRANSPORT_DISRUPTED << 16 |
4726                                      SAM_STAT_BUSY;
4727        } else
4728                cmd->result = DID_OK << 16;
4729
4730        if (cmd->result || lpfc_cmd->fcp_rsp->rspSnsLen) {
4731                uint32_t *lp = (uint32_t *)cmd->sense_buffer;
4732
4733                lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP,
4734                                 "0710 Iodone <%d/%llu> cmd x%px, error "
4735                                 "x%x SNS x%x x%x Data: x%x x%x\n",
4736                                 cmd->device->id, cmd->device->lun, cmd,
4737                                 cmd->result, *lp, *(lp + 3), cmd->retries,
4738                                 scsi_get_resid(cmd));
4739        }
4740
4741        lpfc_update_stats(vport, lpfc_cmd);
4742        if (vport->cfg_max_scsicmpl_time &&
4743           time_after(jiffies, lpfc_cmd->start_time +
4744                msecs_to_jiffies(vport->cfg_max_scsicmpl_time))) {
4745                spin_lock_irqsave(shost->host_lock, flags);
4746                if (pnode) {
4747                        if (pnode->cmd_qdepth >
4748                                atomic_read(&pnode->cmd_pending) &&
4749                                (atomic_read(&pnode->cmd_pending) >
4750                                LPFC_MIN_TGT_QDEPTH) &&
4751                                ((cmd->cmnd[0] == READ_10) ||
4752                                (cmd->cmnd[0] == WRITE_10)))
4753                                pnode->cmd_qdepth =
4754                                        atomic_read(&pnode->cmd_pending);
4755
4756                        pnode->last_change_time = jiffies;
4757                }
4758                spin_unlock_irqrestore(shost->host_lock, flags);
4759        }
4760        lpfc_scsi_unprep_dma_buf(phba, lpfc_cmd);
4761
4762        lpfc_cmd->pCmd = NULL;
4763        spin_unlock(&lpfc_cmd->buf_lock);
4764
4765#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
4766        if (lpfc_cmd->ts_cmd_start) {
4767                lpfc_cmd->ts_isr_cmpl = pIocbIn->isr_timestamp;
4768                lpfc_cmd->ts_data_io = ktime_get_ns();
4769                phba->ktime_last_cmd = lpfc_cmd->ts_data_io;
4770                lpfc_io_ktime(phba, lpfc_cmd);
4771        }
4772#endif
4773
4774        /* The sdev is not guaranteed to be valid post scsi_done upcall. */
4775        scsi_done(cmd);
4776
4777        /*
4778         * If there is an abort thread waiting for command completion
4779         * wake up the thread.
4780         */
4781        spin_lock(&lpfc_cmd->buf_lock);
4782        lpfc_cmd->cur_iocbq.iocb_flag &= ~LPFC_DRIVER_ABORTED;
4783        if (lpfc_cmd->waitq)
4784                wake_up(lpfc_cmd->waitq);
4785        spin_unlock(&lpfc_cmd->buf_lock);
4786
4787        lpfc_release_scsi_buf(phba, lpfc_cmd);
4788}
4789
4790/**
4791 * lpfc_scsi_prep_cmnd_buf_s3 - SLI-3 IOCB init for the IO
4792 * @vport: Pointer to vport object.
4793 * @lpfc_cmd: The scsi buffer which is going to be prep'ed.
4794 * @tmo: timeout value for the IO
4795 *
4796 * Based on the data-direction of the command, initialize IOCB
4797 * in the I/O buffer. Fill in the IOCB fields which are independent
4798 * of the scsi buffer
4799 *
4800 * RETURNS 0 - SUCCESS,
4801 **/
4802static int lpfc_scsi_prep_cmnd_buf_s3(struct lpfc_vport *vport,
4803                                      struct lpfc_io_buf *lpfc_cmd,
4804                                      uint8_t tmo)
4805{
4806        IOCB_t *iocb_cmd = &lpfc_cmd->cur_iocbq.iocb;
4807        struct lpfc_iocbq *piocbq = &lpfc_cmd->cur_iocbq;
4808        struct scsi_cmnd *scsi_cmnd = lpfc_cmd->pCmd;
4809        struct fcp_cmnd *fcp_cmnd = lpfc_cmd->fcp_cmnd;
4810        struct lpfc_nodelist *pnode = lpfc_cmd->ndlp;
4811        int datadir = scsi_cmnd->sc_data_direction;
4812        u32 fcpdl;
4813
4814        piocbq->iocb.un.fcpi.fcpi_XRdy = 0;
4815
4816        /*
4817         * There are three possibilities here - use scatter-gather segment, use
4818         * the single mapping, or neither.  Start the lpfc command prep by
4819         * bumping the bpl beyond the fcp_cmnd and fcp_rsp regions to the first
4820         * data bde entry.
4821         */
4822        if (scsi_sg_count(scsi_cmnd)) {
4823                if (datadir == DMA_TO_DEVICE) {
4824                        iocb_cmd->ulpCommand = CMD_FCP_IWRITE64_CR;
4825                        iocb_cmd->ulpPU = PARM_READ_CHECK;
4826                        if (vport->cfg_first_burst_size &&
4827                            (pnode->nlp_flag & NLP_FIRSTBURST)) {
4828                                u32 xrdy_len;
4829
4830                                fcpdl = scsi_bufflen(scsi_cmnd);
4831                                xrdy_len = min(fcpdl,
4832                                               vport->cfg_first_burst_size);
4833                                piocbq->iocb.un.fcpi.fcpi_XRdy = xrdy_len;
4834                        }
4835                        fcp_cmnd->fcpCntl3 = WRITE_DATA;
4836                } else {
4837                        iocb_cmd->ulpCommand = CMD_FCP_IREAD64_CR;
4838                        iocb_cmd->ulpPU = PARM_READ_CHECK;
4839                        fcp_cmnd->fcpCntl3 = READ_DATA;
4840                }
4841        } else {
4842                iocb_cmd->ulpCommand = CMD_FCP_ICMND64_CR;
4843                iocb_cmd->un.fcpi.fcpi_parm = 0;
4844                iocb_cmd->ulpPU = 0;
4845                fcp_cmnd->fcpCntl3 = 0;
4846        }
4847
4848        /*
4849         * Finish initializing those IOCB fields that are independent
4850         * of the scsi_cmnd request_buffer
4851         */
4852        piocbq->iocb.ulpContext = pnode->nlp_rpi;
4853        if (pnode->nlp_fcp_info & NLP_FCP_2_DEVICE)
4854                piocbq->iocb.ulpFCP2Rcvy = 1;
4855        else
4856                piocbq->iocb.ulpFCP2Rcvy = 0;
4857
4858        piocbq->iocb.ulpClass = (pnode->nlp_fcp_info & 0x0f);
4859        piocbq->context1  = lpfc_cmd;
4860        if (!piocbq->iocb_cmpl)
4861                piocbq->iocb_cmpl = lpfc_scsi_cmd_iocb_cmpl;
4862        piocbq->iocb.ulpTimeout = tmo;
4863        piocbq->vport = vport;
4864        return 0;
4865}
4866
4867/**
4868 * lpfc_scsi_prep_cmnd_buf_s4 - SLI-4 WQE init for the IO
4869 * @vport: Pointer to vport object.
4870 * @lpfc_cmd: The scsi buffer which is going to be prep'ed.
4871 * @tmo: timeout value for the IO
4872 *
4873 * Based on the data-direction of the command copy WQE template
4874 * to I/O buffer WQE. Fill in the WQE fields which are independent
4875 * of the scsi buffer
4876 *
4877 * RETURNS 0 - SUCCESS,
4878 **/
4879static int lpfc_scsi_prep_cmnd_buf_s4(struct lpfc_vport *vport,
4880                                      struct lpfc_io_buf *lpfc_cmd,
4881                                      uint8_t tmo)
4882{
4883        struct lpfc_hba *phba = vport->phba;
4884        struct scsi_cmnd *scsi_cmnd = lpfc_cmd->pCmd;
4885        struct fcp_cmnd *fcp_cmnd = lpfc_cmd->fcp_cmnd;
4886        struct lpfc_sli4_hdw_queue *hdwq = NULL;
4887        struct lpfc_iocbq *pwqeq = &lpfc_cmd->cur_iocbq;
4888        struct lpfc_nodelist *pnode = lpfc_cmd->ndlp;
4889        union lpfc_wqe128 *wqe = &pwqeq->wqe;
4890        u16 idx = lpfc_cmd->hdwq_no;
4891        int datadir = scsi_cmnd->sc_data_direction;
4892
4893        hdwq = &phba->sli4_hba.hdwq[idx];
4894
4895        /* Initialize 64 bytes only */
4896        memset(wqe, 0, sizeof(union lpfc_wqe128));
4897
4898        /*
4899         * There are three possibilities here - use scatter-gather segment, use
4900         * the single mapping, or neither.
4901         */
4902        if (scsi_sg_count(scsi_cmnd)) {
4903                if (datadir == DMA_TO_DEVICE) {
4904                        /* From the iwrite template, initialize words 7 -  11 */
4905                        memcpy(&wqe->words[7],
4906                               &lpfc_iwrite_cmd_template.words[7],
4907                               sizeof(uint32_t) * 5);
4908
4909                        fcp_cmnd->fcpCntl3 = WRITE_DATA;
4910                        if (hdwq)
4911                                hdwq->scsi_cstat.output_requests++;
4912                } else {
4913                        /* From the iread template, initialize words 7 - 11 */
4914                        memcpy(&wqe->words[7],
4915                               &lpfc_iread_cmd_template.words[7],
4916                               sizeof(uint32_t) * 5);
4917
4918                        /* Word 7 */
4919                        bf_set(wqe_tmo, &wqe->fcp_iread.wqe_com, tmo);
4920
4921                        fcp_cmnd->fcpCntl3 = READ_DATA;
4922                        if (hdwq)
4923                                hdwq->scsi_cstat.input_requests++;
4924
4925                        /* For a CMF Managed port, iod must be zero'ed */
4926                        if (phba->cmf_active_mode == LPFC_CFG_MANAGED)
4927                                bf_set(wqe_iod, &wqe->fcp_iread.wqe_com,
4928                                       LPFC_WQE_IOD_NONE);
4929                }
4930        } else {
4931                /* From the icmnd template, initialize words 4 - 11 */
4932                memcpy(&wqe->words[4], &lpfc_icmnd_cmd_template.words[4],
4933                       sizeof(uint32_t) * 8);
4934
4935                /* Word 7 */
4936                bf_set(wqe_tmo, &wqe->fcp_icmd.wqe_com, tmo);
4937
4938                fcp_cmnd->fcpCntl3 = 0;
4939                if (hdwq)
4940                        hdwq->scsi_cstat.control_requests++;
4941        }
4942
4943        /*
4944         * Finish initializing those WQE fields that are independent
4945         * of the request_buffer
4946         */
4947
4948         /* Word 3 */
4949        bf_set(payload_offset_len, &wqe->fcp_icmd,
4950               sizeof(struct fcp_cmnd) + sizeof(struct fcp_rsp));
4951
4952        /* Word 6 */
4953        bf_set(wqe_ctxt_tag, &wqe->generic.wqe_com,
4954               phba->sli4_hba.rpi_ids[pnode->nlp_rpi]);
4955        bf_set(wqe_xri_tag, &wqe->generic.wqe_com, pwqeq->sli4_xritag);
4956
4957        /* Word 7*/
4958        if (pnode->nlp_fcp_info & NLP_FCP_2_DEVICE)
4959                bf_set(wqe_erp, &wqe->generic.wqe_com, 1);
4960
4961        bf_set(wqe_class, &wqe->generic.wqe_com,
4962               (pnode->nlp_fcp_info & 0x0f));
4963
4964         /* Word 8 */
4965        wqe->generic.wqe_com.abort_tag = pwqeq->iotag;
4966
4967        /* Word 9 */
4968        bf_set(wqe_reqtag, &wqe->generic.wqe_com, pwqeq->iotag);
4969
4970        pwqeq->vport = vport;
4971        pwqeq->vport = vport;
4972        pwqeq->context1 = lpfc_cmd;
4973        pwqeq->hba_wqidx = lpfc_cmd->hdwq_no;
4974        pwqeq->wqe_cmpl = lpfc_fcp_io_cmd_wqe_cmpl;
4975
4976        return 0;
4977}
4978
4979/**
4980 * lpfc_scsi_prep_cmnd - Wrapper func for convert scsi cmnd to FCP info unit
4981 * @vport: The virtual port for which this call is being executed.
4982 * @lpfc_cmd: The scsi command which needs to send.
4983 * @pnode: Pointer to lpfc_nodelist.
4984 *
4985 * This routine initializes fcp_cmnd and iocb data structure from scsi command
4986 * to transfer for device with SLI3 interface spec.
4987 **/
4988static int
4989lpfc_scsi_prep_cmnd(struct lpfc_vport *vport, struct lpfc_io_buf *lpfc_cmd,
4990                    struct lpfc_nodelist *pnode)
4991{
4992        struct scsi_cmnd *scsi_cmnd = lpfc_cmd->pCmd;
4993        struct fcp_cmnd *fcp_cmnd = lpfc_cmd->fcp_cmnd;
4994        u8 *ptr;
4995
4996        if (!pnode)
4997                return 0;
4998
4999        lpfc_cmd->fcp_rsp->rspSnsLen = 0;
5000        /* clear task management bits */
5001        lpfc_cmd->fcp_cmnd->fcpCntl2 = 0;
5002
5003        int_to_scsilun(lpfc_cmd->pCmd->device->lun,
5004                       &lpfc_cmd->fcp_cmnd->fcp_lun);
5005
5006        ptr = &fcp_cmnd->fcpCdb[0];
5007        memcpy(ptr, scsi_cmnd->cmnd, scsi_cmnd->cmd_len);
5008        if (scsi_cmnd->cmd_len < LPFC_FCP_CDB_LEN) {
5009                ptr += scsi_cmnd->cmd_len;
5010                memset(ptr, 0, (LPFC_FCP_CDB_LEN - scsi_cmnd->cmd_len));
5011        }
5012
5013        fcp_cmnd->fcpCntl1 = SIMPLE_Q;
5014
5015        lpfc_scsi_prep_cmnd_buf(vport, lpfc_cmd, lpfc_cmd->timeout);
5016
5017        return 0;
5018}
5019
5020/**
5021 * lpfc_scsi_prep_task_mgmt_cmd - Convert SLI3 scsi TM cmd to FCP info unit
5022 * @vport: The virtual port for which this call is being executed.
5023 * @lpfc_cmd: Pointer to lpfc_io_buf data structure.
5024 * @lun: Logical unit number.
5025 * @task_mgmt_cmd: SCSI task management command.
5026 *
5027 * This routine creates FCP information unit corresponding to @task_mgmt_cmd
5028 * for device with SLI-3 interface spec.
5029 *
5030 * Return codes:
5031 *   0 - Error
5032 *   1 - Success
5033 **/
5034static int
5035lpfc_scsi_prep_task_mgmt_cmd(struct lpfc_vport *vport,
5036                             struct lpfc_io_buf *lpfc_cmd,
5037                             uint64_t lun,
5038                             uint8_t task_mgmt_cmd)
5039{
5040        struct lpfc_iocbq *piocbq;
5041        IOCB_t *piocb;
5042        struct fcp_cmnd *fcp_cmnd;
5043        struct lpfc_rport_data *rdata = lpfc_cmd->rdata;
5044        struct lpfc_nodelist *ndlp = rdata->pnode;
5045
5046        if (!ndlp || ndlp->nlp_state != NLP_STE_MAPPED_NODE)
5047                return 0;
5048
5049        piocbq = &(lpfc_cmd->cur_iocbq);
5050        piocbq->vport = vport;
5051
5052        piocb = &piocbq->iocb;
5053
5054        fcp_cmnd = lpfc_cmd->fcp_cmnd;
5055        /* Clear out any old data in the FCP command area */
5056        memset(fcp_cmnd, 0, sizeof(struct fcp_cmnd));
5057        int_to_scsilun(lun, &fcp_cmnd->fcp_lun);
5058        fcp_cmnd->fcpCntl2 = task_mgmt_cmd;
5059        if (vport->phba->sli_rev == 3 &&
5060            !(vport->phba->sli3_options & LPFC_SLI3_BG_ENABLED))
5061                lpfc_fcpcmd_to_iocb(piocb->unsli3.fcp_ext.icd, fcp_cmnd);
5062        piocb->ulpCommand = CMD_FCP_ICMND64_CR;
5063        piocb->ulpContext = ndlp->nlp_rpi;
5064        if (vport->phba->sli_rev == LPFC_SLI_REV4) {
5065                piocb->ulpContext =
5066                  vport->phba->sli4_hba.rpi_ids[ndlp->nlp_rpi];
5067        }
5068        piocb->ulpFCP2Rcvy = (ndlp->nlp_fcp_info & NLP_FCP_2_DEVICE) ? 1 : 0;
5069        piocb->ulpClass = (ndlp->nlp_fcp_info & 0x0f);
5070        piocb->ulpPU = 0;
5071        piocb->un.fcpi.fcpi_parm = 0;
5072
5073        /* ulpTimeout is only one byte */
5074        if (lpfc_cmd->timeout > 0xff) {
5075                /*
5076                 * Do not timeout the command at the firmware level.
5077                 * The driver will provide the timeout mechanism.
5078                 */
5079                piocb->ulpTimeout = 0;
5080        } else
5081                piocb->ulpTimeout = lpfc_cmd->timeout;
5082
5083        if (vport->phba->sli_rev == LPFC_SLI_REV4)
5084                lpfc_sli4_set_rsp_sgl_last(vport->phba, lpfc_cmd);
5085
5086        return 1;
5087}
5088
5089/**
5090 * lpfc_scsi_api_table_setup - Set up scsi api function jump table
5091 * @phba: The hba struct for which this call is being executed.
5092 * @dev_grp: The HBA PCI-Device group number.
5093 *
5094 * This routine sets up the SCSI interface API function jump table in @phba
5095 * struct.
5096 * Returns: 0 - success, -ENODEV - failure.
5097 **/
5098int
5099lpfc_scsi_api_table_setup(struct lpfc_hba *phba, uint8_t dev_grp)
5100{
5101
5102        phba->lpfc_scsi_unprep_dma_buf = lpfc_scsi_unprep_dma_buf;
5103
5104        switch (dev_grp) {
5105        case LPFC_PCI_DEV_LP:
5106                phba->lpfc_scsi_prep_dma_buf = lpfc_scsi_prep_dma_buf_s3;
5107                phba->lpfc_bg_scsi_prep_dma_buf = lpfc_bg_scsi_prep_dma_buf_s3;
5108                phba->lpfc_release_scsi_buf = lpfc_release_scsi_buf_s3;
5109                phba->lpfc_get_scsi_buf = lpfc_get_scsi_buf_s3;
5110                phba->lpfc_scsi_prep_cmnd_buf = lpfc_scsi_prep_cmnd_buf_s3;
5111                break;
5112        case LPFC_PCI_DEV_OC:
5113                phba->lpfc_scsi_prep_dma_buf = lpfc_scsi_prep_dma_buf_s4;
5114                phba->lpfc_bg_scsi_prep_dma_buf = lpfc_bg_scsi_prep_dma_buf_s4;
5115                phba->lpfc_release_scsi_buf = lpfc_release_scsi_buf_s4;
5116                phba->lpfc_get_scsi_buf = lpfc_get_scsi_buf_s4;
5117                phba->lpfc_scsi_prep_cmnd_buf = lpfc_scsi_prep_cmnd_buf_s4;
5118                break;
5119        default:
5120                lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
5121                                "1418 Invalid HBA PCI-device group: 0x%x\n",
5122                                dev_grp);
5123                return -ENODEV;
5124        }
5125        phba->lpfc_rampdown_queue_depth = lpfc_rampdown_queue_depth;
5126        phba->lpfc_scsi_cmd_iocb_cmpl = lpfc_scsi_cmd_iocb_cmpl;
5127        return 0;
5128}
5129
5130/**
5131 * lpfc_tskmgmt_def_cmpl - IOCB completion routine for task management command
5132 * @phba: The Hba for which this call is being executed.
5133 * @cmdiocbq: Pointer to lpfc_iocbq data structure.
5134 * @rspiocbq: Pointer to lpfc_iocbq data structure.
5135 *
5136 * This routine is IOCB completion routine for device reset and target reset
5137 * routine. This routine release scsi buffer associated with lpfc_cmd.
5138 **/
5139static void
5140lpfc_tskmgmt_def_cmpl(struct lpfc_hba *phba,
5141                        struct lpfc_iocbq *cmdiocbq,
5142                        struct lpfc_iocbq *rspiocbq)
5143{
5144        struct lpfc_io_buf *lpfc_cmd =
5145                (struct lpfc_io_buf *) cmdiocbq->context1;
5146        if (lpfc_cmd)
5147                lpfc_release_scsi_buf(phba, lpfc_cmd);
5148        return;
5149}
5150
5151/**
5152 * lpfc_check_pci_resettable - Walks list of devices on pci_dev's bus to check
5153 *                             if issuing a pci_bus_reset is possibly unsafe
5154 * @phba: lpfc_hba pointer.
5155 *
5156 * Description:
5157 * Walks the bus_list to ensure only PCI devices with Emulex
5158 * vendor id, device ids that support hot reset, and only one occurrence
5159 * of function 0.
5160 *
5161 * Returns:
5162 * -EBADSLT,  detected invalid device
5163 *      0,    successful
5164 */
5165int
5166lpfc_check_pci_resettable(struct lpfc_hba *phba)
5167{
5168        const struct pci_dev *pdev = phba->pcidev;
5169        struct pci_dev *ptr = NULL;
5170        u8 counter = 0;
5171
5172        /* Walk the list of devices on the pci_dev's bus */
5173        list_for_each_entry(ptr, &pdev->bus->devices, bus_list) {
5174                /* Check for Emulex Vendor ID */
5175                if (ptr->vendor != PCI_VENDOR_ID_EMULEX) {
5176                        lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
5177                                        "8346 Non-Emulex vendor found: "
5178                                        "0x%04x\n", ptr->vendor);
5179                        return -EBADSLT;
5180                }
5181
5182                /* Check for valid Emulex Device ID */
5183                if (phba->sli_rev != LPFC_SLI_REV4 ||
5184                    phba->hba_flag & HBA_FCOE_MODE) {
5185                        lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
5186                                        "8347 Incapable PCI reset device: "
5187                                        "0x%04x\n", ptr->device);
5188                        return -EBADSLT;
5189                }
5190
5191                /* Check for only one function 0 ID to ensure only one HBA on
5192                 * secondary bus
5193                 */
5194                if (ptr->devfn == 0) {
5195                        if (++counter > 1) {
5196                                lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
5197                                                "8348 More than one device on "
5198                                                "secondary bus found\n");
5199                                return -EBADSLT;
5200                        }
5201                }
5202        }
5203
5204        return 0;
5205}
5206
5207/**
5208 * lpfc_info - Info entry point of scsi_host_template data structure
5209 * @host: The scsi host for which this call is being executed.
5210 *
5211 * This routine provides module information about hba.
5212 *
5213 * Reutrn code:
5214 *   Pointer to char - Success.
5215 **/
5216const char *
5217lpfc_info(struct Scsi_Host *host)
5218{
5219        struct lpfc_vport *vport = (struct lpfc_vport *) host->hostdata;
5220        struct lpfc_hba   *phba = vport->phba;
5221        int link_speed = 0;
5222        static char lpfcinfobuf[384];
5223        char tmp[384] = {0};
5224
5225        memset(lpfcinfobuf, 0, sizeof(lpfcinfobuf));
5226        if (phba && phba->pcidev){
5227                /* Model Description */
5228                scnprintf(tmp, sizeof(tmp), phba->ModelDesc);
5229                if (strlcat(lpfcinfobuf, tmp, sizeof(lpfcinfobuf)) >=
5230                    sizeof(lpfcinfobuf))
5231                        goto buffer_done;
5232
5233                /* PCI Info */
5234                scnprintf(tmp, sizeof(tmp),
5235                          " on PCI bus %02x device %02x irq %d",
5236                          phba->pcidev->bus->number, phba->pcidev->devfn,
5237                          phba->pcidev->irq);
5238                if (strlcat(lpfcinfobuf, tmp, sizeof(lpfcinfobuf)) >=
5239                    sizeof(lpfcinfobuf))
5240                        goto buffer_done;
5241
5242                /* Port Number */
5243                if (phba->Port[0]) {
5244                        scnprintf(tmp, sizeof(tmp), " port %s", phba->Port);
5245                        if (strlcat(lpfcinfobuf, tmp, sizeof(lpfcinfobuf)) >=
5246                            sizeof(lpfcinfobuf))
5247                                goto buffer_done;
5248                }
5249
5250                /* Link Speed */
5251                link_speed = lpfc_sli_port_speed_get(phba);
5252                if (link_speed != 0) {
5253                        scnprintf(tmp, sizeof(tmp),
5254                                  " Logical Link Speed: %d Mbps", link_speed);
5255                        if (strlcat(lpfcinfobuf, tmp, sizeof(lpfcinfobuf)) >=
5256                            sizeof(lpfcinfobuf))
5257                                goto buffer_done;
5258                }
5259
5260                /* PCI resettable */
5261                if (!lpfc_check_pci_resettable(phba)) {
5262                        scnprintf(tmp, sizeof(tmp), " PCI resettable");
5263                        strlcat(lpfcinfobuf, tmp, sizeof(lpfcinfobuf));
5264                }
5265        }
5266
5267buffer_done:
5268        return lpfcinfobuf;
5269}
5270
5271/**
5272 * lpfc_poll_rearm_timer - Routine to modify fcp_poll timer of hba
5273 * @phba: The Hba for which this call is being executed.
5274 *
5275 * This routine modifies fcp_poll_timer  field of @phba by cfg_poll_tmo.
5276 * The default value of cfg_poll_tmo is 10 milliseconds.
5277 **/
5278static __inline__ void lpfc_poll_rearm_timer(struct lpfc_hba * phba)
5279{
5280        unsigned long  poll_tmo_expires =
5281                (jiffies + msecs_to_jiffies(phba->cfg_poll_tmo));
5282
5283        if (!list_empty(&phba->sli.sli3_ring[LPFC_FCP_RING].txcmplq))
5284                mod_timer(&phba->fcp_poll_timer,
5285                          poll_tmo_expires);
5286}
5287
5288/**
5289 * lpfc_poll_start_timer - Routine to start fcp_poll_timer of HBA
5290 * @phba: The Hba for which this call is being executed.
5291 *
5292 * This routine starts the fcp_poll_timer of @phba.
5293 **/
5294void lpfc_poll_start_timer(struct lpfc_hba * phba)
5295{
5296        lpfc_poll_rearm_timer(phba);
5297}
5298
5299/**
5300 * lpfc_poll_timeout - Restart polling timer
5301 * @t: Timer construct where lpfc_hba data structure pointer is obtained.
5302 *
5303 * This routine restarts fcp_poll timer, when FCP ring  polling is enable
5304 * and FCP Ring interrupt is disable.
5305 **/
5306void lpfc_poll_timeout(struct timer_list *t)
5307{
5308        struct lpfc_hba *phba = from_timer(phba, t, fcp_poll_timer);
5309
5310        if (phba->cfg_poll & ENABLE_FCP_RING_POLLING) {
5311                lpfc_sli_handle_fast_ring_event(phba,
5312                        &phba->sli.sli3_ring[LPFC_FCP_RING], HA_R0RE_REQ);
5313
5314                if (phba->cfg_poll & DISABLE_FCP_RING_INT)
5315                        lpfc_poll_rearm_timer(phba);
5316        }
5317}
5318
5319/*
5320 * lpfc_get_vmid_from_hashtable - search the UUID in the hash table
5321 * @vport: The virtual port for which this call is being executed.
5322 * @hash: calculated hash value
5323 * @buf: uuid associated with the VE
5324 * Return the VMID entry associated with the UUID
5325 * Make sure to acquire the appropriate lock before invoking this routine.
5326 */
5327struct lpfc_vmid *lpfc_get_vmid_from_hashtable(struct lpfc_vport *vport,
5328                                              u32 hash, u8 *buf)
5329{
5330        struct lpfc_vmid *vmp;
5331
5332        hash_for_each_possible(vport->hash_table, vmp, hnode, hash) {
5333                if (memcmp(&vmp->host_vmid[0], buf, 16) == 0)
5334                        return vmp;
5335        }
5336        return NULL;
5337}
5338
5339/*
5340 * lpfc_put_vmid_in_hashtable - put the VMID in the hash table
5341 * @vport: The virtual port for which this call is being executed.
5342 * @hash - calculated hash value
5343 * @vmp: Pointer to a VMID entry representing a VM sending I/O
5344 *
5345 * This routine will insert the newly acquired VMID entity in the hash table.
5346 * Make sure to acquire the appropriate lock before invoking this routine.
5347 */
5348static void
5349lpfc_put_vmid_in_hashtable(struct lpfc_vport *vport, u32 hash,
5350                           struct lpfc_vmid *vmp)
5351{
5352        hash_add(vport->hash_table, &vmp->hnode, hash);
5353}
5354
5355/*
5356 * lpfc_vmid_hash_fn - create a hash value of the UUID
5357 * @vmid: uuid associated with the VE
5358 * @len: length of the VMID string
5359 * Returns the calculated hash value
5360 */
5361int lpfc_vmid_hash_fn(const char *vmid, int len)
5362{
5363        int c;
5364        int hash = 0;
5365
5366        if (len == 0)
5367                return 0;
5368        while (len--) {
5369                c = *vmid++;
5370                if (c >= 'A' && c <= 'Z')
5371                        c += 'a' - 'A';
5372
5373                hash = (hash + (c << LPFC_VMID_HASH_SHIFT) +
5374                        (c >> LPFC_VMID_HASH_SHIFT)) * 19;
5375        }
5376
5377        return hash & LPFC_VMID_HASH_MASK;
5378}
5379
5380/*
5381 * lpfc_vmid_update_entry - update the vmid entry in the hash table
5382 * @vport: The virtual port for which this call is being executed.
5383 * @cmd: address of scsi cmd descriptor
5384 * @vmp: Pointer to a VMID entry representing a VM sending I/O
5385 * @tag: VMID tag
5386 */
5387static void lpfc_vmid_update_entry(struct lpfc_vport *vport, struct scsi_cmnd
5388                                   *cmd, struct lpfc_vmid *vmp,
5389                                   union lpfc_vmid_io_tag *tag)
5390{
5391        u64 *lta;
5392
5393        if (vport->vmid_priority_tagging)
5394                tag->cs_ctl_vmid = vmp->un.cs_ctl_vmid;
5395        else
5396                tag->app_id = vmp->un.app_id;
5397
5398        if (cmd->sc_data_direction == DMA_TO_DEVICE)
5399                vmp->io_wr_cnt++;
5400        else
5401                vmp->io_rd_cnt++;
5402
5403        /* update the last access timestamp in the table */
5404        lta = per_cpu_ptr(vmp->last_io_time, raw_smp_processor_id());
5405        *lta = jiffies;
5406}
5407
5408static void lpfc_vmid_assign_cs_ctl(struct lpfc_vport *vport,
5409                                    struct lpfc_vmid *vmid)
5410{
5411        u32 hash;
5412        struct lpfc_vmid *pvmid;
5413
5414        if (vport->port_type == LPFC_PHYSICAL_PORT) {
5415                vmid->un.cs_ctl_vmid = lpfc_vmid_get_cs_ctl(vport);
5416        } else {
5417                hash = lpfc_vmid_hash_fn(vmid->host_vmid, vmid->vmid_len);
5418                pvmid =
5419                    lpfc_get_vmid_from_hashtable(vport->phba->pport, hash,
5420                                                vmid->host_vmid);
5421                if (pvmid)
5422                        vmid->un.cs_ctl_vmid = pvmid->un.cs_ctl_vmid;
5423                else
5424                        vmid->un.cs_ctl_vmid = lpfc_vmid_get_cs_ctl(vport);
5425        }
5426}
5427
5428/*
5429 * lpfc_vmid_get_appid - get the VMID associated with the UUID
5430 * @vport: The virtual port for which this call is being executed.
5431 * @uuid: UUID associated with the VE
5432 * @cmd: address of scsi_cmd descriptor
5433 * @tag: VMID tag
5434 * Returns status of the function
5435 */
5436static int lpfc_vmid_get_appid(struct lpfc_vport *vport, char *uuid, struct
5437                               scsi_cmnd * cmd, union lpfc_vmid_io_tag *tag)
5438{
5439        struct lpfc_vmid *vmp = NULL;
5440        int hash, len, rc, i;
5441
5442        /* check if QFPA is complete */
5443        if (lpfc_vmid_is_type_priority_tag(vport) && !(vport->vmid_flag &
5444              LPFC_VMID_QFPA_CMPL)) {
5445                vport->work_port_events |= WORKER_CHECK_VMID_ISSUE_QFPA;
5446                return -EAGAIN;
5447        }
5448
5449        /* search if the UUID has already been mapped to the VMID */
5450        len = strlen(uuid);
5451        hash = lpfc_vmid_hash_fn(uuid, len);
5452
5453        /* search for the VMID in the table */
5454        read_lock(&vport->vmid_lock);
5455        vmp = lpfc_get_vmid_from_hashtable(vport, hash, uuid);
5456
5457        /* if found, check if its already registered  */
5458        if (vmp  && vmp->flag & LPFC_VMID_REGISTERED) {
5459                read_unlock(&vport->vmid_lock);
5460                lpfc_vmid_update_entry(vport, cmd, vmp, tag);
5461                rc = 0;
5462        } else if (vmp && (vmp->flag & LPFC_VMID_REQ_REGISTER ||
5463                           vmp->flag & LPFC_VMID_DE_REGISTER)) {
5464                /* else if register or dereg request has already been sent */
5465                /* Hence VMID tag will not be added for this I/O */
5466                read_unlock(&vport->vmid_lock);
5467                rc = -EBUSY;
5468        } else {
5469                /* The VMID was not found in the hashtable. At this point, */
5470                /* drop the read lock first before proceeding further */
5471                read_unlock(&vport->vmid_lock);
5472                /* start the process to obtain one as per the */
5473                /* type of the VMID indicated */
5474                write_lock(&vport->vmid_lock);
5475                vmp = lpfc_get_vmid_from_hashtable(vport, hash, uuid);
5476
5477                /* while the read lock was released, in case the entry was */
5478                /* added by other context or is in process of being added */
5479                if (vmp && vmp->flag & LPFC_VMID_REGISTERED) {
5480                        lpfc_vmid_update_entry(vport, cmd, vmp, tag);
5481                        write_unlock(&vport->vmid_lock);
5482                        return 0;
5483                } else if (vmp && vmp->flag & LPFC_VMID_REQ_REGISTER) {
5484                        write_unlock(&vport->vmid_lock);
5485                        return -EBUSY;
5486                }
5487
5488                /* else search and allocate a free slot in the hash table */
5489                if (vport->cur_vmid_cnt < vport->max_vmid) {
5490                        for (i = 0; i < vport->max_vmid; i++) {
5491                                vmp = vport->vmid + i;
5492                                if (vmp->flag == LPFC_VMID_SLOT_FREE)
5493                                        break;
5494                        }
5495                        if (i == vport->max_vmid)
5496                                vmp = NULL;
5497                } else {
5498                        vmp = NULL;
5499                }
5500
5501                if (!vmp) {
5502                        write_unlock(&vport->vmid_lock);
5503                        return -ENOMEM;
5504                }
5505
5506                /* Add the vmid and register */
5507                lpfc_put_vmid_in_hashtable(vport, hash, vmp);
5508                vmp->vmid_len = len;
5509                memcpy(vmp->host_vmid, uuid, vmp->vmid_len);
5510                vmp->io_rd_cnt = 0;
5511                vmp->io_wr_cnt = 0;
5512                vmp->flag = LPFC_VMID_SLOT_USED;
5513
5514                vmp->delete_inactive =
5515                        vport->vmid_inactivity_timeout ? 1 : 0;
5516
5517                /* if type priority tag, get next available VMID */
5518                if (lpfc_vmid_is_type_priority_tag(vport))
5519                        lpfc_vmid_assign_cs_ctl(vport, vmp);
5520
5521                /* allocate the per cpu variable for holding */
5522                /* the last access time stamp only if VMID is enabled */
5523                if (!vmp->last_io_time)
5524                        vmp->last_io_time = __alloc_percpu(sizeof(u64),
5525                                                           __alignof__(struct
5526                                                           lpfc_vmid));
5527                if (!vmp->last_io_time) {
5528                        hash_del(&vmp->hnode);
5529                        vmp->flag = LPFC_VMID_SLOT_FREE;
5530                        write_unlock(&vport->vmid_lock);
5531                        return -EIO;
5532                }
5533
5534                write_unlock(&vport->vmid_lock);
5535
5536                /* complete transaction with switch */
5537                if (lpfc_vmid_is_type_priority_tag(vport))
5538                        rc = lpfc_vmid_uvem(vport, vmp, true);
5539                else
5540                        rc = lpfc_vmid_cmd(vport, SLI_CTAS_RAPP_IDENT, vmp);
5541                if (!rc) {
5542                        write_lock(&vport->vmid_lock);
5543                        vport->cur_vmid_cnt++;
5544                        vmp->flag |= LPFC_VMID_REQ_REGISTER;
5545                        write_unlock(&vport->vmid_lock);
5546                } else {
5547                        write_lock(&vport->vmid_lock);
5548                        hash_del(&vmp->hnode);
5549                        vmp->flag = LPFC_VMID_SLOT_FREE;
5550                        free_percpu(vmp->last_io_time);
5551                        write_unlock(&vport->vmid_lock);
5552                        return -EIO;
5553                }
5554
5555                /* finally, enable the idle timer once */
5556                if (!(vport->phba->pport->vmid_flag & LPFC_VMID_TIMER_ENBLD)) {
5557                        mod_timer(&vport->phba->inactive_vmid_poll,
5558                                  jiffies +
5559                                  msecs_to_jiffies(1000 * LPFC_VMID_TIMER));
5560                        vport->phba->pport->vmid_flag |= LPFC_VMID_TIMER_ENBLD;
5561                }
5562        }
5563        return rc;
5564}
5565
5566/*
5567 * lpfc_is_command_vm_io - get the UUID from blk cgroup
5568 * @cmd: Pointer to scsi_cmnd data structure
5569 * Returns UUID if present, otherwise NULL
5570 */
5571static char *lpfc_is_command_vm_io(struct scsi_cmnd *cmd)
5572{
5573        struct bio *bio = scsi_cmd_to_rq(cmd)->bio;
5574
5575        return bio ? blkcg_get_fc_appid(bio) : NULL;
5576}
5577
5578/**
5579 * lpfc_queuecommand - scsi_host_template queuecommand entry point
5580 * @shost: kernel scsi host pointer.
5581 * @cmnd: Pointer to scsi_cmnd data structure.
5582 *
5583 * Driver registers this routine to scsi midlayer to submit a @cmd to process.
5584 * This routine prepares an IOCB from scsi command and provides to firmware.
5585 * The @done callback is invoked after driver finished processing the command.
5586 *
5587 * Return value :
5588 *   0 - Success
5589 *   SCSI_MLQUEUE_HOST_BUSY - Block all devices served by this host temporarily.
5590 **/
5591static int
5592lpfc_queuecommand(struct Scsi_Host *shost, struct scsi_cmnd *cmnd)
5593{
5594        struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
5595        struct lpfc_hba   *phba = vport->phba;
5596        struct lpfc_rport_data *rdata;
5597        struct lpfc_nodelist *ndlp;
5598        struct lpfc_io_buf *lpfc_cmd;
5599        struct fc_rport *rport = starget_to_rport(scsi_target(cmnd->device));
5600        int err, idx;
5601        u8 *uuid = NULL;
5602        uint64_t start;
5603
5604        start = ktime_get_ns();
5605        rdata = lpfc_rport_data_from_scsi_device(cmnd->device);
5606
5607        /* sanity check on references */
5608        if (unlikely(!rdata) || unlikely(!rport))
5609                goto out_fail_command;
5610
5611        err = fc_remote_port_chkready(rport);
5612        if (err) {
5613                cmnd->result = err;
5614                goto out_fail_command;
5615        }
5616        ndlp = rdata->pnode;
5617
5618        if ((scsi_get_prot_op(cmnd) != SCSI_PROT_NORMAL) &&
5619                (!(phba->sli3_options & LPFC_SLI3_BG_ENABLED))) {
5620
5621                lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
5622                                "9058 BLKGRD: ERROR: rcvd protected cmd:%02x"
5623                                " op:%02x str=%s without registering for"
5624                                " BlockGuard - Rejecting command\n",
5625                                cmnd->cmnd[0], scsi_get_prot_op(cmnd),
5626                                dif_op_str[scsi_get_prot_op(cmnd)]);
5627                goto out_fail_command;
5628        }
5629
5630        /*
5631         * Catch race where our node has transitioned, but the
5632         * transport is still transitioning.
5633         */
5634        if (!ndlp)
5635                goto out_tgt_busy1;
5636
5637        /* Check if IO qualifies for CMF */
5638        if (phba->cmf_active_mode != LPFC_CFG_OFF &&
5639            cmnd->sc_data_direction == DMA_FROM_DEVICE &&
5640            (scsi_sg_count(cmnd))) {
5641                /* Latency start time saved in rx_cmd_start later in routine */
5642                err = lpfc_update_cmf_cmd(phba, scsi_bufflen(cmnd));
5643                if (err)
5644                        goto out_tgt_busy1;
5645        }
5646
5647        if (lpfc_ndlp_check_qdepth(phba, ndlp)) {
5648                if (atomic_read(&ndlp->cmd_pending) >= ndlp->cmd_qdepth) {
5649                        lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP_ERROR,
5650                                         "3377 Target Queue Full, scsi Id:%d "
5651                                         "Qdepth:%d Pending command:%d"
5652                                         " WWNN:%02x:%02x:%02x:%02x:"
5653                                         "%02x:%02x:%02x:%02x, "
5654                                         " WWPN:%02x:%02x:%02x:%02x:"
5655                                         "%02x:%02x:%02x:%02x",
5656                                         ndlp->nlp_sid, ndlp->cmd_qdepth,
5657                                         atomic_read(&ndlp->cmd_pending),
5658                                         ndlp->nlp_nodename.u.wwn[0],
5659                                         ndlp->nlp_nodename.u.wwn[1],
5660                                         ndlp->nlp_nodename.u.wwn[2],
5661                                         ndlp->nlp_nodename.u.wwn[3],
5662                                         ndlp->nlp_nodename.u.wwn[4],
5663                                         ndlp->nlp_nodename.u.wwn[5],
5664                                         ndlp->nlp_nodename.u.wwn[6],
5665                                         ndlp->nlp_nodename.u.wwn[7],
5666                                         ndlp->nlp_portname.u.wwn[0],
5667                                         ndlp->nlp_portname.u.wwn[1],
5668                                         ndlp->nlp_portname.u.wwn[2],
5669                                         ndlp->nlp_portname.u.wwn[3],
5670                                         ndlp->nlp_portname.u.wwn[4],
5671                                         ndlp->nlp_portname.u.wwn[5],
5672                                         ndlp->nlp_portname.u.wwn[6],
5673                                         ndlp->nlp_portname.u.wwn[7]);
5674                        goto out_tgt_busy2;
5675                }
5676        }
5677
5678        lpfc_cmd = lpfc_get_scsi_buf(phba, ndlp, cmnd);
5679        if (lpfc_cmd == NULL) {
5680                lpfc_rampdown_queue_depth(phba);
5681
5682                lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP_ERROR,
5683                                 "0707 driver's buffer pool is empty, "
5684                                 "IO busied\n");
5685                goto out_host_busy;
5686        }
5687        lpfc_cmd->rx_cmd_start = start;
5688
5689        /*
5690         * Store the midlayer's command structure for the completion phase
5691         * and complete the command initialization.
5692         */
5693        lpfc_cmd->pCmd  = cmnd;
5694        lpfc_cmd->rdata = rdata;
5695        lpfc_cmd->ndlp = ndlp;
5696        lpfc_cmd->cur_iocbq.iocb_cmpl = NULL;
5697        cmnd->host_scribble = (unsigned char *)lpfc_cmd;
5698
5699        err = lpfc_scsi_prep_cmnd(vport, lpfc_cmd, ndlp);
5700        if (err)
5701                goto out_host_busy_release_buf;
5702
5703        if (scsi_get_prot_op(cmnd) != SCSI_PROT_NORMAL) {
5704                if (vport->phba->cfg_enable_bg) {
5705                        lpfc_printf_vlog(vport,
5706                                         KERN_INFO, LOG_SCSI_CMD,
5707                                         "9033 BLKGRD: rcvd %s cmd:x%x "
5708                                         "reftag x%x cnt %u pt %x\n",
5709                                         dif_op_str[scsi_get_prot_op(cmnd)],
5710                                         cmnd->cmnd[0],
5711                                         scsi_prot_ref_tag(cmnd),
5712                                         scsi_logical_block_count(cmnd),
5713                                         (cmnd->cmnd[1]>>5));
5714                }
5715                err = lpfc_bg_scsi_prep_dma_buf(phba, lpfc_cmd);
5716        } else {
5717                if (vport->phba->cfg_enable_bg) {
5718                        lpfc_printf_vlog(vport,
5719                                         KERN_INFO, LOG_SCSI_CMD,
5720                                         "9038 BLKGRD: rcvd PROT_NORMAL cmd: "
5721                                         "x%x reftag x%x cnt %u pt %x\n",
5722                                         cmnd->cmnd[0],
5723                                         scsi_prot_ref_tag(cmnd),
5724                                         scsi_logical_block_count(cmnd),
5725                                         (cmnd->cmnd[1]>>5));
5726                }
5727                err = lpfc_scsi_prep_dma_buf(phba, lpfc_cmd);
5728        }
5729
5730        if (unlikely(err)) {
5731                if (err == 2) {
5732                        cmnd->result = DID_ERROR << 16;
5733                        goto out_fail_command_release_buf;
5734                }
5735                goto out_host_busy_free_buf;
5736        }
5737
5738
5739        /* check the necessary and sufficient condition to support VMID */
5740        if (lpfc_is_vmid_enabled(phba) &&
5741            (ndlp->vmid_support ||
5742             phba->pport->vmid_priority_tagging ==
5743             LPFC_VMID_PRIO_TAG_ALL_TARGETS)) {
5744                /* is the I/O generated by a VM, get the associated virtual */
5745                /* entity id */
5746                uuid = lpfc_is_command_vm_io(cmnd);
5747
5748                if (uuid) {
5749                        err = lpfc_vmid_get_appid(vport, uuid, cmnd,
5750                                (union lpfc_vmid_io_tag *)
5751                                        &lpfc_cmd->cur_iocbq.vmid_tag);
5752                        if (!err)
5753                                lpfc_cmd->cur_iocbq.iocb_flag |= LPFC_IO_VMID;
5754                }
5755        }
5756
5757        atomic_inc(&ndlp->cmd_pending);
5758#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
5759        if (unlikely(phba->hdwqstat_on & LPFC_CHECK_SCSI_IO))
5760                this_cpu_inc(phba->sli4_hba.c_stat->xmt_io);
5761#endif
5762        /* Issue I/O to adapter */
5763        err = lpfc_sli_issue_fcp_io(phba, LPFC_FCP_RING,
5764                                    &lpfc_cmd->cur_iocbq,
5765                                    SLI_IOCB_RET_IOCB);
5766#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
5767        if (start) {
5768                lpfc_cmd->ts_cmd_start = start;
5769                lpfc_cmd->ts_last_cmd = phba->ktime_last_cmd;
5770                lpfc_cmd->ts_cmd_wqput = ktime_get_ns();
5771        } else {
5772                lpfc_cmd->ts_cmd_start = 0;
5773        }
5774#endif
5775        if (err) {
5776                lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP,
5777                                   "3376 FCP could not issue IOCB err %x "
5778                                   "FCP cmd x%x <%d/%llu> "
5779                                   "sid: x%x did: x%x oxid: x%x "
5780                                   "Data: x%x x%x x%x x%x\n",
5781                                   err, cmnd->cmnd[0],
5782                                   cmnd->device ? cmnd->device->id : 0xffff,
5783                                   cmnd->device ? cmnd->device->lun : (u64)-1,
5784                                   vport->fc_myDID, ndlp->nlp_DID,
5785                                   phba->sli_rev == LPFC_SLI_REV4 ?
5786                                   lpfc_cmd->cur_iocbq.sli4_xritag : 0xffff,
5787                                   phba->sli_rev == LPFC_SLI_REV4 ?
5788                                   phba->sli4_hba.rpi_ids[ndlp->nlp_rpi] :
5789                                   lpfc_cmd->cur_iocbq.iocb.ulpContext,
5790                                   lpfc_cmd->cur_iocbq.iotag,
5791                                   phba->sli_rev == LPFC_SLI_REV4 ?
5792                                   bf_get(wqe_tmo,
5793                                   &lpfc_cmd->cur_iocbq.wqe.generic.wqe_com) :
5794                                   lpfc_cmd->cur_iocbq.iocb.ulpTimeout,
5795                                   (uint32_t)(scsi_cmd_to_rq(cmnd)->timeout / 1000));
5796
5797                goto out_host_busy_free_buf;
5798        }
5799
5800        if (phba->cfg_poll & ENABLE_FCP_RING_POLLING) {
5801                lpfc_sli_handle_fast_ring_event(phba,
5802                        &phba->sli.sli3_ring[LPFC_FCP_RING], HA_R0RE_REQ);
5803
5804                if (phba->cfg_poll & DISABLE_FCP_RING_INT)
5805                        lpfc_poll_rearm_timer(phba);
5806        }
5807
5808        if (phba->cfg_xri_rebalancing)
5809                lpfc_keep_pvt_pool_above_lowwm(phba, lpfc_cmd->hdwq_no);
5810
5811        return 0;
5812
5813 out_host_busy_free_buf:
5814        idx = lpfc_cmd->hdwq_no;
5815        lpfc_scsi_unprep_dma_buf(phba, lpfc_cmd);
5816        if (phba->sli4_hba.hdwq) {
5817                switch (lpfc_cmd->fcp_cmnd->fcpCntl3) {
5818                case WRITE_DATA:
5819                        phba->sli4_hba.hdwq[idx].scsi_cstat.output_requests--;
5820                        break;
5821                case READ_DATA:
5822                        phba->sli4_hba.hdwq[idx].scsi_cstat.input_requests--;
5823                        break;
5824                default:
5825                        phba->sli4_hba.hdwq[idx].scsi_cstat.control_requests--;
5826                }
5827        }
5828 out_host_busy_release_buf:
5829        lpfc_release_scsi_buf(phba, lpfc_cmd);
5830 out_host_busy:
5831        lpfc_update_cmf_cmpl(phba, LPFC_CGN_NOT_SENT, scsi_bufflen(cmnd),
5832                             shost);
5833        return SCSI_MLQUEUE_HOST_BUSY;
5834
5835 out_tgt_busy2:
5836        lpfc_update_cmf_cmpl(phba, LPFC_CGN_NOT_SENT, scsi_bufflen(cmnd),
5837                             shost);
5838 out_tgt_busy1:
5839        return SCSI_MLQUEUE_TARGET_BUSY;
5840
5841 out_fail_command_release_buf:
5842        lpfc_release_scsi_buf(phba, lpfc_cmd);
5843        lpfc_update_cmf_cmpl(phba, LPFC_CGN_NOT_SENT, scsi_bufflen(cmnd),
5844                             shost);
5845
5846 out_fail_command:
5847        scsi_done(cmnd);
5848        return 0;
5849}
5850
5851/*
5852 * lpfc_vmid_vport_cleanup - cleans up the resources associated with a vport
5853 * @vport: The virtual port for which this call is being executed.
5854 */
5855void lpfc_vmid_vport_cleanup(struct lpfc_vport *vport)
5856{
5857        u32 bucket;
5858        struct lpfc_vmid *cur;
5859
5860        if (vport->port_type == LPFC_PHYSICAL_PORT)
5861                del_timer_sync(&vport->phba->inactive_vmid_poll);
5862
5863        kfree(vport->qfpa_res);
5864        kfree(vport->vmid_priority.vmid_range);
5865        kfree(vport->vmid);
5866
5867        if (!hash_empty(vport->hash_table))
5868                hash_for_each(vport->hash_table, bucket, cur, hnode)
5869                        hash_del(&cur->hnode);
5870
5871        vport->qfpa_res = NULL;
5872        vport->vmid_priority.vmid_range = NULL;
5873        vport->vmid = NULL;
5874        vport->cur_vmid_cnt = 0;
5875}
5876
5877/**
5878 * lpfc_abort_handler - scsi_host_template eh_abort_handler entry point
5879 * @cmnd: Pointer to scsi_cmnd data structure.
5880 *
5881 * This routine aborts @cmnd pending in base driver.
5882 *
5883 * Return code :
5884 *   0x2003 - Error
5885 *   0x2002 - Success
5886 **/
5887static int
5888lpfc_abort_handler(struct scsi_cmnd *cmnd)
5889{
5890        struct Scsi_Host  *shost = cmnd->device->host;
5891        struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
5892        struct lpfc_hba   *phba = vport->phba;
5893        struct lpfc_iocbq *iocb;
5894        struct lpfc_io_buf *lpfc_cmd;
5895        int ret = SUCCESS, status = 0;
5896        struct lpfc_sli_ring *pring_s4 = NULL;
5897        struct lpfc_sli_ring *pring = NULL;
5898        int ret_val;
5899        unsigned long flags;
5900        DECLARE_WAIT_QUEUE_HEAD_ONSTACK(waitq);
5901
5902        status = fc_block_scsi_eh(cmnd);
5903        if (status != 0 && status != SUCCESS)
5904                return status;
5905
5906        lpfc_cmd = (struct lpfc_io_buf *)cmnd->host_scribble;
5907        if (!lpfc_cmd)
5908                return ret;
5909
5910        spin_lock_irqsave(&phba->hbalock, flags);
5911        /* driver queued commands are in process of being flushed */
5912        if (phba->hba_flag & HBA_IOQ_FLUSH) {
5913                lpfc_printf_vlog(vport, KERN_WARNING, LOG_FCP,
5914                        "3168 SCSI Layer abort requested I/O has been "
5915                        "flushed by LLD.\n");
5916                ret = FAILED;
5917                goto out_unlock;
5918        }
5919
5920        /* Guard against IO completion being called at same time */
5921        spin_lock(&lpfc_cmd->buf_lock);
5922
5923        if (!lpfc_cmd->pCmd) {
5924                lpfc_printf_vlog(vport, KERN_WARNING, LOG_FCP,
5925                         "2873 SCSI Layer I/O Abort Request IO CMPL Status "
5926                         "x%x ID %d LUN %llu\n",
5927                         SUCCESS, cmnd->device->id, cmnd->device->lun);
5928                goto out_unlock_buf;
5929        }
5930
5931        iocb = &lpfc_cmd->cur_iocbq;
5932        if (phba->sli_rev == LPFC_SLI_REV4) {
5933                pring_s4 = phba->sli4_hba.hdwq[iocb->hba_wqidx].io_wq->pring;
5934                if (!pring_s4) {
5935                        ret = FAILED;
5936                        goto out_unlock_buf;
5937                }
5938                spin_lock(&pring_s4->ring_lock);
5939        }
5940        /* the command is in process of being cancelled */
5941        if (!(iocb->iocb_flag & LPFC_IO_ON_TXCMPLQ)) {
5942                lpfc_printf_vlog(vport, KERN_WARNING, LOG_FCP,
5943                        "3169 SCSI Layer abort requested I/O has been "
5944                        "cancelled by LLD.\n");
5945                ret = FAILED;
5946                goto out_unlock_ring;
5947        }
5948        /*
5949         * If pCmd field of the corresponding lpfc_io_buf structure
5950         * points to a different SCSI command, then the driver has
5951         * already completed this command, but the midlayer did not
5952         * see the completion before the eh fired. Just return SUCCESS.
5953         */
5954        if (lpfc_cmd->pCmd != cmnd) {
5955                lpfc_printf_vlog(vport, KERN_WARNING, LOG_FCP,
5956                        "3170 SCSI Layer abort requested I/O has been "
5957                        "completed by LLD.\n");
5958                goto out_unlock_ring;
5959        }
5960
5961        BUG_ON(iocb->context1 != lpfc_cmd);
5962
5963        /* abort issued in recovery is still in progress */
5964        if (iocb->iocb_flag & LPFC_DRIVER_ABORTED) {
5965                lpfc_printf_vlog(vport, KERN_WARNING, LOG_FCP,
5966                         "3389 SCSI Layer I/O Abort Request is pending\n");
5967                if (phba->sli_rev == LPFC_SLI_REV4)
5968                        spin_unlock(&pring_s4->ring_lock);
5969                spin_unlock(&lpfc_cmd->buf_lock);
5970                spin_unlock_irqrestore(&phba->hbalock, flags);
5971                goto wait_for_cmpl;
5972        }
5973
5974        lpfc_cmd->waitq = &waitq;
5975        if (phba->sli_rev == LPFC_SLI_REV4) {
5976                spin_unlock(&pring_s4->ring_lock);
5977                ret_val = lpfc_sli4_issue_abort_iotag(phba, iocb,
5978                                                      lpfc_sli4_abort_fcp_cmpl);
5979        } else {
5980                pring = &phba->sli.sli3_ring[LPFC_FCP_RING];
5981                ret_val = lpfc_sli_issue_abort_iotag(phba, pring, iocb,
5982                                                     lpfc_sli_abort_fcp_cmpl);
5983        }
5984
5985        /* Make sure HBA is alive */
5986        lpfc_issue_hb_tmo(phba);
5987
5988        if (ret_val != IOCB_SUCCESS) {
5989                /* Indicate the IO is not being aborted by the driver. */
5990                lpfc_cmd->waitq = NULL;
5991                spin_unlock(&lpfc_cmd->buf_lock);
5992                spin_unlock_irqrestore(&phba->hbalock, flags);
5993                ret = FAILED;
5994                goto out;
5995        }
5996
5997        /* no longer need the lock after this point */
5998        spin_unlock(&lpfc_cmd->buf_lock);
5999        spin_unlock_irqrestore(&phba->hbalock, flags);
6000
6001        if (phba->cfg_poll & DISABLE_FCP_RING_INT)
6002                lpfc_sli_handle_fast_ring_event(phba,
6003                        &phba->sli.sli3_ring[LPFC_FCP_RING], HA_R0RE_REQ);
6004
6005wait_for_cmpl:
6006        /*
6007         * iocb_flag is set to LPFC_DRIVER_ABORTED before we wait
6008         * for abort to complete.
6009         */
6010        wait_event_timeout(waitq,
6011                          (lpfc_cmd->pCmd != cmnd),
6012                           msecs_to_jiffies(2*vport->cfg_devloss_tmo*1000));
6013
6014        spin_lock(&lpfc_cmd->buf_lock);
6015
6016        if (lpfc_cmd->pCmd == cmnd) {
6017                ret = FAILED;
6018                lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
6019                                 "0748 abort handler timed out waiting "
6020                                 "for aborting I/O (xri:x%x) to complete: "
6021                                 "ret %#x, ID %d, LUN %llu\n",
6022                                 iocb->sli4_xritag, ret,
6023                                 cmnd->device->id, cmnd->device->lun);
6024        }
6025
6026        lpfc_cmd->waitq = NULL;
6027
6028        spin_unlock(&lpfc_cmd->buf_lock);
6029        goto out;
6030
6031out_unlock_ring:
6032        if (phba->sli_rev == LPFC_SLI_REV4)
6033                spin_unlock(&pring_s4->ring_lock);
6034out_unlock_buf:
6035        spin_unlock(&lpfc_cmd->buf_lock);
6036out_unlock:
6037        spin_unlock_irqrestore(&phba->hbalock, flags);
6038out:
6039        lpfc_printf_vlog(vport, KERN_WARNING, LOG_FCP,
6040                         "0749 SCSI Layer I/O Abort Request Status x%x ID %d "
6041                         "LUN %llu\n", ret, cmnd->device->id,
6042                         cmnd->device->lun);
6043        return ret;
6044}
6045
6046static char *
6047lpfc_taskmgmt_name(uint8_t task_mgmt_cmd)
6048{
6049        switch (task_mgmt_cmd) {
6050        case FCP_ABORT_TASK_SET:
6051                return "ABORT_TASK_SET";
6052        case FCP_CLEAR_TASK_SET:
6053                return "FCP_CLEAR_TASK_SET";
6054        case FCP_BUS_RESET:
6055                return "FCP_BUS_RESET";
6056        case FCP_LUN_RESET:
6057                return "FCP_LUN_RESET";
6058        case FCP_TARGET_RESET:
6059                return "FCP_TARGET_RESET";
6060        case FCP_CLEAR_ACA:
6061                return "FCP_CLEAR_ACA";
6062        case FCP_TERMINATE_TASK:
6063                return "FCP_TERMINATE_TASK";
6064        default:
6065                return "unknown";
6066        }
6067}
6068
6069
6070/**
6071 * lpfc_check_fcp_rsp - check the returned fcp_rsp to see if task failed
6072 * @vport: The virtual port for which this call is being executed.
6073 * @lpfc_cmd: Pointer to lpfc_io_buf data structure.
6074 *
6075 * This routine checks the FCP RSP INFO to see if the tsk mgmt command succeded
6076 *
6077 * Return code :
6078 *   0x2003 - Error
6079 *   0x2002 - Success
6080 **/
6081static int
6082lpfc_check_fcp_rsp(struct lpfc_vport *vport, struct lpfc_io_buf *lpfc_cmd)
6083{
6084        struct fcp_rsp *fcprsp = lpfc_cmd->fcp_rsp;
6085        uint32_t rsp_info;
6086        uint32_t rsp_len;
6087        uint8_t  rsp_info_code;
6088        int ret = FAILED;
6089
6090
6091        if (fcprsp == NULL)
6092                lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP,
6093                                 "0703 fcp_rsp is missing\n");
6094        else {
6095                rsp_info = fcprsp->rspStatus2;
6096                rsp_len = be32_to_cpu(fcprsp->rspRspLen);
6097                rsp_info_code = fcprsp->rspInfo3;
6098
6099
6100                lpfc_printf_vlog(vport, KERN_INFO,
6101                                 LOG_FCP,
6102                                 "0706 fcp_rsp valid 0x%x,"
6103                                 " rsp len=%d code 0x%x\n",
6104                                 rsp_info,
6105                                 rsp_len, rsp_info_code);
6106
6107                /* If FCP_RSP_LEN_VALID bit is one, then the FCP_RSP_LEN
6108                 * field specifies the number of valid bytes of FCP_RSP_INFO.
6109                 * The FCP_RSP_LEN field shall be set to 0x04 or 0x08
6110                 */
6111                if ((fcprsp->rspStatus2 & RSP_LEN_VALID) &&
6112                    ((rsp_len == 8) || (rsp_len == 4))) {
6113                        switch (rsp_info_code) {
6114                        case RSP_NO_FAILURE:
6115                                lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP,
6116                                                 "0715 Task Mgmt No Failure\n");
6117                                ret = SUCCESS;
6118                                break;
6119                        case RSP_TM_NOT_SUPPORTED: /* TM rejected */
6120                                lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP,
6121                                                 "0716 Task Mgmt Target "
6122                                                "reject\n");
6123                                break;
6124                        case RSP_TM_NOT_COMPLETED: /* TM failed */
6125                                lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP,
6126                                                 "0717 Task Mgmt Target "
6127                                                "failed TM\n");
6128                                break;
6129                        case RSP_TM_INVALID_LU: /* TM to invalid LU! */
6130                                lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP,
6131                                                 "0718 Task Mgmt to invalid "
6132                                                "LUN\n");
6133                                break;
6134                        }
6135                }
6136        }
6137        return ret;
6138}
6139
6140
6141/**
6142 * lpfc_send_taskmgmt - Generic SCSI Task Mgmt Handler
6143 * @vport: The virtual port for which this call is being executed.
6144 * @cmnd: Pointer to scsi_cmnd data structure.
6145 * @tgt_id: Target ID of remote device.
6146 * @lun_id: Lun number for the TMF
6147 * @task_mgmt_cmd: type of TMF to send
6148 *
6149 * This routine builds and sends a TMF (SCSI Task Mgmt Function) to
6150 * a remote port.
6151 *
6152 * Return Code:
6153 *   0x2003 - Error
6154 *   0x2002 - Success.
6155 **/
6156static int
6157lpfc_send_taskmgmt(struct lpfc_vport *vport, struct scsi_cmnd *cmnd,
6158                   unsigned int tgt_id, uint64_t lun_id,
6159                   uint8_t task_mgmt_cmd)
6160{
6161        struct lpfc_hba   *phba = vport->phba;
6162        struct lpfc_io_buf *lpfc_cmd;
6163        struct lpfc_iocbq *iocbq;
6164        struct lpfc_iocbq *iocbqrsp;
6165        struct lpfc_rport_data *rdata;
6166        struct lpfc_nodelist *pnode;
6167        int ret;
6168        int status;
6169
6170        rdata = lpfc_rport_data_from_scsi_device(cmnd->device);
6171        if (!rdata || !rdata->pnode)
6172                return FAILED;
6173        pnode = rdata->pnode;
6174
6175        lpfc_cmd = lpfc_get_scsi_buf(phba, pnode, NULL);
6176        if (lpfc_cmd == NULL)
6177                return FAILED;
6178        lpfc_cmd->timeout = phba->cfg_task_mgmt_tmo;
6179        lpfc_cmd->rdata = rdata;
6180        lpfc_cmd->pCmd = cmnd;
6181        lpfc_cmd->ndlp = pnode;
6182
6183        status = lpfc_scsi_prep_task_mgmt_cmd(vport, lpfc_cmd, lun_id,
6184                                           task_mgmt_cmd);
6185        if (!status) {
6186                lpfc_release_scsi_buf(phba, lpfc_cmd);
6187                return FAILED;
6188        }
6189
6190        iocbq = &lpfc_cmd->cur_iocbq;
6191        iocbqrsp = lpfc_sli_get_iocbq(phba);
6192        if (iocbqrsp == NULL) {
6193                lpfc_release_scsi_buf(phba, lpfc_cmd);
6194                return FAILED;
6195        }
6196        iocbq->iocb_cmpl = lpfc_tskmgmt_def_cmpl;
6197
6198        lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP,
6199                         "0702 Issue %s to TGT %d LUN %llu "
6200                         "rpi x%x nlp_flag x%x Data: x%x x%x\n",
6201                         lpfc_taskmgmt_name(task_mgmt_cmd), tgt_id, lun_id,
6202                         pnode->nlp_rpi, pnode->nlp_flag, iocbq->sli4_xritag,
6203                         iocbq->iocb_flag);
6204
6205        status = lpfc_sli_issue_iocb_wait(phba, LPFC_FCP_RING,
6206                                          iocbq, iocbqrsp, lpfc_cmd->timeout);
6207        if ((status != IOCB_SUCCESS) ||
6208            (iocbqrsp->iocb.ulpStatus != IOSTAT_SUCCESS)) {
6209                if (status != IOCB_SUCCESS ||
6210                    iocbqrsp->iocb.ulpStatus != IOSTAT_FCP_RSP_ERROR)
6211                        lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
6212                                         "0727 TMF %s to TGT %d LUN %llu "
6213                                         "failed (%d, %d) iocb_flag x%x\n",
6214                                         lpfc_taskmgmt_name(task_mgmt_cmd),
6215                                         tgt_id, lun_id,
6216                                         iocbqrsp->iocb.ulpStatus,
6217                                         iocbqrsp->iocb.un.ulpWord[4],
6218                                         iocbq->iocb_flag);
6219                /* if ulpStatus != IOCB_SUCCESS, then status == IOCB_SUCCESS */
6220                if (status == IOCB_SUCCESS) {
6221                        if (iocbqrsp->iocb.ulpStatus == IOSTAT_FCP_RSP_ERROR)
6222                                /* Something in the FCP_RSP was invalid.
6223                                 * Check conditions */
6224                                ret = lpfc_check_fcp_rsp(vport, lpfc_cmd);
6225                        else
6226                                ret = FAILED;
6227                } else if (status == IOCB_TIMEDOUT) {
6228                        ret = TIMEOUT_ERROR;
6229                } else {
6230                        ret = FAILED;
6231                }
6232        } else
6233                ret = SUCCESS;
6234
6235        lpfc_sli_release_iocbq(phba, iocbqrsp);
6236
6237        if (ret != TIMEOUT_ERROR)
6238                lpfc_release_scsi_buf(phba, lpfc_cmd);
6239
6240        return ret;
6241}
6242
6243/**
6244 * lpfc_chk_tgt_mapped -
6245 * @vport: The virtual port to check on
6246 * @cmnd: Pointer to scsi_cmnd data structure.
6247 *
6248 * This routine delays until the scsi target (aka rport) for the
6249 * command exists (is present and logged in) or we declare it non-existent.
6250 *
6251 * Return code :
6252 *  0x2003 - Error
6253 *  0x2002 - Success
6254 **/
6255static int
6256lpfc_chk_tgt_mapped(struct lpfc_vport *vport, struct scsi_cmnd *cmnd)
6257{
6258        struct lpfc_rport_data *rdata;
6259        struct lpfc_nodelist *pnode;
6260        unsigned long later;
6261
6262        rdata = lpfc_rport_data_from_scsi_device(cmnd->device);
6263        if (!rdata) {
6264                lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP,
6265                        "0797 Tgt Map rport failure: rdata x%px\n", rdata);
6266                return FAILED;
6267        }
6268        pnode = rdata->pnode;
6269        /*
6270         * If target is not in a MAPPED state, delay until
6271         * target is rediscovered or devloss timeout expires.
6272         */
6273        later = msecs_to_jiffies(2 * vport->cfg_devloss_tmo * 1000) + jiffies;
6274        while (time_after(later, jiffies)) {
6275                if (!pnode)
6276                        return FAILED;
6277                if (pnode->nlp_state == NLP_STE_MAPPED_NODE)
6278                        return SUCCESS;
6279                schedule_timeout_uninterruptible(msecs_to_jiffies(500));
6280                rdata = lpfc_rport_data_from_scsi_device(cmnd->device);
6281                if (!rdata)
6282                        return FAILED;
6283                pnode = rdata->pnode;
6284        }
6285        if (!pnode || (pnode->nlp_state != NLP_STE_MAPPED_NODE))
6286                return FAILED;
6287        return SUCCESS;
6288}
6289
6290/**
6291 * lpfc_reset_flush_io_context -
6292 * @vport: The virtual port (scsi_host) for the flush context
6293 * @tgt_id: If aborting by Target contect - specifies the target id
6294 * @lun_id: If aborting by Lun context - specifies the lun id
6295 * @context: specifies the context level to flush at.
6296 *
6297 * After a reset condition via TMF, we need to flush orphaned i/o
6298 * contexts from the adapter. This routine aborts any contexts
6299 * outstanding, then waits for their completions. The wait is
6300 * bounded by devloss_tmo though.
6301 *
6302 * Return code :
6303 *  0x2003 - Error
6304 *  0x2002 - Success
6305 **/
6306static int
6307lpfc_reset_flush_io_context(struct lpfc_vport *vport, uint16_t tgt_id,
6308                        uint64_t lun_id, lpfc_ctx_cmd context)
6309{
6310        struct lpfc_hba   *phba = vport->phba;
6311        unsigned long later;
6312        int cnt;
6313
6314        cnt = lpfc_sli_sum_iocb(vport, tgt_id, lun_id, context);
6315        if (cnt)
6316                lpfc_sli_abort_taskmgmt(vport,
6317                                        &phba->sli.sli3_ring[LPFC_FCP_RING],
6318                                        tgt_id, lun_id, context);
6319        later = msecs_to_jiffies(2 * vport->cfg_devloss_tmo * 1000) + jiffies;
6320        while (time_after(later, jiffies) && cnt) {
6321                schedule_timeout_uninterruptible(msecs_to_jiffies(20));
6322                cnt = lpfc_sli_sum_iocb(vport, tgt_id, lun_id, context);
6323        }
6324        if (cnt) {
6325                lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
6326                        "0724 I/O flush failure for context %s : cnt x%x\n",
6327                        ((context == LPFC_CTX_LUN) ? "LUN" :
6328                         ((context == LPFC_CTX_TGT) ? "TGT" :
6329                          ((context == LPFC_CTX_HOST) ? "HOST" : "Unknown"))),
6330                        cnt);
6331                return FAILED;
6332        }
6333        return SUCCESS;
6334}
6335
6336/**
6337 * lpfc_device_reset_handler - scsi_host_template eh_device_reset entry point
6338 * @cmnd: Pointer to scsi_cmnd data structure.
6339 *
6340 * This routine does a device reset by sending a LUN_RESET task management
6341 * command.
6342 *
6343 * Return code :
6344 *  0x2003 - Error
6345 *  0x2002 - Success
6346 **/
6347static int
6348lpfc_device_reset_handler(struct scsi_cmnd *cmnd)
6349{
6350        struct Scsi_Host  *shost = cmnd->device->host;
6351        struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
6352        struct lpfc_rport_data *rdata;
6353        struct lpfc_nodelist *pnode;
6354        unsigned tgt_id = cmnd->device->id;
6355        uint64_t lun_id = cmnd->device->lun;
6356        struct lpfc_scsi_event_header scsi_event;
6357        int status;
6358        u32 logit = LOG_FCP;
6359
6360        rdata = lpfc_rport_data_from_scsi_device(cmnd->device);
6361        if (!rdata || !rdata->pnode) {
6362                lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
6363                                 "0798 Device Reset rdata failure: rdata x%px\n",
6364                                 rdata);
6365                return FAILED;
6366        }
6367        pnode = rdata->pnode;
6368        status = fc_block_scsi_eh(cmnd);
6369        if (status != 0 && status != SUCCESS)
6370                return status;
6371
6372        status = lpfc_chk_tgt_mapped(vport, cmnd);
6373        if (status == FAILED) {
6374                lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
6375                        "0721 Device Reset rport failure: rdata x%px\n", rdata);
6376                return FAILED;
6377        }
6378
6379        scsi_event.event_type = FC_REG_SCSI_EVENT;
6380        scsi_event.subcategory = LPFC_EVENT_LUNRESET;
6381        scsi_event.lun = lun_id;
6382        memcpy(scsi_event.wwpn, &pnode->nlp_portname, sizeof(struct lpfc_name));
6383        memcpy(scsi_event.wwnn, &pnode->nlp_nodename, sizeof(struct lpfc_name));
6384
6385        fc_host_post_vendor_event(shost, fc_get_event_number(),
6386                sizeof(scsi_event), (char *)&scsi_event, LPFC_NL_VENDOR_ID);
6387
6388        status = lpfc_send_taskmgmt(vport, cmnd, tgt_id, lun_id,
6389                                                FCP_LUN_RESET);
6390        if (status != SUCCESS)
6391                logit =  LOG_TRACE_EVENT;
6392
6393        lpfc_printf_vlog(vport, KERN_ERR, logit,
6394                         "0713 SCSI layer issued Device Reset (%d, %llu) "
6395                         "return x%x\n", tgt_id, lun_id, status);
6396
6397        /*
6398         * We have to clean up i/o as : they may be orphaned by the TMF;
6399         * or if the TMF failed, they may be in an indeterminate state.
6400         * So, continue on.
6401         * We will report success if all the i/o aborts successfully.
6402         */
6403        if (status == SUCCESS)
6404                status = lpfc_reset_flush_io_context(vport, tgt_id, lun_id,
6405                                                LPFC_CTX_LUN);
6406
6407        return status;
6408}
6409
6410/**
6411 * lpfc_target_reset_handler - scsi_host_template eh_target_reset entry point
6412 * @cmnd: Pointer to scsi_cmnd data structure.
6413 *
6414 * This routine does a target reset by sending a TARGET_RESET task management
6415 * command.
6416 *
6417 * Return code :
6418 *  0x2003 - Error
6419 *  0x2002 - Success
6420 **/
6421static int
6422lpfc_target_reset_handler(struct scsi_cmnd *cmnd)
6423{
6424        struct Scsi_Host  *shost = cmnd->device->host;
6425        struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
6426        struct lpfc_rport_data *rdata;
6427        struct lpfc_nodelist *pnode;
6428        unsigned tgt_id = cmnd->device->id;
6429        uint64_t lun_id = cmnd->device->lun;
6430        struct lpfc_scsi_event_header scsi_event;
6431        int status;
6432        u32 logit = LOG_FCP;
6433        u32 dev_loss_tmo = vport->cfg_devloss_tmo;
6434        unsigned long flags;
6435        DECLARE_WAIT_QUEUE_HEAD_ONSTACK(waitq);
6436
6437        rdata = lpfc_rport_data_from_scsi_device(cmnd->device);
6438        if (!rdata || !rdata->pnode) {
6439                lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
6440                                 "0799 Target Reset rdata failure: rdata x%px\n",
6441                                 rdata);
6442                return FAILED;
6443        }
6444        pnode = rdata->pnode;
6445        status = fc_block_scsi_eh(cmnd);
6446        if (status != 0 && status != SUCCESS)
6447                return status;
6448
6449        status = lpfc_chk_tgt_mapped(vport, cmnd);
6450        if (status == FAILED) {
6451                lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
6452                        "0722 Target Reset rport failure: rdata x%px\n", rdata);
6453                if (pnode) {
6454                        spin_lock_irqsave(&pnode->lock, flags);
6455                        pnode->nlp_flag &= ~NLP_NPR_ADISC;
6456                        pnode->nlp_fcp_info &= ~NLP_FCP_2_DEVICE;
6457                        spin_unlock_irqrestore(&pnode->lock, flags);
6458                }
6459                lpfc_reset_flush_io_context(vport, tgt_id, lun_id,
6460                                          LPFC_CTX_TGT);
6461                return FAST_IO_FAIL;
6462        }
6463
6464        scsi_event.event_type = FC_REG_SCSI_EVENT;
6465        scsi_event.subcategory = LPFC_EVENT_TGTRESET;
6466        scsi_event.lun = 0;
6467        memcpy(scsi_event.wwpn, &pnode->nlp_portname, sizeof(struct lpfc_name));
6468        memcpy(scsi_event.wwnn, &pnode->nlp_nodename, sizeof(struct lpfc_name));
6469
6470        fc_host_post_vendor_event(shost, fc_get_event_number(),
6471                sizeof(scsi_event), (char *)&scsi_event, LPFC_NL_VENDOR_ID);
6472
6473        status = lpfc_send_taskmgmt(vport, cmnd, tgt_id, lun_id,
6474                                        FCP_TARGET_RESET);
6475        if (status != SUCCESS) {
6476                logit = LOG_TRACE_EVENT;
6477
6478                /* Issue LOGO, if no LOGO is outstanding */
6479                spin_lock_irqsave(&pnode->lock, flags);
6480                if (!(pnode->save_flags & NLP_WAIT_FOR_LOGO) &&
6481                    !pnode->logo_waitq) {
6482                        pnode->logo_waitq = &waitq;
6483                        pnode->nlp_fcp_info &= ~NLP_FCP_2_DEVICE;
6484                        pnode->nlp_flag |= NLP_ISSUE_LOGO;
6485                        pnode->save_flags |= NLP_WAIT_FOR_LOGO;
6486                        spin_unlock_irqrestore(&pnode->lock, flags);
6487                        lpfc_unreg_rpi(vport, pnode);
6488                        wait_event_timeout(waitq,
6489                                           (!(pnode->save_flags &
6490                                              NLP_WAIT_FOR_LOGO)),
6491                                           msecs_to_jiffies(dev_loss_tmo *
6492                                                            1000));
6493
6494                        if (pnode->save_flags & NLP_WAIT_FOR_LOGO) {
6495                                lpfc_printf_vlog(vport, KERN_ERR, logit,
6496                                                 "0725 SCSI layer TGTRST "
6497                                                 "failed & LOGO TMO (%d, %llu) "
6498                                                 "return x%x\n",
6499                                                 tgt_id, lun_id, status);
6500                                spin_lock_irqsave(&pnode->lock, flags);
6501                                pnode->save_flags &= ~NLP_WAIT_FOR_LOGO;
6502                        } else {
6503                                spin_lock_irqsave(&pnode->lock, flags);
6504                        }
6505                        pnode->logo_waitq = NULL;
6506                        spin_unlock_irqrestore(&pnode->lock, flags);
6507                        status = SUCCESS;
6508
6509                } else {
6510                        spin_unlock_irqrestore(&pnode->lock, flags);
6511                        status = FAILED;
6512                }
6513        }
6514
6515        lpfc_printf_vlog(vport, KERN_ERR, logit,
6516                         "0723 SCSI layer issued Target Reset (%d, %llu) "
6517                         "return x%x\n", tgt_id, lun_id, status);
6518
6519        /*
6520         * We have to clean up i/o as : they may be orphaned by the TMF;
6521         * or if the TMF failed, they may be in an indeterminate state.
6522         * So, continue on.
6523         * We will report success if all the i/o aborts successfully.
6524         */
6525        if (status == SUCCESS)
6526                status = lpfc_reset_flush_io_context(vport, tgt_id, lun_id,
6527                                          LPFC_CTX_TGT);
6528        return status;
6529}
6530
6531/**
6532 * lpfc_bus_reset_handler - scsi_host_template eh_bus_reset_handler entry point
6533 * @cmnd: Pointer to scsi_cmnd data structure.
6534 *
6535 * This routine does target reset to all targets on @cmnd->device->host.
6536 * This emulates Parallel SCSI Bus Reset Semantics.
6537 *
6538 * Return code :
6539 *  0x2003 - Error
6540 *  0x2002 - Success
6541 **/
6542static int
6543lpfc_bus_reset_handler(struct scsi_cmnd *cmnd)
6544{
6545        struct Scsi_Host  *shost = cmnd->device->host;
6546        struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
6547        struct lpfc_nodelist *ndlp = NULL;
6548        struct lpfc_scsi_event_header scsi_event;
6549        int match;
6550        int ret = SUCCESS, status, i;
6551        u32 logit = LOG_FCP;
6552
6553        scsi_event.event_type = FC_REG_SCSI_EVENT;
6554        scsi_event.subcategory = LPFC_EVENT_BUSRESET;
6555        scsi_event.lun = 0;
6556        memcpy(scsi_event.wwpn, &vport->fc_portname, sizeof(struct lpfc_name));
6557        memcpy(scsi_event.wwnn, &vport->fc_nodename, sizeof(struct lpfc_name));
6558
6559        fc_host_post_vendor_event(shost, fc_get_event_number(),
6560                sizeof(scsi_event), (char *)&scsi_event, LPFC_NL_VENDOR_ID);
6561
6562        status = fc_block_scsi_eh(cmnd);
6563        if (status != 0 && status != SUCCESS)
6564                return status;
6565
6566        /*
6567         * Since the driver manages a single bus device, reset all
6568         * targets known to the driver.  Should any target reset
6569         * fail, this routine returns failure to the midlayer.
6570         */
6571        for (i = 0; i < LPFC_MAX_TARGET; i++) {
6572                /* Search for mapped node by target ID */
6573                match = 0;
6574                spin_lock_irq(shost->host_lock);
6575                list_for_each_entry(ndlp, &vport->fc_nodes, nlp_listp) {
6576
6577                        if (vport->phba->cfg_fcp2_no_tgt_reset &&
6578                            (ndlp->nlp_fcp_info & NLP_FCP_2_DEVICE))
6579                                continue;
6580                        if (ndlp->nlp_state == NLP_STE_MAPPED_NODE &&
6581                            ndlp->nlp_sid == i &&
6582                            ndlp->rport &&
6583                            ndlp->nlp_type & NLP_FCP_TARGET) {
6584                                match = 1;
6585                                break;
6586                        }
6587                }
6588                spin_unlock_irq(shost->host_lock);
6589                if (!match)
6590                        continue;
6591
6592                status = lpfc_send_taskmgmt(vport, cmnd,
6593                                        i, 0, FCP_TARGET_RESET);
6594
6595                if (status != SUCCESS) {
6596                        lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
6597                                         "0700 Bus Reset on target %d failed\n",
6598                                         i);
6599                        ret = FAILED;
6600                }
6601        }
6602        /*
6603         * We have to clean up i/o as : they may be orphaned by the TMFs
6604         * above; or if any of the TMFs failed, they may be in an
6605         * indeterminate state.
6606         * We will report success if all the i/o aborts successfully.
6607         */
6608
6609        status = lpfc_reset_flush_io_context(vport, 0, 0, LPFC_CTX_HOST);
6610        if (status != SUCCESS)
6611                ret = FAILED;
6612        if (ret == FAILED)
6613                logit =  LOG_TRACE_EVENT;
6614
6615        lpfc_printf_vlog(vport, KERN_ERR, logit,
6616                         "0714 SCSI layer issued Bus Reset Data: x%x\n", ret);
6617        return ret;
6618}
6619
6620/**
6621 * lpfc_host_reset_handler - scsi_host_template eh_host_reset_handler entry pt
6622 * @cmnd: Pointer to scsi_cmnd data structure.
6623 *
6624 * This routine does host reset to the adaptor port. It brings the HBA
6625 * offline, performs a board restart, and then brings the board back online.
6626 * The lpfc_offline calls lpfc_sli_hba_down which will abort and local
6627 * reject all outstanding SCSI commands to the host and error returned
6628 * back to SCSI mid-level. As this will be SCSI mid-level's last resort
6629 * of error handling, it will only return error if resetting of the adapter
6630 * is not successful; in all other cases, will return success.
6631 *
6632 * Return code :
6633 *  0x2003 - Error
6634 *  0x2002 - Success
6635 **/
6636static int
6637lpfc_host_reset_handler(struct scsi_cmnd *cmnd)
6638{
6639        struct Scsi_Host *shost = cmnd->device->host;
6640        struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
6641        struct lpfc_hba *phba = vport->phba;
6642        int rc, ret = SUCCESS;
6643
6644        lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP,
6645                         "3172 SCSI layer issued Host Reset Data:\n");
6646
6647        lpfc_offline_prep(phba, LPFC_MBX_WAIT);
6648        lpfc_offline(phba);
6649        rc = lpfc_sli_brdrestart(phba);
6650        if (rc)
6651                goto error;
6652
6653        /* Wait for successful restart of adapter */
6654        if (phba->sli_rev < LPFC_SLI_REV4) {
6655                rc = lpfc_sli_chipset_init(phba);
6656                if (rc)
6657                        goto error;
6658        }
6659
6660        rc = lpfc_online(phba);
6661        if (rc)
6662                goto error;
6663
6664        lpfc_unblock_mgmt_io(phba);
6665
6666        return ret;
6667error:
6668        lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
6669                         "3323 Failed host reset\n");
6670        lpfc_unblock_mgmt_io(phba);
6671        return FAILED;
6672}
6673
6674/**
6675 * lpfc_slave_alloc - scsi_host_template slave_alloc entry point
6676 * @sdev: Pointer to scsi_device.
6677 *
6678 * This routine populates the cmds_per_lun count + 2 scsi_bufs into  this host's
6679 * globally available list of scsi buffers. This routine also makes sure scsi
6680 * buffer is not allocated more than HBA limit conveyed to midlayer. This list
6681 * of scsi buffer exists for the lifetime of the driver.
6682 *
6683 * Return codes:
6684 *   non-0 - Error
6685 *   0 - Success
6686 **/
6687static int
6688lpfc_slave_alloc(struct scsi_device *sdev)
6689{
6690        struct lpfc_vport *vport = (struct lpfc_vport *) sdev->host->hostdata;
6691        struct lpfc_hba   *phba = vport->phba;
6692        struct fc_rport *rport = starget_to_rport(scsi_target(sdev));
6693        uint32_t total = 0;
6694        uint32_t num_to_alloc = 0;
6695        int num_allocated = 0;
6696        uint32_t sdev_cnt;
6697        struct lpfc_device_data *device_data;
6698        unsigned long flags;
6699        struct lpfc_name target_wwpn;
6700
6701        if (!rport || fc_remote_port_chkready(rport))
6702                return -ENXIO;
6703
6704        if (phba->cfg_fof) {
6705
6706                /*
6707                 * Check to see if the device data structure for the lun
6708                 * exists.  If not, create one.
6709                 */
6710
6711                u64_to_wwn(rport->port_name, target_wwpn.u.wwn);
6712                spin_lock_irqsave(&phba->devicelock, flags);
6713                device_data = __lpfc_get_device_data(phba,
6714                                                     &phba->luns,
6715                                                     &vport->fc_portname,
6716                                                     &target_wwpn,
6717                                                     sdev->lun);
6718                if (!device_data) {
6719                        spin_unlock_irqrestore(&phba->devicelock, flags);
6720                        device_data = lpfc_create_device_data(phba,
6721                                                        &vport->fc_portname,
6722                                                        &target_wwpn,
6723                                                        sdev->lun,
6724                                                        phba->cfg_XLanePriority,
6725                                                        true);
6726                        if (!device_data)
6727                                return -ENOMEM;
6728                        spin_lock_irqsave(&phba->devicelock, flags);
6729                        list_add_tail(&device_data->listentry, &phba->luns);
6730                }
6731                device_data->rport_data = rport->dd_data;
6732                device_data->available = true;
6733                spin_unlock_irqrestore(&phba->devicelock, flags);
6734                sdev->hostdata = device_data;
6735        } else {
6736                sdev->hostdata = rport->dd_data;
6737        }
6738        sdev_cnt = atomic_inc_return(&phba->sdev_cnt);
6739
6740        /* For SLI4, all IO buffers are pre-allocated */
6741        if (phba->sli_rev == LPFC_SLI_REV4)
6742                return 0;
6743
6744        /* This code path is now ONLY for SLI3 adapters */
6745
6746        /*
6747         * Populate the cmds_per_lun count scsi_bufs into this host's globally
6748         * available list of scsi buffers.  Don't allocate more than the
6749         * HBA limit conveyed to the midlayer via the host structure.  The
6750         * formula accounts for the lun_queue_depth + error handlers + 1
6751         * extra.  This list of scsi bufs exists for the lifetime of the driver.
6752         */
6753        total = phba->total_scsi_bufs;
6754        num_to_alloc = vport->cfg_lun_queue_depth + 2;
6755
6756        /* If allocated buffers are enough do nothing */
6757        if ((sdev_cnt * (vport->cfg_lun_queue_depth + 2)) < total)
6758                return 0;
6759
6760        /* Allow some exchanges to be available always to complete discovery */
6761        if (total >= phba->cfg_hba_queue_depth - LPFC_DISC_IOCB_BUFF_COUNT ) {
6762                lpfc_printf_vlog(vport, KERN_WARNING, LOG_FCP,
6763                                 "0704 At limitation of %d preallocated "
6764                                 "command buffers\n", total);
6765                return 0;
6766        /* Allow some exchanges to be available always to complete discovery */
6767        } else if (total + num_to_alloc >
6768                phba->cfg_hba_queue_depth - LPFC_DISC_IOCB_BUFF_COUNT ) {
6769                lpfc_printf_vlog(vport, KERN_WARNING, LOG_FCP,
6770                                 "0705 Allocation request of %d "
6771                                 "command buffers will exceed max of %d.  "
6772                                 "Reducing allocation request to %d.\n",
6773                                 num_to_alloc, phba->cfg_hba_queue_depth,
6774                                 (phba->cfg_hba_queue_depth - total));
6775                num_to_alloc = phba->cfg_hba_queue_depth - total;
6776        }
6777        num_allocated = lpfc_new_scsi_buf_s3(vport, num_to_alloc);
6778        if (num_to_alloc != num_allocated) {
6779                        lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
6780                                         "0708 Allocation request of %d "
6781                                         "command buffers did not succeed.  "
6782                                         "Allocated %d buffers.\n",
6783                                         num_to_alloc, num_allocated);
6784        }
6785        if (num_allocated > 0)
6786                phba->total_scsi_bufs += num_allocated;
6787        return 0;
6788}
6789
6790/**
6791 * lpfc_slave_configure - scsi_host_template slave_configure entry point
6792 * @sdev: Pointer to scsi_device.
6793 *
6794 * This routine configures following items
6795 *   - Tag command queuing support for @sdev if supported.
6796 *   - Enable SLI polling for fcp ring if ENABLE_FCP_RING_POLLING flag is set.
6797 *
6798 * Return codes:
6799 *   0 - Success
6800 **/
6801static int
6802lpfc_slave_configure(struct scsi_device *sdev)
6803{
6804        struct lpfc_vport *vport = (struct lpfc_vport *) sdev->host->hostdata;
6805        struct lpfc_hba   *phba = vport->phba;
6806
6807        scsi_change_queue_depth(sdev, vport->cfg_lun_queue_depth);
6808
6809        if (phba->cfg_poll & ENABLE_FCP_RING_POLLING) {
6810                lpfc_sli_handle_fast_ring_event(phba,
6811                        &phba->sli.sli3_ring[LPFC_FCP_RING], HA_R0RE_REQ);
6812                if (phba->cfg_poll & DISABLE_FCP_RING_INT)
6813                        lpfc_poll_rearm_timer(phba);
6814        }
6815
6816        return 0;
6817}
6818
6819/**
6820 * lpfc_slave_destroy - slave_destroy entry point of SHT data structure
6821 * @sdev: Pointer to scsi_device.
6822 *
6823 * This routine sets @sdev hostatdata filed to null.
6824 **/
6825static void
6826lpfc_slave_destroy(struct scsi_device *sdev)
6827{
6828        struct lpfc_vport *vport = (struct lpfc_vport *) sdev->host->hostdata;
6829        struct lpfc_hba   *phba = vport->phba;
6830        unsigned long flags;
6831        struct lpfc_device_data *device_data = sdev->hostdata;
6832
6833        atomic_dec(&phba->sdev_cnt);
6834        if ((phba->cfg_fof) && (device_data)) {
6835                spin_lock_irqsave(&phba->devicelock, flags);
6836                device_data->available = false;
6837                if (!device_data->oas_enabled)
6838                        lpfc_delete_device_data(phba, device_data);
6839                spin_unlock_irqrestore(&phba->devicelock, flags);
6840        }
6841        sdev->hostdata = NULL;
6842        return;
6843}
6844
6845/**
6846 * lpfc_create_device_data - creates and initializes device data structure for OAS
6847 * @phba: Pointer to host bus adapter structure.
6848 * @vport_wwpn: Pointer to vport's wwpn information
6849 * @target_wwpn: Pointer to target's wwpn information
6850 * @lun: Lun on target
6851 * @pri: Priority
6852 * @atomic_create: Flag to indicate if memory should be allocated using the
6853 *                GFP_ATOMIC flag or not.
6854 *
6855 * This routine creates a device data structure which will contain identifying
6856 * information for the device (host wwpn, target wwpn, lun), state of OAS,
6857 * whether or not the corresponding lun is available by the system,
6858 * and pointer to the rport data.
6859 *
6860 * Return codes:
6861 *   NULL - Error
6862 *   Pointer to lpfc_device_data - Success
6863 **/
6864struct lpfc_device_data*
6865lpfc_create_device_data(struct lpfc_hba *phba, struct lpfc_name *vport_wwpn,
6866                        struct lpfc_name *target_wwpn, uint64_t lun,
6867                        uint32_t pri, bool atomic_create)
6868{
6869
6870        struct lpfc_device_data *lun_info;
6871        int memory_flags;
6872
6873        if (unlikely(!phba) || !vport_wwpn || !target_wwpn  ||
6874            !(phba->cfg_fof))
6875                return NULL;
6876
6877        /* Attempt to create the device data to contain lun info */
6878
6879        if (atomic_create)
6880                memory_flags = GFP_ATOMIC;
6881        else
6882                memory_flags = GFP_KERNEL;
6883        lun_info = mempool_alloc(phba->device_data_mem_pool, memory_flags);
6884        if (!lun_info)
6885                return NULL;
6886        INIT_LIST_HEAD(&lun_info->listentry);
6887        lun_info->rport_data  = NULL;
6888        memcpy(&lun_info->device_id.vport_wwpn, vport_wwpn,
6889               sizeof(struct lpfc_name));
6890        memcpy(&lun_info->device_id.target_wwpn, target_wwpn,
6891               sizeof(struct lpfc_name));
6892        lun_info->device_id.lun = lun;
6893        lun_info->oas_enabled = false;
6894        lun_info->priority = pri;
6895        lun_info->available = false;
6896        return lun_info;
6897}
6898
6899/**
6900 * lpfc_delete_device_data - frees a device data structure for OAS
6901 * @phba: Pointer to host bus adapter structure.
6902 * @lun_info: Pointer to device data structure to free.
6903 *
6904 * This routine frees the previously allocated device data structure passed.
6905 *
6906 **/
6907void
6908lpfc_delete_device_data(struct lpfc_hba *phba,
6909                        struct lpfc_device_data *lun_info)
6910{
6911
6912        if (unlikely(!phba) || !lun_info  ||
6913            !(phba->cfg_fof))
6914                return;
6915
6916        if (!list_empty(&lun_info->listentry))
6917                list_del(&lun_info->listentry);
6918        mempool_free(lun_info, phba->device_data_mem_pool);
6919        return;
6920}
6921
6922/**
6923 * __lpfc_get_device_data - returns the device data for the specified lun
6924 * @phba: Pointer to host bus adapter structure.
6925 * @list: Point to list to search.
6926 * @vport_wwpn: Pointer to vport's wwpn information
6927 * @target_wwpn: Pointer to target's wwpn information
6928 * @lun: Lun on target
6929 *
6930 * This routine searches the list passed for the specified lun's device data.
6931 * This function does not hold locks, it is the responsibility of the caller
6932 * to ensure the proper lock is held before calling the function.
6933 *
6934 * Return codes:
6935 *   NULL - Error
6936 *   Pointer to lpfc_device_data - Success
6937 **/
6938struct lpfc_device_data*
6939__lpfc_get_device_data(struct lpfc_hba *phba, struct list_head *list,
6940                       struct lpfc_name *vport_wwpn,
6941                       struct lpfc_name *target_wwpn, uint64_t lun)
6942{
6943
6944        struct lpfc_device_data *lun_info;
6945
6946        if (unlikely(!phba) || !list || !vport_wwpn || !target_wwpn ||
6947            !phba->cfg_fof)
6948                return NULL;
6949
6950        /* Check to see if the lun is already enabled for OAS. */
6951
6952        list_for_each_entry(lun_info, list, listentry) {
6953                if ((memcmp(&lun_info->device_id.vport_wwpn, vport_wwpn,
6954                            sizeof(struct lpfc_name)) == 0) &&
6955                    (memcmp(&lun_info->device_id.target_wwpn, target_wwpn,
6956                            sizeof(struct lpfc_name)) == 0) &&
6957                    (lun_info->device_id.lun == lun))
6958                        return lun_info;
6959        }
6960
6961        return NULL;
6962}
6963
6964/**
6965 * lpfc_find_next_oas_lun - searches for the next oas lun
6966 * @phba: Pointer to host bus adapter structure.
6967 * @vport_wwpn: Pointer to vport's wwpn information
6968 * @target_wwpn: Pointer to target's wwpn information
6969 * @starting_lun: Pointer to the lun to start searching for
6970 * @found_vport_wwpn: Pointer to the found lun's vport wwpn information
6971 * @found_target_wwpn: Pointer to the found lun's target wwpn information
6972 * @found_lun: Pointer to the found lun.
6973 * @found_lun_status: Pointer to status of the found lun.
6974 * @found_lun_pri: Pointer to priority of the found lun.
6975 *
6976 * This routine searches the luns list for the specified lun
6977 * or the first lun for the vport/target.  If the vport wwpn contains
6978 * a zero value then a specific vport is not specified. In this case
6979 * any vport which contains the lun will be considered a match.  If the
6980 * target wwpn contains a zero value then a specific target is not specified.
6981 * In this case any target which contains the lun will be considered a
6982 * match.  If the lun is found, the lun, vport wwpn, target wwpn and lun status
6983 * are returned.  The function will also return the next lun if available.
6984 * If the next lun is not found, starting_lun parameter will be set to
6985 * NO_MORE_OAS_LUN.
6986 *
6987 * Return codes:
6988 *   non-0 - Error
6989 *   0 - Success
6990 **/
6991bool
6992lpfc_find_next_oas_lun(struct lpfc_hba *phba, struct lpfc_name *vport_wwpn,
6993                       struct lpfc_name *target_wwpn, uint64_t *starting_lun,
6994                       struct lpfc_name *found_vport_wwpn,
6995                       struct lpfc_name *found_target_wwpn,
6996                       uint64_t *found_lun,
6997                       uint32_t *found_lun_status,
6998                       uint32_t *found_lun_pri)
6999{
7000
7001        unsigned long flags;
7002        struct lpfc_device_data *lun_info;
7003        struct lpfc_device_id *device_id;
7004        uint64_t lun;
7005        bool found = false;
7006
7007        if (unlikely(!phba) || !vport_wwpn || !target_wwpn ||
7008            !starting_lun || !found_vport_wwpn ||
7009            !found_target_wwpn || !found_lun || !found_lun_status ||
7010            (*starting_lun == NO_MORE_OAS_LUN) ||
7011            !phba->cfg_fof)
7012                return false;
7013
7014        lun = *starting_lun;
7015        *found_lun = NO_MORE_OAS_LUN;
7016        *starting_lun = NO_MORE_OAS_LUN;
7017
7018        /* Search for lun or the lun closet in value */
7019
7020        spin_lock_irqsave(&phba->devicelock, flags);
7021        list_for_each_entry(lun_info, &phba->luns, listentry) {
7022                if (((wwn_to_u64(vport_wwpn->u.wwn) == 0) ||
7023                     (memcmp(&lun_info->device_id.vport_wwpn, vport_wwpn,
7024                            sizeof(struct lpfc_name)) == 0)) &&
7025                    ((wwn_to_u64(target_wwpn->u.wwn) == 0) ||
7026                     (memcmp(&lun_info->device_id.target_wwpn, target_wwpn,
7027                            sizeof(struct lpfc_name)) == 0)) &&
7028                    (lun_info->oas_enabled)) {
7029                        device_id = &lun_info->device_id;
7030                        if ((!found) &&
7031                            ((lun == FIND_FIRST_OAS_LUN) ||
7032                             (device_id->lun == lun))) {
7033                                *found_lun = device_id->lun;
7034                                memcpy(found_vport_wwpn,
7035                                       &device_id->vport_wwpn,
7036                                       sizeof(struct lpfc_name));
7037                                memcpy(found_target_wwpn,
7038                                       &device_id->target_wwpn,
7039                                       sizeof(struct lpfc_name));
7040                                if (lun_info->available)
7041                                        *found_lun_status =
7042                                                OAS_LUN_STATUS_EXISTS;
7043                                else
7044                                        *found_lun_status = 0;
7045                                *found_lun_pri = lun_info->priority;
7046                                if (phba->cfg_oas_flags & OAS_FIND_ANY_VPORT)
7047                                        memset(vport_wwpn, 0x0,
7048                                               sizeof(struct lpfc_name));
7049                                if (phba->cfg_oas_flags & OAS_FIND_ANY_TARGET)
7050                                        memset(target_wwpn, 0x0,
7051                                               sizeof(struct lpfc_name));
7052                                found = true;
7053                        } else if (found) {
7054                                *starting_lun = device_id->lun;
7055                                memcpy(vport_wwpn, &device_id->vport_wwpn,
7056                                       sizeof(struct lpfc_name));
7057                                memcpy(target_wwpn, &device_id->target_wwpn,
7058                                       sizeof(struct lpfc_name));
7059                                break;
7060                        }
7061                }
7062        }
7063        spin_unlock_irqrestore(&phba->devicelock, flags);
7064        return found;
7065}
7066
7067/**
7068 * lpfc_enable_oas_lun - enables a lun for OAS operations
7069 * @phba: Pointer to host bus adapter structure.
7070 * @vport_wwpn: Pointer to vport's wwpn information
7071 * @target_wwpn: Pointer to target's wwpn information
7072 * @lun: Lun
7073 * @pri: Priority
7074 *
7075 * This routine enables a lun for oas operations.  The routines does so by
7076 * doing the following :
7077 *
7078 *   1) Checks to see if the device data for the lun has been created.
7079 *   2) If found, sets the OAS enabled flag if not set and returns.
7080 *   3) Otherwise, creates a device data structure.
7081 *   4) If successfully created, indicates the device data is for an OAS lun,
7082 *   indicates the lun is not available and add to the list of luns.
7083 *
7084 * Return codes:
7085 *   false - Error
7086 *   true - Success
7087 **/
7088bool
7089lpfc_enable_oas_lun(struct lpfc_hba *phba, struct lpfc_name *vport_wwpn,
7090                    struct lpfc_name *target_wwpn, uint64_t lun, uint8_t pri)
7091{
7092
7093        struct lpfc_device_data *lun_info;
7094        unsigned long flags;
7095
7096        if (unlikely(!phba) || !vport_wwpn || !target_wwpn ||
7097            !phba->cfg_fof)
7098                return false;
7099
7100        spin_lock_irqsave(&phba->devicelock, flags);
7101
7102        /* Check to see if the device data for the lun has been created */
7103        lun_info = __lpfc_get_device_data(phba, &phba->luns, vport_wwpn,
7104                                          target_wwpn, lun);
7105        if (lun_info) {
7106                if (!lun_info->oas_enabled)
7107                        lun_info->oas_enabled = true;
7108                lun_info->priority = pri;
7109                spin_unlock_irqrestore(&phba->devicelock, flags);
7110                return true;
7111        }
7112
7113        /* Create an lun info structure and add to list of luns */
7114        lun_info = lpfc_create_device_data(phba, vport_wwpn, target_wwpn, lun,
7115                                           pri, true);
7116        if (lun_info) {
7117                lun_info->oas_enabled = true;
7118                lun_info->priority = pri;
7119                lun_info->available = false;
7120                list_add_tail(&lun_info->listentry, &phba->luns);
7121                spin_unlock_irqrestore(&phba->devicelock, flags);
7122                return true;
7123        }
7124        spin_unlock_irqrestore(&phba->devicelock, flags);
7125        return false;
7126}
7127
7128/**
7129 * lpfc_disable_oas_lun - disables a lun for OAS operations
7130 * @phba: Pointer to host bus adapter structure.
7131 * @vport_wwpn: Pointer to vport's wwpn information
7132 * @target_wwpn: Pointer to target's wwpn information
7133 * @lun: Lun
7134 * @pri: Priority
7135 *
7136 * This routine disables a lun for oas operations.  The routines does so by
7137 * doing the following :
7138 *
7139 *   1) Checks to see if the device data for the lun is created.
7140 *   2) If present, clears the flag indicating this lun is for OAS.
7141 *   3) If the lun is not available by the system, the device data is
7142 *   freed.
7143 *
7144 * Return codes:
7145 *   false - Error
7146 *   true - Success
7147 **/
7148bool
7149lpfc_disable_oas_lun(struct lpfc_hba *phba, struct lpfc_name *vport_wwpn,
7150                     struct lpfc_name *target_wwpn, uint64_t lun, uint8_t pri)
7151{
7152
7153        struct lpfc_device_data *lun_info;
7154        unsigned long flags;
7155
7156        if (unlikely(!phba) || !vport_wwpn || !target_wwpn ||
7157            !phba->cfg_fof)
7158                return false;
7159
7160        spin_lock_irqsave(&phba->devicelock, flags);
7161
7162        /* Check to see if the lun is available. */
7163        lun_info = __lpfc_get_device_data(phba,
7164                                          &phba->luns, vport_wwpn,
7165                                          target_wwpn, lun);
7166        if (lun_info) {
7167                lun_info->oas_enabled = false;
7168                lun_info->priority = pri;
7169                if (!lun_info->available)
7170                        lpfc_delete_device_data(phba, lun_info);
7171                spin_unlock_irqrestore(&phba->devicelock, flags);
7172                return true;
7173        }
7174
7175        spin_unlock_irqrestore(&phba->devicelock, flags);
7176        return false;
7177}
7178
7179static int
7180lpfc_no_command(struct Scsi_Host *shost, struct scsi_cmnd *cmnd)
7181{
7182        return SCSI_MLQUEUE_HOST_BUSY;
7183}
7184
7185static int
7186lpfc_no_handler(struct scsi_cmnd *cmnd)
7187{
7188        return FAILED;
7189}
7190
7191static int
7192lpfc_no_slave(struct scsi_device *sdev)
7193{
7194        return -ENODEV;
7195}
7196
7197struct scsi_host_template lpfc_template_nvme = {
7198        .module                 = THIS_MODULE,
7199        .name                   = LPFC_DRIVER_NAME,
7200        .proc_name              = LPFC_DRIVER_NAME,
7201        .info                   = lpfc_info,
7202        .queuecommand           = lpfc_no_command,
7203        .eh_abort_handler       = lpfc_no_handler,
7204        .eh_device_reset_handler = lpfc_no_handler,
7205        .eh_target_reset_handler = lpfc_no_handler,
7206        .eh_bus_reset_handler   = lpfc_no_handler,
7207        .eh_host_reset_handler  = lpfc_no_handler,
7208        .slave_alloc            = lpfc_no_slave,
7209        .slave_configure        = lpfc_no_slave,
7210        .scan_finished          = lpfc_scan_finished,
7211        .this_id                = -1,
7212        .sg_tablesize           = 1,
7213        .cmd_per_lun            = 1,
7214        .shost_groups           = lpfc_hba_groups,
7215        .max_sectors            = 0xFFFFFFFF,
7216        .vendor_id              = LPFC_NL_VENDOR_ID,
7217        .track_queue_depth      = 0,
7218};
7219
7220struct scsi_host_template lpfc_template = {
7221        .module                 = THIS_MODULE,
7222        .name                   = LPFC_DRIVER_NAME,
7223        .proc_name              = LPFC_DRIVER_NAME,
7224        .info                   = lpfc_info,
7225        .queuecommand           = lpfc_queuecommand,
7226        .eh_timed_out           = fc_eh_timed_out,
7227        .eh_should_retry_cmd    = fc_eh_should_retry_cmd,
7228        .eh_abort_handler       = lpfc_abort_handler,
7229        .eh_device_reset_handler = lpfc_device_reset_handler,
7230        .eh_target_reset_handler = lpfc_target_reset_handler,
7231        .eh_bus_reset_handler   = lpfc_bus_reset_handler,
7232        .eh_host_reset_handler  = lpfc_host_reset_handler,
7233        .slave_alloc            = lpfc_slave_alloc,
7234        .slave_configure        = lpfc_slave_configure,
7235        .slave_destroy          = lpfc_slave_destroy,
7236        .scan_finished          = lpfc_scan_finished,
7237        .this_id                = -1,
7238        .sg_tablesize           = LPFC_DEFAULT_SG_SEG_CNT,
7239        .cmd_per_lun            = LPFC_CMD_PER_LUN,
7240        .shost_groups           = lpfc_hba_groups,
7241        .max_sectors            = 0xFFFFFFFF,
7242        .vendor_id              = LPFC_NL_VENDOR_ID,
7243        .change_queue_depth     = scsi_change_queue_depth,
7244        .track_queue_depth      = 1,
7245};
7246