linux/drivers/scsi/qla2xxx/qla_iocb.c
/*
 * QLogic Fibre Channel HBA Driver
 * Copyright (c)  2003-2014 QLogic Corporation
 *
 * See LICENSE.qla2xxx for copyright and licensing details.
 */
#include "qla_def.h"
#include "qla_target.h"

#include <linux/blkdev.h>
#include <linux/delay.h>

#include <scsi/scsi_tcq.h>

/**
 * qla2x00_get_cmd_direction() - Determine control_flags data direction.
 * @sp: SCSI Request Block holding the command
 *
 * Returns the proper CF_* direction based on the CDB.
 */
static inline uint16_t
qla2x00_get_cmd_direction(srb_t *sp)
{
        uint16_t cflags;
        struct scsi_cmnd *cmd = GET_CMD_SP(sp);
        struct scsi_qla_host *vha = sp->vha;

        cflags = 0;

        /* Set transfer direction */
        if (cmd->sc_data_direction == DMA_TO_DEVICE) {
                cflags = CF_WRITE;
                vha->qla_stats.output_bytes += scsi_bufflen(cmd);
                vha->qla_stats.output_requests++;
        } else if (cmd->sc_data_direction == DMA_FROM_DEVICE) {
                cflags = CF_READ;
                vha->qla_stats.input_bytes += scsi_bufflen(cmd);
                vha->qla_stats.input_requests++;
        }
        return (cflags);
}

/**
 * qla2x00_calc_iocbs_32() - Determine number of Command Type 2 and
 * Continuation Type 0 IOCBs to allocate.
 *
 * @dsds: number of data segment descriptors needed
 *
 * Returns the number of IOCB entries needed to store @dsds.
 */
uint16_t
qla2x00_calc_iocbs_32(uint16_t dsds)
{
        uint16_t iocbs;

        iocbs = 1;
        if (dsds > 3) {
                iocbs += (dsds - 3) / 7;
                if ((dsds - 3) % 7)
                        iocbs++;
        }
        return (iocbs);
}

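/*
 * Worked example (illustrative only): the command IOCB holds 3 DSDs and
 * each Continuation Type 0 IOCB holds 7 more, i.e. 1 + ceil((dsds - 3) / 7)
 * entries in total. For dsds = 17 that is 1 + 14/7 = 3 IOCBs; dsds = 18
 * leaves a remainder and rounds up to 4.
 */
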
/**
 * qla2x00_calc_iocbs_64() - Determine number of Command Type 3 and
 * Continuation Type 1 IOCBs to allocate.
 *
 * @dsds: number of data segment descriptors needed
 *
 * Returns the number of IOCB entries needed to store @dsds.
 */
uint16_t
qla2x00_calc_iocbs_64(uint16_t dsds)
{
        uint16_t iocbs;

        iocbs = 1;
        if (dsds > 2) {
                iocbs += (dsds - 2) / 5;
                if ((dsds - 2) % 5)
                        iocbs++;
        }
        return (iocbs);
}

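/*
 * Worked example (illustrative only): here the command IOCB holds 2 DSDs
 * and each Continuation Type 1 IOCB holds 5 more, i.e. 1 + ceil((dsds - 2) / 5)
 * entries. For dsds = 12 that is 1 + 10/5 = 3 IOCBs; dsds = 13 rounds up to 4.
 */
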
/**
 * qla2x00_prep_cont_type0_iocb() - Initialize a Continuation Type 0 IOCB.
 * @vha: HA context
 *
 * Returns a pointer to the Continuation Type 0 IOCB packet.
 */
static inline cont_entry_t *
qla2x00_prep_cont_type0_iocb(struct scsi_qla_host *vha)
{
        cont_entry_t *cont_pkt;
        struct req_que *req = vha->req;

        /* Adjust ring index. */
        req->ring_index++;
        if (req->ring_index == req->length) {
                req->ring_index = 0;
                req->ring_ptr = req->ring;
        } else {
                req->ring_ptr++;
        }

        cont_pkt = (cont_entry_t *)req->ring_ptr;

        /* Load packet defaults. */
        *((uint32_t *)(&cont_pkt->entry_type)) = cpu_to_le32(CONTINUE_TYPE);

        return (cont_pkt);
}

/**
 * qla2x00_prep_cont_type1_iocb() - Initialize a Continuation Type 1 IOCB.
 * @vha: HA context
 * @req: request queue the continuation IOCB is placed on
 *
 * Returns a pointer to the Continuation Type 1 IOCB packet.
 */
static inline cont_a64_entry_t *
qla2x00_prep_cont_type1_iocb(scsi_qla_host_t *vha, struct req_que *req)
{
        cont_a64_entry_t *cont_pkt;

        /* Adjust ring index. */
        req->ring_index++;
        if (req->ring_index == req->length) {
                req->ring_index = 0;
                req->ring_ptr = req->ring;
        } else {
                req->ring_ptr++;
        }

        cont_pkt = (cont_a64_entry_t *)req->ring_ptr;

        /* Load packet defaults. */
        *((uint32_t *)(&cont_pkt->entry_type)) = IS_QLAFX00(vha->hw) ?
            cpu_to_le32(CONTINUE_A64_TYPE_FX00) :
            cpu_to_le32(CONTINUE_A64_TYPE);

        return (cont_pkt);
}

inline int
qla24xx_configure_prot_mode(srb_t *sp, uint16_t *fw_prot_opts)
{
        struct scsi_cmnd *cmd = GET_CMD_SP(sp);
        uint8_t guard = scsi_host_get_guard(cmd->device->host);

        /* We always use DIF Bundling for best performance */
        *fw_prot_opts = 0;

        /* Translate SCSI opcode to a protection opcode */
        switch (scsi_get_prot_op(cmd)) {
        case SCSI_PROT_READ_STRIP:
                *fw_prot_opts |= PO_MODE_DIF_REMOVE;
                break;
        case SCSI_PROT_WRITE_INSERT:
                *fw_prot_opts |= PO_MODE_DIF_INSERT;
                break;
        case SCSI_PROT_READ_INSERT:
                *fw_prot_opts |= PO_MODE_DIF_INSERT;
                break;
        case SCSI_PROT_WRITE_STRIP:
                *fw_prot_opts |= PO_MODE_DIF_REMOVE;
                break;
        case SCSI_PROT_READ_PASS:
        case SCSI_PROT_WRITE_PASS:
                if (guard & SHOST_DIX_GUARD_IP)
                        *fw_prot_opts |= PO_MODE_DIF_TCP_CKSUM;
                else
                        *fw_prot_opts |= PO_MODE_DIF_PASS;
                break;
        default:        /* Normal Request */
                *fw_prot_opts |= PO_MODE_DIF_PASS;
                break;
        }

        return scsi_prot_sg_count(cmd);
}

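/*
 * Usage sketch (an assumption about the calling convention, mirroring the
 * DIF-aware start_scsi paths): the caller obtains the protection
 * scatter-gather count and the firmware protection options in one call:
 *
 *      uint16_t fw_prot_opts = 0;
 *      int nprot_sg = qla24xx_configure_prot_mode(sp, &fw_prot_opts);
 */
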
/*
 * qla2x00_build_scsi_iocbs_32() - Build IOCB command utilizing 32bit
 * capable IOCB types.
 *
 * @sp: SRB command to process
 * @cmd_pkt: Command type 2 IOCB
 * @tot_dsds: Total number of segments to transfer
 */
void qla2x00_build_scsi_iocbs_32(srb_t *sp, cmd_entry_t *cmd_pkt,
    uint16_t tot_dsds)
{
        uint16_t        avail_dsds;
        uint32_t        *cur_dsd;
        scsi_qla_host_t *vha;
        struct scsi_cmnd *cmd;
        struct scatterlist *sg;
        int i;

        cmd = GET_CMD_SP(sp);

        /* Update entry type to indicate Command Type 2 IOCB */
        *((uint32_t *)(&cmd_pkt->entry_type)) = cpu_to_le32(COMMAND_TYPE);

        /* No data transfer */
        if (!scsi_bufflen(cmd) || cmd->sc_data_direction == DMA_NONE) {
                cmd_pkt->byte_count = cpu_to_le32(0);
                return;
        }

        vha = sp->vha;
        cmd_pkt->control_flags |= cpu_to_le16(qla2x00_get_cmd_direction(sp));

        /* Three DSDs are available in the Command Type 2 IOCB */
        avail_dsds = 3;
        cur_dsd = (uint32_t *)&cmd_pkt->dseg_0_address;

        /* Load data segments */
        scsi_for_each_sg(cmd, sg, tot_dsds, i) {
                cont_entry_t *cont_pkt;

                /* Allocate additional continuation packets? */
                if (avail_dsds == 0) {
                        /*
                         * Seven DSDs are available in the Continuation
                         * Type 0 IOCB.
                         */
                        cont_pkt = qla2x00_prep_cont_type0_iocb(vha);
                        cur_dsd = (uint32_t *)&cont_pkt->dseg_0_address;
                        avail_dsds = 7;
                }

                *cur_dsd++ = cpu_to_le32(sg_dma_address(sg));
                *cur_dsd++ = cpu_to_le32(sg_dma_len(sg));
                avail_dsds--;
        }
}

/**
 * qla2x00_build_scsi_iocbs_64() - Build IOCB command utilizing 64bit
 * capable IOCB types.
 *
 * @sp: SRB command to process
 * @cmd_pkt: Command type 3 IOCB
 * @tot_dsds: Total number of segments to transfer
 */
void qla2x00_build_scsi_iocbs_64(srb_t *sp, cmd_entry_t *cmd_pkt,
    uint16_t tot_dsds)
{
        uint16_t        avail_dsds;
        uint32_t        *cur_dsd;
        scsi_qla_host_t *vha;
        struct scsi_cmnd *cmd;
        struct scatterlist *sg;
        int i;

        cmd = GET_CMD_SP(sp);

        /* Update entry type to indicate Command Type 3 IOCB */
        *((uint32_t *)(&cmd_pkt->entry_type)) = cpu_to_le32(COMMAND_A64_TYPE);

        /* No data transfer */
        if (!scsi_bufflen(cmd) || cmd->sc_data_direction == DMA_NONE) {
                cmd_pkt->byte_count = cpu_to_le32(0);
                return;
        }

        vha = sp->vha;
        cmd_pkt->control_flags |= cpu_to_le16(qla2x00_get_cmd_direction(sp));

        /* Two DSDs are available in the Command Type 3 IOCB */
        avail_dsds = 2;
        cur_dsd = (uint32_t *)&cmd_pkt->dseg_0_address;

        /* Load data segments */
        scsi_for_each_sg(cmd, sg, tot_dsds, i) {
                dma_addr_t      sle_dma;
                cont_a64_entry_t *cont_pkt;

                /* Allocate additional continuation packets? */
                if (avail_dsds == 0) {
                        /*
                         * Five DSDs are available in the Continuation
                         * Type 1 IOCB.
                         */
                        cont_pkt = qla2x00_prep_cont_type1_iocb(vha, vha->req);
                        cur_dsd = (uint32_t *)cont_pkt->dseg_0_address;
                        avail_dsds = 5;
                }

                sle_dma = sg_dma_address(sg);
                *cur_dsd++ = cpu_to_le32(LSD(sle_dma));
                *cur_dsd++ = cpu_to_le32(MSD(sle_dma));
                *cur_dsd++ = cpu_to_le32(sg_dma_len(sg));
                avail_dsds--;
        }
}

/**
 * qla2x00_start_scsi() - Send a SCSI command to the ISP
 * @sp: command to send to the ISP
 *
 * Returns non-zero if a failure occurred, else zero.
 */
int
qla2x00_start_scsi(srb_t *sp)
{
        int             nseg;
        unsigned long   flags;
        scsi_qla_host_t *vha;
        struct scsi_cmnd *cmd;
        uint32_t        *clr_ptr;
        uint32_t        index;
        uint32_t        handle;
        cmd_entry_t     *cmd_pkt;
        uint16_t        cnt;
        uint16_t        req_cnt;
        uint16_t        tot_dsds;
        struct device_reg_2xxx __iomem *reg;
        struct qla_hw_data *ha;
        struct req_que *req;
        struct rsp_que *rsp;

        /* Setup device pointers. */
        vha = sp->vha;
        ha = vha->hw;
        reg = &ha->iobase->isp;
        cmd = GET_CMD_SP(sp);
        req = ha->req_q_map[0];
        rsp = ha->rsp_q_map[0];

        /* So we know we haven't pci_map'ed anything yet */
        tot_dsds = 0;

        /* Send marker if required */
        if (vha->marker_needed != 0) {
                if (qla2x00_marker(vha, req, rsp, 0, 0, MK_SYNC_ALL) !=
                    QLA_SUCCESS) {
                        return (QLA_FUNCTION_FAILED);
                }
                vha->marker_needed = 0;
        }

        /* Acquire ring specific lock */
        spin_lock_irqsave(&ha->hardware_lock, flags);

        /* Check for room in outstanding command list. */
        handle = req->current_outstanding_cmd;
        for (index = 1; index < req->num_outstanding_cmds; index++) {
                handle++;
                if (handle == req->num_outstanding_cmds)
                        handle = 1;
                if (!req->outstanding_cmds[handle])
                        break;
        }
        if (index == req->num_outstanding_cmds)
                goto queuing_error;

        /* Map the sg table so we have an accurate count of sg entries needed */
        if (scsi_sg_count(cmd)) {
                nseg = dma_map_sg(&ha->pdev->dev, scsi_sglist(cmd),
                    scsi_sg_count(cmd), cmd->sc_data_direction);
                if (unlikely(!nseg))
                        goto queuing_error;
        } else
                nseg = 0;

        tot_dsds = nseg;

        /* Calculate the number of request entries needed. */
        req_cnt = ha->isp_ops->calc_req_entries(tot_dsds);
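        /*
         * Free-space check (comment added for clarity): req->cnt caches the
         * number of free ring entries; when it looks short, the hardware out
         * pointer is re-read. Free space is (out - in) if the in pointer has
         * not wrapped past out, else (length - (in - out)); the "+ 2" keeps a
         * safety margin so the in pointer never catches the out pointer.
         */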
        if (req->cnt < (req_cnt + 2)) {
                cnt = RD_REG_WORD_RELAXED(ISP_REQ_Q_OUT(ha, reg));
                if (req->ring_index < cnt)
                        req->cnt = cnt - req->ring_index;
                else
                        req->cnt = req->length -
                            (req->ring_index - cnt);
                /* If still no head room then bail out */
                if (req->cnt < (req_cnt + 2))
                        goto queuing_error;
        }

        /* Build command packet */
        req->current_outstanding_cmd = handle;
        req->outstanding_cmds[handle] = sp;
        sp->handle = handle;
        cmd->host_scribble = (unsigned char *)(unsigned long)handle;
        req->cnt -= req_cnt;

        cmd_pkt = (cmd_entry_t *)req->ring_ptr;
        cmd_pkt->handle = handle;
        /* Zero out remaining portion of packet. */
        clr_ptr = (uint32_t *)cmd_pkt + 2;
        memset(clr_ptr, 0, REQUEST_ENTRY_SIZE - 8);
        cmd_pkt->dseg_count = cpu_to_le16(tot_dsds);

        /* Set target ID and LUN number */
        SET_TARGET_ID(ha, cmd_pkt->target, sp->fcport->loop_id);
        cmd_pkt->lun = cpu_to_le16(cmd->device->lun);
        cmd_pkt->control_flags = cpu_to_le16(CF_SIMPLE_TAG);

        /* Load SCSI command packet. */
        memcpy(cmd_pkt->scsi_cdb, cmd->cmnd, cmd->cmd_len);
        cmd_pkt->byte_count = cpu_to_le32((uint32_t)scsi_bufflen(cmd));

        /* Build IOCB segments */
        ha->isp_ops->build_iocbs(sp, cmd_pkt, tot_dsds);

        /* Set total data segment count. */
        cmd_pkt->entry_count = (uint8_t)req_cnt;
        wmb();

        /* Adjust ring index. */
        req->ring_index++;
        if (req->ring_index == req->length) {
                req->ring_index = 0;
                req->ring_ptr = req->ring;
        } else
                req->ring_ptr++;

        sp->flags |= SRB_DMA_VALID;

        /* Set chip new ring index. */
        WRT_REG_WORD(ISP_REQ_Q_IN(ha, reg), req->ring_index);
        RD_REG_WORD_RELAXED(ISP_REQ_Q_IN(ha, reg));     /* PCI Posting. */

        /* Manage unprocessed RIO/ZIO commands in response queue. */
        if (vha->flags.process_response_queue &&
            rsp->ring_ptr->signature != RESPONSE_PROCESSED)
                qla2x00_process_response_queue(rsp);

        spin_unlock_irqrestore(&ha->hardware_lock, flags);
        return (QLA_SUCCESS);

queuing_error:
        if (tot_dsds)
                scsi_dma_unmap(cmd);

        spin_unlock_irqrestore(&ha->hardware_lock, flags);

        return (QLA_FUNCTION_FAILED);
}

/**
 * qla2x00_start_iocbs() - Execute the IOCB command
 * @vha: HA context
 * @req: request queue on which the IOCBs were staged
 */
void
qla2x00_start_iocbs(struct scsi_qla_host *vha, struct req_que *req)
{
        struct qla_hw_data *ha = vha->hw;
        device_reg_t *reg = ISP_QUE_REG(ha, req->id);

        if (IS_P3P_TYPE(ha)) {
                qla82xx_start_iocbs(vha);
        } else {
                /* Adjust ring index. */
                req->ring_index++;
                if (req->ring_index == req->length) {
                        req->ring_index = 0;
                        req->ring_ptr = req->ring;
                } else
                        req->ring_ptr++;

                /* Set chip new ring index. */
                if (ha->mqenable || IS_QLA27XX(ha)) {
                        WRT_REG_DWORD(req->req_q_in, req->ring_index);
                } else if (IS_QLA83XX(ha)) {
                        WRT_REG_DWORD(req->req_q_in, req->ring_index);
                        RD_REG_DWORD_RELAXED(&ha->iobase->isp24.hccr);
                } else if (IS_QLAFX00(ha)) {
                        WRT_REG_DWORD(&reg->ispfx00.req_q_in, req->ring_index);
                        RD_REG_DWORD_RELAXED(&reg->ispfx00.req_q_in);
                        QLAFX00_SET_HST_INTR(ha, ha->rqstq_intr_code);
                } else if (IS_FWI2_CAPABLE(ha)) {
                        WRT_REG_DWORD(&reg->isp24.req_q_in, req->ring_index);
                        RD_REG_DWORD_RELAXED(&reg->isp24.req_q_in);
                } else {
                        WRT_REG_WORD(ISP_REQ_Q_IN(ha, &reg->isp),
                                req->ring_index);
                        RD_REG_WORD_RELAXED(ISP_REQ_Q_IN(ha, &reg->isp));
                }
        }
}

/**
 * __qla2x00_marker() - Send a marker IOCB to the firmware.
 * @vha: HA context
 * @req: request queue
 * @rsp: response queue
 * @loop_id: loop ID
 * @lun: LUN
 * @type: marker modifier
 *
 * Can be called from both normal and interrupt context.
 *
 * Returns non-zero if a failure occurred, else zero.
 */
static int
__qla2x00_marker(struct scsi_qla_host *vha, struct req_que *req,
                        struct rsp_que *rsp, uint16_t loop_id,
                        uint64_t lun, uint8_t type)
{
        mrk_entry_t *mrk;
        struct mrk_entry_24xx *mrk24 = NULL;

        struct qla_hw_data *ha = vha->hw;
        scsi_qla_host_t *base_vha = pci_get_drvdata(ha->pdev);

        req = ha->req_q_map[0];
        mrk = (mrk_entry_t *)qla2x00_alloc_iocbs(vha, NULL);
        if (mrk == NULL) {
                ql_log(ql_log_warn, base_vha, 0x3026,
                    "Failed to allocate Marker IOCB.\n");

                return (QLA_FUNCTION_FAILED);
        }

        mrk->entry_type = MARKER_TYPE;
        mrk->modifier = type;
        if (type != MK_SYNC_ALL) {
                if (IS_FWI2_CAPABLE(ha)) {
                        mrk24 = (struct mrk_entry_24xx *) mrk;
                        mrk24->nport_handle = cpu_to_le16(loop_id);
                        int_to_scsilun(lun, (struct scsi_lun *)&mrk24->lun);
                        host_to_fcp_swap(mrk24->lun, sizeof(mrk24->lun));
                        mrk24->vp_index = vha->vp_idx;
                        mrk24->handle = MAKE_HANDLE(req->id, mrk24->handle);
                } else {
                        SET_TARGET_ID(ha, mrk->target, loop_id);
                        mrk->lun = cpu_to_le16((uint16_t)lun);
                }
        }
        wmb();

        qla2x00_start_iocbs(vha, req);

        return (QLA_SUCCESS);
}

int
qla2x00_marker(struct scsi_qla_host *vha, struct req_que *req,
                struct rsp_que *rsp, uint16_t loop_id, uint64_t lun,
                uint8_t type)
{
        int ret;
        unsigned long flags = 0;

        spin_lock_irqsave(&vha->hw->hardware_lock, flags);
        ret = __qla2x00_marker(vha, req, rsp, loop_id, lun, type);
        spin_unlock_irqrestore(&vha->hw->hardware_lock, flags);

        return (ret);
}

/*
 * qla2x00_issue_marker
 *
 * Issue marker
 * Caller CAN have hardware lock held as specified by ha_locked parameter.
 * Might release it, then reacquire.
 */
int qla2x00_issue_marker(scsi_qla_host_t *vha, int ha_locked)
{
        if (ha_locked) {
                if (__qla2x00_marker(vha, vha->req, vha->req->rsp, 0, 0,
                                        MK_SYNC_ALL) != QLA_SUCCESS)
                        return QLA_FUNCTION_FAILED;
        } else {
                if (qla2x00_marker(vha, vha->req, vha->req->rsp, 0, 0,
                                        MK_SYNC_ALL) != QLA_SUCCESS)
                        return QLA_FUNCTION_FAILED;
        }
        vha->marker_needed = 0;

        return QLA_SUCCESS;
}

static inline int
qla24xx_build_scsi_type_6_iocbs(srb_t *sp, struct cmd_type_6 *cmd_pkt,
        uint16_t tot_dsds)
{
        uint32_t *cur_dsd = NULL;
        scsi_qla_host_t *vha;
        struct qla_hw_data *ha;
        struct scsi_cmnd *cmd;
        struct scatterlist *cur_seg;
        uint32_t *dsd_seg;
        void *next_dsd;
        uint8_t avail_dsds;
        uint8_t first_iocb = 1;
        uint32_t dsd_list_len;
        struct dsd_dma *dsd_ptr;
        struct ct6_dsd *ctx;

        cmd = GET_CMD_SP(sp);

        /* Update entry type to indicate Command Type 6 IOCB */
        *((uint32_t *)(&cmd_pkt->entry_type)) = cpu_to_le32(COMMAND_TYPE_6);

        /* No data transfer */
        if (!scsi_bufflen(cmd) || cmd->sc_data_direction == DMA_NONE) {
                cmd_pkt->byte_count = cpu_to_le32(0);
                return 0;
        }

        vha = sp->vha;
        ha = vha->hw;

        /* Set transfer direction */
        if (cmd->sc_data_direction == DMA_TO_DEVICE) {
                cmd_pkt->control_flags = cpu_to_le16(CF_WRITE_DATA);
                vha->qla_stats.output_bytes += scsi_bufflen(cmd);
                vha->qla_stats.output_requests++;
        } else if (cmd->sc_data_direction == DMA_FROM_DEVICE) {
                cmd_pkt->control_flags = cpu_to_le16(CF_READ_DATA);
                vha->qla_stats.input_bytes += scsi_bufflen(cmd);
                vha->qla_stats.input_requests++;
        }

        cur_seg = scsi_sglist(cmd);
        ctx = GET_CMD_CTX_SP(sp);

        while (tot_dsds) {
                avail_dsds = (tot_dsds > QLA_DSDS_PER_IOCB) ?
                    QLA_DSDS_PER_IOCB : tot_dsds;
                tot_dsds -= avail_dsds;
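                /*
                 * Comment added for clarity: each DSD list gets one slot
                 * beyond the data DSDs, presumably for the entry that either
                 * chains to the next list or null-terminates this one.
                 */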
                dsd_list_len = (avail_dsds + 1) * QLA_DSD_SIZE;

                dsd_ptr = list_first_entry(&ha->gbl_dsd_list,
                    struct dsd_dma, list);
                next_dsd = dsd_ptr->dsd_addr;
                list_del(&dsd_ptr->list);
                ha->gbl_dsd_avail--;
                list_add_tail(&dsd_ptr->list, &ctx->dsd_list);
                ctx->dsd_use_cnt++;
                ha->gbl_dsd_inuse++;

                if (first_iocb) {
                        first_iocb = 0;
                        dsd_seg = (uint32_t *)&cmd_pkt->fcp_data_dseg_address;
                        *dsd_seg++ = cpu_to_le32(LSD(dsd_ptr->dsd_list_dma));
                        *dsd_seg++ = cpu_to_le32(MSD(dsd_ptr->dsd_list_dma));
                        cmd_pkt->fcp_data_dseg_len = cpu_to_le32(dsd_list_len);
                } else {
                        *cur_dsd++ = cpu_to_le32(LSD(dsd_ptr->dsd_list_dma));
                        *cur_dsd++ = cpu_to_le32(MSD(dsd_ptr->dsd_list_dma));
                        *cur_dsd++ = cpu_to_le32(dsd_list_len);
                }
                cur_dsd = (uint32_t *)next_dsd;
                while (avail_dsds) {
                        dma_addr_t      sle_dma;

                        sle_dma = sg_dma_address(cur_seg);
                        *cur_dsd++ = cpu_to_le32(LSD(sle_dma));
                        *cur_dsd++ = cpu_to_le32(MSD(sle_dma));
                        *cur_dsd++ = cpu_to_le32(sg_dma_len(cur_seg));
                        cur_seg = sg_next(cur_seg);
                        avail_dsds--;
                }
        }

        /* Null termination */
        *cur_dsd++ = 0;
        *cur_dsd++ = 0;
        *cur_dsd++ = 0;
        cmd_pkt->control_flags |= CF_DATA_SEG_DESCR_ENABLE;
        return 0;
}

/*
 * qla24xx_calc_dsd_lists() - Determine number of DSD lists required
 * for Command Type 6.
 *
 * @dsds: number of data segment descriptors needed
 *
 * Returns the number of DSD lists needed to store @dsds.
 */
static inline uint16_t
qla24xx_calc_dsd_lists(uint16_t dsds)
{
        uint16_t dsd_lists = 0;

        dsd_lists = (dsds / QLA_DSDS_PER_IOCB);
        if (dsds % QLA_DSDS_PER_IOCB)
                dsd_lists++;
        return dsd_lists;
}
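
/*
 * Worked example (illustrative; QLA_DSDS_PER_IOCB is assumed to be 37, per
 * qla_def.h): this is a ceiling division, so dsds = 74 needs exactly 2
 * lists while dsds = 75 rounds up to 3.
 */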

/**
 * qla24xx_build_scsi_iocbs() - Build IOCB command utilizing Command Type 7
 * IOCB types.
 *
 * @sp: SRB command to process
 * @cmd_pkt: Command Type 7 IOCB
 * @tot_dsds: Total number of segments to transfer
 * @req: pointer to request queue
 */
inline void
qla24xx_build_scsi_iocbs(srb_t *sp, struct cmd_type_7 *cmd_pkt,
        uint16_t tot_dsds, struct req_que *req)
{
        uint16_t        avail_dsds;
        uint32_t        *cur_dsd;
        scsi_qla_host_t *vha;
        struct scsi_cmnd *cmd;
        struct scatterlist *sg;
        int i;

        cmd = GET_CMD_SP(sp);

        /* Update entry type to indicate Command Type 7 IOCB */
        *((uint32_t *)(&cmd_pkt->entry_type)) = cpu_to_le32(COMMAND_TYPE_7);

        /* No data transfer */
        if (!scsi_bufflen(cmd) || cmd->sc_data_direction == DMA_NONE) {
                cmd_pkt->byte_count = cpu_to_le32(0);
                return;
        }

        vha = sp->vha;

        /* Set transfer direction */
        if (cmd->sc_data_direction == DMA_TO_DEVICE) {
                cmd_pkt->task_mgmt_flags = cpu_to_le16(TMF_WRITE_DATA);
                vha->qla_stats.output_bytes += scsi_bufflen(cmd);
                vha->qla_stats.output_requests++;
        } else if (cmd->sc_data_direction == DMA_FROM_DEVICE) {
                cmd_pkt->task_mgmt_flags = cpu_to_le16(TMF_READ_DATA);
                vha->qla_stats.input_bytes += scsi_bufflen(cmd);
                vha->qla_stats.input_requests++;
        }

        /* One DSD is available in the Command Type 7 IOCB */
        avail_dsds = 1;
        cur_dsd = (uint32_t *)&cmd_pkt->dseg_0_address;

        /* Load data segments */
        scsi_for_each_sg(cmd, sg, tot_dsds, i) {
                dma_addr_t      sle_dma;
                cont_a64_entry_t *cont_pkt;

                /* Allocate additional continuation packets? */
                if (avail_dsds == 0) {
                        /*
                         * Five DSDs are available in the Continuation
                         * Type 1 IOCB.
                         */
                        cont_pkt = qla2x00_prep_cont_type1_iocb(vha, req);
                        cur_dsd = (uint32_t *)cont_pkt->dseg_0_address;
                        avail_dsds = 5;
                }

                sle_dma = sg_dma_address(sg);
                *cur_dsd++ = cpu_to_le32(LSD(sle_dma));
                *cur_dsd++ = cpu_to_le32(MSD(sle_dma));
                *cur_dsd++ = cpu_to_le32(sg_dma_len(sg));
                avail_dsds--;
        }
}

struct fw_dif_context {
        uint32_t ref_tag;
        uint16_t app_tag;
        uint8_t ref_tag_mask[4];        /* Validation/Replacement Mask */
        uint8_t app_tag_mask[2];        /* Validation/Replacement Mask */
};

/*
 * qla24xx_set_t10dif_tags() - Extract Ref and App tags from the SCSI command.
 */
static inline void
qla24xx_set_t10dif_tags(srb_t *sp, struct fw_dif_context *pkt,
    unsigned int protcnt)
{
        struct scsi_cmnd *cmd = GET_CMD_SP(sp);

        switch (scsi_get_prot_type(cmd)) {
        case SCSI_PROT_DIF_TYPE0:
                /*
                 * No check for ql2xenablehba_err_chk, as it would be an
                 * I/O error if hba tag generation is not done.
                 */
                pkt->ref_tag = cpu_to_le32((uint32_t)
                    (0xffffffff & scsi_get_lba(cmd)));

                if (!qla2x00_hba_err_chk_enabled(sp))
                        break;

                pkt->ref_tag_mask[0] = 0xff;
                pkt->ref_tag_mask[1] = 0xff;
                pkt->ref_tag_mask[2] = 0xff;
                pkt->ref_tag_mask[3] = 0xff;
                break;

        /*
         * For TYPE 2 protection: 16 bit GUARD + 32 bit REF tag has to
         * match LBA in CDB + N
         */
        case SCSI_PROT_DIF_TYPE2:
                pkt->app_tag = cpu_to_le16(0);
                pkt->app_tag_mask[0] = 0x0;
                pkt->app_tag_mask[1] = 0x0;

                pkt->ref_tag = cpu_to_le32((uint32_t)
                    (0xffffffff & scsi_get_lba(cmd)));

                if (!qla2x00_hba_err_chk_enabled(sp))
                        break;

                /* enable ALL bytes of the ref tag */
                pkt->ref_tag_mask[0] = 0xff;
                pkt->ref_tag_mask[1] = 0xff;
                pkt->ref_tag_mask[2] = 0xff;
                pkt->ref_tag_mask[3] = 0xff;
                break;

        /* For Type 3 protection: 16 bit GUARD only */
        case SCSI_PROT_DIF_TYPE3:
                pkt->ref_tag_mask[0] = pkt->ref_tag_mask[1] =
                        pkt->ref_tag_mask[2] = pkt->ref_tag_mask[3] = 0x00;
                break;

        /*
         * For Type 1 protection: 16 bit GUARD tag, 32 bit REF tag, and
         * 16 bit app tag.
         */
        case SCSI_PROT_DIF_TYPE1:
                pkt->ref_tag = cpu_to_le32((uint32_t)
                    (0xffffffff & scsi_get_lba(cmd)));
                pkt->app_tag = cpu_to_le16(0);
                pkt->app_tag_mask[0] = 0x0;
                pkt->app_tag_mask[1] = 0x0;

                if (!qla2x00_hba_err_chk_enabled(sp))
                        break;

                /* enable ALL bytes of the ref tag */
                pkt->ref_tag_mask[0] = 0xff;
                pkt->ref_tag_mask[1] = 0xff;
                pkt->ref_tag_mask[2] = 0xff;
                pkt->ref_tag_mask[3] = 0xff;
                break;
        }
}

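/*
 * Comment added for clarity: qla24xx_get_one_block_sg() hands out the data
 * scatter-gather list in chunks of at most one protection interval (blk_sz).
 * It returns 1 while data remains, filling sgx->dma_addr and sgx->dma_len
 * with the next chunk and setting *partial when the chunk ends short of a
 * full interval; it returns 0 once all bytes have been consumed.
 */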
int
qla24xx_get_one_block_sg(uint32_t blk_sz, struct qla2_sgx *sgx,
        uint32_t *partial)
{
        struct scatterlist *sg;
        uint32_t cumulative_partial, sg_len;
        dma_addr_t sg_dma_addr;

        if (sgx->num_bytes == sgx->tot_bytes)
                return 0;

        sg = sgx->cur_sg;
        cumulative_partial = sgx->tot_partial;

        sg_dma_addr = sg_dma_address(sg);
        sg_len = sg_dma_len(sg);

        sgx->dma_addr = sg_dma_addr + sgx->bytes_consumed;

        if ((cumulative_partial + (sg_len - sgx->bytes_consumed)) >= blk_sz) {
                sgx->dma_len = (blk_sz - cumulative_partial);
                sgx->tot_partial = 0;
                sgx->num_bytes += blk_sz;
                *partial = 0;
        } else {
                sgx->dma_len = sg_len - sgx->bytes_consumed;
                sgx->tot_partial += sgx->dma_len;
                *partial = 1;
        }

        sgx->bytes_consumed += sgx->dma_len;

        if (sg_len == sgx->bytes_consumed) {
                sg = sg_next(sg);
                sgx->num_sg++;
                sgx->cur_sg = sg;
                sgx->bytes_consumed = 0;
        }

        return 1;
}

int
qla24xx_walk_and_build_sglist_no_difb(struct qla_hw_data *ha, srb_t *sp,
        uint32_t *dsd, uint16_t tot_dsds, struct qla_tc_param *tc)
{
        void *next_dsd;
        uint8_t avail_dsds = 0;
        uint32_t dsd_list_len;
        struct dsd_dma *dsd_ptr;
        struct scatterlist *sg_prot;
        uint32_t *cur_dsd = dsd;
        uint16_t        used_dsds = tot_dsds;
        uint32_t        prot_int; /* protection interval */
        uint32_t        partial;
        struct qla2_sgx sgx;
        dma_addr_t      sle_dma;
        uint32_t        sle_dma_len, tot_prot_dma_len = 0;
        struct scsi_cmnd *cmd;

        memset(&sgx, 0, sizeof(struct qla2_sgx));
        if (sp) {
                cmd = GET_CMD_SP(sp);
                prot_int = cmd->device->sector_size;

                sgx.tot_bytes = scsi_bufflen(cmd);
                sgx.cur_sg = scsi_sglist(cmd);
                sgx.sp = sp;

                sg_prot = scsi_prot_sglist(cmd);
        } else if (tc) {
                prot_int      = tc->blk_sz;
                sgx.tot_bytes = tc->bufflen;
                sgx.cur_sg    = tc->sg;
                sg_prot       = tc->prot_sg;
        } else {
                BUG();
                return 1;
        }

        while (qla24xx_get_one_block_sg(prot_int, &sgx, &partial)) {

                sle_dma = sgx.dma_addr;
                sle_dma_len = sgx.dma_len;
alloc_and_fill:
                /* Allocate additional continuation packets? */
                if (avail_dsds == 0) {
                        avail_dsds = (used_dsds > QLA_DSDS_PER_IOCB) ?
                                        QLA_DSDS_PER_IOCB : used_dsds;
                        dsd_list_len = (avail_dsds + 1) * 12;
                        used_dsds -= avail_dsds;

                        /* allocate tracking DS */
                        dsd_ptr = kzalloc(sizeof(struct dsd_dma), GFP_ATOMIC);
                        if (!dsd_ptr)
                                return 1;

                        /* allocate new list */
                        dsd_ptr->dsd_addr = next_dsd =
                            dma_pool_alloc(ha->dl_dma_pool, GFP_ATOMIC,
                                &dsd_ptr->dsd_list_dma);

                        if (!next_dsd) {
                                /*
                                 * Need to cleanup only this dsd_ptr, rest
                                 * will be done by sp_free_dma()
                                 */
                                kfree(dsd_ptr);
                                return 1;
                        }

                        if (sp) {
                                list_add_tail(&dsd_ptr->list,
                                    &((struct crc_context *)
                                            sp->u.scmd.ctx)->dsd_list);

                                sp->flags |= SRB_CRC_CTX_DSD_VALID;
                        } else {
                                list_add_tail(&dsd_ptr->list,
                                    &(tc->ctx->dsd_list));
                                *tc->ctx_dsd_alloced = 1;
                        }

                        /* add new list to cmd iocb or last list */
                        *cur_dsd++ = cpu_to_le32(LSD(dsd_ptr->dsd_list_dma));
                        *cur_dsd++ = cpu_to_le32(MSD(dsd_ptr->dsd_list_dma));
                        *cur_dsd++ = dsd_list_len;
                        cur_dsd = (uint32_t *)next_dsd;
                }
                *cur_dsd++ = cpu_to_le32(LSD(sle_dma));
                *cur_dsd++ = cpu_to_le32(MSD(sle_dma));
                *cur_dsd++ = cpu_to_le32(sle_dma_len);
                avail_dsds--;

                if (partial == 0) {
                        /* Got a full protection interval */
                        sle_dma = sg_dma_address(sg_prot) + tot_prot_dma_len;
                        sle_dma_len = 8;

                        tot_prot_dma_len += sle_dma_len;
                        if (tot_prot_dma_len == sg_dma_len(sg_prot)) {
                                tot_prot_dma_len = 0;
                                sg_prot = sg_next(sg_prot);
                        }

                        partial = 1; /* So as to not re-enter this block */
                        goto alloc_and_fill;
                }
        }
        /* Null termination */
        *cur_dsd++ = 0;
        *cur_dsd++ = 0;
        *cur_dsd++ = 0;
        return 0;
}

int
qla24xx_walk_and_build_sglist(struct qla_hw_data *ha, srb_t *sp, uint32_t *dsd,
        uint16_t tot_dsds, struct qla_tc_param *tc)
{
        void *next_dsd;
        uint8_t avail_dsds = 0;
        uint32_t dsd_list_len;
        struct dsd_dma *dsd_ptr;
        struct scatterlist *sg, *sgl;
        uint32_t *cur_dsd = dsd;
        int     i;
        uint16_t        used_dsds = tot_dsds;
        struct scsi_cmnd *cmd;

        if (sp) {
                cmd = GET_CMD_SP(sp);
                sgl = scsi_sglist(cmd);
        } else if (tc) {
                sgl = tc->sg;
        } else {
                BUG();
                return 1;
        }

        for_each_sg(sgl, sg, tot_dsds, i) {
                dma_addr_t      sle_dma;

                /* Allocate additional continuation packets? */
                if (avail_dsds == 0) {
                        avail_dsds = (used_dsds > QLA_DSDS_PER_IOCB) ?
                                        QLA_DSDS_PER_IOCB : used_dsds;
                        dsd_list_len = (avail_dsds + 1) * 12;
                        used_dsds -= avail_dsds;

                        /* allocate tracking DS */
                        dsd_ptr = kzalloc(sizeof(struct dsd_dma), GFP_ATOMIC);
                        if (!dsd_ptr)
                                return 1;

                        /* allocate new list */
                        dsd_ptr->dsd_addr = next_dsd =
                            dma_pool_alloc(ha->dl_dma_pool, GFP_ATOMIC,
                                &dsd_ptr->dsd_list_dma);

                        if (!next_dsd) {
                                /*
                                 * Need to cleanup only this dsd_ptr, rest
                                 * will be done by sp_free_dma()
                                 */
                                kfree(dsd_ptr);
                                return 1;
                        }

                        if (sp) {
                                list_add_tail(&dsd_ptr->list,
                                    &((struct crc_context *)
                                            sp->u.scmd.ctx)->dsd_list);

                                sp->flags |= SRB_CRC_CTX_DSD_VALID;
                        } else {
                                list_add_tail(&dsd_ptr->list,
                                    &(tc->ctx->dsd_list));
                                *tc->ctx_dsd_alloced = 1;
                        }

                        /* add new list to cmd iocb or last list */
                        *cur_dsd++ = cpu_to_le32(LSD(dsd_ptr->dsd_list_dma));
                        *cur_dsd++ = cpu_to_le32(MSD(dsd_ptr->dsd_list_dma));
                        *cur_dsd++ = dsd_list_len;
                        cur_dsd = (uint32_t *)next_dsd;
                }
                sle_dma = sg_dma_address(sg);

                *cur_dsd++ = cpu_to_le32(LSD(sle_dma));
                *cur_dsd++ = cpu_to_le32(MSD(sle_dma));
                *cur_dsd++ = cpu_to_le32(sg_dma_len(sg));
                avail_dsds--;
        }
        /* Null termination */
        *cur_dsd++ = 0;
        *cur_dsd++ = 0;
        *cur_dsd++ = 0;
        return 0;
}

int
qla24xx_walk_and_build_prot_sglist(struct qla_hw_data *ha, srb_t *sp,
        uint32_t *dsd, uint16_t tot_dsds, struct qla_tc_param *tc)
{
        void *next_dsd;
        uint8_t avail_dsds = 0;
        uint32_t dsd_list_len;
        struct dsd_dma *dsd_ptr;
        struct scatterlist *sg, *sgl;
        int     i;
        struct scsi_cmnd *cmd;
        uint32_t *cur_dsd = dsd;
        uint16_t used_dsds = tot_dsds;
        struct scsi_qla_host *vha;

        if (sp) {
                cmd = GET_CMD_SP(sp);
                sgl = scsi_prot_sglist(cmd);
                vha = sp->vha;
        } else if (tc) {
                vha = tc->vha;
                sgl = tc->prot_sg;
        } else {
                BUG();
                return 1;
        }

        ql_dbg(ql_dbg_tgt, vha, 0xe021,
                "%s: enter\n", __func__);

        for_each_sg(sgl, sg, tot_dsds, i) {
                dma_addr_t      sle_dma;

                /* Allocate additional continuation packets? */
                if (avail_dsds == 0) {
                        avail_dsds = (used_dsds > QLA_DSDS_PER_IOCB) ?
                                                QLA_DSDS_PER_IOCB : used_dsds;
                        dsd_list_len = (avail_dsds + 1) * 12;
                        used_dsds -= avail_dsds;

                        /* allocate tracking DS */
                        dsd_ptr = kzalloc(sizeof(struct dsd_dma), GFP_ATOMIC);
                        if (!dsd_ptr)
                                return 1;

                        /* allocate new list */
                        dsd_ptr->dsd_addr = next_dsd =
                            dma_pool_alloc(ha->dl_dma_pool, GFP_ATOMIC,
                                &dsd_ptr->dsd_list_dma);

                        if (!next_dsd) {
                                /*
                                 * Need to cleanup only this dsd_ptr, rest
                                 * will be done by sp_free_dma()
                                 */
                                kfree(dsd_ptr);
                                return 1;
                        }

                        if (sp) {
                                list_add_tail(&dsd_ptr->list,
                                    &((struct crc_context *)
                                            sp->u.scmd.ctx)->dsd_list);

                                sp->flags |= SRB_CRC_CTX_DSD_VALID;
                        } else {
                                list_add_tail(&dsd_ptr->list,
                                    &(tc->ctx->dsd_list));
                                *tc->ctx_dsd_alloced = 1;
                        }

                        /* add new list to cmd iocb or last list */
                        *cur_dsd++ = cpu_to_le32(LSD(dsd_ptr->dsd_list_dma));
                        *cur_dsd++ = cpu_to_le32(MSD(dsd_ptr->dsd_list_dma));
                        *cur_dsd++ = dsd_list_len;
                        cur_dsd = (uint32_t *)next_dsd;
                }
                sle_dma = sg_dma_address(sg);

                *cur_dsd++ = cpu_to_le32(LSD(sle_dma));
                *cur_dsd++ = cpu_to_le32(MSD(sle_dma));
                *cur_dsd++ = cpu_to_le32(sg_dma_len(sg));

                avail_dsds--;
        }
        /* Null termination */
        *cur_dsd++ = 0;
        *cur_dsd++ = 0;
        *cur_dsd++ = 0;
        return 0;
}

/**
 * qla24xx_build_scsi_crc_2_iocbs() - Build IOCB command utilizing Command
 *                                                      Type CRC_2 IOCB types.
 *
 * @sp: SRB command to process
 * @cmd_pkt: Command Type CRC_2 IOCB
 * @tot_dsds: Total number of segments to transfer
 * @tot_prot_dsds: Total number of segments carrying protection data
 * @fw_prot_opts: Protection options to be passed to the firmware
 */
inline int
qla24xx_build_scsi_crc_2_iocbs(srb_t *sp, struct cmd_type_crc_2 *cmd_pkt,
    uint16_t tot_dsds, uint16_t tot_prot_dsds, uint16_t fw_prot_opts)
{
        uint32_t                *cur_dsd, *fcp_dl;
        scsi_qla_host_t         *vha;
        struct scsi_cmnd        *cmd;
        uint32_t                total_bytes = 0;
        uint32_t                data_bytes;
        uint32_t                dif_bytes;
        uint8_t                 bundling = 1;
        uint16_t                blk_size;
        uint8_t                 *clr_ptr;
        struct crc_context      *crc_ctx_pkt = NULL;
        struct qla_hw_data      *ha;
        uint8_t                 additional_fcpcdb_len;
        uint16_t                fcp_cmnd_len;
        struct fcp_cmnd         *fcp_cmnd;
        dma_addr_t              crc_ctx_dma;

        cmd = GET_CMD_SP(sp);

        /* Update entry type to indicate Command Type CRC_2 IOCB */
        *((uint32_t *)(&cmd_pkt->entry_type)) = cpu_to_le32(COMMAND_TYPE_CRC_2);

        vha = sp->vha;
        ha = vha->hw;

        /* No data transfer */
        data_bytes = scsi_bufflen(cmd);
        if (!data_bytes || cmd->sc_data_direction == DMA_NONE) {
                cmd_pkt->byte_count = cpu_to_le32(0);
                return QLA_SUCCESS;
        }

        cmd_pkt->vp_index = sp->vha->vp_idx;

        /* Set transfer direction */
        if (cmd->sc_data_direction == DMA_TO_DEVICE) {
                cmd_pkt->control_flags =
                    cpu_to_le16(CF_WRITE_DATA);
        } else if (cmd->sc_data_direction == DMA_FROM_DEVICE) {
                cmd_pkt->control_flags =
                    cpu_to_le16(CF_READ_DATA);
        }

        if ((scsi_get_prot_op(cmd) == SCSI_PROT_READ_INSERT) ||
            (scsi_get_prot_op(cmd) == SCSI_PROT_WRITE_STRIP) ||
            (scsi_get_prot_op(cmd) == SCSI_PROT_READ_STRIP) ||
            (scsi_get_prot_op(cmd) == SCSI_PROT_WRITE_INSERT))
                bundling = 0;

        /* Allocate CRC context from global pool */
        crc_ctx_pkt = sp->u.scmd.ctx =
            dma_pool_alloc(ha->dl_dma_pool, GFP_ATOMIC, &crc_ctx_dma);

        if (!crc_ctx_pkt)
                goto crc_queuing_error;

        /* Zero out CTX area. */
        clr_ptr = (uint8_t *)crc_ctx_pkt;
        memset(clr_ptr, 0, sizeof(*crc_ctx_pkt));

        crc_ctx_pkt->crc_ctx_dma = crc_ctx_dma;

        sp->flags |= SRB_CRC_CTX_DMA_VALID;

        /* Set handle */
        crc_ctx_pkt->handle = cmd_pkt->handle;

        INIT_LIST_HEAD(&crc_ctx_pkt->dsd_list);

        qla24xx_set_t10dif_tags(sp, (struct fw_dif_context *)
            &crc_ctx_pkt->ref_tag, tot_prot_dsds);

        cmd_pkt->crc_context_address[0] = cpu_to_le32(LSD(crc_ctx_dma));
        cmd_pkt->crc_context_address[1] = cpu_to_le32(MSD(crc_ctx_dma));
        cmd_pkt->crc_context_len = CRC_CONTEXT_LEN_FW;

        /* Determine SCSI command length -- align to 4 byte boundary */
        if (cmd->cmd_len > 16) {
                additional_fcpcdb_len = cmd->cmd_len - 16;
                if ((cmd->cmd_len % 4) != 0) {
                        /* SCSI cmd > 16 bytes must be multiple of 4 */
                        goto crc_queuing_error;
                }
                fcp_cmnd_len = 12 + cmd->cmd_len + 4;
        } else {
                additional_fcpcdb_len = 0;
                fcp_cmnd_len = 12 + 16 + 4;
        }
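
        /*
         * Comment added for clarity: the FCP_CMND payload is 8 bytes of LUN
         * plus 4 bytes of task management/attribute fields (12 total), then
         * the CDB (16 bytes, or more when extended), then a 4-byte FCP_DL
         * count, e.g. 12 + 16 + 4 = 32 bytes for a standard CDB.
         */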

        fcp_cmnd = &crc_ctx_pkt->fcp_cmnd;

        fcp_cmnd->additional_cdb_len = additional_fcpcdb_len;
        if (cmd->sc_data_direction == DMA_TO_DEVICE)
                fcp_cmnd->additional_cdb_len |= 1;
        else if (cmd->sc_data_direction == DMA_FROM_DEVICE)
                fcp_cmnd->additional_cdb_len |= 2;

        int_to_scsilun(cmd->device->lun, &fcp_cmnd->lun);
        memcpy(fcp_cmnd->cdb, cmd->cmnd, cmd->cmd_len);
        cmd_pkt->fcp_cmnd_dseg_len = cpu_to_le16(fcp_cmnd_len);
        cmd_pkt->fcp_cmnd_dseg_address[0] = cpu_to_le32(
            LSD(crc_ctx_dma + CRC_CONTEXT_FCPCMND_OFF));
        cmd_pkt->fcp_cmnd_dseg_address[1] = cpu_to_le32(
            MSD(crc_ctx_dma + CRC_CONTEXT_FCPCMND_OFF));
        fcp_cmnd->task_management = 0;
        fcp_cmnd->task_attribute = TSK_SIMPLE;

        cmd_pkt->fcp_rsp_dseg_len = 0; /* Let response come in status iocb */

        /* Compute dif len and adjust data len to include protection */
        dif_bytes = 0;
        blk_size = cmd->device->sector_size;
        dif_bytes = (data_bytes / blk_size) * 8;
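
        /*
         * Worked example (illustrative): each protection interval carries an
         * 8-byte DIF tuple, so a 4096-byte transfer on a 512-byte-sector
         * device adds (4096 / 512) * 8 = 64 bytes of protection data.
         */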

        switch (scsi_get_prot_op(GET_CMD_SP(sp))) {
        case SCSI_PROT_READ_INSERT:
        case SCSI_PROT_WRITE_STRIP:
                total_bytes = data_bytes;
                data_bytes += dif_bytes;
                break;

        case SCSI_PROT_READ_STRIP:
        case SCSI_PROT_WRITE_INSERT:
        case SCSI_PROT_READ_PASS:
        case SCSI_PROT_WRITE_PASS:
                total_bytes = data_bytes + dif_bytes;
                break;
        default:
                BUG();
        }

        if (!qla2x00_hba_err_chk_enabled(sp))
                fw_prot_opts |= 0x10; /* Disable Guard tag checking */
        /* HBA error checking enabled */
        else if (IS_PI_UNINIT_CAPABLE(ha)) {
                if ((scsi_get_prot_type(GET_CMD_SP(sp)) == SCSI_PROT_DIF_TYPE1)
                    || (scsi_get_prot_type(GET_CMD_SP(sp)) ==
                        SCSI_PROT_DIF_TYPE2))
                        fw_prot_opts |= BIT_10;
                else if (scsi_get_prot_type(GET_CMD_SP(sp)) ==
                    SCSI_PROT_DIF_TYPE3)
                        fw_prot_opts |= BIT_11;
        }

        if (!bundling) {
                cur_dsd = (uint32_t *) &crc_ctx_pkt->u.nobundling.data_address;
        } else {
                /*
                 * Configure Bundling if we need to fetch interleaving
                 * protection PCI accesses
                 */
                fw_prot_opts |= PO_ENABLE_DIF_BUNDLING;
                crc_ctx_pkt->u.bundling.dif_byte_count = cpu_to_le32(dif_bytes);
                crc_ctx_pkt->u.bundling.dseg_count = cpu_to_le16(tot_dsds -
                                                        tot_prot_dsds);
                cur_dsd = (uint32_t *) &crc_ctx_pkt->u.bundling.data_address;
        }

        /* Finish the common fields of CRC pkt */
        crc_ctx_pkt->blk_size = cpu_to_le16(blk_size);
        crc_ctx_pkt->prot_opts = cpu_to_le16(fw_prot_opts);
        crc_ctx_pkt->byte_count = cpu_to_le32(data_bytes);
        crc_ctx_pkt->guard_seed = cpu_to_le16(0);
        /* Fibre channel byte count */
        cmd_pkt->byte_count = cpu_to_le32(total_bytes);
        fcp_dl = (uint32_t *)(crc_ctx_pkt->fcp_cmnd.cdb + 16 +
            additional_fcpcdb_len);
        *fcp_dl = htonl(total_bytes);

        if (!data_bytes || cmd->sc_data_direction == DMA_NONE) {
                cmd_pkt->byte_count = cpu_to_le32(0);
                return QLA_SUCCESS;
        }

        /* Walks data segments */
        cmd_pkt->control_flags |= cpu_to_le16(CF_DATA_SEG_DESCR_ENABLE);

        if (!bundling && tot_prot_dsds) {
                if (qla24xx_walk_and_build_sglist_no_difb(ha, sp,
                        cur_dsd, tot_dsds, NULL))
                        goto crc_queuing_error;
        } else if (qla24xx_walk_and_build_sglist(ha, sp, cur_dsd,
                        (tot_dsds - tot_prot_dsds), NULL))
                goto crc_queuing_error;

        if (bundling && tot_prot_dsds) {
                /* Walks dif segments */
                cmd_pkt->control_flags |= cpu_to_le16(CF_DIF_SEG_DESCR_ENABLE);
                cur_dsd = (uint32_t *) &crc_ctx_pkt->u.bundling.dif_address;
                if (qla24xx_walk_and_build_prot_sglist(ha, sp, cur_dsd,
                                tot_prot_dsds, NULL))
                        goto crc_queuing_error;
        }
        return QLA_SUCCESS;

crc_queuing_error:
        /* Cleanup will be performed by the caller */

        return QLA_FUNCTION_FAILED;
}

/**
 * qla24xx_start_scsi() - Send a SCSI command to the ISP
 * @sp: command to send to the ISP
 *
 * Returns non-zero if a failure occurred, else zero.
 */
int
qla24xx_start_scsi(srb_t *sp)
{
        int             nseg;
        unsigned long   flags;
        uint32_t        *clr_ptr;
        uint32_t        index;
        uint32_t        handle;
        struct cmd_type_7 *cmd_pkt;
        uint16_t        cnt;
        uint16_t        req_cnt;
        uint16_t        tot_dsds;
        struct req_que *req = NULL;
        struct rsp_que *rsp = NULL;
        struct scsi_cmnd *cmd = GET_CMD_SP(sp);
        struct scsi_qla_host *vha = sp->vha;
        struct qla_hw_data *ha = vha->hw;

        /* Setup device pointers. */
        req = vha->req;
        rsp = req->rsp;

        /* So we know we haven't pci_map'ed anything yet */
        tot_dsds = 0;

        /* Send marker if required */
        if (vha->marker_needed != 0) {
                if (qla2x00_marker(vha, req, rsp, 0, 0, MK_SYNC_ALL) !=
                    QLA_SUCCESS)
                        return QLA_FUNCTION_FAILED;
                vha->marker_needed = 0;
        }

        /* Acquire ring specific lock */
        spin_lock_irqsave(&ha->hardware_lock, flags);

        /* Check for room in outstanding command list. */
        handle = req->current_outstanding_cmd;
        for (index = 1; index < req->num_outstanding_cmds; index++) {
                handle++;
                if (handle == req->num_outstanding_cmds)
                        handle = 1;
                if (!req->outstanding_cmds[handle])
                        break;
        }
        if (index == req->num_outstanding_cmds)
                goto queuing_error;

        /* Map the sg table so we have an accurate count of sg entries needed */
        if (scsi_sg_count(cmd)) {
                nseg = dma_map_sg(&ha->pdev->dev, scsi_sglist(cmd),
                    scsi_sg_count(cmd), cmd->sc_data_direction);
                if (unlikely(!nseg))
                        goto queuing_error;
        } else
                nseg = 0;

        tot_dsds = nseg;
        req_cnt = qla24xx_calc_iocbs(vha, tot_dsds);
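        /*
         * Comment added for clarity: on shadow-register capable adapters the
         * firmware DMAs the request-queue out pointer into host memory, so
         * *req->out_ptr can be read in place of a (slower) MMIO register
         * read when re-checking for ring space.
         */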
1463        if (req->cnt < (req_cnt + 2)) {
1464                cnt = IS_SHADOW_REG_CAPABLE(ha) ? *req->out_ptr :
1465                    RD_REG_DWORD_RELAXED(req->req_q_out);
1466                if (req->ring_index < cnt)
1467                        req->cnt = cnt - req->ring_index;
1468                else
1469                        req->cnt = req->length -
1470                                (req->ring_index - cnt);
1471                if (req->cnt < (req_cnt + 2))
1472                        goto queuing_error;
1473        }
1474
1475        /* Build command packet. */
1476        req->current_outstanding_cmd = handle;
1477        req->outstanding_cmds[handle] = sp;
1478        sp->handle = handle;
1479        cmd->host_scribble = (unsigned char *)(unsigned long)handle;
1480        req->cnt -= req_cnt;
1481
1482        cmd_pkt = (struct cmd_type_7 *)req->ring_ptr;
1483        cmd_pkt->handle = MAKE_HANDLE(req->id, handle);
1484
1485        /* Zero out remaining portion of packet. */
1486        /*    tagged queuing modifier -- default is TSK_SIMPLE (0). */
1487        clr_ptr = (uint32_t *)cmd_pkt + 2;
1488        memset(clr_ptr, 0, REQUEST_ENTRY_SIZE - 8);
1489        cmd_pkt->dseg_count = cpu_to_le16(tot_dsds);
1490
1491        /* Set NPORT-ID and LUN number */
1492        cmd_pkt->nport_handle = cpu_to_le16(sp->fcport->loop_id);
1493        cmd_pkt->port_id[0] = sp->fcport->d_id.b.al_pa;
1494        cmd_pkt->port_id[1] = sp->fcport->d_id.b.area;
1495        cmd_pkt->port_id[2] = sp->fcport->d_id.b.domain;
1496        cmd_pkt->vp_index = sp->vha->vp_idx;
1497
1498        int_to_scsilun(cmd->device->lun, &cmd_pkt->lun);
1499        host_to_fcp_swap((uint8_t *)&cmd_pkt->lun, sizeof(cmd_pkt->lun));
1500
1501        cmd_pkt->task = TSK_SIMPLE;
1502
1503        /* Load SCSI command packet. */
1504        memcpy(cmd_pkt->fcp_cdb, cmd->cmnd, cmd->cmd_len);
1505        host_to_fcp_swap(cmd_pkt->fcp_cdb, sizeof(cmd_pkt->fcp_cdb));
1506
1507        cmd_pkt->byte_count = cpu_to_le32((uint32_t)scsi_bufflen(cmd));
1508
1509        /* Build IOCB segments */
1510        qla24xx_build_scsi_iocbs(sp, cmd_pkt, tot_dsds, req);
1511
1512        /* Set total data segment count. */
1513        cmd_pkt->entry_count = (uint8_t)req_cnt;
1514        wmb();
1515        /* Adjust ring index. */
1516        req->ring_index++;
1517        if (req->ring_index == req->length) {
1518                req->ring_index = 0;
1519                req->ring_ptr = req->ring;
1520        } else
1521                req->ring_ptr++;
1522
1523        sp->flags |= SRB_DMA_VALID;
1524
1525        /* Set chip new ring index. */
1526        WRT_REG_DWORD(req->req_q_in, req->ring_index);
1527        RD_REG_DWORD_RELAXED(&ha->iobase->isp24.hccr);
1528
1529        /* Manage unprocessed RIO/ZIO commands in response queue. */
1530        if (vha->flags.process_response_queue &&
1531            rsp->ring_ptr->signature != RESPONSE_PROCESSED)
1532                qla24xx_process_response_queue(vha, rsp);
1533
1534        spin_unlock_irqrestore(&ha->hardware_lock, flags);
1535        return QLA_SUCCESS;
1536
1537queuing_error:
1538        if (tot_dsds)
1539                scsi_dma_unmap(cmd);
1540
1541        spin_unlock_irqrestore(&ha->hardware_lock, flags);
1542
1543        return QLA_FUNCTION_FAILED;
1544}
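
/*
 * Editor's illustration (a sketch, not driver code): the free-entry
 * arithmetic used by qla24xx_start_scsi() above.  'in' is the driver's
 * producer index (req->ring_index), 'out' is the consumer index read
 * back from the chip or its shadow, and 'length' is the ring size in
 * entries.  The "req_cnt + 2" test above keeps slack so the ring never
 * becomes completely full, since in == out would then be ambiguous
 * with completely empty.
 */
static inline uint16_t example_ring_free_entries(uint16_t in, uint16_t out,
    uint16_t length)
{
        if (in < out)
                return out - in;        /* free gap runs up to the consumer */
        return length - (in - out);     /* free span wraps past the ring end */
}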
1545
1546/**
1547 * qla24xx_dif_start_scsi() - Send a SCSI command to the ISP
1548 * @sp: command to send to the ISP
1549 *
1550 * Returns non-zero if a failure occurred, else zero.
1551 */
1552int
1553qla24xx_dif_start_scsi(srb_t *sp)
1554{
1555        int                     nseg;
1556        unsigned long           flags;
1557        uint32_t                *clr_ptr;
1558        uint32_t                index;
1559        uint32_t                handle;
1560        uint16_t                cnt;
1561        uint16_t                req_cnt = 0;
1562        uint16_t                tot_dsds;
1563        uint16_t                tot_prot_dsds;
1564        uint16_t                fw_prot_opts = 0;
1565        struct req_que          *req = NULL;
1566        struct rsp_que          *rsp = NULL;
1567        struct scsi_cmnd        *cmd = GET_CMD_SP(sp);
1568        struct scsi_qla_host    *vha = sp->vha;
1569        struct qla_hw_data      *ha = vha->hw;
1570        struct cmd_type_crc_2   *cmd_pkt;
1571        uint32_t                status = 0;
1572
1573#define QDSS_GOT_Q_SPACE        BIT_0
1574
1575        /* Only process protection or >16 cdb in this routine */
1576        if (scsi_get_prot_op(cmd) == SCSI_PROT_NORMAL) {
1577                if (cmd->cmd_len <= 16)
1578                        return qla24xx_start_scsi(sp);
1579        }
1580
1581        /* Setup device pointers. */
1582        req = vha->req;
1583        rsp = req->rsp;
1584
1585        /* So we know we haven't pci_map'ed anything yet */
1586        tot_dsds = 0;
1587
1588        /* Send marker if required */
1589        if (vha->marker_needed != 0) {
1590                if (qla2x00_marker(vha, req, rsp, 0, 0, MK_SYNC_ALL) !=
1591                    QLA_SUCCESS)
1592                        return QLA_FUNCTION_FAILED;
1593                vha->marker_needed = 0;
1594        }
1595
1596        /* Acquire ring specific lock */
1597        spin_lock_irqsave(&ha->hardware_lock, flags);
1598
1599        /* Check for room in outstanding command list. */
1600        handle = req->current_outstanding_cmd;
1601        for (index = 1; index < req->num_outstanding_cmds; index++) {
1602                handle++;
1603                if (handle == req->num_outstanding_cmds)
1604                        handle = 1;
1605                if (!req->outstanding_cmds[handle])
1606                        break;
1607        }
1608
1609        if (index == req->num_outstanding_cmds)
1610                goto queuing_error;
1611
1612        /* Compute number of required data segments */
1613        /* Map the sg table so we have an accurate count of sg entries needed */
1614        if (scsi_sg_count(cmd)) {
1615                nseg = dma_map_sg(&ha->pdev->dev, scsi_sglist(cmd),
1616                    scsi_sg_count(cmd), cmd->sc_data_direction);
1617                if (unlikely(!nseg))
1618                        goto queuing_error;
1619                else
1620                        sp->flags |= SRB_DMA_VALID;
1621
1622                if ((scsi_get_prot_op(cmd) == SCSI_PROT_READ_INSERT) ||
1623                    (scsi_get_prot_op(cmd) == SCSI_PROT_WRITE_STRIP)) {
1624                        struct qla2_sgx sgx;
1625                        uint32_t        partial;
1626
1627                        memset(&sgx, 0, sizeof(struct qla2_sgx));
1628                        sgx.tot_bytes = scsi_bufflen(cmd);
1629                        sgx.cur_sg = scsi_sglist(cmd);
1630                        sgx.sp = sp;
1631
1632                        nseg = 0;
1633                        while (qla24xx_get_one_block_sg(
1634                            cmd->device->sector_size, &sgx, &partial))
1635                                nseg++;
1636                }
1637        } else
1638                nseg = 0;
1639
1640        /* number of required data segments */
1641        tot_dsds = nseg;
1642
1643        /* Compute number of required protection segments */
1644        if (qla24xx_configure_prot_mode(sp, &fw_prot_opts)) {
1645                nseg = dma_map_sg(&ha->pdev->dev, scsi_prot_sglist(cmd),
1646                    scsi_prot_sg_count(cmd), cmd->sc_data_direction);
1647                if (unlikely(!nseg))
1648                        goto queuing_error;
1649                else
1650                        sp->flags |= SRB_CRC_PROT_DMA_VALID;
1651
1652                if ((scsi_get_prot_op(cmd) == SCSI_PROT_READ_INSERT) ||
1653                    (scsi_get_prot_op(cmd) == SCSI_PROT_WRITE_STRIP)) {
1654                        nseg = scsi_bufflen(cmd) / cmd->device->sector_size;
1655                }
1656        } else {
1657                nseg = 0;
1658        }
1659
1660        req_cnt = 1;
1661        /* Total Data and protection sg segment(s) */
1662        tot_prot_dsds = nseg;
1663        tot_dsds += nseg;
1664        if (req->cnt < (req_cnt + 2)) {
1665                cnt = IS_SHADOW_REG_CAPABLE(ha) ? *req->out_ptr :
1666                    RD_REG_DWORD_RELAXED(req->req_q_out);
1667                if (req->ring_index < cnt)
1668                        req->cnt = cnt - req->ring_index;
1669                else
1670                        req->cnt = req->length -
1671                                (req->ring_index - cnt);
1672                if (req->cnt < (req_cnt + 2))
1673                        goto queuing_error;
1674        }
1675
1676        status |= QDSS_GOT_Q_SPACE;
1677
1678        /* Build header part of command packet (excluding the OPCODE). */
1679        req->current_outstanding_cmd = handle;
1680        req->outstanding_cmds[handle] = sp;
1681        sp->handle = handle;
1682        cmd->host_scribble = (unsigned char *)(unsigned long)handle;
1683        req->cnt -= req_cnt;
1684
1685        /* Fill-in common area */
1686        cmd_pkt = (struct cmd_type_crc_2 *)req->ring_ptr;
1687        cmd_pkt->handle = MAKE_HANDLE(req->id, handle);
1688
1689        clr_ptr = (uint32_t *)cmd_pkt + 2;
1690        memset(clr_ptr, 0, REQUEST_ENTRY_SIZE - 8);
1691
1692        /* Set NPORT-ID and LUN number */
1693        cmd_pkt->nport_handle = cpu_to_le16(sp->fcport->loop_id);
1694        cmd_pkt->port_id[0] = sp->fcport->d_id.b.al_pa;
1695        cmd_pkt->port_id[1] = sp->fcport->d_id.b.area;
1696        cmd_pkt->port_id[2] = sp->fcport->d_id.b.domain;
1697
1698        int_to_scsilun(cmd->device->lun, &cmd_pkt->lun);
1699        host_to_fcp_swap((uint8_t *)&cmd_pkt->lun, sizeof(cmd_pkt->lun));
1700
1701        /* Total Data and protection segment(s) */
1702        cmd_pkt->dseg_count = cpu_to_le16(tot_dsds);
1703
1704        /* Build IOCB segments and adjust for data protection segments */
1705        if (qla24xx_build_scsi_crc_2_iocbs(sp, (struct cmd_type_crc_2 *)
1706            req->ring_ptr, tot_dsds, tot_prot_dsds, fw_prot_opts) !=
1707                QLA_SUCCESS)
1708                goto queuing_error;
1709
1710        cmd_pkt->entry_count = (uint8_t)req_cnt;
1711        /* Specify response queue number where completion should happen */
1712        cmd_pkt->entry_status = (uint8_t) rsp->id;
1713        cmd_pkt->timeout = cpu_to_le16(0);
1714        wmb();
1715
1716        /* Adjust ring index. */
1717        req->ring_index++;
1718        if (req->ring_index == req->length) {
1719                req->ring_index = 0;
1720                req->ring_ptr = req->ring;
1721        } else
1722                req->ring_ptr++;
1723
1724        /* Set chip new ring index. */
1725        WRT_REG_DWORD(req->req_q_in, req->ring_index);
1726        RD_REG_DWORD_RELAXED(&ha->iobase->isp24.hccr);
1727
1728        /* Manage unprocessed RIO/ZIO commands in response queue. */
1729        if (vha->flags.process_response_queue &&
1730            rsp->ring_ptr->signature != RESPONSE_PROCESSED)
1731                qla24xx_process_response_queue(vha, rsp);
1732
1733        spin_unlock_irqrestore(&ha->hardware_lock, flags);
1734
1735        return QLA_SUCCESS;
1736
1737queuing_error:
1738        if (status & QDSS_GOT_Q_SPACE) {
1739                req->outstanding_cmds[handle] = NULL;
1740                req->cnt += req_cnt;
1741        }
1742        /* Cleanup will be performed by the caller (queuecommand) */
1743
1744        spin_unlock_irqrestore(&ha->hardware_lock, flags);
1745        return QLA_FUNCTION_FAILED;
1746}
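
/*
 * Editor's note (illustrative sketch, not driver code): for
 * SCSI_PROT_READ_INSERT and SCSI_PROT_WRITE_STRIP the HBA inserts or
 * strips the protection data itself, so qla24xx_dif_start_scsi() above
 * must present the buffer to the firmware in logical-block-sized
 * pieces -- one DSD per block -- rather than per scatter-gather
 * element.  The per-block walk of the sg list reduces to:
 */
static inline unsigned int example_dif_block_segments(unsigned int bufflen,
    unsigned int sector_size)
{
        /* e.g. a 64 KiB transfer of 512-byte blocks needs 65536/512 = 128 DSDs */
        return bufflen / sector_size;
}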
1747
1748/**
1749 * qla2xxx_start_scsi_mq() - Send a SCSI command to the ISP
1750 * @sp: command to send to the ISP
1751 *
1752 * Returns non-zero if a failure occurred, else zero.
1753 */
1754static int
1755qla2xxx_start_scsi_mq(srb_t *sp)
1756{
1757        int             nseg;
1758        unsigned long   flags;
1759        uint32_t        *clr_ptr;
1760        uint32_t        index;
1761        uint32_t        handle;
1762        struct cmd_type_7 *cmd_pkt;
1763        uint16_t        cnt;
1764        uint16_t        req_cnt;
1765        uint16_t        tot_dsds;
1766        struct req_que *req = NULL;
1767        struct rsp_que *rsp = NULL;
1768        struct scsi_cmnd *cmd = GET_CMD_SP(sp);
1769        struct scsi_qla_host *vha = sp->fcport->vha;
1770        struct qla_hw_data *ha = vha->hw;
1771        struct qla_qpair *qpair = sp->qpair;
1772
1773        /* Acquire qpair specific lock */
1774        spin_lock_irqsave(&qpair->qp_lock, flags);
1775
1776        /* Setup qpair pointers */
1777        rsp = qpair->rsp;
1778        req = qpair->req;
1779
1780        /* So we know we haven't pci_map'ed anything yet */
1781        tot_dsds = 0;
1782
1783        /* Send marker if required */
1784        if (vha->marker_needed != 0) {
1785                if (__qla2x00_marker(vha, req, rsp, 0, 0, MK_SYNC_ALL) !=
1786                    QLA_SUCCESS) {
1787                        spin_unlock_irqrestore(&qpair->qp_lock, flags);
1788                        return QLA_FUNCTION_FAILED;
1789                }
1790                vha->marker_needed = 0;
1791        }
1792
1793        /* Check for room in outstanding command list. */
1794        handle = req->current_outstanding_cmd;
1795        for (index = 1; index < req->num_outstanding_cmds; index++) {
1796                handle++;
1797                if (handle == req->num_outstanding_cmds)
1798                        handle = 1;
1799                if (!req->outstanding_cmds[handle])
1800                        break;
1801        }
1802        if (index == req->num_outstanding_cmds)
1803                goto queuing_error;
1804
1805        /* Map the sg table so we have an accurate count of sg entries needed */
1806        if (scsi_sg_count(cmd)) {
1807                nseg = dma_map_sg(&ha->pdev->dev, scsi_sglist(cmd),
1808                    scsi_sg_count(cmd), cmd->sc_data_direction);
1809                if (unlikely(!nseg))
1810                        goto queuing_error;
1811        } else
1812                nseg = 0;
1813
1814        tot_dsds = nseg;
1815        req_cnt = qla24xx_calc_iocbs(vha, tot_dsds);
1816        if (req->cnt < (req_cnt + 2)) {
1817                cnt = IS_SHADOW_REG_CAPABLE(ha) ? *req->out_ptr :
1818                    RD_REG_DWORD_RELAXED(req->req_q_out);
1819                if (req->ring_index < cnt)
1820                        req->cnt = cnt - req->ring_index;
1821                else
1822                        req->cnt = req->length -
1823                                (req->ring_index - cnt);
1824                if (req->cnt < (req_cnt + 2))
1825                        goto queuing_error;
1826        }
1827
1828        /* Build command packet. */
1829        req->current_outstanding_cmd = handle;
1830        req->outstanding_cmds[handle] = sp;
1831        sp->handle = handle;
1832        cmd->host_scribble = (unsigned char *)(unsigned long)handle;
1833        req->cnt -= req_cnt;
1834
1835        cmd_pkt = (struct cmd_type_7 *)req->ring_ptr;
1836        cmd_pkt->handle = MAKE_HANDLE(req->id, handle);
1837
1838        /* Zero out remaining portion of packet. */
1839        /*    tagged queuing modifier -- default is TSK_SIMPLE (0). */
1840        clr_ptr = (uint32_t *)cmd_pkt + 2;
1841        memset(clr_ptr, 0, REQUEST_ENTRY_SIZE - 8);
1842        cmd_pkt->dseg_count = cpu_to_le16(tot_dsds);
1843
1844        /* Set NPORT-ID and LUN number */
1845        cmd_pkt->nport_handle = cpu_to_le16(sp->fcport->loop_id);
1846        cmd_pkt->port_id[0] = sp->fcport->d_id.b.al_pa;
1847        cmd_pkt->port_id[1] = sp->fcport->d_id.b.area;
1848        cmd_pkt->port_id[2] = sp->fcport->d_id.b.domain;
1849        cmd_pkt->vp_index = sp->fcport->vha->vp_idx;
1850
1851        int_to_scsilun(cmd->device->lun, &cmd_pkt->lun);
1852        host_to_fcp_swap((uint8_t *)&cmd_pkt->lun, sizeof(cmd_pkt->lun));
1853
1854        cmd_pkt->task = TSK_SIMPLE;
1855
1856        /* Load SCSI command packet. */
1857        memcpy(cmd_pkt->fcp_cdb, cmd->cmnd, cmd->cmd_len);
1858        host_to_fcp_swap(cmd_pkt->fcp_cdb, sizeof(cmd_pkt->fcp_cdb));
1859
1860        cmd_pkt->byte_count = cpu_to_le32((uint32_t)scsi_bufflen(cmd));
1861
1862        /* Build IOCB segments */
1863        qla24xx_build_scsi_iocbs(sp, cmd_pkt, tot_dsds, req);
1864
1865        /* Set total data segment count. */
1866        cmd_pkt->entry_count = (uint8_t)req_cnt;
1867        wmb();
1868        /* Adjust ring index. */
1869        req->ring_index++;
1870        if (req->ring_index == req->length) {
1871                req->ring_index = 0;
1872                req->ring_ptr = req->ring;
1873        } else
1874                req->ring_ptr++;
1875
1876        sp->flags |= SRB_DMA_VALID;
1877
1878        /* Set chip new ring index. */
1879        WRT_REG_DWORD(req->req_q_in, req->ring_index);
1880
1881        /* Manage unprocessed RIO/ZIO commands in response queue. */
1882        if (vha->flags.process_response_queue &&
1883            rsp->ring_ptr->signature != RESPONSE_PROCESSED)
1884                qla24xx_process_response_queue(vha, rsp);
1885
1886        spin_unlock_irqrestore(&qpair->qp_lock, flags);
1887        return QLA_SUCCESS;
1888
1889queuing_error:
1890        if (tot_dsds)
1891                scsi_dma_unmap(cmd);
1892
1893        spin_unlock_irqrestore(&qpair->qp_lock, flags);
1894
1895        return QLA_FUNCTION_FAILED;
1896}
1897
1898
1899/**
1900 * qla2xxx_dif_start_scsi_mq() - Send a SCSI command to the ISP
1901 * @sp: command to send to the ISP
1902 *
1903 * Returns non-zero if a failure occurred, else zero.
1904 */
1905int
1906qla2xxx_dif_start_scsi_mq(srb_t *sp)
1907{
1908        int                     nseg;
1909        unsigned long           flags;
1910        uint32_t                *clr_ptr;
1911        uint32_t                index;
1912        uint32_t                handle;
1913        uint16_t                cnt;
1914        uint16_t                req_cnt = 0;
1915        uint16_t                tot_dsds;
1916        uint16_t                tot_prot_dsds;
1917        uint16_t                fw_prot_opts = 0;
1918        struct req_que          *req = NULL;
1919        struct rsp_que          *rsp = NULL;
1920        struct scsi_cmnd        *cmd = GET_CMD_SP(sp);
1921        struct scsi_qla_host    *vha = sp->fcport->vha;
1922        struct qla_hw_data      *ha = vha->hw;
1923        struct cmd_type_crc_2   *cmd_pkt;
1924        uint32_t                status = 0;
1925        struct qla_qpair        *qpair = sp->qpair;
1926
1927#define QDSS_GOT_Q_SPACE        BIT_0
1928
1929        /* Check for host side state */
1930        if (!qpair->online) {
1931                cmd->result = DID_NO_CONNECT << 16;
1932                return QLA_INTERFACE_ERROR;
1933        }
1934
1935        if (!qpair->difdix_supported &&
1936            scsi_get_prot_op(cmd) != SCSI_PROT_NORMAL) {
1937                cmd->result = DID_NO_CONNECT << 16;
1938                return QLA_INTERFACE_ERROR;
1939        }
1940
1941        /* Only process protection or >16 cdb in this routine */
1942        if (scsi_get_prot_op(cmd) == SCSI_PROT_NORMAL) {
1943                if (cmd->cmd_len <= 16)
1944                        return qla2xxx_start_scsi_mq(sp);
1945        }
1946
1947        spin_lock_irqsave(&qpair->qp_lock, flags);
1948
1949        /* Setup qpair pointers */
1950        rsp = qpair->rsp;
1951        req = qpair->req;
1952
1953        /* So we know we haven't pci_map'ed anything yet */
1954        tot_dsds = 0;
1955
1956        /* Send marker if required */
1957        if (vha->marker_needed != 0) {
1958                if (__qla2x00_marker(vha, req, rsp, 0, 0, MK_SYNC_ALL) !=
1959                    QLA_SUCCESS) {
1960                        spin_unlock_irqrestore(&qpair->qp_lock, flags);
1961                        return QLA_FUNCTION_FAILED;
1962                }
1963                vha->marker_needed = 0;
1964        }
1965
1966        /* Check for room in outstanding command list. */
1967        handle = req->current_outstanding_cmd;
1968        for (index = 1; index < req->num_outstanding_cmds; index++) {
1969                handle++;
1970                if (handle == req->num_outstanding_cmds)
1971                        handle = 1;
1972                if (!req->outstanding_cmds[handle])
1973                        break;
1974        }
1975
1976        if (index == req->num_outstanding_cmds)
1977                goto queuing_error;
1978
1979        /* Compute number of required data segments */
1980        /* Map the sg table so we have an accurate count of sg entries needed */
1981        if (scsi_sg_count(cmd)) {
1982                nseg = dma_map_sg(&ha->pdev->dev, scsi_sglist(cmd),
1983                    scsi_sg_count(cmd), cmd->sc_data_direction);
1984                if (unlikely(!nseg))
1985                        goto queuing_error;
1986                else
1987                        sp->flags |= SRB_DMA_VALID;
1988
1989                if ((scsi_get_prot_op(cmd) == SCSI_PROT_READ_INSERT) ||
1990                    (scsi_get_prot_op(cmd) == SCSI_PROT_WRITE_STRIP)) {
1991                        struct qla2_sgx sgx;
1992                        uint32_t        partial;
1993
1994                        memset(&sgx, 0, sizeof(struct qla2_sgx));
1995                        sgx.tot_bytes = scsi_bufflen(cmd);
1996                        sgx.cur_sg = scsi_sglist(cmd);
1997                        sgx.sp = sp;
1998
1999                        nseg = 0;
2000                        while (qla24xx_get_one_block_sg(
2001                            cmd->device->sector_size, &sgx, &partial))
2002                                nseg++;
2003                }
2004        } else
2005                nseg = 0;
2006
2007        /* number of required data segments */
2008        tot_dsds = nseg;
2009
2010        /* Compute number of required protection segments */
2011        if (qla24xx_configure_prot_mode(sp, &fw_prot_opts)) {
2012                nseg = dma_map_sg(&ha->pdev->dev, scsi_prot_sglist(cmd),
2013                    scsi_prot_sg_count(cmd), cmd->sc_data_direction);
2014                if (unlikely(!nseg))
2015                        goto queuing_error;
2016                else
2017                        sp->flags |= SRB_CRC_PROT_DMA_VALID;
2018
2019                if ((scsi_get_prot_op(cmd) == SCSI_PROT_READ_INSERT) ||
2020                    (scsi_get_prot_op(cmd) == SCSI_PROT_WRITE_STRIP)) {
2021                        nseg = scsi_bufflen(cmd) / cmd->device->sector_size;
2022                }
2023        } else {
2024                nseg = 0;
2025        }
2026
2027        req_cnt = 1;
2028        /* Total Data and protection sg segment(s) */
2029        tot_prot_dsds = nseg;
2030        tot_dsds += nseg;
2031        if (req->cnt < (req_cnt + 2)) {
2032                cnt = IS_SHADOW_REG_CAPABLE(ha) ? *req->out_ptr :
2033                    RD_REG_DWORD_RELAXED(req->req_q_out);
2034                if (req->ring_index < cnt)
2035                        req->cnt = cnt - req->ring_index;
2036                else
2037                        req->cnt = req->length -
2038                                (req->ring_index - cnt);
2039                if (req->cnt < (req_cnt + 2))
2040                        goto queuing_error;
2041        }
2042
2043        status |= QDSS_GOT_Q_SPACE;
2044
2045        /* Build header part of command packet (excluding the OPCODE). */
2046        req->current_outstanding_cmd = handle;
2047        req->outstanding_cmds[handle] = sp;
2048        sp->handle = handle;
2049        cmd->host_scribble = (unsigned char *)(unsigned long)handle;
2050        req->cnt -= req_cnt;
2051
2052        /* Fill-in common area */
2053        cmd_pkt = (struct cmd_type_crc_2 *)req->ring_ptr;
2054        cmd_pkt->handle = MAKE_HANDLE(req->id, handle);
2055
2056        clr_ptr = (uint32_t *)cmd_pkt + 2;
2057        memset(clr_ptr, 0, REQUEST_ENTRY_SIZE - 8);
2058
2059        /* Set NPORT-ID and LUN number */
2060        cmd_pkt->nport_handle = cpu_to_le16(sp->fcport->loop_id);
2061        cmd_pkt->port_id[0] = sp->fcport->d_id.b.al_pa;
2062        cmd_pkt->port_id[1] = sp->fcport->d_id.b.area;
2063        cmd_pkt->port_id[2] = sp->fcport->d_id.b.domain;
2064
2065        int_to_scsilun(cmd->device->lun, &cmd_pkt->lun);
2066        host_to_fcp_swap((uint8_t *)&cmd_pkt->lun, sizeof(cmd_pkt->lun));
2067
2068        /* Total Data and protection segment(s) */
2069        cmd_pkt->dseg_count = cpu_to_le16(tot_dsds);
2070
2071        /* Build IOCB segments and adjust for data protection segments */
2072        if (qla24xx_build_scsi_crc_2_iocbs(sp, (struct cmd_type_crc_2 *)
2073            req->ring_ptr, tot_dsds, tot_prot_dsds, fw_prot_opts) !=
2074                QLA_SUCCESS)
2075                goto queuing_error;
2076
2077        cmd_pkt->entry_count = (uint8_t)req_cnt;
2078        cmd_pkt->timeout = cpu_to_le16(0);
2079        wmb();
2080
2081        /* Adjust ring index. */
2082        req->ring_index++;
2083        if (req->ring_index == req->length) {
2084                req->ring_index = 0;
2085                req->ring_ptr = req->ring;
2086        } else
2087                req->ring_ptr++;
2088
2089        /* Set chip new ring index. */
2090        WRT_REG_DWORD(req->req_q_in, req->ring_index);
2091
2092        /* Manage unprocessed RIO/ZIO commands in response queue. */
2093        if (vha->flags.process_response_queue &&
2094            rsp->ring_ptr->signature != RESPONSE_PROCESSED)
2095                qla24xx_process_response_queue(vha, rsp);
2096
2097        spin_unlock_irqrestore(&qpair->qp_lock, flags);
2098
2099        return QLA_SUCCESS;
2100
2101queuing_error:
2102        if (status & QDSS_GOT_Q_SPACE) {
2103                req->outstanding_cmds[handle] = NULL;
2104                req->cnt += req_cnt;
2105        }
2106        /* Cleanup will be performed by the caller (queuecommand) */
2107
2108        spin_unlock_irqrestore(&qpair->qp_lock, flags);
2109        return QLA_FUNCTION_FAILED;
2110}
2111
2112/* Generic Control-SRB manipulation functions. */
2113
2114/* The ring lock (hardware_lock, or a qpair's qp_lock) is assumed to be held. */
2115
2116void *
2117__qla2x00_alloc_iocbs(struct qla_qpair *qpair, srb_t *sp)
2118{
2119        scsi_qla_host_t *vha = qpair->vha;
2120        struct qla_hw_data *ha = vha->hw;
2121        struct req_que *req = qpair->req;
2122        device_reg_t *reg = ISP_QUE_REG(ha, req->id);
2123        uint32_t index, handle;
2124        request_t *pkt;
2125        uint16_t cnt, req_cnt;
2126
2127        pkt = NULL;
2128        req_cnt = 1;
2129        handle = 0;
2130
2131        if (!sp)
2132                goto skip_cmd_array;
2133
2134        /* Check for room in outstanding command list. */
2135        handle = req->current_outstanding_cmd;
2136        for (index = 1; index < req->num_outstanding_cmds; index++) {
2137                handle++;
2138                if (handle == req->num_outstanding_cmds)
2139                        handle = 1;
2140                if (!req->outstanding_cmds[handle])
2141                        break;
2142        }
2143        if (index == req->num_outstanding_cmds) {
2144                ql_log(ql_log_warn, vha, 0x700b,
2145                    "No room on outstanding cmd array.\n");
2146                goto queuing_error;
2147        }
2148
2149        /* Prep command array. */
2150        req->current_outstanding_cmd = handle;
2151        req->outstanding_cmds[handle] = sp;
2152        sp->handle = handle;
2153
2154        /* Adjust entry-counts as needed. */
2155        if (sp->type != SRB_SCSI_CMD)
2156                req_cnt = sp->iocbs;
2157
2158skip_cmd_array:
2159        /* Check for room on request queue. */
2160        if (req->cnt < req_cnt + 2) {
2161                if (qpair->use_shadow_reg)
2162                        cnt = *req->out_ptr;
2163                else if (ha->mqenable || IS_QLA83XX(ha) || IS_QLA27XX(ha))
2164                        cnt = RD_REG_DWORD(&reg->isp25mq.req_q_out);
2165                else if (IS_P3P_TYPE(ha))
2166                        cnt = RD_REG_DWORD(&reg->isp82.req_q_out);
2167                else if (IS_FWI2_CAPABLE(ha))
2168                        cnt = RD_REG_DWORD(&reg->isp24.req_q_out);
2169                else if (IS_QLAFX00(ha))
2170                        cnt = RD_REG_DWORD(&reg->ispfx00.req_q_out);
2171                else
2172                        cnt = qla2x00_debounce_register(
2173                            ISP_REQ_Q_OUT(ha, &reg->isp));
2174
2175                if (req->ring_index < cnt)
2176                        req->cnt = cnt - req->ring_index;
2177                else
2178                        req->cnt = req->length -
2179                            (req->ring_index - cnt);
2180        }
2181        if (req->cnt < req_cnt + 2)
2182                goto queuing_error;
2183
2184        /* Prep packet */
2185        req->cnt -= req_cnt;
2186        pkt = req->ring_ptr;
2187        memset(pkt, 0, REQUEST_ENTRY_SIZE);
2188        if (IS_QLAFX00(ha)) {
2189                WRT_REG_BYTE((void __iomem *)&pkt->entry_count, req_cnt);
2190                WRT_REG_WORD((void __iomem *)&pkt->handle, handle);
2191        } else {
2192                pkt->entry_count = req_cnt;
2193                pkt->handle = handle;
2194        }
2195        return pkt;
2196queuing_error:
2197        qpair->tgt_counters.num_alloc_iocb_failed++;
2198        return pkt;
2199}
2200
2201void *
2202qla2x00_alloc_iocbs_ready(struct qla_qpair *qpair, srb_t *sp)
2203{
2204        scsi_qla_host_t *vha = qpair->vha;
2205
2206        if (qla2x00_reset_active(vha))
2207                return NULL;
2208
2209        return __qla2x00_alloc_iocbs(qpair, sp);
2210}
2211
2212void *
2213qla2x00_alloc_iocbs(struct scsi_qla_host *vha, srb_t *sp)
2214{
2215        return __qla2x00_alloc_iocbs(vha->hw->base_qpair, sp);
2216}
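
/*
 * Editor's usage sketch (hypothetical caller, not driver code): the
 * usual shape of a control-SRB submission built on the allocators
 * above.  Assumes the caller already holds the ring lock and, as
 * elsewhere in the driver, uses qla2x00_start_iocbs() to advance the
 * in-pointer once the packet is filled in.
 */
static inline void example_send_ctrl_iocb(struct scsi_qla_host *vha, srb_t *sp)
{
        request_t *pkt;

        pkt = qla2x00_alloc_iocbs(vha, sp);     /* reserves slot and handle */
        if (!pkt)
                return;                         /* ring or cmd array full */

        /* ... fill in the type-specific IOCB fields here ... */

        wmb();                                  /* packet visible before doorbell */
        qla2x00_start_iocbs(vha, vha->req);
}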
2217
2218static void
2219qla24xx_prli_iocb(srb_t *sp, struct logio_entry_24xx *logio)
2220{
2221        struct srb_iocb *lio = &sp->u.iocb_cmd;
2222
2223        logio->entry_type = LOGINOUT_PORT_IOCB_TYPE;
2224        logio->control_flags = cpu_to_le16(LCF_COMMAND_PRLI);
2225        if (lio->u.logio.flags & SRB_LOGIN_NVME_PRLI)
2226                logio->control_flags |= cpu_to_le16(LCF_NVME_PRLI);
2227
2228        logio->nport_handle = cpu_to_le16(sp->fcport->loop_id);
2229        logio->port_id[0] = sp->fcport->d_id.b.al_pa;
2230        logio->port_id[1] = sp->fcport->d_id.b.area;
2231        logio->port_id[2] = sp->fcport->d_id.b.domain;
2232        logio->vp_index = sp->vha->vp_idx;
2233}
2234
2235static void
2236qla24xx_login_iocb(srb_t *sp, struct logio_entry_24xx *logio)
2237{
2238        struct srb_iocb *lio = &sp->u.iocb_cmd;
2239
2240        logio->entry_type = LOGINOUT_PORT_IOCB_TYPE;
2241        logio->control_flags = cpu_to_le16(LCF_COMMAND_PLOGI);
2242
2243        if (lio->u.logio.flags & SRB_LOGIN_COND_PLOGI)
2244                logio->control_flags |= cpu_to_le16(LCF_COND_PLOGI);
2245        if (lio->u.logio.flags & SRB_LOGIN_SKIP_PRLI)
2246                logio->control_flags |= cpu_to_le16(LCF_SKIP_PRLI);
2247        logio->nport_handle = cpu_to_le16(sp->fcport->loop_id);
2248        logio->port_id[0] = sp->fcport->d_id.b.al_pa;
2249        logio->port_id[1] = sp->fcport->d_id.b.area;
2250        logio->port_id[2] = sp->fcport->d_id.b.domain;
2251        logio->vp_index = sp->vha->vp_idx;
2252}
2253
2254static void
2255qla2x00_login_iocb(srb_t *sp, struct mbx_entry *mbx)
2256{
2257        struct qla_hw_data *ha = sp->vha->hw;
2258        struct srb_iocb *lio = &sp->u.iocb_cmd;
2259        uint16_t opts;
2260
2261        mbx->entry_type = MBX_IOCB_TYPE;
2262        SET_TARGET_ID(ha, mbx->loop_id, sp->fcport->loop_id);
2263        mbx->mb0 = cpu_to_le16(MBC_LOGIN_FABRIC_PORT);
2264        opts = lio->u.logio.flags & SRB_LOGIN_COND_PLOGI ? BIT_0 : 0;
2265        opts |= lio->u.logio.flags & SRB_LOGIN_SKIP_PRLI ? BIT_1 : 0;
2266        if (HAS_EXTENDED_IDS(ha)) {
2267                mbx->mb1 = cpu_to_le16(sp->fcport->loop_id);
2268                mbx->mb10 = cpu_to_le16(opts);
2269        } else {
2270                mbx->mb1 = cpu_to_le16((sp->fcport->loop_id << 8) | opts);
2271        }
2272        mbx->mb2 = cpu_to_le16(sp->fcport->d_id.b.domain);
2273        mbx->mb3 = cpu_to_le16(sp->fcport->d_id.b.area << 8 |
2274            sp->fcport->d_id.b.al_pa);
2275        mbx->mb9 = cpu_to_le16(sp->vha->vp_idx);
2276}
2277
2278static void
2279qla24xx_logout_iocb(srb_t *sp, struct logio_entry_24xx *logio)
2280{
2281        logio->entry_type = LOGINOUT_PORT_IOCB_TYPE;
2282        logio->control_flags =
2283            cpu_to_le16(LCF_COMMAND_LOGO|LCF_IMPL_LOGO);
2284        if (!sp->fcport->se_sess ||
2285            !sp->fcport->keep_nport_handle)
2286                logio->control_flags |= cpu_to_le16(LCF_FREE_NPORT);
2287        logio->nport_handle = cpu_to_le16(sp->fcport->loop_id);
2288        logio->port_id[0] = sp->fcport->d_id.b.al_pa;
2289        logio->port_id[1] = sp->fcport->d_id.b.area;
2290        logio->port_id[2] = sp->fcport->d_id.b.domain;
2291        logio->vp_index = sp->vha->vp_idx;
2292}
2293
2294static void
2295qla2x00_logout_iocb(srb_t *sp, struct mbx_entry *mbx)
2296{
2297        struct qla_hw_data *ha = sp->vha->hw;
2298
2299        mbx->entry_type = MBX_IOCB_TYPE;
2300        SET_TARGET_ID(ha, mbx->loop_id, sp->fcport->loop_id);
2301        mbx->mb0 = cpu_to_le16(MBC_LOGOUT_FABRIC_PORT);
2302        mbx->mb1 = HAS_EXTENDED_IDS(ha) ?
2303            cpu_to_le16(sp->fcport->loop_id):
2304            cpu_to_le16(sp->fcport->loop_id << 8);
2305        mbx->mb2 = cpu_to_le16(sp->fcport->d_id.b.domain);
2306        mbx->mb3 = cpu_to_le16(sp->fcport->d_id.b.area << 8 |
2307            sp->fcport->d_id.b.al_pa);
2308        mbx->mb9 = cpu_to_le16(sp->vha->vp_idx);
2309        /* Implicit: mbx->mb10 = 0. */
2310}
2311
2312static void
2313qla24xx_adisc_iocb(srb_t *sp, struct logio_entry_24xx *logio)
2314{
2315        logio->entry_type = LOGINOUT_PORT_IOCB_TYPE;
2316        logio->control_flags = cpu_to_le16(LCF_COMMAND_ADISC);
2317        logio->nport_handle = cpu_to_le16(sp->fcport->loop_id);
2318        logio->vp_index = sp->vha->vp_idx;
2319}
2320
2321static void
2322qla2x00_adisc_iocb(srb_t *sp, struct mbx_entry *mbx)
2323{
2324        struct qla_hw_data *ha = sp->vha->hw;
2325
2326        mbx->entry_type = MBX_IOCB_TYPE;
2327        SET_TARGET_ID(ha, mbx->loop_id, sp->fcport->loop_id);
2328        mbx->mb0 = cpu_to_le16(MBC_GET_PORT_DATABASE);
2329        if (HAS_EXTENDED_IDS(ha)) {
2330                mbx->mb1 = cpu_to_le16(sp->fcport->loop_id);
2331                mbx->mb10 = cpu_to_le16(BIT_0);
2332        } else {
2333                mbx->mb1 = cpu_to_le16((sp->fcport->loop_id << 8) | BIT_0);
2334        }
2335        mbx->mb2 = cpu_to_le16(MSW(ha->async_pd_dma));
2336        mbx->mb3 = cpu_to_le16(LSW(ha->async_pd_dma));
2337        mbx->mb6 = cpu_to_le16(MSW(MSD(ha->async_pd_dma)));
2338        mbx->mb7 = cpu_to_le16(LSW(MSD(ha->async_pd_dma)));
2339        mbx->mb9 = cpu_to_le16(sp->vha->vp_idx);
2340}
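
/*
 * Editor's illustration (a sketch, not driver code): how the 64-bit
 * async_pd_dma address is scattered across 16-bit mailbox registers
 * above.  LSD()/MSD() take the low/high 32 bits, LSW()/MSW() the
 * low/high 16 bits of those.
 */
static inline void example_split_dma_addr(uint64_t addr, uint16_t mb[4])
{
        mb[0] = addr & 0xffff;                  /* LSW(addr)      -> mb3 */
        mb[1] = (addr >> 16) & 0xffff;          /* MSW(addr)      -> mb2 */
        mb[2] = (addr >> 32) & 0xffff;          /* LSW(MSD(addr)) -> mb7 */
        mb[3] = (addr >> 48) & 0xffff;          /* MSW(MSD(addr)) -> mb6 */
}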
2341
2342static void
2343qla24xx_tm_iocb(srb_t *sp, struct tsk_mgmt_entry *tsk)
2344{
2345        uint32_t flags;
2346        uint64_t lun;
2347        struct fc_port *fcport = sp->fcport;
2348        scsi_qla_host_t *vha = fcport->vha;
2349        struct qla_hw_data *ha = vha->hw;
2350        struct srb_iocb *iocb = &sp->u.iocb_cmd;
2351        struct req_que *req = vha->req;
2352
2353        flags = iocb->u.tmf.flags;
2354        lun = iocb->u.tmf.lun;
2355
2356        tsk->entry_type = TSK_MGMT_IOCB_TYPE;
2357        tsk->entry_count = 1;
2358        tsk->handle = MAKE_HANDLE(req->id, tsk->handle);
2359        tsk->nport_handle = cpu_to_le16(fcport->loop_id);
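        /*
         * Editor's note: twice R_A_TOV -- r_a_tov appears to be held in
         * 100 ms units while the IOCB timeout field is in seconds.
         */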
2360        tsk->timeout = cpu_to_le16(ha->r_a_tov / 10 * 2);
2361        tsk->control_flags = cpu_to_le32(flags);
2362        tsk->port_id[0] = fcport->d_id.b.al_pa;
2363        tsk->port_id[1] = fcport->d_id.b.area;
2364        tsk->port_id[2] = fcport->d_id.b.domain;
2365        tsk->vp_index = fcport->vha->vp_idx;
2366
2367        if (flags == TCF_LUN_RESET) {
2368                int_to_scsilun(lun, &tsk->lun);
2369                host_to_fcp_swap((uint8_t *)&tsk->lun,
2370                        sizeof(tsk->lun));
2371        }
2372}
2373
2374static void
2375qla2x00_els_dcmd_sp_free(void *data)
2376{
2377        srb_t *sp = data;
2378        struct srb_iocb *elsio = &sp->u.iocb_cmd;
2379
2380        kfree(sp->fcport);
2381
2382        if (elsio->u.els_logo.els_logo_pyld)
2383                dma_free_coherent(&sp->vha->hw->pdev->dev, DMA_POOL_SIZE,
2384                    elsio->u.els_logo.els_logo_pyld,
2385                    elsio->u.els_logo.els_logo_pyld_dma);
2386
2387        del_timer(&elsio->timer);
2388        qla2x00_rel_sp(sp);
2389}
2390
2391static void
2392qla2x00_els_dcmd_iocb_timeout(void *data)
2393{
2394        srb_t *sp = data;
2395        fc_port_t *fcport = sp->fcport;
2396        struct scsi_qla_host *vha = sp->vha;
2397        struct srb_iocb *lio = &sp->u.iocb_cmd;
2398
2399        ql_dbg(ql_dbg_io, vha, 0x3069,
2400            "%s Timeout, hdl=%x, portid=%02x%02x%02x\n",
2401            sp->name, sp->handle, fcport->d_id.b.domain, fcport->d_id.b.area,
2402            fcport->d_id.b.al_pa);
2403
2404        complete(&lio->u.els_logo.comp);
2405}
2406
2407static void
2408qla2x00_els_dcmd_sp_done(void *ptr, int res)
2409{
2410        srb_t *sp = ptr;
2411        fc_port_t *fcport = sp->fcport;
2412        struct srb_iocb *lio = &sp->u.iocb_cmd;
2413        struct scsi_qla_host *vha = sp->vha;
2414
2415        ql_dbg(ql_dbg_io, vha, 0x3072,
2416            "%s hdl=%x, portid=%02x%02x%02x done\n",
2417            sp->name, sp->handle, fcport->d_id.b.domain,
2418            fcport->d_id.b.area, fcport->d_id.b.al_pa);
2419
2420        complete(&lio->u.els_logo.comp);
2421}
2422
2423int
2424qla24xx_els_dcmd_iocb(scsi_qla_host_t *vha, int els_opcode,
2425    port_id_t remote_did)
2426{
2427        srb_t *sp;
2428        fc_port_t *fcport = NULL;
2429        struct srb_iocb *elsio = NULL;
2430        struct qla_hw_data *ha = vha->hw;
2431        struct els_logo_payload logo_pyld;
2432        int rval = QLA_SUCCESS;
2433
2434        fcport = qla2x00_alloc_fcport(vha, GFP_KERNEL);
2435        if (!fcport) {
2436                ql_log(ql_log_info, vha, 0x70e5, "fcport allocation failed\n");
2437                return -ENOMEM;
2438        }
2439
2440        /* Alloc SRB structure */
2441        sp = qla2x00_get_sp(vha, fcport, GFP_KERNEL);
2442        if (!sp) {
2443                kfree(fcport);
2444                ql_log(ql_log_info, vha, 0x70e6,
2445                    "SRB allocation failed\n");
2446                return -ENOMEM;
2447        }
2448
2449        elsio = &sp->u.iocb_cmd;
2450        fcport->loop_id = 0xFFFF;
2451        fcport->d_id.b.domain = remote_did.b.domain;
2452        fcport->d_id.b.area = remote_did.b.area;
2453        fcport->d_id.b.al_pa = remote_did.b.al_pa;
2454
2455        ql_dbg(ql_dbg_io, vha, 0x3073, "Enter: portid=%02x%02x%02x\n",
2456            fcport->d_id.b.domain, fcport->d_id.b.area, fcport->d_id.b.al_pa);
2457
2458        sp->type = SRB_ELS_DCMD;
2459        sp->name = "ELS_DCMD";
2460        sp->fcport = fcport;
2461        qla2x00_init_timer(sp, ELS_DCMD_TIMEOUT);
2462        elsio->timeout = qla2x00_els_dcmd_iocb_timeout;
2463        sp->done = qla2x00_els_dcmd_sp_done;
2464        sp->free = qla2x00_els_dcmd_sp_free;
2465
2466        elsio->u.els_logo.els_logo_pyld = dma_alloc_coherent(&ha->pdev->dev,
2467                            DMA_POOL_SIZE, &elsio->u.els_logo.els_logo_pyld_dma,
2468                            GFP_KERNEL);
2469
2470        if (!elsio->u.els_logo.els_logo_pyld) {
2471                sp->free(sp);
2472                return QLA_FUNCTION_FAILED;
2473        }
2474
2475        memset(&logo_pyld, 0, sizeof(struct els_logo_payload));
2476
2477        elsio->u.els_logo.els_cmd = els_opcode;
2478        logo_pyld.opcode = els_opcode;
2479        logo_pyld.s_id[0] = vha->d_id.b.al_pa;
2480        logo_pyld.s_id[1] = vha->d_id.b.area;
2481        logo_pyld.s_id[2] = vha->d_id.b.domain;
2482        host_to_fcp_swap(logo_pyld.s_id, sizeof(uint32_t));
2483        memcpy(&logo_pyld.wwpn, vha->port_name, WWN_SIZE);
2484
2485        memcpy(elsio->u.els_logo.els_logo_pyld, &logo_pyld,
2486            sizeof(struct els_logo_payload));
2487
2488        rval = qla2x00_start_sp(sp);
2489        if (rval != QLA_SUCCESS) {
2490                sp->free(sp);
2491                return QLA_FUNCTION_FAILED;
2492        }
2493
2494        ql_dbg(ql_dbg_io, vha, 0x3074,
2495            "%s LOGO sent, hdl=%x, loopid=%x, portid=%02x%02x%02x.\n",
2496            sp->name, sp->handle, fcport->loop_id, fcport->d_id.b.domain,
2497            fcport->d_id.b.area, fcport->d_id.b.al_pa);
2498
2499        wait_for_completion(&elsio->u.els_logo.comp);
2500
2501        sp->free(sp);
2502        return rval;
2503}
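
/*
 * Editor's usage sketch (hypothetical, not driver code): the routine
 * above is synchronous -- the done and timeout callbacks both complete
 * the same completion -- so a caller can simply block on it.  The
 * destination ID below is illustrative.
 */
static inline void example_send_explicit_logo(scsi_qla_host_t *vha)
{
        port_id_t did;

        did.b.domain = 0x01;
        did.b.area = 0x02;
        did.b.al_pa = 0xef;

        /* Sleeps until the LOGO ELS completes or its timer fires. */
        qla24xx_els_dcmd_iocb(vha, ELS_DCMD_LOGO, did);
}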
2504
2505static void
2506qla24xx_els_logo_iocb(srb_t *sp, struct els_entry_24xx *els_iocb)
2507{
2508        scsi_qla_host_t *vha = sp->vha;
2509        struct srb_iocb *elsio = &sp->u.iocb_cmd;
2510        uint32_t        dsd_len = 24;
2511
2512        els_iocb->entry_type = ELS_IOCB_TYPE;
2513        els_iocb->entry_count = 1;
2514        els_iocb->sys_define = 0;
2515        els_iocb->entry_status = 0;
2516        els_iocb->handle = sp->handle;
2517        els_iocb->nport_handle = cpu_to_le16(sp->fcport->loop_id);
2518        els_iocb->tx_dsd_count = cpu_to_le16(1);
2519        els_iocb->vp_index = vha->vp_idx;
2520        els_iocb->sof_type = EST_SOFI3;
2521        els_iocb->rx_dsd_count = 0;
2522        els_iocb->opcode = elsio->u.els_logo.els_cmd;
2523
2524        els_iocb->port_id[0] = sp->fcport->d_id.b.al_pa;
2525        els_iocb->port_id[1] = sp->fcport->d_id.b.area;
2526        els_iocb->port_id[2] = sp->fcport->d_id.b.domain;
2527        els_iocb->s_id[0] = vha->d_id.b.al_pa;
2528        els_iocb->s_id[1] = vha->d_id.b.area;
2529        els_iocb->s_id[2] = vha->d_id.b.domain;
2530        els_iocb->control_flags = 0;
2531
2532        if (elsio->u.els_logo.els_cmd == ELS_DCMD_PLOGI) {
2533                els_iocb->tx_byte_count = cpu_to_le32(sizeof(struct els_plogi_payload));
2534                els_iocb->tx_address[0] =
2535                        cpu_to_le32(LSD(elsio->u.els_plogi.els_plogi_pyld_dma));
2536                els_iocb->tx_address[1] =
2537                        cpu_to_le32(MSD(elsio->u.els_plogi.els_plogi_pyld_dma));
2538                els_iocb->tx_len = cpu_to_le32(dsd_len);
2539
2540                els_iocb->rx_dsd_count = cpu_to_le16(1);
2541                els_iocb->rx_byte_count = cpu_to_le32(sizeof(struct els_plogi_payload));
2542                els_iocb->rx_address[0] =
2543                        cpu_to_le32(LSD(elsio->u.els_plogi.els_resp_pyld_dma));
2544                els_iocb->rx_address[1] =
2545                        cpu_to_le32(MSD(elsio->u.els_plogi.els_resp_pyld_dma));
2546                els_iocb->rx_len = cpu_to_le32(dsd_len);
2547                ql_dbg(ql_dbg_io + ql_dbg_buffer, vha, 0x3073,
2548                    "PLOGI ELS IOCB:\n");
2549                ql_dump_buffer(ql_log_info, vha, 0x0109,
2550                    (uint8_t *)els_iocb, 0x70);
2551        } else {
2552                els_iocb->tx_byte_count = cpu_to_le32(sizeof(struct els_logo_payload));
2553                els_iocb->tx_address[0] =
2554                    cpu_to_le32(LSD(elsio->u.els_logo.els_logo_pyld_dma));
2555                els_iocb->tx_address[1] =
2556                    cpu_to_le32(MSD(elsio->u.els_logo.els_logo_pyld_dma));
2557                els_iocb->tx_len = cpu_to_le32(sizeof(struct els_logo_payload));
2558
2559                els_iocb->rx_byte_count = 0;
2560                els_iocb->rx_address[0] = 0;
2561                els_iocb->rx_address[1] = 0;
2562                els_iocb->rx_len = 0;
2563        }
2564
2565        sp->vha->qla_stats.control_requests++;
2566}
2567
2568static void
2569qla2x00_els_dcmd2_sp_free(void *data)
2570{
2571        srb_t *sp = data;
2572        struct srb_iocb *elsio = &sp->u.iocb_cmd;
2573
2574        if (elsio->u.els_plogi.els_plogi_pyld)
2575                dma_free_coherent(&sp->vha->hw->pdev->dev, DMA_POOL_SIZE,
2576                    elsio->u.els_plogi.els_plogi_pyld,
2577                    elsio->u.els_plogi.els_plogi_pyld_dma);
2578
2579        if (elsio->u.els_plogi.els_resp_pyld)
2580                dma_free_coherent(&sp->vha->hw->pdev->dev, DMA_POOL_SIZE,
2581                    elsio->u.els_plogi.els_resp_pyld,
2582                    elsio->u.els_plogi.els_resp_pyld_dma);
2583
2584        del_timer(&elsio->timer);
2585        qla2x00_rel_sp(sp);
2586}
2587
2588static void
2589qla2x00_els_dcmd2_iocb_timeout(void *data)
2590{
2591        srb_t *sp = data;
2592        fc_port_t *fcport = sp->fcport;
2593        struct scsi_qla_host *vha = sp->vha;
2594        struct qla_hw_data *ha = vha->hw;
2595        struct srb_iocb *lio = &sp->u.iocb_cmd;
2596        unsigned long flags = 0;
2597        int res;
2598
2599        ql_dbg(ql_dbg_io + ql_dbg_disc, vha, 0x3069,
2600            "%s hdl=%x ELS Timeout, %8phC portid=%06x\n",
2601            sp->name, sp->handle, fcport->port_name, fcport->d_id.b24);
2602
2603        /* Abort the exchange */
2604        spin_lock_irqsave(&ha->hardware_lock, flags);
2605        res = ha->isp_ops->abort_command(sp);
2606        ql_dbg(ql_dbg_io, vha, 0x3070,
2607            "mbx abort_command %s\n",
2608            (res == QLA_SUCCESS) ? "successful" : "failed");
2609        spin_unlock_irqrestore(&ha->hardware_lock, flags);
2610
2611        complete(&lio->u.els_plogi.comp);
2612}
2613
2614static void
2615qla2x00_els_dcmd2_sp_done(void *ptr, int res)
2616{
2617        srb_t *sp = ptr;
2618        fc_port_t *fcport = sp->fcport;
2619        struct srb_iocb *lio = &sp->u.iocb_cmd;
2620        struct scsi_qla_host *vha = sp->vha;
2621
2622        ql_dbg(ql_dbg_io + ql_dbg_disc, vha, 0x3072,
2623            "%s ELS hdl=%x, portid=%06x done %8phC\n",
2624            sp->name, sp->handle, fcport->d_id.b24, fcport->port_name);
2625
2626        complete(&lio->u.els_plogi.comp);
2627}
2628
2629int
2630qla24xx_els_dcmd2_iocb(scsi_qla_host_t *vha, int els_opcode,
2631                       fc_port_t *fcport, port_id_t remote_did)
2632{
2633        srb_t *sp;
2634        struct srb_iocb *elsio = NULL;
2635        struct qla_hw_data *ha = vha->hw;
2636        int rval = QLA_SUCCESS;
2637        void    *ptr, *resp_ptr;
2638        dma_addr_t ptr_dma;
2639
2640        /* Alloc SRB structure */
2641        sp = qla2x00_get_sp(vha, fcport, GFP_KERNEL);
2642        if (!sp) {
2643                ql_log(ql_log_info, vha, 0x70e6,
2644                    "SRB allocation failed\n");
2645                return -ENOMEM;
2646        }
2647
2648        elsio = &sp->u.iocb_cmd;
2649        fcport->d_id.b.domain = remote_did.b.domain;
2650        fcport->d_id.b.area = remote_did.b.area;
2651        fcport->d_id.b.al_pa = remote_did.b.al_pa;
2652
2653        ql_dbg(ql_dbg_io, vha, 0x3073,
2654            "Enter: PLOGI portid=%06x\n", fcport->d_id.b24);
2655
2656        sp->type = SRB_ELS_DCMD;
2657        sp->name = "ELS_DCMD";
2658        sp->fcport = fcport;
2659        qla2x00_init_timer(sp, ELS_DCMD_TIMEOUT);
2660        elsio->timeout = qla2x00_els_dcmd2_iocb_timeout;
2661        sp->done = qla2x00_els_dcmd2_sp_done;
2662        sp->free = qla2x00_els_dcmd2_sp_free;
2663
2664        ptr = elsio->u.els_plogi.els_plogi_pyld =
2665            dma_alloc_coherent(&ha->pdev->dev, DMA_POOL_SIZE,
2666                &elsio->u.els_plogi.els_plogi_pyld_dma, GFP_KERNEL);
2667        ptr_dma = elsio->u.els_plogi.els_plogi_pyld_dma;
2668
2669        if (!elsio->u.els_plogi.els_plogi_pyld) {
2670                rval = QLA_FUNCTION_FAILED;
2671                goto out;
2672        }
2673
2674        resp_ptr = elsio->u.els_plogi.els_resp_pyld =
2675            dma_alloc_coherent(&ha->pdev->dev, DMA_POOL_SIZE,
2676                &elsio->u.els_plogi.els_resp_pyld_dma, GFP_KERNEL);
2677
2678        if (!elsio->u.els_plogi.els_resp_pyld) {
2679                rval = QLA_FUNCTION_FAILED;
2680                goto out;
2681        }
2682
2683        ql_dbg(ql_dbg_io, vha, 0x3073, "PLOGI pyld=%p resp=%p\n", ptr, resp_ptr);
2684
2685        memset(ptr, 0, sizeof(struct els_plogi_payload));
2686        memset(resp_ptr, 0, sizeof(struct els_plogi_payload));
2687        elsio->u.els_plogi.els_cmd = els_opcode;
2688        elsio->u.els_plogi.els_plogi_pyld->opcode = els_opcode;
2689        qla24xx_get_port_login_templ(vha, ptr_dma + 4,
2690                &elsio->u.els_plogi.els_plogi_pyld->data[0],
2691                sizeof(struct els_plogi_payload));
2692
2693        ql_dbg(ql_dbg_io + ql_dbg_buffer, vha, 0x3073, "PLOGI buffer:\n");
2694        ql_dump_buffer(ql_dbg_io + ql_dbg_buffer, vha, 0x0109,
2695            (uint8_t *)elsio->u.els_plogi.els_plogi_pyld, 0x70);
2696
2697        init_completion(&elsio->u.els_plogi.comp);
2698        rval = qla2x00_start_sp(sp);
2699        if (rval != QLA_SUCCESS) {
2700                rval = QLA_FUNCTION_FAILED;
2701                goto out;
2702        }
2703
2704        ql_dbg(ql_dbg_io, vha, 0x3074,
2705            "%s PLOGI sent, hdl=%x, loopid=%x, portid=%06x\n",
2706            sp->name, sp->handle, fcport->loop_id, fcport->d_id.b24);
2707
2708        wait_for_completion(&elsio->u.els_plogi.comp);
2709
2710        if (elsio->u.els_plogi.comp_status != CS_COMPLETE)
2711                rval = QLA_FUNCTION_FAILED;
2712
2713out:
2714        sp->free(sp);
2715        return rval;
2716}
2717
2718static void
2719qla24xx_els_iocb(srb_t *sp, struct els_entry_24xx *els_iocb)
2720{
2721        struct bsg_job *bsg_job = sp->u.bsg_job;
2722        struct fc_bsg_request *bsg_request = bsg_job->request;
2723
2724        els_iocb->entry_type = ELS_IOCB_TYPE;
2725        els_iocb->entry_count = 1;
2726        els_iocb->sys_define = 0;
2727        els_iocb->entry_status = 0;
2728        els_iocb->handle = sp->handle;
2729        els_iocb->nport_handle = cpu_to_le16(sp->fcport->loop_id);
2730        els_iocb->tx_dsd_count = cpu_to_le16(bsg_job->request_payload.sg_cnt);
2731        els_iocb->vp_index = sp->vha->vp_idx;
2732        els_iocb->sof_type = EST_SOFI3;
2733        els_iocb->rx_dsd_count = cpu_to_le16(bsg_job->reply_payload.sg_cnt);
2734
2735        els_iocb->opcode =
2736            sp->type == SRB_ELS_CMD_RPT ?
2737            bsg_request->rqst_data.r_els.els_code :
2738            bsg_request->rqst_data.h_els.command_code;
2739        els_iocb->port_id[0] = sp->fcport->d_id.b.al_pa;
2740        els_iocb->port_id[1] = sp->fcport->d_id.b.area;
2741        els_iocb->port_id[2] = sp->fcport->d_id.b.domain;
2742        els_iocb->control_flags = 0;
2743        els_iocb->rx_byte_count =
2744            cpu_to_le32(bsg_job->reply_payload.payload_len);
2745        els_iocb->tx_byte_count =
2746            cpu_to_le32(bsg_job->request_payload.payload_len);
2747
2748        els_iocb->tx_address[0] = cpu_to_le32(LSD(sg_dma_address
2749            (bsg_job->request_payload.sg_list)));
2750        els_iocb->tx_address[1] = cpu_to_le32(MSD(sg_dma_address
2751            (bsg_job->request_payload.sg_list)));
2752        els_iocb->tx_len = cpu_to_le32(sg_dma_len
2753            (bsg_job->request_payload.sg_list));
2754
2755        els_iocb->rx_address[0] = cpu_to_le32(LSD(sg_dma_address
2756            (bsg_job->reply_payload.sg_list)));
2757        els_iocb->rx_address[1] = cpu_to_le32(MSD(sg_dma_address
2758            (bsg_job->reply_payload.sg_list)));
2759        els_iocb->rx_len = cpu_to_le32(sg_dma_len
2760            (bsg_job->reply_payload.sg_list));
2761
2762        sp->vha->qla_stats.control_requests++;
2763}
2764
2765static void
2766qla2x00_ct_iocb(srb_t *sp, ms_iocb_entry_t *ct_iocb)
2767{
2768        uint16_t        avail_dsds;
2769        uint32_t        *cur_dsd;
2770        struct scatterlist *sg;
2771        int index;
2772        uint16_t tot_dsds;
2773        scsi_qla_host_t *vha = sp->vha;
2774        struct qla_hw_data *ha = vha->hw;
2775        struct bsg_job *bsg_job = sp->u.bsg_job;
2776        int loop_iteration = 0;
2777        int entry_count = 1;
2778
2779        memset(ct_iocb, 0, sizeof(ms_iocb_entry_t));
2780        ct_iocb->entry_type = CT_IOCB_TYPE;
2781        ct_iocb->entry_status = 0;
2782        ct_iocb->handle1 = sp->handle;
2783        SET_TARGET_ID(ha, ct_iocb->loop_id, sp->fcport->loop_id);
2784        ct_iocb->status = cpu_to_le16(0);
2785        ct_iocb->control_flags = cpu_to_le16(0);
2786        ct_iocb->timeout = 0;
2787        ct_iocb->cmd_dsd_count =
2788            cpu_to_le16(bsg_job->request_payload.sg_cnt);
2789        ct_iocb->total_dsd_count =
2790            cpu_to_le16(bsg_job->request_payload.sg_cnt + 1);
2791        ct_iocb->req_bytecount =
2792            cpu_to_le32(bsg_job->request_payload.payload_len);
2793        ct_iocb->rsp_bytecount =
2794            cpu_to_le32(bsg_job->reply_payload.payload_len);
2795
2796        ct_iocb->dseg_req_address[0] = cpu_to_le32(LSD(sg_dma_address
2797            (bsg_job->request_payload.sg_list)));
2798        ct_iocb->dseg_req_address[1] = cpu_to_le32(MSD(sg_dma_address
2799            (bsg_job->request_payload.sg_list)));
2800        ct_iocb->dseg_req_length = ct_iocb->req_bytecount;
2801
2802        ct_iocb->dseg_rsp_address[0] = cpu_to_le32(LSD(sg_dma_address
2803            (bsg_job->reply_payload.sg_list)));
2804        ct_iocb->dseg_rsp_address[1] = cpu_to_le32(MSD(sg_dma_address
2805            (bsg_job->reply_payload.sg_list)));
2806        ct_iocb->dseg_rsp_length = ct_iocb->rsp_bytecount;
2807
2808        avail_dsds = 1;
2809        cur_dsd = (uint32_t *)ct_iocb->dseg_rsp_address;
2810        index = 0;
2811        tot_dsds = bsg_job->reply_payload.sg_cnt;
2812
2813        for_each_sg(bsg_job->reply_payload.sg_list, sg, tot_dsds, index) {
2814                dma_addr_t       sle_dma;
2815                cont_a64_entry_t *cont_pkt;
2816
2817                /* Allocate additional continuation packets? */
2818                if (avail_dsds == 0) {
2819                        /*
2820                         * Five DSDs are available in the Cont.
2821                         * Type 1 IOCB.
2822                         */
2823                        cont_pkt = qla2x00_prep_cont_type1_iocb(vha,
2824                            vha->hw->req_q_map[0]);
2825                        cur_dsd = (uint32_t *) cont_pkt->dseg_0_address;
2826                        avail_dsds = 5;
2827                        entry_count++;
2828                }
2829
2830                sle_dma = sg_dma_address(sg);
2831                *cur_dsd++   = cpu_to_le32(LSD(sle_dma));
2832                *cur_dsd++   = cpu_to_le32(MSD(sle_dma));
2833                *cur_dsd++   = cpu_to_le32(sg_dma_len(sg));
2834                loop_iteration++;
2835                avail_dsds--;
2836        }
2837        ct_iocb->entry_count = entry_count;
2838
2839        sp->vha->qla_stats.control_requests++;
2840}
2841
2842static void
2843qla24xx_ct_iocb(srb_t *sp, struct ct_entry_24xx *ct_iocb)
2844{
2845        uint16_t        avail_dsds;
2846        uint32_t        *cur_dsd;
2847        struct scatterlist *sg;
2848        int index;
2849        uint16_t cmd_dsds, rsp_dsds;
2850        scsi_qla_host_t *vha = sp->vha;
2851        struct qla_hw_data *ha = vha->hw;
2852        struct bsg_job *bsg_job = sp->u.bsg_job;
2853        int entry_count = 1;
2854        cont_a64_entry_t *cont_pkt = NULL;
2855
2856        ct_iocb->entry_type = CT_IOCB_TYPE;
2857        ct_iocb->entry_status = 0;
2858        ct_iocb->sys_define = 0;
2859        ct_iocb->handle = sp->handle;
2860
2861        ct_iocb->nport_handle = cpu_to_le16(sp->fcport->loop_id);
2862        ct_iocb->vp_index = sp->vha->vp_idx;
2863        ct_iocb->comp_status = cpu_to_le16(0);
2864
2865        cmd_dsds = bsg_job->request_payload.sg_cnt;
2866        rsp_dsds = bsg_job->reply_payload.sg_cnt;
2867
2868        ct_iocb->cmd_dsd_count = cpu_to_le16(cmd_dsds);
2869        ct_iocb->timeout = 0;
2870        ct_iocb->rsp_dsd_count = cpu_to_le16(rsp_dsds);
2871        ct_iocb->cmd_byte_count =
2872            cpu_to_le32(bsg_job->request_payload.payload_len);
2873
2874        avail_dsds = 2;
2875        cur_dsd = (uint32_t *)ct_iocb->dseg_0_address;
2876        index = 0;
2877
2878        for_each_sg(bsg_job->request_payload.sg_list, sg, cmd_dsds, index) {
2879                dma_addr_t       sle_dma;
2880
2881                /* Allocate additional continuation packets? */
2882                if (avail_dsds == 0) {
2883                        /*
2884                         * Five DSDs are available in the Cont.
2885                         * Type 1 IOCB.
2886                         */
2887                        cont_pkt = qla2x00_prep_cont_type1_iocb(
2888                            vha, ha->req_q_map[0]);
2889                        cur_dsd = (uint32_t *) cont_pkt->dseg_0_address;
2890                        avail_dsds = 5;
2891                        entry_count++;
2892                }
2893
2894                sle_dma = sg_dma_address(sg);
2895                *cur_dsd++   = cpu_to_le32(LSD(sle_dma));
2896                *cur_dsd++   = cpu_to_le32(MSD(sle_dma));
2897                *cur_dsd++   = cpu_to_le32(sg_dma_len(sg));
2898                avail_dsds--;
2899        }
2900
2901        index = 0;
2902
2903        for_each_sg(bsg_job->reply_payload.sg_list, sg, rsp_dsds, index) {
2904                dma_addr_t       sle_dma;
2905
2906                /* Allocate additional continuation packets? */
2907                if (avail_dsds == 0) {
2908                        /*
2909                         * Five DSDs are available in the Cont.
2910                         * Type 1 IOCB.
2911                         */
2912                        cont_pkt = qla2x00_prep_cont_type1_iocb(vha,
2913                            ha->req_q_map[0]);
2914                        cur_dsd = (uint32_t *) cont_pkt->dseg_0_address;
2915                        avail_dsds = 5;
2916                        entry_count++;
2917                }
2918
2919                sle_dma = sg_dma_address(sg);
2920                *cur_dsd++   = cpu_to_le32(LSD(sle_dma));
2921                *cur_dsd++   = cpu_to_le32(MSD(sle_dma));
2922                *cur_dsd++   = cpu_to_le32(sg_dma_len(sg));
2923                avail_dsds--;
2924        }
2925        ct_iocb->entry_count = entry_count;
2926}
2927
2928/**
2929 * qla82xx_start_scsi() - Send a SCSI command to the ISP
2930 * @sp: command to send to the ISP
2931 *
2932 * Returns non-zero if a failure occurred, else zero.
2933 */
2934int
2935qla82xx_start_scsi(srb_t *sp)
2936{
2937        int             nseg;
2938        unsigned long   flags;
2939        struct scsi_cmnd *cmd;
2940        uint32_t        *clr_ptr;
2941        uint32_t        index;
2942        uint32_t        handle;
2943        uint16_t        cnt;
2944        uint16_t        req_cnt;
2945        uint16_t        tot_dsds;
2946        struct device_reg_82xx __iomem *reg;
2947        uint32_t dbval;
2948        uint32_t *fcp_dl;
2949        uint8_t additional_cdb_len;
2950        struct ct6_dsd *ctx;
2951        struct scsi_qla_host *vha = sp->vha;
2952        struct qla_hw_data *ha = vha->hw;
2953        struct req_que *req = NULL;
2954        struct rsp_que *rsp = NULL;
2955
2956        /* Setup device pointers. */
2957        reg = &ha->iobase->isp82;
2958        cmd = GET_CMD_SP(sp);
2959        req = vha->req;
2960        rsp = ha->rsp_q_map[0];
2961
2962        /* So we know we haven't dma_map'ed anything yet */
2963        tot_dsds = 0;
2964
2965        dbval = 0x04 | (ha->portnum << 5);
2966
2967        /* Send marker if required */
2968        if (vha->marker_needed != 0) {
2969                if (qla2x00_marker(vha, req,
2970                        rsp, 0, 0, MK_SYNC_ALL) != QLA_SUCCESS) {
2971                        ql_log(ql_log_warn, vha, 0x300c,
2972                            "qla2x00_marker failed for cmd=%p.\n", cmd);
2973                        return QLA_FUNCTION_FAILED;
2974                }
2975                vha->marker_needed = 0;
2976        }
2977
2978        /* Acquire ring specific lock */
2979        spin_lock_irqsave(&ha->hardware_lock, flags);
2980
2981        /* Check for room in outstanding command list. */
2982        handle = req->current_outstanding_cmd;
2983        for (index = 1; index < req->num_outstanding_cmds; index++) {
2984                handle++;
2985                if (handle == req->num_outstanding_cmds)
2986                        handle = 1;
2987                if (!req->outstanding_cmds[handle])
2988                        break;
2989        }
2990        if (index == req->num_outstanding_cmds)
2991                goto queuing_error;
2992
2993        /* Map the sg table so we have an accurate count of sg entries needed */
2994        if (scsi_sg_count(cmd)) {
2995                nseg = dma_map_sg(&ha->pdev->dev, scsi_sglist(cmd),
2996                    scsi_sg_count(cmd), cmd->sc_data_direction);
2997                if (unlikely(!nseg))
2998                        goto queuing_error;
2999        } else
3000                nseg = 0;
3001
3002        tot_dsds = nseg;
3003
3004        if (tot_dsds > ql2xshiftctondsd) {
3005                struct cmd_type_6 *cmd_pkt;
3006                uint16_t more_dsd_lists = 0;
3007                struct dsd_dma *dsd_ptr;
3008                uint16_t i;
3009
3010                more_dsd_lists = qla24xx_calc_dsd_lists(tot_dsds);
3011                if ((more_dsd_lists + ha->gbl_dsd_inuse) >= NUM_DSD_CHAIN) {
3012                        ql_dbg(ql_dbg_io, vha, 0x300d,
3013                            "Num of DSD list %d is more than %d for cmd=%p.\n",
3014                            more_dsd_lists + ha->gbl_dsd_inuse, NUM_DSD_CHAIN,
3015                            cmd);
3016                        goto queuing_error;
3017                }
3018
3019                if (more_dsd_lists <= ha->gbl_dsd_avail)
3020                        goto sufficient_dsds;
3021                else
3022                        more_dsd_lists -= ha->gbl_dsd_avail;
3023
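                /*
                 * Grow the global DSD-list pool only by the shortfall;
                 * lists already on gbl_dsd_list are reused across
                 * commands.
                 */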
3024                for (i = 0; i < more_dsd_lists; i++) {
3025                        dsd_ptr = kzalloc(sizeof(struct dsd_dma), GFP_ATOMIC);
3026                        if (!dsd_ptr) {
3027                                ql_log(ql_log_fatal, vha, 0x300e,
3028                                    "Failed to allocate memory for dsd_dma "
3029                                    "for cmd=%p.\n", cmd);
3030                                goto queuing_error;
3031                        }
3032
3033                        dsd_ptr->dsd_addr = dma_pool_alloc(ha->dl_dma_pool,
3034                                GFP_ATOMIC, &dsd_ptr->dsd_list_dma);
3035                        if (!dsd_ptr->dsd_addr) {
3036                                kfree(dsd_ptr);
3037                                ql_log(ql_log_fatal, vha, 0x300f,
3038                                    "Failed to allocate memory for dsd_addr "
3039                                    "for cmd=%p.\n", cmd);
3040                                goto queuing_error;
3041                        }
3042                        list_add_tail(&dsd_ptr->list, &ha->gbl_dsd_list);
3043                        ha->gbl_dsd_avail++;
3044                }
3045
3046sufficient_dsds:
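                /*
                 * A Command Type 6 packet fits in a single ring entry;
                 * its data segments live in the external DSD lists set
                 * up above rather than in continuation IOCBs.
                 */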
3047                req_cnt = 1;
3048
3049                if (req->cnt < (req_cnt + 2)) {
3050                        cnt = (uint16_t)RD_REG_DWORD_RELAXED(
3051                                &reg->req_q_out[0]);
3052                        if (req->ring_index < cnt)
3053                                req->cnt = cnt - req->ring_index;
3054                        else
3055                                req->cnt = req->length -
3056                                        (req->ring_index - cnt);
3057                        if (req->cnt < (req_cnt + 2))
3058                                goto queuing_error;
3059                }
3060
3061                ctx = sp->u.scmd.ctx =
3062                    mempool_alloc(ha->ctx_mempool, GFP_ATOMIC);
3063                if (!ctx) {
3064                        ql_log(ql_log_fatal, vha, 0x3010,
3065                            "Failed to allocate ctx for cmd=%p.\n", cmd);
3066                        goto queuing_error;
3067                }
3068
3069                memset(ctx, 0, sizeof(struct ct6_dsd));
3070                ctx->fcp_cmnd = dma_pool_alloc(ha->fcp_cmnd_dma_pool,
3071                        GFP_ATOMIC, &ctx->fcp_cmnd_dma);
3072                if (!ctx->fcp_cmnd) {
3073                        ql_log(ql_log_fatal, vha, 0x3011,
3074                            "Failed to allocate fcp_cmnd for cmd=%p.\n", cmd);
3075                        goto queuing_error;
3076                }
3077
3078                /* Initialize the DSD list and dma handle */
3079                INIT_LIST_HEAD(&ctx->dsd_list);
3080                ctx->dsd_use_cnt = 0;
3081
3082                if (cmd->cmd_len > 16) {
3083                        additional_cdb_len = cmd->cmd_len - 16;
3084                        if ((cmd->cmd_len % 4) != 0) {
3085                                /* SCSI commands longer than 16 bytes
3086                                 * must be a multiple of 4.
3087                                 */
3088                                ql_log(ql_log_warn, vha, 0x3012,
3089                                    "scsi cmd len %d not multiple of 4 "
3090                                    "for cmd=%p.\n", cmd->cmd_len, cmd);
3091                                goto queuing_error_fcp_cmnd;
3092                        }
3093                        ctx->fcp_cmnd_len = 12 + cmd->cmd_len + 4;
3094                } else {
3095                        additional_cdb_len = 0;
3096                        ctx->fcp_cmnd_len = 12 + 16 + 4;
3097                }
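                /*
                 * FCP_CMND IU layout: 8-byte LUN plus 4 control bytes
                 * (12), the CDB (16 bytes plus any additional length),
                 * and a trailing 4-byte FCP_DL -- hence 12 + len + 4.
                 */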
3098
3099                cmd_pkt = (struct cmd_type_6 *)req->ring_ptr;
3100                cmd_pkt->handle = MAKE_HANDLE(req->id, handle);
3101
3102                /* Zero out remaining portion of packet. */
3103                /* tagged queuing modifier -- default is TSK_SIMPLE (0). */
3104                clr_ptr = (uint32_t *)cmd_pkt + 2;
3105                memset(clr_ptr, 0, REQUEST_ENTRY_SIZE - 8);
3106                cmd_pkt->dseg_count = cpu_to_le16(tot_dsds);
3107
3108                /* Set NPORT-ID and LUN number. */
3109                cmd_pkt->nport_handle = cpu_to_le16(sp->fcport->loop_id);
3110                cmd_pkt->port_id[0] = sp->fcport->d_id.b.al_pa;
3111                cmd_pkt->port_id[1] = sp->fcport->d_id.b.area;
3112                cmd_pkt->port_id[2] = sp->fcport->d_id.b.domain;
3113                cmd_pkt->vp_index = sp->vha->vp_idx;
3114
3115                /* Build IOCB segments */
3116                if (qla24xx_build_scsi_type_6_iocbs(sp, cmd_pkt, tot_dsds))
3117                        goto queuing_error_fcp_cmnd;
3118
3119                int_to_scsilun(cmd->device->lun, &cmd_pkt->lun);
3120                host_to_fcp_swap((uint8_t *)&cmd_pkt->lun, sizeof(cmd_pkt->lun));
3121
3122                /* build FCP_CMND IU */
3123                memset(ctx->fcp_cmnd, 0, sizeof(struct fcp_cmnd));
3124                int_to_scsilun(cmd->device->lun, &ctx->fcp_cmnd->lun);
3125                ctx->fcp_cmnd->additional_cdb_len = additional_cdb_len;
3126
3127                if (cmd->sc_data_direction == DMA_TO_DEVICE)
3128                        ctx->fcp_cmnd->additional_cdb_len |= 1;
3129                else if (cmd->sc_data_direction == DMA_FROM_DEVICE)
3130                        ctx->fcp_cmnd->additional_cdb_len |= 2;
3131
3132                /* Populate the FCP_PRIO. */
3133                if (ha->flags.fcp_prio_enabled)
3134                        ctx->fcp_cmnd->task_attribute |=
3135                            sp->fcport->fcp_prio << 3;
3136
3137                memcpy(ctx->fcp_cmnd->cdb, cmd->cmnd, cmd->cmd_len);
3138
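                /*
                 * FCP_DL (the expected transfer length) sits immediately
                 * after the CDB and is big-endian on the wire.
                 */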
3139                fcp_dl = (uint32_t *)(ctx->fcp_cmnd->cdb + 16 +
3140                    additional_cdb_len);
3141                *fcp_dl = htonl((uint32_t)scsi_bufflen(cmd));
3142
3143                cmd_pkt->fcp_cmnd_dseg_len = cpu_to_le16(ctx->fcp_cmnd_len);
3144                cmd_pkt->fcp_cmnd_dseg_address[0] =
3145                    cpu_to_le32(LSD(ctx->fcp_cmnd_dma));
3146                cmd_pkt->fcp_cmnd_dseg_address[1] =
3147                    cpu_to_le32(MSD(ctx->fcp_cmnd_dma));
3148
3149                sp->flags |= SRB_FCP_CMND_DMA_VALID;
3150                cmd_pkt->byte_count = cpu_to_le32((uint32_t)scsi_bufflen(cmd));
3151                /* Set total data segment count. */
3152                cmd_pkt->entry_count = (uint8_t)req_cnt;
3153                /* Specify response queue number where
3154                 * completion should happen.
3155                 */
3156                cmd_pkt->entry_status = (uint8_t) rsp->id;
3157        } else {
3158                struct cmd_type_7 *cmd_pkt;
3159                req_cnt = qla24xx_calc_iocbs(vha, tot_dsds);
3160                if (req->cnt < (req_cnt + 2)) {
3161                        cnt = (uint16_t)RD_REG_DWORD_RELAXED(
3162                            &reg->req_q_out[0]);
3163                        if (req->ring_index < cnt)
3164                                req->cnt = cnt - req->ring_index;
3165                        else
3166                                req->cnt = req->length -
3167                                        (req->ring_index - cnt);
3168                }
3169                if (req->cnt < (req_cnt + 2))
3170                        goto queuing_error;
3171
3172                cmd_pkt = (struct cmd_type_7 *)req->ring_ptr;
3173                cmd_pkt->handle = MAKE_HANDLE(req->id, handle);
3174
3175                /* Zero out remaining portion of packet. */
3176                /* tagged queuing modifier -- default is TSK_SIMPLE (0).*/
3177                clr_ptr = (uint32_t *)cmd_pkt + 2;
3178                memset(clr_ptr, 0, REQUEST_ENTRY_SIZE - 8);
3179                cmd_pkt->dseg_count = cpu_to_le16(tot_dsds);
3180
3181                /* Set NPORT-ID and LUN number. */
3182                cmd_pkt->nport_handle = cpu_to_le16(sp->fcport->loop_id);
3183                cmd_pkt->port_id[0] = sp->fcport->d_id.b.al_pa;
3184                cmd_pkt->port_id[1] = sp->fcport->d_id.b.area;
3185                cmd_pkt->port_id[2] = sp->fcport->d_id.b.domain;
3186                cmd_pkt->vp_index = sp->vha->vp_idx;
3187
3188                int_to_scsilun(cmd->device->lun, &cmd_pkt->lun);
3189                host_to_fcp_swap((uint8_t *)&cmd_pkt->lun,
3190                    sizeof(cmd_pkt->lun));
3191
3192                /* Populate the FCP_PRIO. */
3193                if (ha->flags.fcp_prio_enabled)
3194                        cmd_pkt->task |= sp->fcport->fcp_prio << 3;
3195
3196                /* Load SCSI command packet. */
3197                memcpy(cmd_pkt->fcp_cdb, cmd->cmnd, cmd->cmd_len);
3198                host_to_fcp_swap(cmd_pkt->fcp_cdb, sizeof(cmd_pkt->fcp_cdb));
3199
3200                cmd_pkt->byte_count = cpu_to_le32((uint32_t)scsi_bufflen(cmd));
3201
3202                /* Build IOCB segments */
3203                qla24xx_build_scsi_iocbs(sp, cmd_pkt, tot_dsds, req);
3204
3205                /* Set total data segment count. */
3206                cmd_pkt->entry_count = (uint8_t)req_cnt;
3207                /* Specify response queue number where
3208                 * completion should happen.
3209                 */
3210                cmd_pkt->entry_status = (uint8_t) rsp->id;
3211
3212        }
3213        /* Build command packet. */
3214        req->current_outstanding_cmd = handle;
3215        req->outstanding_cmds[handle] = sp;
3216        sp->handle = handle;
3217        cmd->host_scribble = (unsigned char *)(unsigned long)handle;
3218        req->cnt -= req_cnt;
3219        wmb();
3220
3221        /* Adjust ring index. */
3222        req->ring_index++;
3223        if (req->ring_index == req->length) {
3224                req->ring_index = 0;
3225                req->ring_ptr = req->ring;
3226        } else
3227                req->ring_ptr++;
3228
3229        sp->flags |= SRB_DMA_VALID;
3230
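        /*
         * Fold the request queue id and ring index into the doorbell
         * token; the read-back loop below re-posts the value until the
         * chip reflects it, presumably to guard against dropped writes.
         */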
3231        /* Set chip new ring index. */
3232        /* write, read and verify logic */
3233        dbval = dbval | (req->id << 8) | (req->ring_index << 16);
3234        if (ql2xdbwr) {
3235                qla82xx_wr_32(ha, (uintptr_t __force)ha->nxdb_wr_ptr, dbval);
3236        } else {
3237                WRT_REG_DWORD(ha->nxdb_wr_ptr, dbval);
3238                wmb();
3239                while (RD_REG_DWORD(ha->nxdb_rd_ptr) != dbval) {
3240                        WRT_REG_DWORD(ha->nxdb_wr_ptr, dbval);
3241                        wmb();
3242                }
3243        }
3244
3245        /* Manage unprocessed RIO/ZIO commands in response queue. */
3246        if (vha->flags.process_response_queue &&
3247            rsp->ring_ptr->signature != RESPONSE_PROCESSED)
3248                qla24xx_process_response_queue(vha, rsp);
3249
3250        spin_unlock_irqrestore(&ha->hardware_lock, flags);
3251        return QLA_SUCCESS;
3252
3253queuing_error_fcp_cmnd:
3254        dma_pool_free(ha->fcp_cmnd_dma_pool, ctx->fcp_cmnd, ctx->fcp_cmnd_dma);
3255queuing_error:
3256        if (tot_dsds)
3257                scsi_dma_unmap(cmd);
3258
3259        if (sp->u.scmd.ctx) {
3260                mempool_free(sp->u.scmd.ctx, ha->ctx_mempool);
3261                sp->u.scmd.ctx = NULL;
3262        }
3263        spin_unlock_irqrestore(&ha->hardware_lock, flags);
3264
3265        return QLA_FUNCTION_FAILED;
3266}
3267
3268static void
3269qla24xx_abort_iocb(srb_t *sp, struct abort_entry_24xx *abt_iocb)
3270{
3271        struct srb_iocb *aio = &sp->u.iocb_cmd;
3272        scsi_qla_host_t *vha = sp->vha;
3273        struct req_que *req = vha->req;
3274
3275        memset(abt_iocb, 0, sizeof(struct abort_entry_24xx));
3276        abt_iocb->entry_type = ABORT_IOCB_TYPE;
3277        abt_iocb->entry_count = 1;
3278        abt_iocb->handle = cpu_to_le32(MAKE_HANDLE(req->id, sp->handle));
3279        abt_iocb->nport_handle = cpu_to_le16(sp->fcport->loop_id);
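        /* Encode both the owning request queue and the handle to abort. */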
3280        abt_iocb->handle_to_abort =
3281            cpu_to_le32(MAKE_HANDLE(aio->u.abt.req_que_no,
3282                                    aio->u.abt.cmd_hndl));
3283        abt_iocb->port_id[0] = sp->fcport->d_id.b.al_pa;
3284        abt_iocb->port_id[1] = sp->fcport->d_id.b.area;
3285        abt_iocb->port_id[2] = sp->fcport->d_id.b.domain;
3286        abt_iocb->vp_index = vha->vp_idx;
3287        abt_iocb->req_que_no = cpu_to_le16(aio->u.abt.req_que_no);
3288        /* Send the command to the firmware */
3289        wmb();
3290}
3291
3292static void
3293qla2x00_mb_iocb(srb_t *sp, struct mbx_24xx_entry *mbx)
3294{
3295        int i, sz;
3296
3297        mbx->entry_type = MBX_IOCB_TYPE;
3298        mbx->handle = sp->handle;
3299        sz = min(ARRAY_SIZE(mbx->mb), ARRAY_SIZE(sp->u.iocb_cmd.u.mbx.out_mb));
3300
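        /* Copy the outbound mailbox words, bounded by the smaller array. */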
3301        for (i = 0; i < sz; i++)
3302                mbx->mb[i] = cpu_to_le16(sp->u.iocb_cmd.u.mbx.out_mb[i]);
3303}
3304
3305static void
3306qla2x00_ctpthru_cmd_iocb(srb_t *sp, struct ct_entry_24xx *ct_pkt)
3307{
3308        sp->u.iocb_cmd.u.ctarg.iocb = ct_pkt;
3309        qla24xx_prep_ms_iocb(sp->vha, &sp->u.iocb_cmd.u.ctarg);
3310        ct_pkt->handle = sp->handle;
3311}
3312
3313static void qla2x00_send_notify_ack_iocb(srb_t *sp,
3314        struct nack_to_isp *nack)
3315{
3316        struct imm_ntfy_from_isp *ntfy = sp->u.iocb_cmd.u.nack.ntfy;
3317
3318        nack->entry_type = NOTIFY_ACK_TYPE;
3319        nack->entry_count = 1;
3320        nack->ox_id = ntfy->ox_id;
3321
3322        nack->u.isp24.handle = sp->handle;
3323        nack->u.isp24.nport_handle = ntfy->u.isp24.nport_handle;
3324        if (le16_to_cpu(ntfy->u.isp24.status) == IMM_NTFY_ELS) {
3325                nack->u.isp24.flags = ntfy->u.isp24.flags &
3326                        cpu_to_le32(NOTIFY24XX_FLAGS_PUREX_IOCB);
3327        }
3328        nack->u.isp24.srr_rx_id = ntfy->u.isp24.srr_rx_id;
3329        nack->u.isp24.status = ntfy->u.isp24.status;
3330        nack->u.isp24.status_subcode = ntfy->u.isp24.status_subcode;
3331        nack->u.isp24.fw_handle = ntfy->u.isp24.fw_handle;
3332        nack->u.isp24.exchange_address = ntfy->u.isp24.exchange_address;
3333        nack->u.isp24.srr_rel_offs = ntfy->u.isp24.srr_rel_offs;
3334        nack->u.isp24.srr_ui = ntfy->u.isp24.srr_ui;
3335        nack->u.isp24.srr_flags = 0;
3336        nack->u.isp24.srr_reject_code = 0;
3337        nack->u.isp24.srr_reject_code_expl = 0;
3338        nack->u.isp24.vp_index = ntfy->u.isp24.vp_index;
3339}
3340
3341/*
3342 * Build NVME LS request
3343 */
3344static int
3345qla_nvme_ls(srb_t *sp, struct pt_ls4_request *cmd_pkt)
3346{
3347        struct srb_iocb *nvme;
3348        int     rval = QLA_SUCCESS;
3349
3350        nvme = &sp->u.iocb_cmd;
3351        cmd_pkt->entry_type = PT_LS4_REQUEST;
3352        cmd_pkt->entry_count = 1;
3353        cmd_pkt->control_flags = CF_LS4_ORIGINATOR << CF_LS4_SHIFT;
3354
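        /*
         * One transmit segment (the LS command) and one receive segment
         * (the LS response); both descriptors fit in the PT_LS4 IOCB.
         */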
3355        cmd_pkt->timeout = cpu_to_le16(nvme->u.nvme.timeout_sec);
3356        cmd_pkt->nport_handle = cpu_to_le16(sp->fcport->loop_id);
3357        cmd_pkt->vp_index = sp->fcport->vha->vp_idx;
3358
3359        cmd_pkt->tx_dseg_count = 1;
3360        cmd_pkt->tx_byte_count = nvme->u.nvme.cmd_len;
3361        cmd_pkt->dseg0_len = nvme->u.nvme.cmd_len;
3362        cmd_pkt->dseg0_address[0] = cpu_to_le32(LSD(nvme->u.nvme.cmd_dma));
3363        cmd_pkt->dseg0_address[1] = cpu_to_le32(MSD(nvme->u.nvme.cmd_dma));
3364
3365        cmd_pkt->rx_dseg_count = 1;
3366        cmd_pkt->rx_byte_count = nvme->u.nvme.rsp_len;
3367        cmd_pkt->dseg1_len  = nvme->u.nvme.rsp_len;
3368        cmd_pkt->dseg1_address[0] =  cpu_to_le32(LSD(nvme->u.nvme.rsp_dma));
3369        cmd_pkt->dseg1_address[1] =  cpu_to_le32(MSD(nvme->u.nvme.rsp_dma));
3370
3371        return rval;
3372}
3373
3374static void
3375qla25xx_ctrlvp_iocb(srb_t *sp, struct vp_ctrl_entry_24xx *vce)
3376{
3377        int map, pos;
3378
3379        vce->entry_type = VP_CTRL_IOCB_TYPE;
3380        vce->handle = sp->handle;
3381        vce->entry_count = 1;
3382        vce->command = cpu_to_le16(sp->u.iocb_cmd.u.ctrlvp.cmd);
3383        vce->vp_count = cpu_to_le16(1);
3384
3385        /*
3386         * The index map in the firmware starts at 1, so decrement the
3387         * index; this is safe because index 0 is never used.
3388         */
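        /* e.g. vp_index 10: map = (10 - 1) / 8 = 1, pos = (10 - 1) & 7 = 1 */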
3389        map = (sp->u.iocb_cmd.u.ctrlvp.vp_index - 1) / 8;
3390        pos = (sp->u.iocb_cmd.u.ctrlvp.vp_index - 1) & 7;
3391        vce->vp_idx_map[map] |= 1 << pos;
3392}
3393
3394static void
3395qla24xx_prlo_iocb(srb_t *sp, struct logio_entry_24xx *logio)
3396{
3397        logio->entry_type = LOGINOUT_PORT_IOCB_TYPE;
3398        logio->control_flags =
3399            cpu_to_le16(LCF_COMMAND_PRLO|LCF_IMPL_PRLO);
3400
3401        logio->nport_handle = cpu_to_le16(sp->fcport->loop_id);
3402        logio->port_id[0] = sp->fcport->d_id.b.al_pa;
3403        logio->port_id[1] = sp->fcport->d_id.b.area;
3404        logio->port_id[2] = sp->fcport->d_id.b.domain;
3405        logio->vp_index = sp->fcport->vha->vp_idx;
3406}
3407
3408int
3409qla2x00_start_sp(srb_t *sp)
3410{
3411        int rval;
3412        scsi_qla_host_t *vha = sp->vha;
3413        struct qla_hw_data *ha = vha->hw;
3414        void *pkt;
3415        unsigned long flags;
3416
3417        rval = QLA_FUNCTION_FAILED;
3418        spin_lock_irqsave(&ha->hardware_lock, flags);
3419        pkt = qla2x00_alloc_iocbs(vha, sp);
3420        if (!pkt) {
3421                ql_log(ql_log_warn, vha, 0x700c,
3422                    "qla2x00_alloc_iocbs failed.\n");
3423                goto done;
3424        }
3425
3426        rval = QLA_SUCCESS;
3427        switch (sp->type) {
3428        case SRB_LOGIN_CMD:
3429                IS_FWI2_CAPABLE(ha) ?
3430                    qla24xx_login_iocb(sp, pkt) :
3431                    qla2x00_login_iocb(sp, pkt);
3432                break;
3433        case SRB_PRLI_CMD:
3434                qla24xx_prli_iocb(sp, pkt);
3435                break;
3436        case SRB_LOGOUT_CMD:
3437                IS_FWI2_CAPABLE(ha) ?
3438                    qla24xx_logout_iocb(sp, pkt) :
3439                    qla2x00_logout_iocb(sp, pkt);
3440                break;
3441        case SRB_ELS_CMD_RPT:
3442        case SRB_ELS_CMD_HST:
3443                qla24xx_els_iocb(sp, pkt);
3444                break;
3445        case SRB_CT_CMD:
3446                IS_FWI2_CAPABLE(ha) ?
3447                    qla24xx_ct_iocb(sp, pkt) :
3448                    qla2x00_ct_iocb(sp, pkt);
3449                break;
3450        case SRB_ADISC_CMD:
3451                IS_FWI2_CAPABLE(ha) ?
3452                    qla24xx_adisc_iocb(sp, pkt) :
3453                    qla2x00_adisc_iocb(sp, pkt);
3454                break;
3455        case SRB_TM_CMD:
3456                IS_QLAFX00(ha) ?
3457                    qlafx00_tm_iocb(sp, pkt) :
3458                    qla24xx_tm_iocb(sp, pkt);
3459                break;
3460        case SRB_FXIOCB_DCMD:
3461        case SRB_FXIOCB_BCMD:
3462                qlafx00_fxdisc_iocb(sp, pkt);
3463                break;
3464        case SRB_NVME_LS:
3465                qla_nvme_ls(sp, pkt);
3466                break;
3467        case SRB_ABT_CMD:
3468                IS_QLAFX00(ha) ?
3469                        qlafx00_abort_iocb(sp, pkt) :
3470                        qla24xx_abort_iocb(sp, pkt);
3471                break;
3472        case SRB_ELS_DCMD:
3473                qla24xx_els_logo_iocb(sp, pkt);
3474                break;
3475        case SRB_CT_PTHRU_CMD:
3476                qla2x00_ctpthru_cmd_iocb(sp, pkt);
3477                break;
3478        case SRB_MB_IOCB:
3479                qla2x00_mb_iocb(sp, pkt);
3480                break;
3481        case SRB_NACK_PLOGI:
3482        case SRB_NACK_PRLI:
3483        case SRB_NACK_LOGO:
3484                qla2x00_send_notify_ack_iocb(sp, pkt);
3485                break;
3486        case SRB_CTRL_VP:
3487                qla25xx_ctrlvp_iocb(sp, pkt);
3488                break;
3489        case SRB_PRLO_CMD:
3490                qla24xx_prlo_iocb(sp, pkt);
3491                break;
3492        default:
3493                break;
3494        }
3495
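        /* Make the IOCB visible to the hardware before ringing the queue. */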
3496        wmb();
3497        qla2x00_start_iocbs(vha, ha->req_q_map[0]);
3498done:
3499        spin_unlock_irqrestore(&ha->hardware_lock, flags);
3500        return rval;
3501}
3502
3503static void
3504qla25xx_build_bidir_iocb(srb_t *sp, struct scsi_qla_host *vha,
3505                                struct cmd_bidir *cmd_pkt, uint32_t tot_dsds)
3506{
3507        uint16_t avail_dsds;
3508        uint32_t *cur_dsd;
3509        uint32_t req_data_len = 0;
3510        uint32_t rsp_data_len = 0;
3511        struct scatterlist *sg;
3512        int index;
3513        int entry_count = 1;
3514        struct bsg_job *bsg_job = sp->u.bsg_job;
3515
3516        /* Update entry type to indicate bidir command. */
3517        *((uint32_t *)(&cmd_pkt->entry_type)) =
3518                cpu_to_le32(COMMAND_BIDIRECTIONAL);
3519
3520        /* Set the transfer direction; in this case set both flags.
3521         * Also set the BD_WRAP_BACK flag; the firmware takes care of
3522         * assigning DID=SID for outgoing pkts.
3523         */
3524        cmd_pkt->wr_dseg_count = cpu_to_le16(bsg_job->request_payload.sg_cnt);
3525        cmd_pkt->rd_dseg_count = cpu_to_le16(bsg_job->reply_payload.sg_cnt);
3526        cmd_pkt->control_flags = cpu_to_le16(BD_WRITE_DATA | BD_READ_DATA |
3527                                                        BD_WRAP_BACK);
3528
3529        req_data_len = rsp_data_len = bsg_job->request_payload.payload_len;
3530        cmd_pkt->wr_byte_count = cpu_to_le32(req_data_len);
3531        cmd_pkt->rd_byte_count = cpu_to_le32(rsp_data_len);
3532        cmd_pkt->timeout = cpu_to_le16(qla2x00_get_async_timeout(vha) + 2);
3533
3534        vha->bidi_stats.transfer_bytes += req_data_len;
3535        vha->bidi_stats.io_count++;
3536
3537        vha->qla_stats.output_bytes += req_data_len;
3538        vha->qla_stats.output_requests++;
3539
3540        /* Only one DSD is available in the bidirectional IOCB; the
3541         * remaining DSDs are bundled into continuation IOCBs.
3542         */
3543        avail_dsds = 1;
3544        cur_dsd = (uint32_t *)&cmd_pkt->fcp_data_dseg_address;
3545
3546        index = 0;
3547
3548        for_each_sg(bsg_job->request_payload.sg_list, sg,
3549                                bsg_job->request_payload.sg_cnt, index) {
3550                dma_addr_t sle_dma;
3551                cont_a64_entry_t *cont_pkt;
3552
3553                /* Allocate additional continuation packets */
3554                if (avail_dsds == 0) {
3555                        /* A Continuation Type 1 IOCB can accommodate
3556                         * 5 DSDs.
3557                         */
3558                        cont_pkt = qla2x00_prep_cont_type1_iocb(vha, vha->req);
3559                        cur_dsd = (uint32_t *) cont_pkt->dseg_0_address;
3560                        avail_dsds = 5;
3561                        entry_count++;
3562                }
3563                sle_dma = sg_dma_address(sg);
3564                *cur_dsd++   = cpu_to_le32(LSD(sle_dma));
3565                *cur_dsd++   = cpu_to_le32(MSD(sle_dma));
3566                *cur_dsd++   = cpu_to_le32(sg_dma_len(sg));
3567                avail_dsds--;
3568        }
3569        /* For a read request the DSDs always go to a continuation IOCB,
3570         * following the write DSDs. If there is room on the current IOCB
3571         * they are added there; otherwise a new continuation IOCB is
3572         * allocated.
3573         */
3574        for_each_sg(bsg_job->reply_payload.sg_list, sg,
3575                                bsg_job->reply_payload.sg_cnt, index) {
3576                dma_addr_t sle_dma;
3577                cont_a64_entry_t *cont_pkt;
3578
3579                /* Allocate additional continuation packets */
3580                if (avail_dsds == 0) {
3581                        /* A Continuation Type 1 IOCB can accommodate
3582                         * 5 DSDs.
3583                         */
3584                        cont_pkt = qla2x00_prep_cont_type1_iocb(vha, vha->req);
3585                        cur_dsd = (uint32_t *) cont_pkt->dseg_0_address;
3586                        avail_dsds = 5;
3587                        entry_count++;
3588                }
3589                sle_dma = sg_dma_address(sg);
3590                *cur_dsd++   = cpu_to_le32(LSD(sle_dma));
3591                *cur_dsd++   = cpu_to_le32(MSD(sle_dma));
3592                *cur_dsd++   = cpu_to_le32(sg_dma_len(sg));
3593                avail_dsds--;
3594        }
3595        /* This value must equal the number of IOCBs required for this cmd. */
3596        cmd_pkt->entry_count = entry_count;
3597}
3598
3599int
3600qla2x00_start_bidir(srb_t *sp, struct scsi_qla_host *vha, uint32_t tot_dsds)
3601{
3602
3603        struct qla_hw_data *ha = vha->hw;
3604        unsigned long flags;
3605        uint32_t handle;
3606        uint32_t index;
3607        uint16_t req_cnt;
3608        uint16_t cnt;
3609        uint32_t *clr_ptr;
3610        struct cmd_bidir *cmd_pkt = NULL;
3611        struct rsp_que *rsp;
3612        struct req_que *req;
3613        int rval = EXT_STATUS_OK;
3616
3617        rsp = ha->rsp_q_map[0];
3618        req = vha->req;
3619
3620        /* Send marker if required */
3621        if (vha->marker_needed != 0) {
3622                if (qla2x00_marker(vha, req,
3623                        rsp, 0, 0, MK_SYNC_ALL) != QLA_SUCCESS)
3624                        return EXT_STATUS_MAILBOX;
3625                vha->marker_needed = 0;
3626        }
3627
3628        /* Acquire ring specific lock */
3629        spin_lock_irqsave(&ha->hardware_lock, flags);
3630
3631        /* Check for room in outstanding command list. */
3632        handle = req->current_outstanding_cmd;
3633        for (index = 1; index < req->num_outstanding_cmds; index++) {
3634                handle++;
3635                if (handle == req->num_outstanding_cmds)
3636                        handle = 1;
3637                if (!req->outstanding_cmds[handle])
3638                        break;
3639        }
3640
3641        if (index == req->num_outstanding_cmds) {
3642                rval = EXT_STATUS_BUSY;
3643                goto queuing_error;
3644        }
3645
3646        /* Calculate number of IOCB required */
3647        req_cnt = qla24xx_calc_iocbs(vha, tot_dsds);
3648
3649        /* Check for room on request queue. */
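        /*
         * Free slots are recomputed from the hardware out-pointer; the
         * free space wraps around the end of the ring when needed.
         */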
3650        if (req->cnt < req_cnt + 2) {
3651                cnt = IS_SHADOW_REG_CAPABLE(ha) ? *req->out_ptr :
3652                    RD_REG_DWORD_RELAXED(req->req_q_out);
3653                if  (req->ring_index < cnt)
3654                        req->cnt = cnt - req->ring_index;
3655                else
3656                        req->cnt = req->length -
3657                                (req->ring_index - cnt);
3658        }
3659        if (req->cnt < req_cnt + 2) {
3660                rval = EXT_STATUS_BUSY;
3661                goto queuing_error;
3662        }
3663
3664        cmd_pkt = (struct cmd_bidir *)req->ring_ptr;
3665        cmd_pkt->handle = MAKE_HANDLE(req->id, handle);
3666
3667        /* Zero out remaining portion of packet. */
3668        /* tagged queuing modifier -- default is TSK_SIMPLE (0).*/
3669        clr_ptr = (uint32_t *)cmd_pkt + 2;
3670        memset(clr_ptr, 0, REQUEST_ENTRY_SIZE - 8);
3671
3672        /* Set NPORT-ID (of vha). */
3673        cmd_pkt->nport_handle = cpu_to_le16(vha->self_login_loop_id);
3674        cmd_pkt->port_id[0] = vha->d_id.b.al_pa;
3675        cmd_pkt->port_id[1] = vha->d_id.b.area;
3676        cmd_pkt->port_id[2] = vha->d_id.b.domain;
3677
3678        qla25xx_build_bidir_iocb(sp, vha, cmd_pkt, tot_dsds);
3679        cmd_pkt->entry_status = (uint8_t) rsp->id;
3680        /* Build command packet. */
3681        req->current_outstanding_cmd = handle;
3682        req->outstanding_cmds[handle] = sp;
3683        sp->handle = handle;
3684        req->cnt -= req_cnt;
3685
3686        /* Send the command to the firmware */
3687        wmb();
3688        qla2x00_start_iocbs(vha, req);
3689queuing_error:
3690        spin_unlock_irqrestore(&ha->hardware_lock, flags);
3691        return rval;
3692}
3693