linux/drivers/scsi/qla2xxx/qla_iocb.c
/*
 * QLogic Fibre Channel HBA Driver
 * Copyright (c) 2003-2014 QLogic Corporation
 *
 * See LICENSE.qla2xxx for copyright and licensing details.
 */
#include "qla_def.h"
#include "qla_target.h"

#include <linux/blkdev.h>
#include <linux/delay.h>

#include <scsi/scsi_tcq.h>

/**
 * qla2x00_get_cmd_direction() - Determine control_flag data direction.
 * @sp: SCSI Request Block
 *
 * Returns the proper CF_* direction based on the data direction of the
 * SCSI command.
 */
static inline uint16_t
qla2x00_get_cmd_direction(srb_t *sp)
{
        uint16_t cflags;
        struct scsi_cmnd *cmd = GET_CMD_SP(sp);
        struct scsi_qla_host *vha = sp->vha;

        cflags = 0;

        /* Set transfer direction */
        if (cmd->sc_data_direction == DMA_TO_DEVICE) {
                cflags = CF_WRITE;
                vha->qla_stats.output_bytes += scsi_bufflen(cmd);
                vha->qla_stats.output_requests++;
        } else if (cmd->sc_data_direction == DMA_FROM_DEVICE) {
                cflags = CF_READ;
                vha->qla_stats.input_bytes += scsi_bufflen(cmd);
                vha->qla_stats.input_requests++;
        }
        return (cflags);
}

/**
 * qla2x00_calc_iocbs_32() - Determine number of Command Type 2 and
 * Continuation Type 0 IOCBs to allocate.
 *
 * @dsds: number of data segment descriptors needed
 *
 * Returns the number of IOCB entries needed to store @dsds.
 */
uint16_t
qla2x00_calc_iocbs_32(uint16_t dsds)
{
        uint16_t iocbs;

        iocbs = 1;
        if (dsds > 3) {
                iocbs += (dsds - 3) / 7;
                if ((dsds - 3) % 7)
                        iocbs++;
        }
        return (iocbs);
}
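
/*
 * Worked example (illustrative): for dsds = 17, the command IOCB holds
 * the first three descriptors and the remaining 14 fill exactly two
 * Continuation Type 0 IOCBs (seven each), so this returns 1 + 14/7 = 3.
 */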

/**
 * qla2x00_calc_iocbs_64() - Determine number of Command Type 3 and
 * Continuation Type 1 IOCBs to allocate.
 *
 * @dsds: number of data segment descriptors needed
 *
 * Returns the number of IOCB entries needed to store @dsds.
 */
uint16_t
qla2x00_calc_iocbs_64(uint16_t dsds)
{
        uint16_t iocbs;

        iocbs = 1;
        if (dsds > 2) {
                iocbs += (dsds - 2) / 5;
                if ((dsds - 2) % 5)
                        iocbs++;
        }
        return (iocbs);
}
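
/*
 * Worked example (illustrative): for dsds = 12, the command IOCB holds
 * the first two descriptors and the remaining ten fill exactly two
 * Continuation Type 1 IOCBs (five each), so this returns 1 + 10/5 = 3.
 */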

/**
 * qla2x00_prep_cont_type0_iocb() - Initialize a Continuation Type 0 IOCB.
 * @vha: HA context
 *
 * Returns a pointer to the Continuation Type 0 IOCB packet.
 */
static inline cont_entry_t *
qla2x00_prep_cont_type0_iocb(struct scsi_qla_host *vha)
{
        cont_entry_t *cont_pkt;
        struct req_que *req = vha->req;
        /* Adjust ring index. */
        req->ring_index++;
        if (req->ring_index == req->length) {
                req->ring_index = 0;
                req->ring_ptr = req->ring;
        } else {
                req->ring_ptr++;
        }

        cont_pkt = (cont_entry_t *)req->ring_ptr;

        /* Load packet defaults. */
        *((uint32_t *)(&cont_pkt->entry_type)) = cpu_to_le32(CONTINUE_TYPE);

        return (cont_pkt);
}

/**
 * qla2x00_prep_cont_type1_iocb() - Initialize a Continuation Type 1 IOCB.
 * @vha: HA context
 * @req: request queue
 *
 * Returns a pointer to the Continuation Type 1 IOCB packet.
 */
static inline cont_a64_entry_t *
qla2x00_prep_cont_type1_iocb(scsi_qla_host_t *vha, struct req_que *req)
{
        cont_a64_entry_t *cont_pkt;

        /* Adjust ring index. */
        req->ring_index++;
        if (req->ring_index == req->length) {
                req->ring_index = 0;
                req->ring_ptr = req->ring;
        } else {
                req->ring_ptr++;
        }

        cont_pkt = (cont_a64_entry_t *)req->ring_ptr;

        /* Load packet defaults. */
        *((uint32_t *)(&cont_pkt->entry_type)) = IS_QLAFX00(vha->hw) ?
            cpu_to_le32(CONTINUE_A64_TYPE_FX00) :
            cpu_to_le32(CONTINUE_A64_TYPE);

        return (cont_pkt);
}

inline int
qla24xx_configure_prot_mode(srb_t *sp, uint16_t *fw_prot_opts)
{
        struct scsi_cmnd *cmd = GET_CMD_SP(sp);
        uint8_t guard = scsi_host_get_guard(cmd->device->host);

        /* We always use DIF Bundling for best performance */
        *fw_prot_opts = 0;

        /* Translate SCSI opcode to a protection opcode */
        switch (scsi_get_prot_op(cmd)) {
        case SCSI_PROT_READ_STRIP:
                *fw_prot_opts |= PO_MODE_DIF_REMOVE;
                break;
        case SCSI_PROT_WRITE_INSERT:
                *fw_prot_opts |= PO_MODE_DIF_INSERT;
                break;
        case SCSI_PROT_READ_INSERT:
                *fw_prot_opts |= PO_MODE_DIF_INSERT;
                break;
        case SCSI_PROT_WRITE_STRIP:
                *fw_prot_opts |= PO_MODE_DIF_REMOVE;
                break;
        case SCSI_PROT_READ_PASS:
        case SCSI_PROT_WRITE_PASS:
                if (guard & SHOST_DIX_GUARD_IP)
                        *fw_prot_opts |= PO_MODE_DIF_TCP_CKSUM;
                else
                        *fw_prot_opts |= PO_MODE_DIF_PASS;
                break;
        default:        /* Normal Request */
                *fw_prot_opts |= PO_MODE_DIF_PASS;
                break;
        }

        return scsi_prot_sg_count(cmd);
}
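
/*
 * Usage sketch (illustrative, not a verbatim call site): a DIF-aware
 * start-scsi path would derive the firmware protection options and the
 * number of protection segments before building the CRC-2 IOCB:
 *
 *      uint16_t fw_prot_opts = 0;
 *      int nprot = qla24xx_configure_prot_mode(sp, &fw_prot_opts);
 */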

/**
 * qla2x00_build_scsi_iocbs_32() - Build IOCB command utilizing 32bit
 * capable IOCB types.
 *
 * @sp: SRB command to process
 * @cmd_pkt: Command type 2 IOCB
 * @tot_dsds: Total number of segments to transfer
 */
void qla2x00_build_scsi_iocbs_32(srb_t *sp, cmd_entry_t *cmd_pkt,
    uint16_t tot_dsds)
{
        uint16_t        avail_dsds;
        uint32_t        *cur_dsd;
        scsi_qla_host_t *vha;
        struct scsi_cmnd *cmd;
        struct scatterlist *sg;
        int i;

        cmd = GET_CMD_SP(sp);

        /* Update entry type to indicate Command Type 2 IOCB */
        *((uint32_t *)(&cmd_pkt->entry_type)) =
            cpu_to_le32(COMMAND_TYPE);

        /* No data transfer */
        if (!scsi_bufflen(cmd) || cmd->sc_data_direction == DMA_NONE) {
                cmd_pkt->byte_count = cpu_to_le32(0);
                return;
        }

        vha = sp->vha;
        cmd_pkt->control_flags |= cpu_to_le16(qla2x00_get_cmd_direction(sp));

        /* Three DSDs are available in the Command Type 2 IOCB */
        avail_dsds = 3;
        cur_dsd = (uint32_t *)&cmd_pkt->dseg_0_address;

        /* Load data segments */
        scsi_for_each_sg(cmd, sg, tot_dsds, i) {
                cont_entry_t *cont_pkt;

                /* Allocate additional continuation packets? */
                if (avail_dsds == 0) {
                        /*
                         * Seven DSDs are available in the Continuation
                         * Type 0 IOCB.
                         */
                        cont_pkt = qla2x00_prep_cont_type0_iocb(vha);
                        cur_dsd = (uint32_t *)&cont_pkt->dseg_0_address;
                        avail_dsds = 7;
                }

                *cur_dsd++ = cpu_to_le32(sg_dma_address(sg));
                *cur_dsd++ = cpu_to_le32(sg_dma_len(sg));
                avail_dsds--;
        }
}
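
/*
 * Note (summary): a 32-bit DSD is an (address, length) pair of two
 * little-endian words; three pairs fit in the Command Type 2 IOCB and
 * seven in each Continuation Type 0 IOCB, which is exactly the split
 * qla2x00_calc_iocbs_32() sizes for.
 */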

/**
 * qla2x00_build_scsi_iocbs_64() - Build IOCB command utilizing 64bit
 * capable IOCB types.
 *
 * @sp: SRB command to process
 * @cmd_pkt: Command type 3 IOCB
 * @tot_dsds: Total number of segments to transfer
 */
void qla2x00_build_scsi_iocbs_64(srb_t *sp, cmd_entry_t *cmd_pkt,
    uint16_t tot_dsds)
{
        uint16_t        avail_dsds;
        uint32_t        *cur_dsd;
        scsi_qla_host_t *vha;
        struct scsi_cmnd *cmd;
        struct scatterlist *sg;
        int i;

        cmd = GET_CMD_SP(sp);

        /* Update entry type to indicate Command Type 3 IOCB */
        *((uint32_t *)(&cmd_pkt->entry_type)) = cpu_to_le32(COMMAND_A64_TYPE);

        /* No data transfer */
        if (!scsi_bufflen(cmd) || cmd->sc_data_direction == DMA_NONE) {
                cmd_pkt->byte_count = cpu_to_le32(0);
                return;
        }

        vha = sp->vha;
        cmd_pkt->control_flags |= cpu_to_le16(qla2x00_get_cmd_direction(sp));

        /* Two DSDs are available in the Command Type 3 IOCB */
        avail_dsds = 2;
        cur_dsd = (uint32_t *)&cmd_pkt->dseg_0_address;

        /* Load data segments */
        scsi_for_each_sg(cmd, sg, tot_dsds, i) {
                dma_addr_t      sle_dma;
                cont_a64_entry_t *cont_pkt;

                /* Allocate additional continuation packets? */
                if (avail_dsds == 0) {
                        /*
                         * Five DSDs are available in the Continuation
                         * Type 1 IOCB.
                         */
                        cont_pkt = qla2x00_prep_cont_type1_iocb(vha, vha->req);
                        cur_dsd = (uint32_t *)cont_pkt->dseg_0_address;
                        avail_dsds = 5;
                }

                sle_dma = sg_dma_address(sg);
                *cur_dsd++ = cpu_to_le32(LSD(sle_dma));
                *cur_dsd++ = cpu_to_le32(MSD(sle_dma));
                *cur_dsd++ = cpu_to_le32(sg_dma_len(sg));
                avail_dsds--;
        }
}
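
/*
 * Note (summary): a 64-bit DSD is an (address low, address high, length)
 * triple; two triples fit in the Command Type 3 IOCB and five in each
 * Continuation Type 1 IOCB, matching qla2x00_calc_iocbs_64() above.
 */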

/**
 * qla2x00_start_scsi() - Send a SCSI command to the ISP
 * @sp: command to send to the ISP
 *
 * Returns non-zero if a failure occurred, else zero.
 */
int
qla2x00_start_scsi(srb_t *sp)
{
        int             nseg;
        unsigned long   flags;
        scsi_qla_host_t *vha;
        struct scsi_cmnd *cmd;
        uint32_t        *clr_ptr;
        uint32_t        index;
        uint32_t        handle;
        cmd_entry_t     *cmd_pkt;
        uint16_t        cnt;
        uint16_t        req_cnt;
        uint16_t        tot_dsds;
        struct device_reg_2xxx __iomem *reg;
        struct qla_hw_data *ha;
        struct req_que *req;
        struct rsp_que *rsp;

        /* Setup device pointers. */
        vha = sp->vha;
        ha = vha->hw;
        reg = &ha->iobase->isp;
        cmd = GET_CMD_SP(sp);
        req = ha->req_q_map[0];
        rsp = ha->rsp_q_map[0];
        /* So we know we haven't pci_map'ed anything yet */
        tot_dsds = 0;

        /* Send marker if required */
        if (vha->marker_needed != 0) {
                if (qla2x00_marker(vha, req, rsp, 0, 0, MK_SYNC_ALL) !=
                    QLA_SUCCESS) {
                        return (QLA_FUNCTION_FAILED);
                }
                vha->marker_needed = 0;
        }

        /* Acquire ring specific lock */
        spin_lock_irqsave(&ha->hardware_lock, flags);

        /* Check for room in outstanding command list. */
        handle = req->current_outstanding_cmd;
        for (index = 1; index < req->num_outstanding_cmds; index++) {
                handle++;
                if (handle == req->num_outstanding_cmds)
                        handle = 1;
                if (!req->outstanding_cmds[handle])
                        break;
        }
        if (index == req->num_outstanding_cmds)
                goto queuing_error;

        /* Map the sg table so we have an accurate count of sg entries needed */
        if (scsi_sg_count(cmd)) {
                nseg = dma_map_sg(&ha->pdev->dev, scsi_sglist(cmd),
                    scsi_sg_count(cmd), cmd->sc_data_direction);
                if (unlikely(!nseg))
                        goto queuing_error;
        } else
                nseg = 0;

        tot_dsds = nseg;

        /* Calculate the number of request entries needed. */
        req_cnt = ha->isp_ops->calc_req_entries(tot_dsds);
        if (req->cnt < (req_cnt + 2)) {
                cnt = RD_REG_WORD_RELAXED(ISP_REQ_Q_OUT(ha, reg));
                if (req->ring_index < cnt)
                        req->cnt = cnt - req->ring_index;
                else
                        req->cnt = req->length -
                            (req->ring_index - cnt);
                /* If still no head room then bail out */
                if (req->cnt < (req_cnt + 2))
                        goto queuing_error;
        }

        /* Build command packet */
        req->current_outstanding_cmd = handle;
        req->outstanding_cmds[handle] = sp;
        sp->handle = handle;
        cmd->host_scribble = (unsigned char *)(unsigned long)handle;
        req->cnt -= req_cnt;

        cmd_pkt = (cmd_entry_t *)req->ring_ptr;
        cmd_pkt->handle = handle;
        /* Zero out remaining portion of packet. */
        clr_ptr = (uint32_t *)cmd_pkt + 2;
        memset(clr_ptr, 0, REQUEST_ENTRY_SIZE - 8);
        cmd_pkt->dseg_count = cpu_to_le16(tot_dsds);

        /* Set target ID and LUN number */
        SET_TARGET_ID(ha, cmd_pkt->target, sp->fcport->loop_id);
        cmd_pkt->lun = cpu_to_le16(cmd->device->lun);
        cmd_pkt->control_flags = cpu_to_le16(CF_SIMPLE_TAG);

        /* Load SCSI command packet. */
        memcpy(cmd_pkt->scsi_cdb, cmd->cmnd, cmd->cmd_len);
        cmd_pkt->byte_count = cpu_to_le32((uint32_t)scsi_bufflen(cmd));

        /* Build IOCB segments */
        ha->isp_ops->build_iocbs(sp, cmd_pkt, tot_dsds);

        /* Set total data segment count. */
        cmd_pkt->entry_count = (uint8_t)req_cnt;
        wmb();

        /* Adjust ring index. */
        req->ring_index++;
        if (req->ring_index == req->length) {
                req->ring_index = 0;
                req->ring_ptr = req->ring;
        } else
                req->ring_ptr++;

        sp->flags |= SRB_DMA_VALID;

        /* Set chip new ring index. */
        WRT_REG_WORD(ISP_REQ_Q_IN(ha, reg), req->ring_index);
        RD_REG_WORD_RELAXED(ISP_REQ_Q_IN(ha, reg));     /* PCI Posting. */

        /* Manage unprocessed RIO/ZIO commands in response queue. */
        if (vha->flags.process_response_queue &&
            rsp->ring_ptr->signature != RESPONSE_PROCESSED)
                qla2x00_process_response_queue(rsp);

        spin_unlock_irqrestore(&ha->hardware_lock, flags);
        return (QLA_SUCCESS);

queuing_error:
        if (tot_dsds)
                scsi_dma_unmap(cmd);

        spin_unlock_irqrestore(&ha->hardware_lock, flags);

        return (QLA_FUNCTION_FAILED);
}

/**
 * qla2x00_start_iocbs() - Execute the IOCB command
 * @vha: HA context
 * @req: request queue
 */
void
qla2x00_start_iocbs(struct scsi_qla_host *vha, struct req_que *req)
{
        struct qla_hw_data *ha = vha->hw;
        device_reg_t *reg = ISP_QUE_REG(ha, req->id);

        if (IS_P3P_TYPE(ha)) {
                qla82xx_start_iocbs(vha);
        } else {
                /* Adjust ring index. */
                req->ring_index++;
                if (req->ring_index == req->length) {
                        req->ring_index = 0;
                        req->ring_ptr = req->ring;
                } else
                        req->ring_ptr++;

                /* Set chip new ring index. */
                if (ha->mqenable || IS_QLA27XX(ha)) {
                        WRT_REG_DWORD(req->req_q_in, req->ring_index);
                } else if (IS_QLA83XX(ha)) {
                        WRT_REG_DWORD(req->req_q_in, req->ring_index);
                        RD_REG_DWORD_RELAXED(&ha->iobase->isp24.hccr);
                } else if (IS_QLAFX00(ha)) {
                        WRT_REG_DWORD(&reg->ispfx00.req_q_in, req->ring_index);
                        RD_REG_DWORD_RELAXED(&reg->ispfx00.req_q_in);
                        QLAFX00_SET_HST_INTR(ha, ha->rqstq_intr_code);
                } else if (IS_FWI2_CAPABLE(ha)) {
                        WRT_REG_DWORD(&reg->isp24.req_q_in, req->ring_index);
                        RD_REG_DWORD_RELAXED(&reg->isp24.req_q_in);
                } else {
                        WRT_REG_WORD(ISP_REQ_Q_IN(ha, &reg->isp),
                                req->ring_index);
                        RD_REG_WORD_RELAXED(ISP_REQ_Q_IN(ha, &reg->isp));
                }
        }
}

/**
 * __qla2x00_marker() - Send a marker IOCB to the firmware.
 * @vha: HA context
 * @req: request queue
 * @rsp: response queue
 * @loop_id: loop ID
 * @lun: LUN
 * @type: marker modifier
 *
 * Can be called from both normal and interrupt context.
 *
 * Returns non-zero if a failure occurred, else zero.
 */
static int
__qla2x00_marker(struct scsi_qla_host *vha, struct req_que *req,
                        struct rsp_que *rsp, uint16_t loop_id,
                        uint64_t lun, uint8_t type)
{
        mrk_entry_t *mrk;
        struct mrk_entry_24xx *mrk24 = NULL;

        struct qla_hw_data *ha = vha->hw;
        scsi_qla_host_t *base_vha = pci_get_drvdata(ha->pdev);

        req = ha->req_q_map[0];
        mrk = (mrk_entry_t *)qla2x00_alloc_iocbs(vha, NULL);
        if (mrk == NULL) {
                ql_log(ql_log_warn, base_vha, 0x3026,
                    "Failed to allocate Marker IOCB.\n");

                return (QLA_FUNCTION_FAILED);
        }

        mrk->entry_type = MARKER_TYPE;
        mrk->modifier = type;
        if (type != MK_SYNC_ALL) {
                if (IS_FWI2_CAPABLE(ha)) {
                        mrk24 = (struct mrk_entry_24xx *) mrk;
                        mrk24->nport_handle = cpu_to_le16(loop_id);
                        int_to_scsilun(lun, (struct scsi_lun *)&mrk24->lun);
                        host_to_fcp_swap(mrk24->lun, sizeof(mrk24->lun));
                        mrk24->vp_index = vha->vp_idx;
                        mrk24->handle = MAKE_HANDLE(req->id, mrk24->handle);
                } else {
                        SET_TARGET_ID(ha, mrk->target, loop_id);
                        mrk->lun = cpu_to_le16((uint16_t)lun);
                }
        }
        wmb();

        qla2x00_start_iocbs(vha, req);

        return (QLA_SUCCESS);
}

int
qla2x00_marker(struct scsi_qla_host *vha, struct req_que *req,
                struct rsp_que *rsp, uint16_t loop_id, uint64_t lun,
                uint8_t type)
{
        int ret;
        unsigned long flags = 0;

        spin_lock_irqsave(&vha->hw->hardware_lock, flags);
        ret = __qla2x00_marker(vha, req, rsp, loop_id, lun, type);
        spin_unlock_irqrestore(&vha->hw->hardware_lock, flags);

        return (ret);
}

/*
 * qla2x00_issue_marker
 *
 * Issue marker
 * Caller CAN have hardware lock held as specified by ha_locked parameter.
 * Might release it, then reacquire.
 */
int qla2x00_issue_marker(scsi_qla_host_t *vha, int ha_locked)
{
        if (ha_locked) {
                if (__qla2x00_marker(vha, vha->req, vha->req->rsp, 0, 0,
                                        MK_SYNC_ALL) != QLA_SUCCESS)
                        return QLA_FUNCTION_FAILED;
        } else {
                if (qla2x00_marker(vha, vha->req, vha->req->rsp, 0, 0,
                                        MK_SYNC_ALL) != QLA_SUCCESS)
                        return QLA_FUNCTION_FAILED;
        }
        vha->marker_needed = 0;

        return QLA_SUCCESS;
}

static inline int
qla24xx_build_scsi_type_6_iocbs(srb_t *sp, struct cmd_type_6 *cmd_pkt,
        uint16_t tot_dsds)
{
        uint32_t *cur_dsd = NULL;
        scsi_qla_host_t *vha;
        struct qla_hw_data *ha;
        struct scsi_cmnd *cmd;
        struct scatterlist *cur_seg;
        uint32_t *dsd_seg;
        void *next_dsd;
        uint8_t avail_dsds;
        uint8_t first_iocb = 1;
        uint32_t dsd_list_len;
        struct dsd_dma *dsd_ptr;
        struct ct6_dsd *ctx;

        cmd = GET_CMD_SP(sp);

        /* Update entry type to indicate Command Type 6 IOCB */
        *((uint32_t *)(&cmd_pkt->entry_type)) = cpu_to_le32(COMMAND_TYPE_6);

        /* No data transfer */
        if (!scsi_bufflen(cmd) || cmd->sc_data_direction == DMA_NONE) {
                cmd_pkt->byte_count = cpu_to_le32(0);
                return 0;
        }

        vha = sp->vha;
        ha = vha->hw;

        /* Set transfer direction */
        if (cmd->sc_data_direction == DMA_TO_DEVICE) {
                cmd_pkt->control_flags = cpu_to_le16(CF_WRITE_DATA);
                vha->qla_stats.output_bytes += scsi_bufflen(cmd);
                vha->qla_stats.output_requests++;
        } else if (cmd->sc_data_direction == DMA_FROM_DEVICE) {
                cmd_pkt->control_flags = cpu_to_le16(CF_READ_DATA);
                vha->qla_stats.input_bytes += scsi_bufflen(cmd);
                vha->qla_stats.input_requests++;
        }

        cur_seg = scsi_sglist(cmd);
        ctx = GET_CMD_CTX_SP(sp);

        while (tot_dsds) {
                avail_dsds = (tot_dsds > QLA_DSDS_PER_IOCB) ?
                    QLA_DSDS_PER_IOCB : tot_dsds;
                tot_dsds -= avail_dsds;
                dsd_list_len = (avail_dsds + 1) * QLA_DSD_SIZE;

                dsd_ptr = list_first_entry(&ha->gbl_dsd_list,
                    struct dsd_dma, list);
                next_dsd = dsd_ptr->dsd_addr;
                list_del(&dsd_ptr->list);
                ha->gbl_dsd_avail--;
                list_add_tail(&dsd_ptr->list, &ctx->dsd_list);
                ctx->dsd_use_cnt++;
                ha->gbl_dsd_inuse++;

                if (first_iocb) {
                        first_iocb = 0;
                        dsd_seg = (uint32_t *)&cmd_pkt->fcp_data_dseg_address;
                        *dsd_seg++ = cpu_to_le32(LSD(dsd_ptr->dsd_list_dma));
                        *dsd_seg++ = cpu_to_le32(MSD(dsd_ptr->dsd_list_dma));
                        cmd_pkt->fcp_data_dseg_len = cpu_to_le32(dsd_list_len);
                } else {
                        *cur_dsd++ = cpu_to_le32(LSD(dsd_ptr->dsd_list_dma));
                        *cur_dsd++ = cpu_to_le32(MSD(dsd_ptr->dsd_list_dma));
                        *cur_dsd++ = cpu_to_le32(dsd_list_len);
                }
                cur_dsd = (uint32_t *)next_dsd;
                while (avail_dsds) {
                        dma_addr_t      sle_dma;

                        sle_dma = sg_dma_address(cur_seg);
                        *cur_dsd++ = cpu_to_le32(LSD(sle_dma));
                        *cur_dsd++ = cpu_to_le32(MSD(sle_dma));
                        *cur_dsd++ = cpu_to_le32(sg_dma_len(cur_seg));
                        cur_seg = sg_next(cur_seg);
                        avail_dsds--;
                }
        }

        /* Null termination */
        *cur_dsd++ = 0;
        *cur_dsd++ = 0;
        *cur_dsd++ = 0;
        cmd_pkt->control_flags |= cpu_to_le16(CF_DATA_SEG_DESCR_ENABLE);
        return 0;
}

/*
 * qla24xx_calc_dsd_lists() - Determine number of DSD lists required
 * for Command Type 6.
 *
 * @dsds: number of data segment descriptors needed
 *
 * Returns the number of DSD lists needed to store @dsds.
 */
static inline uint16_t
qla24xx_calc_dsd_lists(uint16_t dsds)
{
        uint16_t dsd_lists = 0;

        dsd_lists = (dsds / QLA_DSDS_PER_IOCB);
        if (dsds % QLA_DSDS_PER_IOCB)
                dsd_lists++;
        return dsd_lists;
}
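
/*
 * Worked example (illustrative): with QLA_DSDS_PER_IOCB descriptors per
 * list, dsds = QLA_DSDS_PER_IOCB + 1 needs two lists: one full list plus
 * a second holding the single remaining descriptor.
 */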

/**
 * qla24xx_build_scsi_iocbs() - Build IOCB command utilizing Command Type 7
 * IOCB types.
 *
 * @sp: SRB command to process
 * @cmd_pkt: Command type 7 IOCB
 * @tot_dsds: Total number of segments to transfer
 * @req: pointer to request queue
 */
inline void
qla24xx_build_scsi_iocbs(srb_t *sp, struct cmd_type_7 *cmd_pkt,
        uint16_t tot_dsds, struct req_que *req)
{
        uint16_t        avail_dsds;
        uint32_t        *cur_dsd;
        scsi_qla_host_t *vha;
        struct scsi_cmnd *cmd;
        struct scatterlist *sg;
        int i;

        cmd = GET_CMD_SP(sp);

        /* Update entry type to indicate Command Type 7 IOCB */
        *((uint32_t *)(&cmd_pkt->entry_type)) = cpu_to_le32(COMMAND_TYPE_7);

        /* No data transfer */
        if (!scsi_bufflen(cmd) || cmd->sc_data_direction == DMA_NONE) {
                cmd_pkt->byte_count = cpu_to_le32(0);
                return;
        }

        vha = sp->vha;

        /* Set transfer direction */
        if (cmd->sc_data_direction == DMA_TO_DEVICE) {
                cmd_pkt->task_mgmt_flags = cpu_to_le16(TMF_WRITE_DATA);
                vha->qla_stats.output_bytes += scsi_bufflen(cmd);
                vha->qla_stats.output_requests++;
        } else if (cmd->sc_data_direction == DMA_FROM_DEVICE) {
                cmd_pkt->task_mgmt_flags = cpu_to_le16(TMF_READ_DATA);
                vha->qla_stats.input_bytes += scsi_bufflen(cmd);
                vha->qla_stats.input_requests++;
        }

        /* One DSD is available in the Command Type 7 IOCB */
        avail_dsds = 1;
        cur_dsd = (uint32_t *)&cmd_pkt->dseg_0_address;

        /* Load data segments */
        scsi_for_each_sg(cmd, sg, tot_dsds, i) {
                dma_addr_t      sle_dma;
                cont_a64_entry_t *cont_pkt;

                /* Allocate additional continuation packets? */
                if (avail_dsds == 0) {
                        /*
                         * Five DSDs are available in the Continuation
                         * Type 1 IOCB.
                         */
                        cont_pkt = qla2x00_prep_cont_type1_iocb(vha, req);
                        cur_dsd = (uint32_t *)cont_pkt->dseg_0_address;
                        avail_dsds = 5;
                }

                sle_dma = sg_dma_address(sg);
                *cur_dsd++ = cpu_to_le32(LSD(sle_dma));
                *cur_dsd++ = cpu_to_le32(MSD(sle_dma));
                *cur_dsd++ = cpu_to_le32(sg_dma_len(sg));
                avail_dsds--;
        }
}

struct fw_dif_context {
        uint32_t ref_tag;
        uint16_t app_tag;
        uint8_t ref_tag_mask[4];        /* Validation/Replacement Mask */
        uint8_t app_tag_mask[2];        /* Validation/Replacement Mask */
};

/*
 * qla24xx_set_t10dif_tags() - Extract Ref and App tags from the SCSI command
 * and program them into the firmware DIF context.
 */
static inline void
qla24xx_set_t10dif_tags(srb_t *sp, struct fw_dif_context *pkt,
    unsigned int protcnt)
{
        struct scsi_cmnd *cmd = GET_CMD_SP(sp);

        switch (scsi_get_prot_type(cmd)) {
        case SCSI_PROT_DIF_TYPE0:
                /*
                 * No check for ql2xenablehba_err_chk, as it would be an
                 * I/O error if hba tag generation is not done.
                 */
                pkt->ref_tag = cpu_to_le32((uint32_t)
                    (0xffffffff & scsi_get_lba(cmd)));

                if (!qla2x00_hba_err_chk_enabled(sp))
                        break;

                pkt->ref_tag_mask[0] = 0xff;
                pkt->ref_tag_mask[1] = 0xff;
                pkt->ref_tag_mask[2] = 0xff;
                pkt->ref_tag_mask[3] = 0xff;
                break;

        /*
         * For TYPE 2 protection: 16 bit GUARD + 32 bit REF tag has to
         * match LBA in CDB + N
         */
        case SCSI_PROT_DIF_TYPE2:
                pkt->app_tag = cpu_to_le16(0);
                pkt->app_tag_mask[0] = 0x0;
                pkt->app_tag_mask[1] = 0x0;

                pkt->ref_tag = cpu_to_le32((uint32_t)
                    (0xffffffff & scsi_get_lba(cmd)));

                if (!qla2x00_hba_err_chk_enabled(sp))
                        break;

                /* enable ALL bytes of the ref tag */
                pkt->ref_tag_mask[0] = 0xff;
                pkt->ref_tag_mask[1] = 0xff;
                pkt->ref_tag_mask[2] = 0xff;
                pkt->ref_tag_mask[3] = 0xff;
                break;

        /* For Type 3 protection: 16 bit GUARD only */
        case SCSI_PROT_DIF_TYPE3:
                pkt->ref_tag_mask[0] = pkt->ref_tag_mask[1] =
                        pkt->ref_tag_mask[2] = pkt->ref_tag_mask[3] =
                                                                0x00;
                break;

        /*
         * For Type 1 protection: 16 bit GUARD tag, 32 bit REF tag, and
         * 16 bit app tag.
         */
        case SCSI_PROT_DIF_TYPE1:
                pkt->ref_tag = cpu_to_le32((uint32_t)
                    (0xffffffff & scsi_get_lba(cmd)));
                pkt->app_tag = cpu_to_le16(0);
                pkt->app_tag_mask[0] = 0x0;
                pkt->app_tag_mask[1] = 0x0;

                if (!qla2x00_hba_err_chk_enabled(sp))
                        break;

                /* enable ALL bytes of the ref tag */
                pkt->ref_tag_mask[0] = 0xff;
                pkt->ref_tag_mask[1] = 0xff;
                pkt->ref_tag_mask[2] = 0xff;
                pkt->ref_tag_mask[3] = 0xff;
                break;
        }
}
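
/*
 * Summary (informational): the masks programmed above enable firmware
 * tag checking roughly as follows. Types 0, 1 and 2 check all four ref
 * tag bytes (Types 1 and 2 additionally mask off the app tag); Type 3
 * masks the ref tag entirely, leaving only the 16-bit guard tag. With
 * HBA error checking disabled, only the ref tag seed is programmed.
 */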

int
qla24xx_get_one_block_sg(uint32_t blk_sz, struct qla2_sgx *sgx,
        uint32_t *partial)
{
        struct scatterlist *sg;
        uint32_t cumulative_partial, sg_len;
        dma_addr_t sg_dma_addr;

        if (sgx->num_bytes == sgx->tot_bytes)
                return 0;

        sg = sgx->cur_sg;
        cumulative_partial = sgx->tot_partial;

        sg_dma_addr = sg_dma_address(sg);
        sg_len = sg_dma_len(sg);

        sgx->dma_addr = sg_dma_addr + sgx->bytes_consumed;

        if ((cumulative_partial + (sg_len - sgx->bytes_consumed)) >= blk_sz) {
                sgx->dma_len = (blk_sz - cumulative_partial);
                sgx->tot_partial = 0;
                sgx->num_bytes += blk_sz;
                *partial = 0;
        } else {
                sgx->dma_len = sg_len - sgx->bytes_consumed;
                sgx->tot_partial += sgx->dma_len;
                *partial = 1;
        }

        sgx->bytes_consumed += sgx->dma_len;

        if (sg_len == sgx->bytes_consumed) {
                sg = sg_next(sg);
                sgx->num_sg++;
                sgx->cur_sg = sg;
                sgx->bytes_consumed = 0;
        }

        return 1;
}
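
/*
 * Worked example (illustrative): with blk_sz = 512 and a scatterlist of
 * two elements of 768 and 256 bytes, the first call yields a full
 * 512-byte block (*partial = 0), the second yields the 256-byte tail of
 * element one (*partial = 1), and the third yields the 256 bytes of
 * element two that complete the block (*partial = 0), after which the
 * walk is done.
 */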

int
qla24xx_walk_and_build_sglist_no_difb(struct qla_hw_data *ha, srb_t *sp,
        uint32_t *dsd, uint16_t tot_dsds, struct qla_tc_param *tc)
{
        void *next_dsd;
        uint8_t avail_dsds = 0;
        uint32_t dsd_list_len;
        struct dsd_dma *dsd_ptr;
        struct scatterlist *sg_prot;
        uint32_t *cur_dsd = dsd;
        uint16_t        used_dsds = tot_dsds;
        uint32_t        prot_int; /* protection interval */
        uint32_t        partial;
        struct qla2_sgx sgx;
        dma_addr_t      sle_dma;
        uint32_t        sle_dma_len, tot_prot_dma_len = 0;
        struct scsi_cmnd *cmd;

        memset(&sgx, 0, sizeof(struct qla2_sgx));
        if (sp) {
                cmd = GET_CMD_SP(sp);
                prot_int = cmd->device->sector_size;

                sgx.tot_bytes = scsi_bufflen(cmd);
                sgx.cur_sg = scsi_sglist(cmd);
                sgx.sp = sp;

                sg_prot = scsi_prot_sglist(cmd);
        } else if (tc) {
                prot_int      = tc->blk_sz;
                sgx.tot_bytes = tc->bufflen;
                sgx.cur_sg    = tc->sg;
                sg_prot       = tc->prot_sg;
        } else {
                BUG();
                return 1;
        }

        while (qla24xx_get_one_block_sg(prot_int, &sgx, &partial)) {

                sle_dma = sgx.dma_addr;
                sle_dma_len = sgx.dma_len;
alloc_and_fill:
                /* Allocate additional continuation packets? */
                if (avail_dsds == 0) {
                        avail_dsds = (used_dsds > QLA_DSDS_PER_IOCB) ?
                                        QLA_DSDS_PER_IOCB : used_dsds;
                        dsd_list_len = (avail_dsds + 1) * 12;
                        used_dsds -= avail_dsds;

                        /* allocate tracking DS */
                        dsd_ptr = kzalloc(sizeof(struct dsd_dma), GFP_ATOMIC);
                        if (!dsd_ptr)
                                return 1;

                        /* allocate new list */
                        dsd_ptr->dsd_addr = next_dsd =
                            dma_pool_alloc(ha->dl_dma_pool, GFP_ATOMIC,
                                &dsd_ptr->dsd_list_dma);

                        if (!next_dsd) {
                                /*
                                 * Need to clean up only this dsd_ptr; the
                                 * rest will be done by sp_free_dma().
                                 */
                                kfree(dsd_ptr);
                                return 1;
                        }

                        if (sp) {
                                list_add_tail(&dsd_ptr->list,
                                    &((struct crc_context *)
                                            sp->u.scmd.ctx)->dsd_list);

                                sp->flags |= SRB_CRC_CTX_DSD_VALID;
                        } else {
                                list_add_tail(&dsd_ptr->list,
                                    &(tc->ctx->dsd_list));
                                *tc->ctx_dsd_alloced = 1;
                        }

                        /* add new list to cmd iocb or last list */
                        *cur_dsd++ = cpu_to_le32(LSD(dsd_ptr->dsd_list_dma));
                        *cur_dsd++ = cpu_to_le32(MSD(dsd_ptr->dsd_list_dma));
                        *cur_dsd++ = cpu_to_le32(dsd_list_len);
                        cur_dsd = (uint32_t *)next_dsd;
                }
                *cur_dsd++ = cpu_to_le32(LSD(sle_dma));
                *cur_dsd++ = cpu_to_le32(MSD(sle_dma));
                *cur_dsd++ = cpu_to_le32(sle_dma_len);
                avail_dsds--;

                if (partial == 0) {
                        /* Got a full protection interval */
                        sle_dma = sg_dma_address(sg_prot) + tot_prot_dma_len;
                        sle_dma_len = 8;

                        tot_prot_dma_len += sle_dma_len;
                        if (tot_prot_dma_len == sg_dma_len(sg_prot)) {
                                tot_prot_dma_len = 0;
                                sg_prot = sg_next(sg_prot);
                        }

                        partial = 1; /* So as to not re-enter this block */
                        goto alloc_and_fill;
                }
        }
        /* Null termination */
        *cur_dsd++ = 0;
        *cur_dsd++ = 0;
        *cur_dsd++ = 0;
        return 0;
}

int
qla24xx_walk_and_build_sglist(struct qla_hw_data *ha, srb_t *sp, uint32_t *dsd,
        uint16_t tot_dsds, struct qla_tc_param *tc)
{
        void *next_dsd;
        uint8_t avail_dsds = 0;
        uint32_t dsd_list_len;
        struct dsd_dma *dsd_ptr;
        struct scatterlist *sg, *sgl;
        uint32_t *cur_dsd = dsd;
        int     i;
        uint16_t        used_dsds = tot_dsds;
        struct scsi_cmnd *cmd;

        if (sp) {
                cmd = GET_CMD_SP(sp);
                sgl = scsi_sglist(cmd);
        } else if (tc) {
                sgl = tc->sg;
        } else {
                BUG();
                return 1;
        }

        for_each_sg(sgl, sg, tot_dsds, i) {
                dma_addr_t      sle_dma;

                /* Allocate additional continuation packets? */
                if (avail_dsds == 0) {
                        avail_dsds = (used_dsds > QLA_DSDS_PER_IOCB) ?
                                        QLA_DSDS_PER_IOCB : used_dsds;
                        dsd_list_len = (avail_dsds + 1) * 12;
                        used_dsds -= avail_dsds;

                        /* allocate tracking DS */
                        dsd_ptr = kzalloc(sizeof(struct dsd_dma), GFP_ATOMIC);
                        if (!dsd_ptr)
                                return 1;

                        /* allocate new list */
                        dsd_ptr->dsd_addr = next_dsd =
                            dma_pool_alloc(ha->dl_dma_pool, GFP_ATOMIC,
                                &dsd_ptr->dsd_list_dma);

                        if (!next_dsd) {
                                /*
                                 * Need to clean up only this dsd_ptr; the
                                 * rest will be done by sp_free_dma().
                                 */
                                kfree(dsd_ptr);
                                return 1;
                        }

                        if (sp) {
                                list_add_tail(&dsd_ptr->list,
                                    &((struct crc_context *)
                                            sp->u.scmd.ctx)->dsd_list);

                                sp->flags |= SRB_CRC_CTX_DSD_VALID;
                        } else {
                                list_add_tail(&dsd_ptr->list,
                                    &(tc->ctx->dsd_list));
                                *tc->ctx_dsd_alloced = 1;
                        }

                        /* add new list to cmd iocb or last list */
                        *cur_dsd++ = cpu_to_le32(LSD(dsd_ptr->dsd_list_dma));
                        *cur_dsd++ = cpu_to_le32(MSD(dsd_ptr->dsd_list_dma));
                        *cur_dsd++ = cpu_to_le32(dsd_list_len);
                        cur_dsd = (uint32_t *)next_dsd;
                }
                sle_dma = sg_dma_address(sg);

                *cur_dsd++ = cpu_to_le32(LSD(sle_dma));
                *cur_dsd++ = cpu_to_le32(MSD(sle_dma));
                *cur_dsd++ = cpu_to_le32(sg_dma_len(sg));
                avail_dsds--;
        }
        /* Null termination */
        *cur_dsd++ = 0;
        *cur_dsd++ = 0;
        *cur_dsd++ = 0;
        return 0;
}

int
qla24xx_walk_and_build_prot_sglist(struct qla_hw_data *ha, srb_t *sp,
        uint32_t *dsd, uint16_t tot_dsds, struct qla_tc_param *tc)
{
        void *next_dsd;
        uint8_t avail_dsds = 0;
        uint32_t dsd_list_len;
        struct dsd_dma *dsd_ptr;
        struct scatterlist *sg, *sgl;
        int     i;
        struct scsi_cmnd *cmd;
        uint32_t *cur_dsd = dsd;
        uint16_t used_dsds = tot_dsds;
        struct scsi_qla_host *vha;

        if (sp) {
                cmd = GET_CMD_SP(sp);
                sgl = scsi_prot_sglist(cmd);
                vha = sp->vha;
        } else if (tc) {
                vha = tc->vha;
                sgl = tc->prot_sg;
        } else {
                BUG();
                return 1;
        }

        ql_dbg(ql_dbg_tgt, vha, 0xe021,
                "%s: enter\n", __func__);

        for_each_sg(sgl, sg, tot_dsds, i) {
                dma_addr_t      sle_dma;

                /* Allocate additional continuation packets? */
                if (avail_dsds == 0) {
                        avail_dsds = (used_dsds > QLA_DSDS_PER_IOCB) ?
                                                QLA_DSDS_PER_IOCB : used_dsds;
                        dsd_list_len = (avail_dsds + 1) * 12;
                        used_dsds -= avail_dsds;

                        /* allocate tracking DS */
                        dsd_ptr = kzalloc(sizeof(struct dsd_dma), GFP_ATOMIC);
                        if (!dsd_ptr)
                                return 1;

                        /* allocate new list */
                        dsd_ptr->dsd_addr = next_dsd =
                            dma_pool_alloc(ha->dl_dma_pool, GFP_ATOMIC,
                                &dsd_ptr->dsd_list_dma);

                        if (!next_dsd) {
                                /*
                                 * Need to clean up only this dsd_ptr; the
                                 * rest will be done by sp_free_dma().
                                 */
                                kfree(dsd_ptr);
                                return 1;
                        }

                        if (sp) {
                                list_add_tail(&dsd_ptr->list,
                                    &((struct crc_context *)
                                            sp->u.scmd.ctx)->dsd_list);

                                sp->flags |= SRB_CRC_CTX_DSD_VALID;
                        } else {
                                list_add_tail(&dsd_ptr->list,
                                    &(tc->ctx->dsd_list));
                                *tc->ctx_dsd_alloced = 1;
                        }

                        /* add new list to cmd iocb or last list */
                        *cur_dsd++ = cpu_to_le32(LSD(dsd_ptr->dsd_list_dma));
                        *cur_dsd++ = cpu_to_le32(MSD(dsd_ptr->dsd_list_dma));
                        *cur_dsd++ = cpu_to_le32(dsd_list_len);
                        cur_dsd = (uint32_t *)next_dsd;
                }
                sle_dma = sg_dma_address(sg);

                *cur_dsd++ = cpu_to_le32(LSD(sle_dma));
                *cur_dsd++ = cpu_to_le32(MSD(sle_dma));
                *cur_dsd++ = cpu_to_le32(sg_dma_len(sg));

                avail_dsds--;
        }
        /* Null termination */
        *cur_dsd++ = 0;
        *cur_dsd++ = 0;
        *cur_dsd++ = 0;
        return 0;
}

/**
 * qla24xx_build_scsi_crc_2_iocbs() - Build IOCB command utilizing Command
 *                                    Type CRC_2 IOCB types.
 *
 * @sp: SRB command to process
 * @cmd_pkt: Command type CRC_2 IOCB
 * @tot_dsds: Total number of segments to transfer
 * @tot_prot_dsds: Total number of segments carrying protection information
 * @fw_prot_opts: Protection options to pass to the firmware
 */
inline int
qla24xx_build_scsi_crc_2_iocbs(srb_t *sp, struct cmd_type_crc_2 *cmd_pkt,
    uint16_t tot_dsds, uint16_t tot_prot_dsds, uint16_t fw_prot_opts)
{
        uint32_t                *cur_dsd, *fcp_dl;
        scsi_qla_host_t         *vha;
        struct scsi_cmnd        *cmd;
        uint32_t                total_bytes = 0;
        uint32_t                data_bytes;
        uint32_t                dif_bytes;
        uint8_t                 bundling = 1;
        uint16_t                blk_size;
        uint8_t                 *clr_ptr;
        struct crc_context      *crc_ctx_pkt = NULL;
        struct qla_hw_data      *ha;
        uint8_t                 additional_fcpcdb_len;
        uint16_t                fcp_cmnd_len;
        struct fcp_cmnd         *fcp_cmnd;
        dma_addr_t              crc_ctx_dma;

        cmd = GET_CMD_SP(sp);

        /* Update entry type to indicate Command Type CRC_2 IOCB */
        *((uint32_t *)(&cmd_pkt->entry_type)) = cpu_to_le32(COMMAND_TYPE_CRC_2);

        vha = sp->vha;
        ha = vha->hw;

        /* No data transfer */
        data_bytes = scsi_bufflen(cmd);
        if (!data_bytes || cmd->sc_data_direction == DMA_NONE) {
                cmd_pkt->byte_count = cpu_to_le32(0);
                return QLA_SUCCESS;
        }

        cmd_pkt->vp_index = sp->vha->vp_idx;

        /* Set transfer direction */
        if (cmd->sc_data_direction == DMA_TO_DEVICE) {
                cmd_pkt->control_flags =
                    cpu_to_le16(CF_WRITE_DATA);
        } else if (cmd->sc_data_direction == DMA_FROM_DEVICE) {
                cmd_pkt->control_flags =
                    cpu_to_le16(CF_READ_DATA);
        }

        if ((scsi_get_prot_op(cmd) == SCSI_PROT_READ_INSERT) ||
            (scsi_get_prot_op(cmd) == SCSI_PROT_WRITE_STRIP) ||
            (scsi_get_prot_op(cmd) == SCSI_PROT_READ_STRIP) ||
            (scsi_get_prot_op(cmd) == SCSI_PROT_WRITE_INSERT))
                bundling = 0;

        /* Allocate CRC context from global pool */
        crc_ctx_pkt = sp->u.scmd.ctx =
            dma_pool_alloc(ha->dl_dma_pool, GFP_ATOMIC, &crc_ctx_dma);

        if (!crc_ctx_pkt)
                goto crc_queuing_error;

        /* Zero out CTX area. */
        clr_ptr = (uint8_t *)crc_ctx_pkt;
        memset(clr_ptr, 0, sizeof(*crc_ctx_pkt));

        crc_ctx_pkt->crc_ctx_dma = crc_ctx_dma;

        sp->flags |= SRB_CRC_CTX_DMA_VALID;

        /* Set handle */
        crc_ctx_pkt->handle = cmd_pkt->handle;

        INIT_LIST_HEAD(&crc_ctx_pkt->dsd_list);

        qla24xx_set_t10dif_tags(sp, (struct fw_dif_context *)
            &crc_ctx_pkt->ref_tag, tot_prot_dsds);

        cmd_pkt->crc_context_address[0] = cpu_to_le32(LSD(crc_ctx_dma));
        cmd_pkt->crc_context_address[1] = cpu_to_le32(MSD(crc_ctx_dma));
        cmd_pkt->crc_context_len = CRC_CONTEXT_LEN_FW;

        /* Determine SCSI command length -- align to 4 byte boundary */
        if (cmd->cmd_len > 16) {
                additional_fcpcdb_len = cmd->cmd_len - 16;
                if ((cmd->cmd_len % 4) != 0) {
                        /* SCSI cmd > 16 bytes must be multiple of 4 */
                        goto crc_queuing_error;
                }
                fcp_cmnd_len = 12 + cmd->cmd_len + 4;
        } else {
                additional_fcpcdb_len = 0;
                fcp_cmnd_len = 12 + 16 + 4;
        }

        fcp_cmnd = &crc_ctx_pkt->fcp_cmnd;

        fcp_cmnd->additional_cdb_len = additional_fcpcdb_len;
        if (cmd->sc_data_direction == DMA_TO_DEVICE)
                fcp_cmnd->additional_cdb_len |= 1;
        else if (cmd->sc_data_direction == DMA_FROM_DEVICE)
                fcp_cmnd->additional_cdb_len |= 2;

        int_to_scsilun(cmd->device->lun, &fcp_cmnd->lun);
        memcpy(fcp_cmnd->cdb, cmd->cmnd, cmd->cmd_len);
        cmd_pkt->fcp_cmnd_dseg_len = cpu_to_le16(fcp_cmnd_len);
        cmd_pkt->fcp_cmnd_dseg_address[0] = cpu_to_le32(
            LSD(crc_ctx_dma + CRC_CONTEXT_FCPCMND_OFF));
        cmd_pkt->fcp_cmnd_dseg_address[1] = cpu_to_le32(
            MSD(crc_ctx_dma + CRC_CONTEXT_FCPCMND_OFF));
        fcp_cmnd->task_management = 0;
        fcp_cmnd->task_attribute = TSK_SIMPLE;

        cmd_pkt->fcp_rsp_dseg_len = 0; /* Let response come in status iocb */

        /* Compute DIF length and adjust data length to include protection */
        dif_bytes = 0;
        blk_size = cmd->device->sector_size;
        dif_bytes = (data_bytes / blk_size) * 8;

        switch (scsi_get_prot_op(GET_CMD_SP(sp))) {
        case SCSI_PROT_READ_INSERT:
        case SCSI_PROT_WRITE_STRIP:
                total_bytes = data_bytes;
                data_bytes += dif_bytes;
                break;

        case SCSI_PROT_READ_STRIP:
        case SCSI_PROT_WRITE_INSERT:
        case SCSI_PROT_READ_PASS:
        case SCSI_PROT_WRITE_PASS:
                total_bytes = data_bytes + dif_bytes;
                break;
        default:
                BUG();
        }

        if (!qla2x00_hba_err_chk_enabled(sp))
                fw_prot_opts |= 0x10; /* Disable Guard tag checking */
        /* HBA error checking enabled */
        else if (IS_PI_UNINIT_CAPABLE(ha)) {
                if ((scsi_get_prot_type(GET_CMD_SP(sp)) == SCSI_PROT_DIF_TYPE1)
                    || (scsi_get_prot_type(GET_CMD_SP(sp)) ==
                        SCSI_PROT_DIF_TYPE2))
                        fw_prot_opts |= BIT_10;
                else if (scsi_get_prot_type(GET_CMD_SP(sp)) ==
                    SCSI_PROT_DIF_TYPE3)
                        fw_prot_opts |= BIT_11;
        }

        if (!bundling) {
                cur_dsd = (uint32_t *) &crc_ctx_pkt->u.nobundling.data_address;
        } else {
                /*
                 * Configure bundling if we need to fetch interleaving
                 * protection data via separate PCI accesses.
                 */
                fw_prot_opts |= PO_ENABLE_DIF_BUNDLING;
                crc_ctx_pkt->u.bundling.dif_byte_count = cpu_to_le32(dif_bytes);
                crc_ctx_pkt->u.bundling.dseg_count = cpu_to_le16(tot_dsds -
                                                        tot_prot_dsds);
                cur_dsd = (uint32_t *) &crc_ctx_pkt->u.bundling.data_address;
        }

        /* Finish the common fields of CRC pkt */
        crc_ctx_pkt->blk_size = cpu_to_le16(blk_size);
        crc_ctx_pkt->prot_opts = cpu_to_le16(fw_prot_opts);
        crc_ctx_pkt->byte_count = cpu_to_le32(data_bytes);
        crc_ctx_pkt->guard_seed = cpu_to_le16(0);
        /* Fibre channel byte count */
        cmd_pkt->byte_count = cpu_to_le32(total_bytes);
        fcp_dl = (uint32_t *)(crc_ctx_pkt->fcp_cmnd.cdb + 16 +
            additional_fcpcdb_len);
        *fcp_dl = htonl(total_bytes);

        if (!data_bytes || cmd->sc_data_direction == DMA_NONE) {
                cmd_pkt->byte_count = cpu_to_le32(0);
                return QLA_SUCCESS;
        }

        /* Walk the data segments */
        cmd_pkt->control_flags |= cpu_to_le16(CF_DATA_SEG_DESCR_ENABLE);

        if (!bundling && tot_prot_dsds) {
                if (qla24xx_walk_and_build_sglist_no_difb(ha, sp,
                        cur_dsd, tot_dsds, NULL))
                        goto crc_queuing_error;
        } else if (qla24xx_walk_and_build_sglist(ha, sp, cur_dsd,
                        (tot_dsds - tot_prot_dsds), NULL))
                goto crc_queuing_error;

        if (bundling && tot_prot_dsds) {
                /* Walk the dif segments */
                cmd_pkt->control_flags |= cpu_to_le16(CF_DIF_SEG_DESCR_ENABLE);
                cur_dsd = (uint32_t *) &crc_ctx_pkt->u.bundling.dif_address;
                if (qla24xx_walk_and_build_prot_sglist(ha, sp, cur_dsd,
                                tot_prot_dsds, NULL))
                        goto crc_queuing_error;
        }
        return QLA_SUCCESS;

crc_queuing_error:
        /* Cleanup will be performed by the caller */

        return QLA_FUNCTION_FAILED;
}
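
/*
 * Worked example (illustrative): for a 4096-byte transfer on a device
 * with 512-byte sectors, dif_bytes = (4096 / 512) * 8 = 64. A WRITE_PASS
 * then moves total_bytes = 4096 + 64 = 4160 bytes on the wire, while a
 * WRITE_STRIP moves total_bytes = 4096 and the firmware discards the
 * 64 protection bytes supplied by the host.
 */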
1397
1398/**
1399 * qla24xx_start_scsi() - Send a SCSI command to the ISP
1400 * @sp: command to send to the ISP
1401 *
1402 * Returns non-zero if a failure occurred, else zero.
1403 */
1404int
1405qla24xx_start_scsi(srb_t *sp)
1406{
1407        int             nseg;
1408        unsigned long   flags;
1409        uint32_t        *clr_ptr;
1410        uint32_t        index;
1411        uint32_t        handle;
1412        struct cmd_type_7 *cmd_pkt;
1413        uint16_t        cnt;
1414        uint16_t        req_cnt;
1415        uint16_t        tot_dsds;
1416        struct req_que *req = NULL;
1417        struct rsp_que *rsp = NULL;
1418        struct scsi_cmnd *cmd = GET_CMD_SP(sp);
1419        struct scsi_qla_host *vha = sp->vha;
1420        struct qla_hw_data *ha = vha->hw;
1421
1422        /* Setup device pointers. */
1423        req = vha->req;
1424        rsp = req->rsp;
1425
1426        /* So we know we haven't pci_map'ed anything yet */
1427        tot_dsds = 0;
1428
1429        /* Send marker if required */
1430        if (vha->marker_needed != 0) {
1431                if (qla2x00_marker(vha, req, rsp, 0, 0, MK_SYNC_ALL) !=
1432                    QLA_SUCCESS)
1433                        return QLA_FUNCTION_FAILED;
1434                vha->marker_needed = 0;
1435        }
1436
1437        /* Acquire ring specific lock */
1438        spin_lock_irqsave(&ha->hardware_lock, flags);
1439
1440        /* Check for room in outstanding command list. */
1441        handle = req->current_outstanding_cmd;
1442        for (index = 1; index < req->num_outstanding_cmds; index++) {
1443                handle++;
1444                if (handle == req->num_outstanding_cmds)
1445                        handle = 1;
1446                if (!req->outstanding_cmds[handle])
1447                        break;
1448        }
1449        if (index == req->num_outstanding_cmds)
1450                goto queuing_error;
1451
1452        /* Map the sg table so we have an accurate count of sg entries needed */
1453        if (scsi_sg_count(cmd)) {
1454                nseg = dma_map_sg(&ha->pdev->dev, scsi_sglist(cmd),
1455                    scsi_sg_count(cmd), cmd->sc_data_direction);
1456                if (unlikely(!nseg))
1457                        goto queuing_error;
1458        } else
1459                nseg = 0;
1460
1461        tot_dsds = nseg;
1462        req_cnt = qla24xx_calc_iocbs(vha, tot_dsds);
1463        if (req->cnt < (req_cnt + 2)) {
1464                cnt = IS_SHADOW_REG_CAPABLE(ha) ? *req->out_ptr :
1465                    RD_REG_DWORD_RELAXED(req->req_q_out);
1466                if (req->ring_index < cnt)
1467                        req->cnt = cnt - req->ring_index;
1468                else
1469                        req->cnt = req->length -
1470                                (req->ring_index - cnt);
1471                if (req->cnt < (req_cnt + 2))
1472                        goto queuing_error;
1473        }
1474
1475        /* Build command packet. */
1476        req->current_outstanding_cmd = handle;
1477        req->outstanding_cmds[handle] = sp;
1478        sp->handle = handle;
1479        cmd->host_scribble = (unsigned char *)(unsigned long)handle;
1480        req->cnt -= req_cnt;
1481
1482        cmd_pkt = (struct cmd_type_7 *)req->ring_ptr;
1483        cmd_pkt->handle = MAKE_HANDLE(req->id, handle);
1484
1485        /* Zero out remaining portion of packet. */
1486        /*    tagged queuing modifier -- default is TSK_SIMPLE (0). */
1487        clr_ptr = (uint32_t *)cmd_pkt + 2;
1488        memset(clr_ptr, 0, REQUEST_ENTRY_SIZE - 8);
1489        cmd_pkt->dseg_count = cpu_to_le16(tot_dsds);
1490
1491        /* Set NPORT-ID and LUN number*/
1492        cmd_pkt->nport_handle = cpu_to_le16(sp->fcport->loop_id);
1493        cmd_pkt->port_id[0] = sp->fcport->d_id.b.al_pa;
1494        cmd_pkt->port_id[1] = sp->fcport->d_id.b.area;
1495        cmd_pkt->port_id[2] = sp->fcport->d_id.b.domain;
1496        cmd_pkt->vp_index = sp->vha->vp_idx;
1497
1498        int_to_scsilun(cmd->device->lun, &cmd_pkt->lun);
1499        host_to_fcp_swap((uint8_t *)&cmd_pkt->lun, sizeof(cmd_pkt->lun));
1500
1501        cmd_pkt->task = TSK_SIMPLE;
1502
1503        /* Load SCSI command packet. */
1504        memcpy(cmd_pkt->fcp_cdb, cmd->cmnd, cmd->cmd_len);
1505        host_to_fcp_swap(cmd_pkt->fcp_cdb, sizeof(cmd_pkt->fcp_cdb));
1506
1507        cmd_pkt->byte_count = cpu_to_le32((uint32_t)scsi_bufflen(cmd));
1508
1509        /* Build IOCB segments */
1510        qla24xx_build_scsi_iocbs(sp, cmd_pkt, tot_dsds, req);
1511
1512        /* Set total data segment count. */
1513        cmd_pkt->entry_count = (uint8_t)req_cnt;
1514        wmb();
1515        /* Adjust ring index. */
1516        req->ring_index++;
1517        if (req->ring_index == req->length) {
1518                req->ring_index = 0;
1519                req->ring_ptr = req->ring;
1520        } else
1521                req->ring_ptr++;
1522
1523        sp->flags |= SRB_DMA_VALID;
1524
1525        /* Set chip new ring index. */
1526        WRT_REG_DWORD(req->req_q_in, req->ring_index);
1527        RD_REG_DWORD_RELAXED(&ha->iobase->isp24.hccr);
1528
1529        /* Manage unprocessed RIO/ZIO commands in response queue. */
1530        if (vha->flags.process_response_queue &&
1531                rsp->ring_ptr->signature != RESPONSE_PROCESSED)
1532                qla24xx_process_response_queue(vha, rsp);
1533
1534        spin_unlock_irqrestore(&ha->hardware_lock, flags);
1535        return QLA_SUCCESS;
1536
1537queuing_error:
1538        if (tot_dsds)
1539                scsi_dma_unmap(cmd);
1540
1541        spin_unlock_irqrestore(&ha->hardware_lock, flags);
1542
1543        return QLA_FUNCTION_FAILED;
1544}
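
/*
 * Editor's note: a minimal user-space sketch of the free-handle scan used
 * above (and repeated in the other start_scsi variants). The search is
 * circular, resumes just past current_outstanding_cmd, and never hands
 * out slot 0. Names are illustrative, not driver API.
 */
#include <stdint.h>
#include <stddef.h>

static uint32_t find_free_handle(void **cmds, uint32_t ncmds, uint32_t cur)
{
        uint32_t handle = cur;
        uint32_t index;

        for (index = 1; index < ncmds; index++) {
                handle++;
                if (handle == ncmds)
                        handle = 1;     /* wrap around, skipping slot 0 */
                if (!cmds[handle])
                        return handle;  /* free slot found */
        }
        return 0;                       /* table full */
}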
1545
1546/**
1547 * qla24xx_dif_start_scsi() - Send a SCSI command to the ISP
1548 * @sp: command to send to the ISP
1549 *
1550 * Returns non-zero if a failure occurred, else zero.
1551 */
1552int
1553qla24xx_dif_start_scsi(srb_t *sp)
1554{
1555        int                     nseg;
1556        unsigned long           flags;
1557        uint32_t                *clr_ptr;
1558        uint32_t                index;
1559        uint32_t                handle;
1560        uint16_t                cnt;
1561        uint16_t                req_cnt = 0;
1562        uint16_t                tot_dsds;
1563        uint16_t                tot_prot_dsds;
1564        uint16_t                fw_prot_opts = 0;
1565        struct req_que          *req = NULL;
1566        struct rsp_que          *rsp = NULL;
1567        struct scsi_cmnd        *cmd = GET_CMD_SP(sp);
1568        struct scsi_qla_host    *vha = sp->vha;
1569        struct qla_hw_data      *ha = vha->hw;
1570        struct cmd_type_crc_2   *cmd_pkt;
1571        uint32_t                status = 0;
1572
1573#define QDSS_GOT_Q_SPACE        BIT_0
1574
1575        /* Only process protection or >16 cdb in this routine */
1576        if (scsi_get_prot_op(cmd) == SCSI_PROT_NORMAL) {
1577                if (cmd->cmd_len <= 16)
1578                        return qla24xx_start_scsi(sp);
1579        }
1580
1581        /* Setup device pointers. */
1582        req = vha->req;
1583        rsp = req->rsp;
1584
1585        /* So we know we haven't pci_map'ed anything yet */
1586        tot_dsds = 0;
1587
1588        /* Send marker if required */
1589        if (vha->marker_needed != 0) {
1590                if (qla2x00_marker(vha, req, rsp, 0, 0, MK_SYNC_ALL) !=
1591                    QLA_SUCCESS)
1592                        return QLA_FUNCTION_FAILED;
1593                vha->marker_needed = 0;
1594        }
1595
1596        /* Acquire ring specific lock */
1597        spin_lock_irqsave(&ha->hardware_lock, flags);
1598
1599        /* Check for room in outstanding command list. */
1600        handle = req->current_outstanding_cmd;
1601        for (index = 1; index < req->num_outstanding_cmds; index++) {
1602                handle++;
1603                if (handle == req->num_outstanding_cmds)
1604                        handle = 1;
1605                if (!req->outstanding_cmds[handle])
1606                        break;
1607        }
1608
1609        if (index == req->num_outstanding_cmds)
1610                goto queuing_error;
1611
1612        /* Compute number of required data segments */
1613        /* Map the sg table so we have an accurate count of sg entries needed */
1614        if (scsi_sg_count(cmd)) {
1615                nseg = dma_map_sg(&ha->pdev->dev, scsi_sglist(cmd),
1616                    scsi_sg_count(cmd), cmd->sc_data_direction);
1617                if (unlikely(!nseg))
1618                        goto queuing_error;
1619                else
1620                        sp->flags |= SRB_DMA_VALID;
1621
1622                if ((scsi_get_prot_op(cmd) == SCSI_PROT_READ_INSERT) ||
1623                    (scsi_get_prot_op(cmd) == SCSI_PROT_WRITE_STRIP)) {
1624                        struct qla2_sgx sgx;
1625                        uint32_t        partial;
1626
1627                        memset(&sgx, 0, sizeof(struct qla2_sgx));
1628                        sgx.tot_bytes = scsi_bufflen(cmd);
1629                        sgx.cur_sg = scsi_sglist(cmd);
1630                        sgx.sp = sp;
1631
1632                        nseg = 0;
1633                        while (qla24xx_get_one_block_sg(
1634                            cmd->device->sector_size, &sgx, &partial))
1635                                nseg++;
1636                }
1637        } else
1638                nseg = 0;
1639
1640        /* number of required data segments */
1641        tot_dsds = nseg;
1642
1643        /* Compute number of required protection segments */
1644        if (qla24xx_configure_prot_mode(sp, &fw_prot_opts)) {
1645                nseg = dma_map_sg(&ha->pdev->dev, scsi_prot_sglist(cmd),
1646                    scsi_prot_sg_count(cmd), cmd->sc_data_direction);
1647                if (unlikely(!nseg))
1648                        goto queuing_error;
1649                else
1650                        sp->flags |= SRB_CRC_PROT_DMA_VALID;
1651
1652                if ((scsi_get_prot_op(cmd) == SCSI_PROT_READ_INSERT) ||
1653                    (scsi_get_prot_op(cmd) == SCSI_PROT_WRITE_STRIP)) {
1654                        nseg = scsi_bufflen(cmd) / cmd->device->sector_size;
1655                }
1656        } else {
1657                nseg = 0;
1658        }
1659
1660        req_cnt = 1;
1661        /* Total Data and protection sg segment(s) */
1662        tot_prot_dsds = nseg;
1663        tot_dsds += nseg;
1664        if (req->cnt < (req_cnt + 2)) {
1665                cnt = IS_SHADOW_REG_CAPABLE(ha) ? *req->out_ptr :
1666                    RD_REG_DWORD_RELAXED(req->req_q_out);
1667                if (req->ring_index < cnt)
1668                        req->cnt = cnt - req->ring_index;
1669                else
1670                        req->cnt = req->length -
1671                                (req->ring_index - cnt);
1672                if (req->cnt < (req_cnt + 2))
1673                        goto queuing_error;
1674        }
1675
1676        status |= QDSS_GOT_Q_SPACE;
1677
1678        /* Build header part of command packet (excluding the OPCODE). */
1679        req->current_outstanding_cmd = handle;
1680        req->outstanding_cmds[handle] = sp;
1681        sp->handle = handle;
1682        cmd->host_scribble = (unsigned char *)(unsigned long)handle;
1683        req->cnt -= req_cnt;
1684
1685        /* Fill-in common area */
1686        cmd_pkt = (struct cmd_type_crc_2 *)req->ring_ptr;
1687        cmd_pkt->handle = MAKE_HANDLE(req->id, handle);
1688
1689        clr_ptr = (uint32_t *)cmd_pkt + 2;
1690        memset(clr_ptr, 0, REQUEST_ENTRY_SIZE - 8);
1691
1692        /* Set NPORT-ID and LUN number*/
1693        cmd_pkt->nport_handle = cpu_to_le16(sp->fcport->loop_id);
1694        cmd_pkt->port_id[0] = sp->fcport->d_id.b.al_pa;
1695        cmd_pkt->port_id[1] = sp->fcport->d_id.b.area;
1696        cmd_pkt->port_id[2] = sp->fcport->d_id.b.domain;
1697
1698        int_to_scsilun(cmd->device->lun, &cmd_pkt->lun);
1699        host_to_fcp_swap((uint8_t *)&cmd_pkt->lun, sizeof(cmd_pkt->lun));
1700
1701        /* Total Data and protection segment(s) */
1702        cmd_pkt->dseg_count = cpu_to_le16(tot_dsds);
1703
1704        /* Build IOCB segments and adjust for data protection segments */
1705        if (qla24xx_build_scsi_crc_2_iocbs(sp, (struct cmd_type_crc_2 *)
1706            req->ring_ptr, tot_dsds, tot_prot_dsds, fw_prot_opts) !=
1707                QLA_SUCCESS)
1708                goto queuing_error;
1709
1710        cmd_pkt->entry_count = (uint8_t)req_cnt;
1711        /* Specify response queue number where completion should happen */
1712        cmd_pkt->entry_status = (uint8_t) rsp->id;
1713        cmd_pkt->timeout = cpu_to_le16(0);
1714        wmb();
1715
1716        /* Adjust ring index. */
1717        req->ring_index++;
1718        if (req->ring_index == req->length) {
1719                req->ring_index = 0;
1720                req->ring_ptr = req->ring;
1721        } else
1722                req->ring_ptr++;
1723
1724        /* Set chip new ring index. */
1725        WRT_REG_DWORD(req->req_q_in, req->ring_index);
1726        RD_REG_DWORD_RELAXED(&ha->iobase->isp24.hccr);
1727
1728        /* Manage unprocessed RIO/ZIO commands in response queue. */
1729        if (vha->flags.process_response_queue &&
1730            rsp->ring_ptr->signature != RESPONSE_PROCESSED)
1731                qla24xx_process_response_queue(vha, rsp);
1732
1733        spin_unlock_irqrestore(&ha->hardware_lock, flags);
1734
1735        return QLA_SUCCESS;
1736
1737queuing_error:
1738        if (status & QDSS_GOT_Q_SPACE) {
1739                req->outstanding_cmds[handle] = NULL;
1740                req->cnt += req_cnt;
1741        }
1742        /* Cleanup will be performed by the caller (queuecommand) */
1743
1744        spin_unlock_irqrestore(&ha->hardware_lock, flags);
1745        return QLA_FUNCTION_FAILED;
1746}
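
/*
 * Editor's note: sketch of the req->cnt refresh used above. Free space on
 * the circular request ring is derived from the local producer index and
 * the consumer index read back from the chip (or from the shadow copy on
 * IS_SHADOW_REG_CAPABLE parts); callers then demand req_cnt + 2 free
 * entries so the ring never fills completely.
 */
#include <stdint.h>

static uint16_t ring_free_entries(uint16_t length, uint16_t in, uint16_t out)
{
        /* 'in' is req->ring_index; 'out' is *req->out_ptr / req_q_out. */
        if (in < out)
                return out - in;
        return length - (in - out);
}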
1747
1748/**
1749 * qla2xxx_start_scsi_mq() - Send a SCSI command to the ISP
1750 * @sp: command to send to the ISP
1751 *
1752 * Returns non-zero if a failure occurred, else zero.
1753 */
1754static int
1755qla2xxx_start_scsi_mq(srb_t *sp)
1756{
1757        int             nseg;
1758        unsigned long   flags;
1759        uint32_t        *clr_ptr;
1760        uint32_t        index;
1761        uint32_t        handle;
1762        struct cmd_type_7 *cmd_pkt;
1763        uint16_t        cnt;
1764        uint16_t        req_cnt;
1765        uint16_t        tot_dsds;
1766        struct req_que *req = NULL;
1767        struct rsp_que *rsp = NULL;
1768        struct scsi_cmnd *cmd = GET_CMD_SP(sp);
1769        struct scsi_qla_host *vha = sp->fcport->vha;
1770        struct qla_hw_data *ha = vha->hw;
1771        struct qla_qpair *qpair = sp->qpair;
1772
1773        /* Acquire qpair specific lock */
1774        spin_lock_irqsave(&qpair->qp_lock, flags);
1775
1776        /* Setup qpair pointers */
1777        rsp = qpair->rsp;
1778        req = qpair->req;
1779
1780        /* So we know we haven't pci_map'ed anything yet */
1781        tot_dsds = 0;
1782
1783        /* Send marker if required */
1784        if (vha->marker_needed != 0) {
1785                if (__qla2x00_marker(vha, req, rsp, 0, 0, MK_SYNC_ALL) !=
1786                    QLA_SUCCESS) {
1787                        spin_unlock_irqrestore(&qpair->qp_lock, flags);
1788                        return QLA_FUNCTION_FAILED;
1789                }
1790                vha->marker_needed = 0;
1791        }
1792
1793        /* Check for room in outstanding command list. */
1794        handle = req->current_outstanding_cmd;
1795        for (index = 1; index < req->num_outstanding_cmds; index++) {
1796                handle++;
1797                if (handle == req->num_outstanding_cmds)
1798                        handle = 1;
1799                if (!req->outstanding_cmds[handle])
1800                        break;
1801        }
1802        if (index == req->num_outstanding_cmds)
1803                goto queuing_error;
1804
1805        /* Map the sg table so we have an accurate count of sg entries needed */
1806        if (scsi_sg_count(cmd)) {
1807                nseg = dma_map_sg(&ha->pdev->dev, scsi_sglist(cmd),
1808                    scsi_sg_count(cmd), cmd->sc_data_direction);
1809                if (unlikely(!nseg))
1810                        goto queuing_error;
1811        } else
1812                nseg = 0;
1813
1814        tot_dsds = nseg;
1815        req_cnt = qla24xx_calc_iocbs(vha, tot_dsds);
1816        if (req->cnt < (req_cnt + 2)) {
1817                cnt = IS_SHADOW_REG_CAPABLE(ha) ? *req->out_ptr :
1818                    RD_REG_DWORD_RELAXED(req->req_q_out);
1819                if (req->ring_index < cnt)
1820                        req->cnt = cnt - req->ring_index;
1821                else
1822                        req->cnt = req->length -
1823                                (req->ring_index - cnt);
1824                if (req->cnt < (req_cnt + 2))
1825                        goto queuing_error;
1826        }
1827
1828        /* Build command packet. */
1829        req->current_outstanding_cmd = handle;
1830        req->outstanding_cmds[handle] = sp;
1831        sp->handle = handle;
1832        cmd->host_scribble = (unsigned char *)(unsigned long)handle;
1833        req->cnt -= req_cnt;
1834
1835        cmd_pkt = (struct cmd_type_7 *)req->ring_ptr;
1836        cmd_pkt->handle = MAKE_HANDLE(req->id, handle);
1837
1838        /* Zero out remaining portion of packet. */
1839        /*    tagged queuing modifier -- default is TSK_SIMPLE (0). */
1840        clr_ptr = (uint32_t *)cmd_pkt + 2;
1841        memset(clr_ptr, 0, REQUEST_ENTRY_SIZE - 8);
1842        cmd_pkt->dseg_count = cpu_to_le16(tot_dsds);
1843
1844        /* Set NPORT-ID and LUN number*/
1845        cmd_pkt->nport_handle = cpu_to_le16(sp->fcport->loop_id);
1846        cmd_pkt->port_id[0] = sp->fcport->d_id.b.al_pa;
1847        cmd_pkt->port_id[1] = sp->fcport->d_id.b.area;
1848        cmd_pkt->port_id[2] = sp->fcport->d_id.b.domain;
1849        cmd_pkt->vp_index = sp->fcport->vha->vp_idx;
1850
1851        int_to_scsilun(cmd->device->lun, &cmd_pkt->lun);
1852        host_to_fcp_swap((uint8_t *)&cmd_pkt->lun, sizeof(cmd_pkt->lun));
1853
1854        cmd_pkt->task = TSK_SIMPLE;
1855
1856        /* Load SCSI command packet. */
1857        memcpy(cmd_pkt->fcp_cdb, cmd->cmnd, cmd->cmd_len);
1858        host_to_fcp_swap(cmd_pkt->fcp_cdb, sizeof(cmd_pkt->fcp_cdb));
1859
1860        cmd_pkt->byte_count = cpu_to_le32((uint32_t)scsi_bufflen(cmd));
1861
1862        /* Build IOCB segments */
1863        qla24xx_build_scsi_iocbs(sp, cmd_pkt, tot_dsds, req);
1864
1865        /* Set total data segment count. */
1866        cmd_pkt->entry_count = (uint8_t)req_cnt;
1867        wmb();
1868        /* Adjust ring index. */
1869        req->ring_index++;
1870        if (req->ring_index == req->length) {
1871                req->ring_index = 0;
1872                req->ring_ptr = req->ring;
1873        } else
1874                req->ring_ptr++;
1875
1876        sp->flags |= SRB_DMA_VALID;
1877
1878        /* Set chip new ring index. */
1879        WRT_REG_DWORD(req->req_q_in, req->ring_index);
1880
1881        /* Manage unprocessed RIO/ZIO commands in response queue. */
1882        if (vha->flags.process_response_queue &&
1883                rsp->ring_ptr->signature != RESPONSE_PROCESSED)
1884                qla24xx_process_response_queue(vha, rsp);
1885
1886        spin_unlock_irqrestore(&qpair->qp_lock, flags);
1887        return QLA_SUCCESS;
1888
1889queuing_error:
1890        if (tot_dsds)
1891                scsi_dma_unmap(cmd);
1892
1893        spin_unlock_irqrestore(&qpair->qp_lock, flags);
1894
1895        return QLA_FUNCTION_FAILED;
1896}
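
/*
 * Editor's note: each command's slot number is recorded twice -- raw in
 * cmd->host_scribble and packed together with the request-queue id by
 * MAKE_HANDLE() in the IOCB handle. The packing below (queue id in the
 * upper 16 bits) is an assumption for illustration; the real macro lives
 * in qla_def.h.
 */
#include <stdint.h>

#define PACK_HANDLE(qid, slot)  ((uint32_t)(((uint32_t)(qid) << 16) | (slot)))
#define HANDLE_SLOT(h)          ((uint16_t)((h) & 0xffff))
#define HANDLE_QID(h)           ((uint16_t)((h) >> 16))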
1897
1899/**
1900 * qla2xxx_dif_start_scsi_mq() - Send a SCSI command to the ISP
1901 * @sp: command to send to the ISP
1902 *
1903 * Returns non-zero if a failure occurred, else zero.
1904 */
1905int
1906qla2xxx_dif_start_scsi_mq(srb_t *sp)
1907{
1908        int                     nseg;
1909        unsigned long           flags;
1910        uint32_t                *clr_ptr;
1911        uint32_t                index;
1912        uint32_t                handle;
1913        uint16_t                cnt;
1914        uint16_t                req_cnt = 0;
1915        uint16_t                tot_dsds;
1916        uint16_t                tot_prot_dsds;
1917        uint16_t                fw_prot_opts = 0;
1918        struct req_que          *req = NULL;
1919        struct rsp_que          *rsp = NULL;
1920        struct scsi_cmnd        *cmd = GET_CMD_SP(sp);
1921        struct scsi_qla_host    *vha = sp->fcport->vha;
1922        struct qla_hw_data      *ha = vha->hw;
1923        struct cmd_type_crc_2   *cmd_pkt;
1924        uint32_t                status = 0;
1925        struct qla_qpair        *qpair = sp->qpair;
1926
1927#define QDSS_GOT_Q_SPACE        BIT_0
1928
1929        /* Check for host side state */
1930        if (!qpair->online) {
1931                cmd->result = DID_NO_CONNECT << 16;
1932                return QLA_INTERFACE_ERROR;
1933        }
1934
1935        if (!qpair->difdix_supported &&
1936                scsi_get_prot_op(cmd) != SCSI_PROT_NORMAL) {
1937                cmd->result = DID_NO_CONNECT << 16;
1938                return QLA_INTERFACE_ERROR;
1939        }
1940
1941        /* Only process protection or >16 cdb in this routine */
1942        if (scsi_get_prot_op(cmd) == SCSI_PROT_NORMAL) {
1943                if (cmd->cmd_len <= 16)
1944                        return qla2xxx_start_scsi_mq(sp);
1945        }
1946
1947        spin_lock_irqsave(&qpair->qp_lock, flags);
1948
1949        /* Setup qpair pointers */
1950        rsp = qpair->rsp;
1951        req = qpair->req;
1952
1953        /* So we know we haven't pci_map'ed anything yet */
1954        tot_dsds = 0;
1955
1956        /* Send marker if required */
1957        if (vha->marker_needed != 0) {
1958                if (__qla2x00_marker(vha, req, rsp, 0, 0, MK_SYNC_ALL) !=
1959                    QLA_SUCCESS) {
1960                        spin_unlock_irqrestore(&qpair->qp_lock, flags);
1961                        return QLA_FUNCTION_FAILED;
1962                }
1963                vha->marker_needed = 0;
1964        }
1965
1966        /* Check for room in outstanding command list. */
1967        handle = req->current_outstanding_cmd;
1968        for (index = 1; index < req->num_outstanding_cmds; index++) {
1969                handle++;
1970                if (handle == req->num_outstanding_cmds)
1971                        handle = 1;
1972                if (!req->outstanding_cmds[handle])
1973                        break;
1974        }
1975
1976        if (index == req->num_outstanding_cmds)
1977                goto queuing_error;
1978
1979        /* Compute number of required data segments */
1980        /* Map the sg table so we have an accurate count of sg entries needed */
1981        if (scsi_sg_count(cmd)) {
1982                nseg = dma_map_sg(&ha->pdev->dev, scsi_sglist(cmd),
1983                    scsi_sg_count(cmd), cmd->sc_data_direction);
1984                if (unlikely(!nseg))
1985                        goto queuing_error;
1986                else
1987                        sp->flags |= SRB_DMA_VALID;
1988
1989                if ((scsi_get_prot_op(cmd) == SCSI_PROT_READ_INSERT) ||
1990                    (scsi_get_prot_op(cmd) == SCSI_PROT_WRITE_STRIP)) {
1991                        struct qla2_sgx sgx;
1992                        uint32_t        partial;
1993
1994                        memset(&sgx, 0, sizeof(struct qla2_sgx));
1995                        sgx.tot_bytes = scsi_bufflen(cmd);
1996                        sgx.cur_sg = scsi_sglist(cmd);
1997                        sgx.sp = sp;
1998
1999                        nseg = 0;
2000                        while (qla24xx_get_one_block_sg(
2001                            cmd->device->sector_size, &sgx, &partial))
2002                                nseg++;
2003                }
2004        } else
2005                nseg = 0;
2006
2007        /* number of required data segments */
2008        tot_dsds = nseg;
2009
2010        /* Compute number of required protection segments */
2011        if (qla24xx_configure_prot_mode(sp, &fw_prot_opts)) {
2012                nseg = dma_map_sg(&ha->pdev->dev, scsi_prot_sglist(cmd),
2013                    scsi_prot_sg_count(cmd), cmd->sc_data_direction);
2014                if (unlikely(!nseg))
2015                        goto queuing_error;
2016                else
2017                        sp->flags |= SRB_CRC_PROT_DMA_VALID;
2018
2019                if ((scsi_get_prot_op(cmd) == SCSI_PROT_READ_INSERT) ||
2020                    (scsi_get_prot_op(cmd) == SCSI_PROT_WRITE_STRIP)) {
2021                        nseg = scsi_bufflen(cmd) / cmd->device->sector_size;
2022                }
2023        } else {
2024                nseg = 0;
2025        }
2026
2027        req_cnt = 1;
2028        /* Total Data and protection sg segment(s) */
2029        tot_prot_dsds = nseg;
2030        tot_dsds += nseg;
2031        if (req->cnt < (req_cnt + 2)) {
2032                cnt = IS_SHADOW_REG_CAPABLE(ha) ? *req->out_ptr :
2033                    RD_REG_DWORD_RELAXED(req->req_q_out);
2034                if (req->ring_index < cnt)
2035                        req->cnt = cnt - req->ring_index;
2036                else
2037                        req->cnt = req->length -
2038                                (req->ring_index - cnt);
2039                if (req->cnt < (req_cnt + 2))
2040                        goto queuing_error;
2041        }
2042
2043        status |= QDSS_GOT_Q_SPACE;
2044
2045        /* Build header part of command packet (excluding the OPCODE). */
2046        req->current_outstanding_cmd = handle;
2047        req->outstanding_cmds[handle] = sp;
2048        sp->handle = handle;
2049        cmd->host_scribble = (unsigned char *)(unsigned long)handle;
2050        req->cnt -= req_cnt;
2051
2052        /* Fill-in common area */
2053        cmd_pkt = (struct cmd_type_crc_2 *)req->ring_ptr;
2054        cmd_pkt->handle = MAKE_HANDLE(req->id, handle);
2055
2056        clr_ptr = (uint32_t *)cmd_pkt + 2;
2057        memset(clr_ptr, 0, REQUEST_ENTRY_SIZE - 8);
2058
2059        /* Set NPORT-ID and LUN number*/
2060        cmd_pkt->nport_handle = cpu_to_le16(sp->fcport->loop_id);
2061        cmd_pkt->port_id[0] = sp->fcport->d_id.b.al_pa;
2062        cmd_pkt->port_id[1] = sp->fcport->d_id.b.area;
2063        cmd_pkt->port_id[2] = sp->fcport->d_id.b.domain;
2064
2065        int_to_scsilun(cmd->device->lun, &cmd_pkt->lun);
2066        host_to_fcp_swap((uint8_t *)&cmd_pkt->lun, sizeof(cmd_pkt->lun));
2067
2068        /* Total Data and protection segment(s) */
2069        cmd_pkt->dseg_count = cpu_to_le16(tot_dsds);
2070
2071        /* Build IOCB segments and adjust for data protection segments */
2072        if (qla24xx_build_scsi_crc_2_iocbs(sp, (struct cmd_type_crc_2 *)
2073            req->ring_ptr, tot_dsds, tot_prot_dsds, fw_prot_opts) !=
2074                QLA_SUCCESS)
2075                goto queuing_error;
2076
2077        cmd_pkt->entry_count = (uint8_t)req_cnt;
2078        cmd_pkt->timeout = cpu_to_le16(0);
2079        wmb();
2080
2081        /* Adjust ring index. */
2082        req->ring_index++;
2083        if (req->ring_index == req->length) {
2084                req->ring_index = 0;
2085                req->ring_ptr = req->ring;
2086        } else
2087                req->ring_ptr++;
2088
2089        /* Set chip new ring index. */
2090        WRT_REG_DWORD(req->req_q_in, req->ring_index);
2091
2092        /* Manage unprocessed RIO/ZIO commands in response queue. */
2093        if (vha->flags.process_response_queue &&
2094            rsp->ring_ptr->signature != RESPONSE_PROCESSED)
2095                qla24xx_process_response_queue(vha, rsp);
2096
2097        spin_unlock_irqrestore(&qpair->qp_lock, flags);
2098
2099        return QLA_SUCCESS;
2100
2101queuing_error:
2102        if (status & QDSS_GOT_Q_SPACE) {
2103                req->outstanding_cmds[handle] = NULL;
2104                req->cnt += req_cnt;
2105        }
2106        /* Cleanup will be performed by the caller (queuecommand) */
2107
2108        spin_unlock_irqrestore(&qpair->qp_lock, flags);
2109        return QLA_FUNCTION_FAILED;
2110}
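
/*
 * Editor's note: for SCSI_PROT_READ_INSERT and SCSI_PROT_WRITE_STRIP the
 * HBA generates or consumes the protection data itself, so both DIF
 * routines above re-count the data segments at one per logical block.
 * A tiny worked example (names hypothetical):
 */
#include <assert.h>
#include <stdint.h>

static uint32_t one_block_segments(uint32_t bufflen, uint32_t sector_size)
{
        return bufflen / sector_size;
}

int main(void)
{
        /* A 64 KiB transfer on 512-byte sectors -> 128 one-block DSDs. */
        assert(one_block_segments(64 * 1024, 512) == 128);
        return 0;
}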
2111
2112/* Generic Control-SRB manipulation functions. */
2113
2114/* hardware_lock assumed to be held. */
2115
2116void *
2117__qla2x00_alloc_iocbs(struct qla_qpair *qpair, srb_t *sp)
2118{
2119        scsi_qla_host_t *vha = qpair->vha;
2120        struct qla_hw_data *ha = vha->hw;
2121        struct req_que *req = qpair->req;
2122        device_reg_t *reg = ISP_QUE_REG(ha, req->id);
2123        uint32_t index, handle;
2124        request_t *pkt;
2125        uint16_t cnt, req_cnt;
2126
2127        pkt = NULL;
2128        req_cnt = 1;
2129        handle = 0;
2130
2131        if (!sp)
2132                goto skip_cmd_array;
2133
2134        /* Check for room in outstanding command list. */
2135        handle = req->current_outstanding_cmd;
2136        for (index = 1; index < req->num_outstanding_cmds; index++) {
2137                handle++;
2138                if (handle == req->num_outstanding_cmds)
2139                        handle = 1;
2140                if (!req->outstanding_cmds[handle])
2141                        break;
2142        }
2143        if (index == req->num_outstanding_cmds) {
2144                ql_log(ql_log_warn, vha, 0x700b,
2145                    "No room on outstanding cmd array.\n");
2146                goto queuing_error;
2147        }
2148
2149        /* Prep command array. */
2150        req->current_outstanding_cmd = handle;
2151        req->outstanding_cmds[handle] = sp;
2152        sp->handle = handle;
2153
2154        /* Adjust entry-counts as needed. */
2155        if (sp->type != SRB_SCSI_CMD)
2156                req_cnt = sp->iocbs;
2157
2158skip_cmd_array:
2159        /* Check for room on request queue. */
2160        if (req->cnt < req_cnt + 2) {
2161                if (ha->mqenable || IS_QLA83XX(ha) || IS_QLA27XX(ha))
2162                        cnt = RD_REG_DWORD(&reg->isp25mq.req_q_out);
2163                else if (IS_P3P_TYPE(ha))
2164                        cnt = RD_REG_DWORD(&reg->isp82.req_q_out);
2165                else if (IS_FWI2_CAPABLE(ha))
2166                        cnt = RD_REG_DWORD(&reg->isp24.req_q_out);
2167                else if (IS_QLAFX00(ha))
2168                        cnt = RD_REG_DWORD(&reg->ispfx00.req_q_out);
2169                else
2170                        cnt = qla2x00_debounce_register(
2171                            ISP_REQ_Q_OUT(ha, &reg->isp));
2172
2173                if  (req->ring_index < cnt)
2174                        req->cnt = cnt - req->ring_index;
2175                else
2176                        req->cnt = req->length -
2177                            (req->ring_index - cnt);
2178        }
2179        if (req->cnt < req_cnt + 2)
2180                goto queuing_error;
2181
2182        /* Prep packet */
2183        req->cnt -= req_cnt;
2184        pkt = req->ring_ptr;
2185        memset(pkt, 0, REQUEST_ENTRY_SIZE);
2186        if (IS_QLAFX00(ha)) {
2187                WRT_REG_BYTE((void __iomem *)&pkt->entry_count, req_cnt);
2188                WRT_REG_WORD((void __iomem *)&pkt->handle, handle);
2189        } else {
2190                pkt->entry_count = req_cnt;
2191                pkt->handle = handle;
2192        }
2193
2194queuing_error:
            /* The success path falls through here; count failures only. */
2195        if (!pkt)
                    qpair->tgt_counters.num_alloc_iocb_failed++;
2196        return pkt;
2197}
2198
2199void *
2200qla2x00_alloc_iocbs_ready(struct qla_qpair *qpair, srb_t *sp)
2201{
2202        scsi_qla_host_t *vha = qpair->vha;
2203
2204        if (qla2x00_reset_active(vha))
2205                return NULL;
2206
2207        return __qla2x00_alloc_iocbs(qpair, sp);
2208}
2209
2210void *
2211qla2x00_alloc_iocbs(struct scsi_qla_host *vha, srb_t *sp)
2212{
2213        return __qla2x00_alloc_iocbs(vha->hw->base_qpair, sp);
2214}
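
/*
 * Editor's note: three entry points share one worker above. The "_ready"
 * wrapper refuses to hand out request-queue space while a reset is
 * active, and the legacy wrapper routes through the base queue pair.
 * The guard, reduced to its shape (hypothetical names):
 */
static void *alloc_if_ready(int reset_active, void *(*raw_alloc)(void *ctx),
    void *ctx)
{
        if (reset_active)
                return NULL;    /* callers must tolerate NULL */
        return raw_alloc(ctx);
}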
2215
2216static void
2217qla24xx_prli_iocb(srb_t *sp, struct logio_entry_24xx *logio)
2218{
2219        struct srb_iocb *lio = &sp->u.iocb_cmd;
2220
2221        logio->entry_type = LOGINOUT_PORT_IOCB_TYPE;
2222        logio->control_flags = cpu_to_le16(LCF_COMMAND_PRLI);
2223        if (lio->u.logio.flags & SRB_LOGIN_NVME_PRLI)
2224                logio->control_flags |= cpu_to_le16(LCF_NVME_PRLI);
2225
2226        logio->nport_handle = cpu_to_le16(sp->fcport->loop_id);
2227        logio->port_id[0] = sp->fcport->d_id.b.al_pa;
2228        logio->port_id[1] = sp->fcport->d_id.b.area;
2229        logio->port_id[2] = sp->fcport->d_id.b.domain;
2230        logio->vp_index = sp->vha->vp_idx;
2231}
2232
2233static void
2234qla24xx_login_iocb(srb_t *sp, struct logio_entry_24xx *logio)
2235{
2236        struct srb_iocb *lio = &sp->u.iocb_cmd;
2237
2238        logio->entry_type = LOGINOUT_PORT_IOCB_TYPE;
2239        logio->control_flags = cpu_to_le16(LCF_COMMAND_PLOGI);
2240
2241        if (lio->u.logio.flags & SRB_LOGIN_COND_PLOGI)
2242                logio->control_flags |= cpu_to_le16(LCF_COND_PLOGI);
2243        if (lio->u.logio.flags & SRB_LOGIN_SKIP_PRLI)
2244                logio->control_flags |= cpu_to_le16(LCF_SKIP_PRLI);
2245        logio->nport_handle = cpu_to_le16(sp->fcport->loop_id);
2246        logio->port_id[0] = sp->fcport->d_id.b.al_pa;
2247        logio->port_id[1] = sp->fcport->d_id.b.area;
2248        logio->port_id[2] = sp->fcport->d_id.b.domain;
2249        logio->vp_index = sp->vha->vp_idx;
2250}
2251
2252static void
2253qla2x00_login_iocb(srb_t *sp, struct mbx_entry *mbx)
2254{
2255        struct qla_hw_data *ha = sp->vha->hw;
2256        struct srb_iocb *lio = &sp->u.iocb_cmd;
2257        uint16_t opts;
2258
2259        mbx->entry_type = MBX_IOCB_TYPE;
2260        SET_TARGET_ID(ha, mbx->loop_id, sp->fcport->loop_id);
2261        mbx->mb0 = cpu_to_le16(MBC_LOGIN_FABRIC_PORT);
2262        opts = lio->u.logio.flags & SRB_LOGIN_COND_PLOGI ? BIT_0 : 0;
2263        opts |= lio->u.logio.flags & SRB_LOGIN_SKIP_PRLI ? BIT_1 : 0;
2264        if (HAS_EXTENDED_IDS(ha)) {
2265                mbx->mb1 = cpu_to_le16(sp->fcport->loop_id);
2266                mbx->mb10 = cpu_to_le16(opts);
2267        } else {
2268                mbx->mb1 = cpu_to_le16((sp->fcport->loop_id << 8) | opts);
2269        }
2270        mbx->mb2 = cpu_to_le16(sp->fcport->d_id.b.domain);
2271        mbx->mb3 = cpu_to_le16(sp->fcport->d_id.b.area << 8 |
2272            sp->fcport->d_id.b.al_pa);
2273        mbx->mb9 = cpu_to_le16(sp->vha->vp_idx);
2274}
2275
2276static void
2277qla24xx_logout_iocb(srb_t *sp, struct logio_entry_24xx *logio)
2278{
2279        logio->entry_type = LOGINOUT_PORT_IOCB_TYPE;
2280        logio->control_flags =
2281            cpu_to_le16(LCF_COMMAND_LOGO|LCF_IMPL_LOGO);
2282        if (!sp->fcport->se_sess ||
2283            !sp->fcport->keep_nport_handle)
2284                logio->control_flags |= cpu_to_le16(LCF_FREE_NPORT);
2285        logio->nport_handle = cpu_to_le16(sp->fcport->loop_id);
2286        logio->port_id[0] = sp->fcport->d_id.b.al_pa;
2287        logio->port_id[1] = sp->fcport->d_id.b.area;
2288        logio->port_id[2] = sp->fcport->d_id.b.domain;
2289        logio->vp_index = sp->vha->vp_idx;
2290}
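
/*
 * Editor's note: every builder in this file stores the 24-bit FC D_ID
 * byte-reversed, al_pa first. The equivalent helper:
 */
#include <stdint.h>

static void set_port_id(uint8_t dst[3], uint8_t domain, uint8_t area,
    uint8_t al_pa)
{
        dst[0] = al_pa;         /* low byte of the 24-bit address */
        dst[1] = area;
        dst[2] = domain;        /* high byte */
}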
2291
2292static void
2293qla2x00_logout_iocb(srb_t *sp, struct mbx_entry *mbx)
2294{
2295        struct qla_hw_data *ha = sp->vha->hw;
2296
2297        mbx->entry_type = MBX_IOCB_TYPE;
2298        SET_TARGET_ID(ha, mbx->loop_id, sp->fcport->loop_id);
2299        mbx->mb0 = cpu_to_le16(MBC_LOGOUT_FABRIC_PORT);
2300        mbx->mb1 = HAS_EXTENDED_IDS(ha) ?
2301            cpu_to_le16(sp->fcport->loop_id):
2302            cpu_to_le16(sp->fcport->loop_id << 8);
2303        mbx->mb2 = cpu_to_le16(sp->fcport->d_id.b.domain);
2304        mbx->mb3 = cpu_to_le16(sp->fcport->d_id.b.area << 8 |
2305            sp->fcport->d_id.b.al_pa);
2306        mbx->mb9 = cpu_to_le16(sp->vha->vp_idx);
2307        /* Implicit: mbx->mb10 = 0. */
2308}
2309
2310static void
2311qla24xx_adisc_iocb(srb_t *sp, struct logio_entry_24xx *logio)
2312{
2313        logio->entry_type = LOGINOUT_PORT_IOCB_TYPE;
2314        logio->control_flags = cpu_to_le16(LCF_COMMAND_ADISC);
2315        logio->nport_handle = cpu_to_le16(sp->fcport->loop_id);
2316        logio->vp_index = sp->vha->vp_idx;
2317}
2318
2319static void
2320qla2x00_adisc_iocb(srb_t *sp, struct mbx_entry *mbx)
2321{
2322        struct qla_hw_data *ha = sp->vha->hw;
2323
2324        mbx->entry_type = MBX_IOCB_TYPE;
2325        SET_TARGET_ID(ha, mbx->loop_id, sp->fcport->loop_id);
2326        mbx->mb0 = cpu_to_le16(MBC_GET_PORT_DATABASE);
2327        if (HAS_EXTENDED_IDS(ha)) {
2328                mbx->mb1 = cpu_to_le16(sp->fcport->loop_id);
2329                mbx->mb10 = cpu_to_le16(BIT_0);
2330        } else {
2331                mbx->mb1 = cpu_to_le16((sp->fcport->loop_id << 8) | BIT_0);
2332        }
2333        mbx->mb2 = cpu_to_le16(MSW(ha->async_pd_dma));
2334        mbx->mb3 = cpu_to_le16(LSW(ha->async_pd_dma));
2335        mbx->mb6 = cpu_to_le16(MSW(MSD(ha->async_pd_dma)));
2336        mbx->mb7 = cpu_to_le16(LSW(MSD(ha->async_pd_dma)));
2337        mbx->mb9 = cpu_to_le16(sp->vha->vp_idx);
2338}
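
/*
 * Editor's note: the mailbox form above passes the 64-bit port-database
 * DMA address to the firmware as four 16-bit words. A sketch with the
 * bit ranges spelled out (assuming the driver's LSW/MSW/MSD semantics):
 */
#include <stdint.h>

static void split_dma64(uint64_t dma, uint16_t mb[4])
{
        mb[0] = (uint16_t)(dma >> 16);  /* mb2: bits 31..16 */
        mb[1] = (uint16_t)dma;          /* mb3: bits 15..0  */
        mb[2] = (uint16_t)(dma >> 48);  /* mb6: bits 63..48 */
        mb[3] = (uint16_t)(dma >> 32);  /* mb7: bits 47..32 */
}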
2339
2340static void
2341qla24xx_tm_iocb(srb_t *sp, struct tsk_mgmt_entry *tsk)
2342{
2343        uint32_t flags;
2344        uint64_t lun;
2345        struct fc_port *fcport = sp->fcport;
2346        scsi_qla_host_t *vha = fcport->vha;
2347        struct qla_hw_data *ha = vha->hw;
2348        struct srb_iocb *iocb = &sp->u.iocb_cmd;
2349        struct req_que *req = vha->req;
2350
2351        flags = iocb->u.tmf.flags;
2352        lun = iocb->u.tmf.lun;
2353
2354        tsk->entry_type = TSK_MGMT_IOCB_TYPE;
2355        tsk->entry_count = 1;
2356        tsk->handle = MAKE_HANDLE(req->id, tsk->handle);
2357        tsk->nport_handle = cpu_to_le16(fcport->loop_id);
2358        tsk->timeout = cpu_to_le16(ha->r_a_tov / 10 * 2);
2359        tsk->control_flags = cpu_to_le32(flags);
2360        tsk->port_id[0] = fcport->d_id.b.al_pa;
2361        tsk->port_id[1] = fcport->d_id.b.area;
2362        tsk->port_id[2] = fcport->d_id.b.domain;
2363        tsk->vp_index = fcport->vha->vp_idx;
2364
2365        if (flags == TCF_LUN_RESET) {
2366                int_to_scsilun(lun, &tsk->lun);
2367                host_to_fcp_swap((uint8_t *)&tsk->lun,
2368                        sizeof(tsk->lun));
2369        }
2370}
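
/*
 * Editor's note: for TCF_LUN_RESET the LUN is laid out in SAM format by
 * int_to_scsilun() and then byte-reversed in 4-byte groups for the FCP
 * frame. The sketch below assumes host_to_fcp_swap() is exactly that
 * in-place 32-bit byte swap.
 */
#include <stdint.h>

static void swap32_groups(uint8_t *buf, uint32_t bsize)
{
        uint32_t i;
        uint8_t t;

        for (i = 0; i + 4 <= bsize; i += 4) {
                t = buf[i];     buf[i]     = buf[i + 3]; buf[i + 3] = t;
                t = buf[i + 1]; buf[i + 1] = buf[i + 2]; buf[i + 2] = t;
        }
}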
2371
2372static void
2373qla2x00_els_dcmd_sp_free(void *data)
2374{
2375        srb_t *sp = data;
2376        struct srb_iocb *elsio = &sp->u.iocb_cmd;
2377
2378        kfree(sp->fcport);
2379
2380        if (elsio->u.els_logo.els_logo_pyld)
2381                dma_free_coherent(&sp->vha->hw->pdev->dev, DMA_POOL_SIZE,
2382                    elsio->u.els_logo.els_logo_pyld,
2383                    elsio->u.els_logo.els_logo_pyld_dma);
2384
2385        del_timer(&elsio->timer);
2386        qla2x00_rel_sp(sp);
2387}
2388
2389static void
2390qla2x00_els_dcmd_iocb_timeout(void *data)
2391{
2392        srb_t *sp = data;
2393        fc_port_t *fcport = sp->fcport;
2394        struct scsi_qla_host *vha = sp->vha;
2395        struct qla_hw_data *ha = vha->hw;
2396        struct srb_iocb *lio = &sp->u.iocb_cmd;
2397        unsigned long flags = 0;
2398
2399        ql_dbg(ql_dbg_io, vha, 0x3069,
2400            "%s Timeout, hdl=%x, portid=%02x%02x%02x\n",
2401            sp->name, sp->handle, fcport->d_id.b.domain, fcport->d_id.b.area,
2402            fcport->d_id.b.al_pa);
2403
2404        /* Abort the exchange */
2405        spin_lock_irqsave(&ha->hardware_lock, flags);
2406        if (ha->isp_ops->abort_command(sp)) {
2407                ql_dbg(ql_dbg_io, vha, 0x3070,
2408                    "mbx abort_command failed.\n");
2409        } else {
2410                ql_dbg(ql_dbg_io, vha, 0x3071,
2411                    "mbx abort_command success.\n");
2412        }
2413        spin_unlock_irqrestore(&ha->hardware_lock, flags);
2414
2415        complete(&lio->u.els_logo.comp);
2416}
2417
2418static void
2419qla2x00_els_dcmd_sp_done(void *ptr, int res)
2420{
2421        srb_t *sp = ptr;
2422        fc_port_t *fcport = sp->fcport;
2423        struct srb_iocb *lio = &sp->u.iocb_cmd;
2424        struct scsi_qla_host *vha = sp->vha;
2425
2426        ql_dbg(ql_dbg_io, vha, 0x3072,
2427            "%s hdl=%x, portid=%02x%02x%02x done\n",
2428            sp->name, sp->handle, fcport->d_id.b.domain,
2429            fcport->d_id.b.area, fcport->d_id.b.al_pa);
2430
2431        complete(&lio->u.els_logo.comp);
2432}
2433
2434int
2435qla24xx_els_dcmd_iocb(scsi_qla_host_t *vha, int els_opcode,
2436    port_id_t remote_did)
2437{
2438        srb_t *sp;
2439        fc_port_t *fcport = NULL;
2440        struct srb_iocb *elsio = NULL;
2441        struct qla_hw_data *ha = vha->hw;
2442        struct els_logo_payload logo_pyld;
2443        int rval = QLA_SUCCESS;
2444
2445        fcport = qla2x00_alloc_fcport(vha, GFP_KERNEL);
2446        if (!fcport) {
2447                ql_log(ql_log_info, vha, 0x70e5, "fcport allocation failed\n");
2448                return -ENOMEM;
2449        }
2450
2451        /* Alloc SRB structure */
2452        sp = qla2x00_get_sp(vha, fcport, GFP_KERNEL);
2453        if (!sp) {
2454                kfree(fcport);
2455                ql_log(ql_log_info, vha, 0x70e6,
2456                    "SRB allocation failed\n");
2457                return -ENOMEM;
2458        }
2459
2460        elsio = &sp->u.iocb_cmd;
2461        fcport->loop_id = 0xFFFF;
2462        fcport->d_id.b.domain = remote_did.b.domain;
2463        fcport->d_id.b.area = remote_did.b.area;
2464        fcport->d_id.b.al_pa = remote_did.b.al_pa;
2465
2466        ql_dbg(ql_dbg_io, vha, 0x3073, "portid=%02x%02x%02x\n",
2467            fcport->d_id.b.domain, fcport->d_id.b.area, fcport->d_id.b.al_pa);
2468
2469        sp->type = SRB_ELS_DCMD;
2470        sp->name = "ELS_DCMD";
2471        sp->fcport = fcport;
2472        qla2x00_init_timer(sp, ELS_DCMD_TIMEOUT);
2473        elsio->timeout = qla2x00_els_dcmd_iocb_timeout;
2474        sp->done = qla2x00_els_dcmd_sp_done;
2475        sp->free = qla2x00_els_dcmd_sp_free;
2476
2477        elsio->u.els_logo.els_logo_pyld = dma_alloc_coherent(&ha->pdev->dev,
2478                            DMA_POOL_SIZE, &elsio->u.els_logo.els_logo_pyld_dma,
2479                            GFP_KERNEL);
2480
2481        if (!elsio->u.els_logo.els_logo_pyld) {
2482                sp->free(sp);
2483                return QLA_FUNCTION_FAILED;
2484        }
2485
2486        memset(&logo_pyld, 0, sizeof(struct els_logo_payload));
2487
2488        elsio->u.els_logo.els_cmd = els_opcode;
2489        logo_pyld.opcode = els_opcode;
2490        logo_pyld.s_id[0] = vha->d_id.b.al_pa;
2491        logo_pyld.s_id[1] = vha->d_id.b.area;
2492        logo_pyld.s_id[2] = vha->d_id.b.domain;
2493        host_to_fcp_swap(logo_pyld.s_id, sizeof(uint32_t));
2494        memcpy(&logo_pyld.wwpn, vha->port_name, WWN_SIZE);
2495
2496        memcpy(elsio->u.els_logo.els_logo_pyld, &logo_pyld,
2497            sizeof(struct els_logo_payload));
2498
2499        rval = qla2x00_start_sp(sp);
2500        if (rval != QLA_SUCCESS) {
2501                sp->free(sp);
2502                return QLA_FUNCTION_FAILED;
2503        }
2504
2505        ql_dbg(ql_dbg_io, vha, 0x3074,
2506            "%s LOGO sent, hdl=%x, loopid=%x, portid=%02x%02x%02x.\n",
2507            sp->name, sp->handle, fcport->loop_id, fcport->d_id.b.domain,
2508            fcport->d_id.b.area, fcport->d_id.b.al_pa);
2509
2510        wait_for_completion(&elsio->u.els_logo.comp);
2511
2512        sp->free(sp);
2513        return rval;
2514}
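
/*
 * Editor's note: qla24xx_els_dcmd_iocb() is the driver's synchronous SRB
 * pattern -- the done callback and the timeout callback both signal a
 * completion that the submitter blocks on. A user-space stand-in for
 * that pairing (pthread-based, purely illustrative):
 */
#include <pthread.h>

struct sketch_completion {
        pthread_mutex_t lock;
        pthread_cond_t cond;
        int done;
};

static struct sketch_completion comp = {
        .lock = PTHREAD_MUTEX_INITIALIZER,
        .cond = PTHREAD_COND_INITIALIZER,
};

static void sketch_complete(struct sketch_completion *c)
{
        pthread_mutex_lock(&c->lock);
        c->done = 1;                    /* done path or timeout path */
        pthread_cond_signal(&c->cond);
        pthread_mutex_unlock(&c->lock);
}

static void sketch_wait(struct sketch_completion *c)
{
        pthread_mutex_lock(&c->lock);
        while (!c->done)                /* submitter blocks here */
                pthread_cond_wait(&c->cond, &c->lock);
        pthread_mutex_unlock(&c->lock);
}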
2515
2516static void
2517qla24xx_els_logo_iocb(srb_t *sp, struct els_entry_24xx *els_iocb)
2518{
2519        scsi_qla_host_t *vha = sp->vha;
2520        struct srb_iocb *elsio = &sp->u.iocb_cmd;
2521        uint32_t        dsd_len = 24;
2522
2523        els_iocb->entry_type = ELS_IOCB_TYPE;
2524        els_iocb->entry_count = 1;
2525        els_iocb->sys_define = 0;
2526        els_iocb->entry_status = 0;
2527        els_iocb->handle = sp->handle;
2528        els_iocb->nport_handle = cpu_to_le16(sp->fcport->loop_id);
2529        els_iocb->tx_dsd_count = cpu_to_le16(1);
2530        els_iocb->vp_index = vha->vp_idx;
2531        els_iocb->sof_type = EST_SOFI3;
2532        els_iocb->rx_dsd_count = 0;
2533        els_iocb->opcode = elsio->u.els_logo.els_cmd;
2534
2535        els_iocb->port_id[0] = sp->fcport->d_id.b.al_pa;
2536        els_iocb->port_id[1] = sp->fcport->d_id.b.area;
2537        els_iocb->port_id[2] = sp->fcport->d_id.b.domain;
2538        els_iocb->s_id[0] = vha->d_id.b.al_pa;
2539        els_iocb->s_id[1] = vha->d_id.b.area;
2540        els_iocb->s_id[2] = vha->d_id.b.domain;
2541        els_iocb->control_flags = 0;
2542
2543        if (elsio->u.els_logo.els_cmd == ELS_DCMD_PLOGI) {
2544                els_iocb->tx_byte_count =
                            cpu_to_le32(sizeof(struct els_plogi_payload));
2545                els_iocb->tx_address[0] =
2546                        cpu_to_le32(LSD(elsio->u.els_plogi.els_plogi_pyld_dma));
2547                els_iocb->tx_address[1] =
2548                        cpu_to_le32(MSD(elsio->u.els_plogi.els_plogi_pyld_dma));
2549                els_iocb->tx_len = cpu_to_le32(dsd_len);
2550
2551                els_iocb->rx_dsd_count = cpu_to_le16(1);
2552                els_iocb->rx_byte_count =
                            cpu_to_le32(sizeof(struct els_plogi_payload));
2553                els_iocb->rx_address[0] =
2554                        cpu_to_le32(LSD(elsio->u.els_plogi.els_resp_pyld_dma));
2555                els_iocb->rx_address[1] =
2556                        cpu_to_le32(MSD(elsio->u.els_plogi.els_resp_pyld_dma));
2557                els_iocb->rx_len = cpu_to_le32(dsd_len);
2558                ql_dbg(ql_dbg_io + ql_dbg_buffer, vha, 0x3073,
2559                    "PLOGI ELS IOCB:\n");
2560                ql_dump_buffer(ql_dbg_io + ql_dbg_buffer, vha, 0x0109,
2561                    (uint8_t *)els_iocb, 0x70);
2562        } else {
2563                els_iocb->tx_byte_count =
                            cpu_to_le32(sizeof(struct els_logo_payload));
2564                els_iocb->tx_address[0] =
2565                    cpu_to_le32(LSD(elsio->u.els_logo.els_logo_pyld_dma));
2566                els_iocb->tx_address[1] =
2567                    cpu_to_le32(MSD(elsio->u.els_logo.els_logo_pyld_dma));
2568                els_iocb->tx_len = cpu_to_le32(sizeof(struct els_logo_payload));
2569
2570                els_iocb->rx_byte_count = 0;
2571                els_iocb->rx_address[0] = 0;
2572                els_iocb->rx_address[1] = 0;
2573                els_iocb->rx_len = 0;
2574        }
2575
2576        sp->vha->qla_stats.control_requests++;
2577}
2578
2579static void
2580qla2x00_els_dcmd2_sp_free(void *data)
2581{
2582        srb_t *sp = data;
2583        struct srb_iocb *elsio = &sp->u.iocb_cmd;
2584
2585        if (elsio->u.els_plogi.els_plogi_pyld)
2586                dma_free_coherent(&sp->vha->hw->pdev->dev, DMA_POOL_SIZE,
2587                    elsio->u.els_plogi.els_plogi_pyld,
2588                    elsio->u.els_plogi.els_plogi_pyld_dma);
2589
2590        if (elsio->u.els_plogi.els_resp_pyld)
2591                dma_free_coherent(&sp->vha->hw->pdev->dev, DMA_POOL_SIZE,
2592                    elsio->u.els_plogi.els_resp_pyld,
2593                    elsio->u.els_plogi.els_resp_pyld_dma);
2594
2595        del_timer(&elsio->timer);
2596        qla2x00_rel_sp(sp);
2597}
2598
2599static void
2600qla2x00_els_dcmd2_iocb_timeout(void *data)
2601{
2602        srb_t *sp = data;
2603        fc_port_t *fcport = sp->fcport;
2604        struct scsi_qla_host *vha = sp->vha;
2605        struct qla_hw_data *ha = vha->hw;
2606        struct srb_iocb *lio = &sp->u.iocb_cmd;
2607        unsigned long flags = 0;
2608        int res;
2609
2610        ql_dbg(ql_dbg_io + ql_dbg_disc, vha, 0x3069,
2611            "%s hdl=%x ELS Timeout, %8phC portid=%06x\n",
2612            sp->name, sp->handle, fcport->port_name, fcport->d_id.b24);
2613
2614        /* Abort the exchange */
2615        spin_lock_irqsave(&ha->hardware_lock, flags);
2616        res = ha->isp_ops->abort_command(sp);
2617        ql_dbg(ql_dbg_io, vha, 0x3070,
2618            "mbx abort_command %s\n",
2619            (res == QLA_SUCCESS) ? "successful" : "failed");
2620        spin_unlock_irqrestore(&ha->hardware_lock, flags);
2621
2622        complete(&lio->u.els_plogi.comp);
2623}
2624
2625static void
2626qla2x00_els_dcmd2_sp_done(void *ptr, int res)
2627{
2628        srb_t *sp = ptr;
2629        fc_port_t *fcport = sp->fcport;
2630        struct srb_iocb *lio = &sp->u.iocb_cmd;
2631        struct scsi_qla_host *vha = sp->vha;
2632
2633        ql_dbg(ql_dbg_io + ql_dbg_disc, vha, 0x3072,
2634            "%s ELS hdl=%x, portid=%06x done %8phC\n",
2635            sp->name, sp->handle, fcport->d_id.b24, fcport->port_name);
2636
2637        complete(&lio->u.els_plogi.comp);
2638}
2639
2640int
2641qla24xx_els_dcmd2_iocb(scsi_qla_host_t *vha, int els_opcode,
2642                       fc_port_t *fcport, port_id_t remote_did)
2643{
2644        srb_t *sp;
2645        struct srb_iocb *elsio = NULL;
2646        struct qla_hw_data *ha = vha->hw;
2647        int rval = QLA_SUCCESS;
2648        void    *ptr, *resp_ptr;
2649        dma_addr_t ptr_dma;
2650
2651        /* Alloc SRB structure */
2652        sp = qla2x00_get_sp(vha, fcport, GFP_KERNEL);
2653        if (!sp) {
2654                ql_log(ql_log_info, vha, 0x70e6,
2655                    "SRB allocation failed\n");
2656                return -ENOMEM;
2657        }
2658
2659        elsio = &sp->u.iocb_cmd;
2660        fcport->d_id.b.domain = remote_did.b.domain;
2661        fcport->d_id.b.area = remote_did.b.area;
2662        fcport->d_id.b.al_pa = remote_did.b.al_pa;
2663
2664        ql_dbg(ql_dbg_io, vha, 0x3073,
2665            "Enter: PLOGI portid=%06x\n", fcport->d_id.b24);
2666
2667        sp->type = SRB_ELS_DCMD;
2668        sp->name = "ELS_DCMD";
2669        sp->fcport = fcport;
2670        qla2x00_init_timer(sp, ELS_DCMD_TIMEOUT);
2671        elsio->timeout = qla2x00_els_dcmd2_iocb_timeout;
2672        sp->done = qla2x00_els_dcmd2_sp_done;
2673        sp->free = qla2x00_els_dcmd2_sp_free;
2674
2675        ptr = elsio->u.els_plogi.els_plogi_pyld =
2676            dma_alloc_coherent(&ha->pdev->dev, DMA_POOL_SIZE,
2677                &elsio->u.els_plogi.els_plogi_pyld_dma, GFP_KERNEL);
2678        ptr_dma = elsio->u.els_plogi.els_plogi_pyld_dma;
2679
2680        if (!elsio->u.els_plogi.els_plogi_pyld) {
2681                rval = QLA_FUNCTION_FAILED;
2682                goto out;
2683        }
2684
2685        resp_ptr = elsio->u.els_plogi.els_resp_pyld =
2686            dma_alloc_coherent(&ha->pdev->dev, DMA_POOL_SIZE,
2687                &elsio->u.els_plogi.els_resp_pyld_dma, GFP_KERNEL);
2688
2689        if (!elsio->u.els_plogi.els_resp_pyld) {
2690                rval = QLA_FUNCTION_FAILED;
2691                goto out;
2692        }
2693
2694        ql_dbg(ql_dbg_io, vha, 0x3073, "PLOGI %p %p\n", ptr, resp_ptr);
2695
2696        memset(ptr, 0, sizeof(struct els_plogi_payload));
2697        memset(resp_ptr, 0, sizeof(struct els_plogi_payload));
2698        elsio->u.els_plogi.els_cmd = els_opcode;
2699        elsio->u.els_plogi.els_plogi_pyld->opcode = els_opcode;
2700        qla24xx_get_port_login_templ(vha, ptr_dma + 4,
2701                &elsio->u.els_plogi.els_plogi_pyld->data[0],
2702                sizeof(struct els_plogi_payload));
2703
2704        ql_dbg(ql_dbg_io + ql_dbg_buffer, vha, 0x3073, "PLOGI buffer:\n");
2705        ql_dump_buffer(ql_dbg_io + ql_dbg_buffer, vha, 0x0109,
2706            (uint8_t *)elsio->u.els_plogi.els_plogi_pyld, 0x70);
2707
2708        init_completion(&elsio->u.els_plogi.comp);
2709        rval = qla2x00_start_sp(sp);
2710        if (rval != QLA_SUCCESS) {
2711                rval = QLA_FUNCTION_FAILED;
2712                goto out;
2713        }
2714
2715        ql_dbg(ql_dbg_io, vha, 0x3074,
2716            "%s PLOGI sent, hdl=%x, loopid=%x, portid=%06x\n",
2717            sp->name, sp->handle, fcport->loop_id, fcport->d_id.b24);
2718
2719        wait_for_completion(&elsio->u.els_plogi.comp);
2720
2721        if (elsio->u.els_plogi.comp_status != CS_COMPLETE)
2722                rval = QLA_FUNCTION_FAILED;
2723
2724out:
2725        sp->free(sp);
2726        return rval;
2727}
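
/*
 * Editor's note: qla24xx_els_dcmd2_iocb() funnels every failure through
 * the single "out" label; sp->free() is safe on partial allocations
 * because qla2x00_els_dcmd2_sp_free() tests each payload pointer before
 * freeing it. The idiom in miniature (hypothetical names):
 */
#include <stdlib.h>

struct two_bufs {
        void *a;
        void *b;
};

static void two_bufs_free(struct two_bufs *t)
{
        free(t->a);             /* free(NULL) is a no-op */
        free(t->b);
}

static int two_bufs_alloc(struct two_bufs *t, size_t n)
{
        t->a = calloc(1, n);
        t->b = t->a ? calloc(1, n) : NULL;
        if (t->a && t->b)
                return 0;
        two_bufs_free(t);       /* safe on partial allocation */
        return -1;
}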
2728
2729static void
2730qla24xx_els_iocb(srb_t *sp, struct els_entry_24xx *els_iocb)
2731{
2732        struct bsg_job *bsg_job = sp->u.bsg_job;
2733        struct fc_bsg_request *bsg_request = bsg_job->request;
2734
2735        els_iocb->entry_type = ELS_IOCB_TYPE;
2736        els_iocb->entry_count = 1;
2737        els_iocb->sys_define = 0;
2738        els_iocb->entry_status = 0;
2739        els_iocb->handle = sp->handle;
2740        els_iocb->nport_handle = cpu_to_le16(sp->fcport->loop_id);
2741        els_iocb->tx_dsd_count = cpu_to_le16(bsg_job->request_payload.sg_cnt);
2742        els_iocb->vp_index = sp->vha->vp_idx;
2743        els_iocb->sof_type = EST_SOFI3;
2744        els_iocb->rx_dsd_count = cpu_to_le16(bsg_job->reply_payload.sg_cnt);
2745
2746        els_iocb->opcode =
2747            sp->type == SRB_ELS_CMD_RPT ?
2748            bsg_request->rqst_data.r_els.els_code :
2749            bsg_request->rqst_data.h_els.command_code;
2750        els_iocb->port_id[0] = sp->fcport->d_id.b.al_pa;
2751        els_iocb->port_id[1] = sp->fcport->d_id.b.area;
2752        els_iocb->port_id[2] = sp->fcport->d_id.b.domain;
2753        els_iocb->control_flags = 0;
2754        els_iocb->rx_byte_count =
2755            cpu_to_le32(bsg_job->reply_payload.payload_len);
2756        els_iocb->tx_byte_count =
2757            cpu_to_le32(bsg_job->request_payload.payload_len);
2758
2759        els_iocb->tx_address[0] = cpu_to_le32(LSD(sg_dma_address
2760            (bsg_job->request_payload.sg_list)));
2761        els_iocb->tx_address[1] = cpu_to_le32(MSD(sg_dma_address
2762            (bsg_job->request_payload.sg_list)));
2763        els_iocb->tx_len = cpu_to_le32(sg_dma_len
2764            (bsg_job->request_payload.sg_list));
2765
2766        els_iocb->rx_address[0] = cpu_to_le32(LSD(sg_dma_address
2767            (bsg_job->reply_payload.sg_list)));
2768        els_iocb->rx_address[1] = cpu_to_le32(MSD(sg_dma_address
2769            (bsg_job->reply_payload.sg_list)));
2770        els_iocb->rx_len = cpu_to_le32(sg_dma_len
2771            (bsg_job->reply_payload.sg_list));
2772
2773        sp->vha->qla_stats.control_requests++;
2774}
2775
2776static void
2777qla2x00_ct_iocb(srb_t *sp, ms_iocb_entry_t *ct_iocb)
2778{
2779        uint16_t        avail_dsds;
2780        uint32_t        *cur_dsd;
2781        struct scatterlist *sg;
2782        int index;
2783        uint16_t tot_dsds;
2784        scsi_qla_host_t *vha = sp->vha;
2785        struct qla_hw_data *ha = vha->hw;
2786        struct bsg_job *bsg_job = sp->u.bsg_job;
2787        int loop_iteration = 0;
2788        int entry_count = 1;
2789
2790        memset(ct_iocb, 0, sizeof(ms_iocb_entry_t));
2791        ct_iocb->entry_type = CT_IOCB_TYPE;
2792        ct_iocb->entry_status = 0;
2793        ct_iocb->handle1 = sp->handle;
2794        SET_TARGET_ID(ha, ct_iocb->loop_id, sp->fcport->loop_id);
2795        ct_iocb->status = cpu_to_le16(0);
2796        ct_iocb->control_flags = cpu_to_le16(0);
2797        ct_iocb->timeout = 0;
2798        ct_iocb->cmd_dsd_count =
2799            cpu_to_le16(bsg_job->request_payload.sg_cnt);
2800        ct_iocb->total_dsd_count =
2801            cpu_to_le16(bsg_job->request_payload.sg_cnt + 1);
2802        ct_iocb->req_bytecount =
2803            cpu_to_le32(bsg_job->request_payload.payload_len);
2804        ct_iocb->rsp_bytecount =
2805            cpu_to_le32(bsg_job->reply_payload.payload_len);
2806
2807        ct_iocb->dseg_req_address[0] = cpu_to_le32(LSD(sg_dma_address
2808            (bsg_job->request_payload.sg_list)));
2809        ct_iocb->dseg_req_address[1] = cpu_to_le32(MSD(sg_dma_address
2810            (bsg_job->request_payload.sg_list)));
2811        ct_iocb->dseg_req_length = ct_iocb->req_bytecount;
2812
2813        ct_iocb->dseg_rsp_address[0] = cpu_to_le32(LSD(sg_dma_address
2814            (bsg_job->reply_payload.sg_list)));
2815        ct_iocb->dseg_rsp_address[1] = cpu_to_le32(MSD(sg_dma_address
2816            (bsg_job->reply_payload.sg_list)));
2817        ct_iocb->dseg_rsp_length = ct_iocb->rsp_bytecount;
2818
2819        avail_dsds = 1;
2820        cur_dsd = (uint32_t *)ct_iocb->dseg_rsp_address;
2821        index = 0;
2822        tot_dsds = bsg_job->reply_payload.sg_cnt;
2823
2824        for_each_sg(bsg_job->reply_payload.sg_list, sg, tot_dsds, index) {
2825                dma_addr_t       sle_dma;
2826                cont_a64_entry_t *cont_pkt;
2827
2828                /* Allocate additional continuation packets? */
2829                if (avail_dsds == 0) {
2830                        /*
2831                         * Five DSDs are available in the Cont.
2832                         * Type 1 IOCB.
2833                         */
2834                        cont_pkt = qla2x00_prep_cont_type1_iocb(vha,
2835                            vha->hw->req_q_map[0]);
2836                        cur_dsd = (uint32_t *) cont_pkt->dseg_0_address;
2837                        avail_dsds = 5;
2838                        entry_count++;
2839                }
2840
2841                sle_dma = sg_dma_address(sg);
2842                *cur_dsd++   = cpu_to_le32(LSD(sle_dma));
2843                *cur_dsd++   = cpu_to_le32(MSD(sle_dma));
2844                *cur_dsd++   = cpu_to_le32(sg_dma_len(sg));
2845                loop_iteration++;
2846                avail_dsds--;
2847        }
2848        ct_iocb->entry_count = entry_count;
2849
2850        sp->vha->qla_stats.control_requests++;
2851}
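
/*
 * Editor's note: both CT builders dole DSDs out of the command entry
 * first (one remaining slot here, two in the 24xx variant below), then
 * five per Continuation Type 1 IOCB; entry_count ends up tracking
 * exactly that. The equivalent arithmetic:
 */
#include <stdint.h>

static uint16_t ct_entries_needed(uint16_t dsds, uint16_t in_cmd_entry)
{
        uint16_t entries = 1;

        if (dsds > in_cmd_entry)        /* round up to whole cont. IOCBs */
                entries += (uint16_t)((dsds - in_cmd_entry + 4) / 5);
        return entries;
}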
2852
2853static void
2854qla24xx_ct_iocb(srb_t *sp, struct ct_entry_24xx *ct_iocb)
2855{
2856        uint16_t        avail_dsds;
2857        uint32_t        *cur_dsd;
2858        struct scatterlist *sg;
2859        int index;
2860        uint16_t cmd_dsds, rsp_dsds;
2861        scsi_qla_host_t *vha = sp->vha;
2862        struct qla_hw_data *ha = vha->hw;
2863        struct bsg_job *bsg_job = sp->u.bsg_job;
2864        int entry_count = 1;
2865        cont_a64_entry_t *cont_pkt = NULL;
2866
2867        ct_iocb->entry_type = CT_IOCB_TYPE;
2868        ct_iocb->entry_status = 0;
2869        ct_iocb->sys_define = 0;
2870        ct_iocb->handle = sp->handle;
2871
2872        ct_iocb->nport_handle = cpu_to_le16(sp->fcport->loop_id);
2873        ct_iocb->vp_index = sp->vha->vp_idx;
2874        ct_iocb->comp_status = cpu_to_le16(0);
2875
2876        cmd_dsds = bsg_job->request_payload.sg_cnt;
2877        rsp_dsds = bsg_job->reply_payload.sg_cnt;
2878
2879        ct_iocb->cmd_dsd_count = cpu_to_le16(cmd_dsds);
2880        ct_iocb->timeout = 0;
2881        ct_iocb->rsp_dsd_count = cpu_to_le16(rsp_dsds);
2882        ct_iocb->cmd_byte_count =
2883            cpu_to_le32(bsg_job->request_payload.payload_len);
2884
2885        avail_dsds = 2;
2886        cur_dsd = (uint32_t *)ct_iocb->dseg_0_address;
2887        index = 0;
2888
2889        for_each_sg(bsg_job->request_payload.sg_list, sg, cmd_dsds, index) {
2890                dma_addr_t       sle_dma;
2891
2892                /* Allocate additional continuation packets? */
2893                if (avail_dsds == 0) {
2894                        /*
2895                         * Five DSDs are available in the Cont.
2896                         * Type 1 IOCB.
2897                         */
2898                        cont_pkt = qla2x00_prep_cont_type1_iocb(
2899                            vha, ha->req_q_map[0]);
2900                        cur_dsd = (uint32_t *) cont_pkt->dseg_0_address;
2901                        avail_dsds = 5;
2902                        entry_count++;
2903                }
2904
2905                sle_dma = sg_dma_address(sg);
2906                *cur_dsd++   = cpu_to_le32(LSD(sle_dma));
2907                *cur_dsd++   = cpu_to_le32(MSD(sle_dma));
2908                *cur_dsd++   = cpu_to_le32(sg_dma_len(sg));
2909                avail_dsds--;
2910        }
2911
2912        index = 0;
2913
2914        for_each_sg(bsg_job->reply_payload.sg_list, sg, rsp_dsds, index) {
2915                dma_addr_t       sle_dma;
2916
2917                /* Allocate additional continuation packets? */
2918                if (avail_dsds == 0) {
2919                        /*
2920                         * Five DSDs are available in the Cont.
2921                         * Type 1 IOCB.
2922                         */
2923                        cont_pkt = qla2x00_prep_cont_type1_iocb(vha,
2924                            ha->req_q_map[0]);
2925                        cur_dsd = (uint32_t *) cont_pkt->dseg_0_address;
2926                        avail_dsds = 5;
2927                        entry_count++;
2928                }
2929
2930                sle_dma = sg_dma_address(sg);
2931                *cur_dsd++   = cpu_to_le32(LSD(sle_dma));
2932                *cur_dsd++   = cpu_to_le32(MSD(sle_dma));
2933                *cur_dsd++   = cpu_to_le32(sg_dma_len(sg));
2934                avail_dsds--;
2935        }
2936        ct_iocb->entry_count = entry_count;
2937}
2938
2939/**
2940 * qla82xx_start_scsi() - Send a SCSI command to the ISP
2941 * @sp: command to send to the ISP
2942 *
2943 * Returns non-zero if a failure occurred, else zero.
2944 */
2945int
2946qla82xx_start_scsi(srb_t *sp)
2947{
2948        int             nseg;
2949        unsigned long   flags;
2950        struct scsi_cmnd *cmd;
2951        uint32_t        *clr_ptr;
2952        uint32_t        index;
2953        uint32_t        handle;
2954        uint16_t        cnt;
2955        uint16_t        req_cnt;
2956        uint16_t        tot_dsds;
2957        struct device_reg_82xx __iomem *reg;
2958        uint32_t dbval;
2959        uint32_t *fcp_dl;
2960        uint8_t additional_cdb_len;
2961        struct ct6_dsd *ctx;
2962        struct scsi_qla_host *vha = sp->vha;
2963        struct qla_hw_data *ha = vha->hw;
2964        struct req_que *req = NULL;
2965        struct rsp_que *rsp = NULL;
2966
2967        /* Setup device pointers. */
2968        reg = &ha->iobase->isp82;
2969        cmd = GET_CMD_SP(sp);
2970        req = vha->req;
2971        rsp = ha->rsp_q_map[0];
2972
2973        /* So we know we haven't pci_map'ed anything yet */
2974        tot_dsds = 0;
2975
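            /* Seed the doorbell value: 0x04 with the PCI port number in
             * bits 5-7; the queue id (bits 8-15) and the new ring index
             * (bits 16 and up) are OR'ed in just before ringing. */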
2976        dbval = 0x04 | (ha->portnum << 5);
2977
2978        /* Send marker if required */
2979        if (vha->marker_needed != 0) {
2980                if (qla2x00_marker(vha, req,
2981                        rsp, 0, 0, MK_SYNC_ALL) != QLA_SUCCESS) {
2982                        ql_log(ql_log_warn, vha, 0x300c,
2983                            "qla2x00_marker failed for cmd=%p.\n", cmd);
2984                        return QLA_FUNCTION_FAILED;
2985                }
2986                vha->marker_needed = 0;
2987        }
2988
2989        /* Acquire ring specific lock */
2990        spin_lock_irqsave(&ha->hardware_lock, flags);
2991
2992        /* Check for room in outstanding command list. */
2993        handle = req->current_outstanding_cmd;
2994        for (index = 1; index < req->num_outstanding_cmds; index++) {
2995                handle++;
2996                if (handle == req->num_outstanding_cmds)
2997                        handle = 1;
2998                if (!req->outstanding_cmds[handle])
2999                        break;
3000        }
3001        if (index == req->num_outstanding_cmds)
3002                goto queuing_error;
3003
3004        /* Map the sg table so we have an accurate count of sg entries needed */
3005        if (scsi_sg_count(cmd)) {
3006                nseg = dma_map_sg(&ha->pdev->dev, scsi_sglist(cmd),
3007                    scsi_sg_count(cmd), cmd->sc_data_direction);
3008                if (unlikely(!nseg))
3009                        goto queuing_error;
3010        } else
3011                nseg = 0;
3012
3013        tot_dsds = nseg;
3014
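            /* Past the ql2xshiftctondsd threshold, build a Command Type 6
             * IOCB whose data segments live in externally chained DSD
             * lists and whose FCP_CMND IU sits in a separate DMA buffer;
             * otherwise use a plain Command Type 7 IOCB. */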
3015        if (tot_dsds > ql2xshiftctondsd) {
3016                struct cmd_type_6 *cmd_pkt;
3017                uint16_t more_dsd_lists = 0;
3018                struct dsd_dma *dsd_ptr;
3019                uint16_t i;
3020
3021                more_dsd_lists = qla24xx_calc_dsd_lists(tot_dsds);
3022                if ((more_dsd_lists + ha->gbl_dsd_inuse) >= NUM_DSD_CHAIN) {
3023                        ql_dbg(ql_dbg_io, vha, 0x300d,
3024                            "Num of DSD list %d is more than %d for cmd=%p.\n",
3025                            more_dsd_lists + ha->gbl_dsd_inuse, NUM_DSD_CHAIN,
3026                            cmd);
3027                        goto queuing_error;
3028                }
3029
3030                if (more_dsd_lists <= ha->gbl_dsd_avail)
3031                        goto sufficient_dsds;
3032                else
3033                        more_dsd_lists -= ha->gbl_dsd_avail;
3034
3035                for (i = 0; i < more_dsd_lists; i++) {
3036                        dsd_ptr = kzalloc(sizeof(struct dsd_dma), GFP_ATOMIC);
3037                        if (!dsd_ptr) {
3038                                ql_log(ql_log_fatal, vha, 0x300e,
3039                                    "Failed to allocate memory for dsd_dma "
3040                                    "for cmd=%p.\n", cmd);
3041                                goto queuing_error;
3042                        }
3043
3044                        dsd_ptr->dsd_addr = dma_pool_alloc(ha->dl_dma_pool,
3045                                GFP_ATOMIC, &dsd_ptr->dsd_list_dma);
3046                        if (!dsd_ptr->dsd_addr) {
3047                                kfree(dsd_ptr);
3048                                ql_log(ql_log_fatal, vha, 0x300f,
3049                                    "Failed to allocate memory for dsd_addr "
3050                                    "for cmd=%p.\n", cmd);
3051                                goto queuing_error;
3052                        }
3053                        list_add_tail(&dsd_ptr->list, &ha->gbl_dsd_list);
3054                        ha->gbl_dsd_avail++;
3055                }
3056
3057sufficient_dsds:
3058                req_cnt = 1;
3059
3060                if (req->cnt < (req_cnt + 2)) {
3061                        cnt = (uint16_t)RD_REG_DWORD_RELAXED(
3062                                &reg->req_q_out[0]);
3063                        if (req->ring_index < cnt)
3064                                req->cnt = cnt - req->ring_index;
3065                        else
3066                                req->cnt = req->length -
3067                                        (req->ring_index - cnt);
3068                        if (req->cnt < (req_cnt + 2))
3069                                goto queuing_error;
3070                }
3071
3072                ctx = sp->u.scmd.ctx =
3073                    mempool_alloc(ha->ctx_mempool, GFP_ATOMIC);
3074                if (!ctx) {
3075                        ql_log(ql_log_fatal, vha, 0x3010,
3076                            "Failed to allocate ctx for cmd=%p.\n", cmd);
3077                        goto queuing_error;
3078                }
3079
3080                memset(ctx, 0, sizeof(struct ct6_dsd));
3081                ctx->fcp_cmnd = dma_pool_alloc(ha->fcp_cmnd_dma_pool,
3082                        GFP_ATOMIC, &ctx->fcp_cmnd_dma);
3083                if (!ctx->fcp_cmnd) {
3084                        ql_log(ql_log_fatal, vha, 0x3011,
3085                            "Failed to allocate fcp_cmnd for cmd=%p.\n", cmd);
3086                        goto queuing_error;
3087                }
3088
3089                /* Initialize the DSD list and dma handle */
3090                INIT_LIST_HEAD(&ctx->dsd_list);
3091                ctx->dsd_use_cnt = 0;
3092
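                    /* fcp_cmnd_len = 12 fixed header bytes (8-byte LUN
                     * plus control bytes) + CDB + 4-byte FCP_DL trailer. */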
3093                if (cmd->cmd_len > 16) {
3094                        additional_cdb_len = cmd->cmd_len - 16;
3095                        if ((cmd->cmd_len % 4) != 0) {
3096                                /* A SCSI command longer than 16 bytes
3097                                 * must be a multiple of 4.
3098                                 */
3099                                ql_log(ql_log_warn, vha, 0x3012,
3100                                    "scsi cmd len %d not multiple of 4 "
3101                                    "for cmd=%p.\n", cmd->cmd_len, cmd);
3102                                goto queuing_error_fcp_cmnd;
3103                        }
3104                        ctx->fcp_cmnd_len = 12 + cmd->cmd_len + 4;
3105                } else {
3106                        additional_cdb_len = 0;
3107                        ctx->fcp_cmnd_len = 12 + 16 + 4;
3108                }
3109
3110                cmd_pkt = (struct cmd_type_6 *)req->ring_ptr;
3111                cmd_pkt->handle = MAKE_HANDLE(req->id, handle);
3112
3113                /* Zero out remaining portion of packet. */
3114                /* tagged queuing modifier -- default is TSK_SIMPLE (0). */
3115                clr_ptr = (uint32_t *)cmd_pkt + 2;
3116                memset(clr_ptr, 0, REQUEST_ENTRY_SIZE - 8);
3117                cmd_pkt->dseg_count = cpu_to_le16(tot_dsds);
3118
3119                /* Set NPORT-ID and LUN number. */
3120                cmd_pkt->nport_handle = cpu_to_le16(sp->fcport->loop_id);
3121                cmd_pkt->port_id[0] = sp->fcport->d_id.b.al_pa;
3122                cmd_pkt->port_id[1] = sp->fcport->d_id.b.area;
3123                cmd_pkt->port_id[2] = sp->fcport->d_id.b.domain;
3124                cmd_pkt->vp_index = sp->vha->vp_idx;
3125
3126                /* Build IOCB segments */
3127                if (qla24xx_build_scsi_type_6_iocbs(sp, cmd_pkt, tot_dsds))
3128                        goto queuing_error_fcp_cmnd;
3129
3130                int_to_scsilun(cmd->device->lun, &cmd_pkt->lun);
3131                host_to_fcp_swap((uint8_t *)&cmd_pkt->lun, sizeof(cmd_pkt->lun));
3132
3133                /* build FCP_CMND IU */
3134                memset(ctx->fcp_cmnd, 0, sizeof(struct fcp_cmnd));
3135                int_to_scsilun(cmd->device->lun, &ctx->fcp_cmnd->lun);
3136                ctx->fcp_cmnd->additional_cdb_len = additional_cdb_len;
3137
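                    /* The low bits of this byte double as the FCP_CMND
                     * WRDATA (1) / RDDATA (2) direction flags. */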
3138                if (cmd->sc_data_direction == DMA_TO_DEVICE)
3139                        ctx->fcp_cmnd->additional_cdb_len |= 1;
3140                else if (cmd->sc_data_direction == DMA_FROM_DEVICE)
3141                        ctx->fcp_cmnd->additional_cdb_len |= 2;
3142
3143                /* Populate the FCP_PRIO. */
3144                if (ha->flags.fcp_prio_enabled)
3145                        ctx->fcp_cmnd->task_attribute |=
3146                            sp->fcport->fcp_prio << 3;
3147
3148                memcpy(ctx->fcp_cmnd->cdb, cmd->cmnd, cmd->cmd_len);
3149
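                    /* FCP_DL (the total transfer length) follows the CDB
                     * and is big-endian on the wire, hence htonl(). */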
3150                fcp_dl = (uint32_t *)(ctx->fcp_cmnd->cdb + 16 +
3151                    additional_cdb_len);
3152                *fcp_dl = htonl((uint32_t)scsi_bufflen(cmd));
3153
3154                cmd_pkt->fcp_cmnd_dseg_len = cpu_to_le16(ctx->fcp_cmnd_len);
3155                cmd_pkt->fcp_cmnd_dseg_address[0] =
3156                    cpu_to_le32(LSD(ctx->fcp_cmnd_dma));
3157                cmd_pkt->fcp_cmnd_dseg_address[1] =
3158                    cpu_to_le32(MSD(ctx->fcp_cmnd_dma));
3159
3160                sp->flags |= SRB_FCP_CMND_DMA_VALID;
3161                cmd_pkt->byte_count = cpu_to_le32((uint32_t)scsi_bufflen(cmd));
3162                /* Set total IOCB entry count. */
3163                cmd_pkt->entry_count = (uint8_t)req_cnt;
3164                /* Specify response queue number where
3165                 * completion should happen.
3166                 */
3167                cmd_pkt->entry_status = (uint8_t) rsp->id;
3168        } else {
3169                struct cmd_type_7 *cmd_pkt;
3170                req_cnt = qla24xx_calc_iocbs(vha, tot_dsds);
3171                if (req->cnt < (req_cnt + 2)) {
3172                        cnt = (uint16_t)RD_REG_DWORD_RELAXED(
3173                            &reg->req_q_out[0]);
3174                        if (req->ring_index < cnt)
3175                                req->cnt = cnt - req->ring_index;
3176                        else
3177                                req->cnt = req->length -
3178                                        (req->ring_index - cnt);
3179                }
3180                if (req->cnt < (req_cnt + 2))
3181                        goto queuing_error;
3182
3183                cmd_pkt = (struct cmd_type_7 *)req->ring_ptr;
3184                cmd_pkt->handle = MAKE_HANDLE(req->id, handle);
3185
3186                /* Zero out remaining portion of packet. */
3187                /* tagged queuing modifier -- default is TSK_SIMPLE (0).*/
3188                clr_ptr = (uint32_t *)cmd_pkt + 2;
3189                memset(clr_ptr, 0, REQUEST_ENTRY_SIZE - 8);
3190                cmd_pkt->dseg_count = cpu_to_le16(tot_dsds);
3191
3192                /* Set NPORT-ID and LUN number. */
3193                cmd_pkt->nport_handle = cpu_to_le16(sp->fcport->loop_id);
3194                cmd_pkt->port_id[0] = sp->fcport->d_id.b.al_pa;
3195                cmd_pkt->port_id[1] = sp->fcport->d_id.b.area;
3196                cmd_pkt->port_id[2] = sp->fcport->d_id.b.domain;
3197                cmd_pkt->vp_index = sp->vha->vp_idx;
3198
3199                int_to_scsilun(cmd->device->lun, &cmd_pkt->lun);
3200                host_to_fcp_swap((uint8_t *)&cmd_pkt->lun,
3201                    sizeof(cmd_pkt->lun));
3202
3203                /* Populate the FCP_PRIO. */
3204                if (ha->flags.fcp_prio_enabled)
3205                        cmd_pkt->task |= sp->fcport->fcp_prio << 3;
3206
3207                /* Load SCSI command packet. */
3208                memcpy(cmd_pkt->fcp_cdb, cmd->cmnd, cmd->cmd_len);
3209                host_to_fcp_swap(cmd_pkt->fcp_cdb, sizeof(cmd_pkt->fcp_cdb));
3210
3211                cmd_pkt->byte_count = cpu_to_le32((uint32_t)scsi_bufflen(cmd));
3212
3213                /* Build IOCB segments */
3214                qla24xx_build_scsi_iocbs(sp, cmd_pkt, tot_dsds, req);
3215
3216                /* Set total IOCB entry count. */
3217                cmd_pkt->entry_count = (uint8_t)req_cnt;
3218                /* Specify response queue number where
3219                 * completion should happen.
3220                 */
3221                cmd_pkt->entry_status = (uint8_t) rsp->id;
3222
3223        }
3224        /* Build command packet. */
3225        req->current_outstanding_cmd = handle;
3226        req->outstanding_cmds[handle] = sp;
3227        sp->handle = handle;
3228        cmd->host_scribble = (unsigned char *)(unsigned long)handle;
3229        req->cnt -= req_cnt;
3230        wmb();
3231
3232        /* Adjust ring index. */
3233        req->ring_index++;
3234        if (req->ring_index == req->length) {
3235                req->ring_index = 0;
3236                req->ring_ptr = req->ring;
3237        } else
3238                req->ring_ptr++;
3239
3240        sp->flags |= SRB_DMA_VALID;
3241
3242        /* Set chip new ring index. */
3243        /* write, read and verify logic */
3244        dbval = dbval | (req->id << 8) | (req->ring_index << 16);
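            /* ql2xdbwr selects the indirect CRB doorbell write; the
             * direct path re-writes until a read-back confirms the
             * value was accepted. */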
3245        if (ql2xdbwr)
3246                qla82xx_wr_32(ha, (uintptr_t __force)ha->nxdb_wr_ptr, dbval);
3247        else {
3248                WRT_REG_DWORD(ha->nxdb_wr_ptr, dbval);
3249                wmb();
3250                while (RD_REG_DWORD(ha->nxdb_rd_ptr) != dbval) {
3251                        WRT_REG_DWORD(ha->nxdb_wr_ptr, dbval);
3252                        wmb();
3253                }
3254        }
3255
3256        /* Manage unprocessed RIO/ZIO commands in response queue. */
3257        if (vha->flags.process_response_queue &&
3258            rsp->ring_ptr->signature != RESPONSE_PROCESSED)
3259                qla24xx_process_response_queue(vha, rsp);
3260
3261        spin_unlock_irqrestore(&ha->hardware_lock, flags);
3262        return QLA_SUCCESS;
3263
3264queuing_error_fcp_cmnd:
3265        dma_pool_free(ha->fcp_cmnd_dma_pool, ctx->fcp_cmnd, ctx->fcp_cmnd_dma);
3266queuing_error:
3267        if (tot_dsds)
3268                scsi_dma_unmap(cmd);
3269
3270        if (sp->u.scmd.ctx) {
3271                mempool_free(sp->u.scmd.ctx, ha->ctx_mempool);
3272                sp->u.scmd.ctx = NULL;
3273        }
3274        spin_unlock_irqrestore(&ha->hardware_lock, flags);
3275
3276        return QLA_FUNCTION_FAILED;
3277}
3278
3279static void
3280qla24xx_abort_iocb(srb_t *sp, struct abort_entry_24xx *abt_iocb)
3281{
3282        struct srb_iocb *aio = &sp->u.iocb_cmd;
3283        scsi_qla_host_t *vha = sp->vha;
3284        struct req_que *req = vha->req;
3285
3286        memset(abt_iocb, 0, sizeof(struct abort_entry_24xx));
3287        abt_iocb->entry_type = ABORT_IOCB_TYPE;
3288        abt_iocb->entry_count = 1;
3289        abt_iocb->handle = cpu_to_le32(MAKE_HANDLE(req->id, sp->handle));
3290        abt_iocb->nport_handle = cpu_to_le16(sp->fcport->loop_id);
3291        abt_iocb->handle_to_abort =
3292            cpu_to_le32(MAKE_HANDLE(req->id, aio->u.abt.cmd_hndl));
3293        abt_iocb->port_id[0] = sp->fcport->d_id.b.al_pa;
3294        abt_iocb->port_id[1] = sp->fcport->d_id.b.area;
3295        abt_iocb->port_id[2] = sp->fcport->d_id.b.domain;
3296        abt_iocb->vp_index = vha->vp_idx;
3297        abt_iocb->req_que_no = cpu_to_le16(req->id);
3298        /* Send the command to the firmware */
3299        wmb();
3300}
3301
3302static void
3303qla2x00_mb_iocb(srb_t *sp, struct mbx_24xx_entry *mbx)
3304{
3305        int i, sz;
3306
3307        mbx->entry_type = MBX_IOCB_TYPE;
3308        mbx->handle = sp->handle;
3309        sz = min(ARRAY_SIZE(mbx->mb), ARRAY_SIZE(sp->u.iocb_cmd.u.mbx.out_mb));
3310
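            /* Copy the outbound mailbox registers into the IOCB,
             * converting each to little-endian. */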
3311        for (i = 0; i < sz; i++)
3312                mbx->mb[i] = cpu_to_le16(sp->u.iocb_cmd.u.mbx.out_mb[i]);
3313}
3314
3315static void
3316qla2x00_ctpthru_cmd_iocb(srb_t *sp, struct ct_entry_24xx *ct_pkt)
3317{
3318        sp->u.iocb_cmd.u.ctarg.iocb = ct_pkt;
3319        qla24xx_prep_ms_iocb(sp->vha, &sp->u.iocb_cmd.u.ctarg);
3320        ct_pkt->handle = sp->handle;
3321}
3322
3323static void qla2x00_send_notify_ack_iocb(srb_t *sp,
3324        struct nack_to_isp *nack)
3325{
3326        struct imm_ntfy_from_isp *ntfy = sp->u.iocb_cmd.u.nack.ntfy;
3327
3328        nack->entry_type = NOTIFY_ACK_TYPE;
3329        nack->entry_count = 1;
3330        nack->ox_id = ntfy->ox_id;
3331
3332        nack->u.isp24.handle = sp->handle;
3333        nack->u.isp24.nport_handle = ntfy->u.isp24.nport_handle;
3334        if (le16_to_cpu(ntfy->u.isp24.status) == IMM_NTFY_ELS) {
3335                nack->u.isp24.flags = ntfy->u.isp24.flags &
3336                        cpu_to_le32(NOTIFY24XX_FLAGS_PUREX_IOCB);
3337        }
3338        nack->u.isp24.srr_rx_id = ntfy->u.isp24.srr_rx_id;
3339        nack->u.isp24.status = ntfy->u.isp24.status;
3340        nack->u.isp24.status_subcode = ntfy->u.isp24.status_subcode;
3341        nack->u.isp24.fw_handle = ntfy->u.isp24.fw_handle;
3342        nack->u.isp24.exchange_address = ntfy->u.isp24.exchange_address;
3343        nack->u.isp24.srr_rel_offs = ntfy->u.isp24.srr_rel_offs;
3344        nack->u.isp24.srr_ui = ntfy->u.isp24.srr_ui;
3345        nack->u.isp24.srr_flags = 0;
3346        nack->u.isp24.srr_reject_code = 0;
3347        nack->u.isp24.srr_reject_code_expl = 0;
3348        nack->u.isp24.vp_index = ntfy->u.isp24.vp_index;
3349}
3350
3351/*
3352 * Build NVME LS request
3353 */
3354static int
3355qla_nvme_ls(srb_t *sp, struct pt_ls4_request *cmd_pkt)
3356{
3357        struct srb_iocb *nvme;
3358        int     rval = QLA_SUCCESS;
3359
3360        nvme = &sp->u.iocb_cmd;
3361        cmd_pkt->entry_type = PT_LS4_REQUEST;
3362        cmd_pkt->entry_count = 1;
3363        cmd_pkt->control_flags = CF_LS4_ORIGINATOR << CF_LS4_SHIFT;
3364
3365        cmd_pkt->timeout = cpu_to_le16(nvme->u.nvme.timeout_sec);
3366        cmd_pkt->nport_handle = cpu_to_le16(sp->fcport->loop_id);
3367        cmd_pkt->vp_index = sp->fcport->vha->vp_idx;
3368
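            /* One DSD in each direction: dseg0 holds the LS request
             * payload and dseg1 receives the LS response. */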
3369        cmd_pkt->tx_dseg_count = 1;
3370        cmd_pkt->tx_byte_count = nvme->u.nvme.cmd_len;
3371        cmd_pkt->dseg0_len = nvme->u.nvme.cmd_len;
3372        cmd_pkt->dseg0_address[0] = cpu_to_le32(LSD(nvme->u.nvme.cmd_dma));
3373        cmd_pkt->dseg0_address[1] = cpu_to_le32(MSD(nvme->u.nvme.cmd_dma));
3374
3375        cmd_pkt->rx_dseg_count = 1;
3376        cmd_pkt->rx_byte_count = nvme->u.nvme.rsp_len;
3377        cmd_pkt->dseg1_len  = nvme->u.nvme.rsp_len;
3378        cmd_pkt->dseg1_address[0] =  cpu_to_le32(LSD(nvme->u.nvme.rsp_dma));
3379        cmd_pkt->dseg1_address[1] =  cpu_to_le32(MSD(nvme->u.nvme.rsp_dma));
3380
3381        return rval;
3382}
3383
3384int
3385qla2x00_start_sp(srb_t *sp)
3386{
3387        int rval;
3388        scsi_qla_host_t *vha = sp->vha;
3389        struct qla_hw_data *ha = vha->hw;
3390        void *pkt;
3391        unsigned long flags;
3392
3393        rval = QLA_FUNCTION_FAILED;
3394        spin_lock_irqsave(&ha->hardware_lock, flags);
3395        pkt = qla2x00_alloc_iocbs(vha, sp);
3396        if (!pkt) {
3397                ql_log(ql_log_warn, vha, 0x700c,
3398                    "qla2x00_alloc_iocbs failed.\n");
3399                goto done;
3400        }
3401
3402        rval = QLA_SUCCESS;
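            /* Hand the packet to the builder for this SRB type; FWI-2
             * capable ISPs use the 24xx builders, older ISPs the 2x00
             * ones, and ISPFx00 parts their own. */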
3403        switch (sp->type) {
3404        case SRB_LOGIN_CMD:
3405                IS_FWI2_CAPABLE(ha) ?
3406                    qla24xx_login_iocb(sp, pkt) :
3407                    qla2x00_login_iocb(sp, pkt);
3408                break;
3409        case SRB_PRLI_CMD:
3410                qla24xx_prli_iocb(sp, pkt);
3411                break;
3412        case SRB_LOGOUT_CMD:
3413                IS_FWI2_CAPABLE(ha) ?
3414                    qla24xx_logout_iocb(sp, pkt) :
3415                    qla2x00_logout_iocb(sp, pkt);
3416                break;
3417        case SRB_ELS_CMD_RPT:
3418        case SRB_ELS_CMD_HST:
3419                qla24xx_els_iocb(sp, pkt);
3420                break;
3421        case SRB_CT_CMD:
3422                IS_FWI2_CAPABLE(ha) ?
3423                    qla24xx_ct_iocb(sp, pkt) :
3424                    qla2x00_ct_iocb(sp, pkt);
3425                break;
3426        case SRB_ADISC_CMD:
3427                IS_FWI2_CAPABLE(ha) ?
3428                    qla24xx_adisc_iocb(sp, pkt) :
3429                    qla2x00_adisc_iocb(sp, pkt);
3430                break;
3431        case SRB_TM_CMD:
3432                IS_QLAFX00(ha) ?
3433                    qlafx00_tm_iocb(sp, pkt) :
3434                    qla24xx_tm_iocb(sp, pkt);
3435                break;
3436        case SRB_FXIOCB_DCMD:
3437        case SRB_FXIOCB_BCMD:
3438                qlafx00_fxdisc_iocb(sp, pkt);
3439                break;
3440        case SRB_NVME_LS:
3441                qla_nvme_ls(sp, pkt);
3442                break;
3443        case SRB_ABT_CMD:
3444                IS_QLAFX00(ha) ?
3445                        qlafx00_abort_iocb(sp, pkt) :
3446                        qla24xx_abort_iocb(sp, pkt);
3447                break;
3448        case SRB_ELS_DCMD:
3449                qla24xx_els_logo_iocb(sp, pkt);
3450                break;
3451        case SRB_CT_PTHRU_CMD:
3452                qla2x00_ctpthru_cmd_iocb(sp, pkt);
3453                break;
3454        case SRB_MB_IOCB:
3455                qla2x00_mb_iocb(sp, pkt);
3456                break;
3457        case SRB_NACK_PLOGI:
3458        case SRB_NACK_PRLI:
3459        case SRB_NACK_LOGO:
3460                qla2x00_send_notify_ack_iocb(sp, pkt);
3461                break;
3462        default:
3463                break;
3464        }
3465
3466        wmb();
3467        qla2x00_start_iocbs(vha, ha->req_q_map[0]);
3468done:
3469        spin_unlock_irqrestore(&ha->hardware_lock, flags);
3470        return rval;
3471}
3472
3473static void
3474qla25xx_build_bidir_iocb(srb_t *sp, struct scsi_qla_host *vha,
3475                                struct cmd_bidir *cmd_pkt, uint32_t tot_dsds)
3476{
3477        uint16_t avail_dsds;
3478        uint32_t *cur_dsd;
3479        uint32_t req_data_len = 0;
3480        uint32_t rsp_data_len = 0;
3481        struct scatterlist *sg;
3482        int index;
3483        int entry_count = 1;
3484        struct bsg_job *bsg_job = sp->u.bsg_job;
3485
3486        /* Update entry type to indicate bidir command. */
3487        *((uint32_t *)(&cmd_pkt->entry_type)) =
3488                cpu_to_le32(COMMAND_BIDIRECTIONAL);
3489
3490        /* Set the transfer direction; in this case set both flags.
3491         * Also set the BD_WRAP_BACK flag; the firmware takes care of
3492         * assigning DID=SID for outgoing packets.
3493         */
3494        cmd_pkt->wr_dseg_count = cpu_to_le16(bsg_job->request_payload.sg_cnt);
3495        cmd_pkt->rd_dseg_count = cpu_to_le16(bsg_job->reply_payload.sg_cnt);
3496        cmd_pkt->control_flags = cpu_to_le16(BD_WRITE_DATA | BD_READ_DATA |
3497                                                        BD_WRAP_BACK);
3498
3499        req_data_len = rsp_data_len = bsg_job->request_payload.payload_len;
3500        cmd_pkt->wr_byte_count = cpu_to_le32(req_data_len);
3501        cmd_pkt->rd_byte_count = cpu_to_le32(rsp_data_len);
3502        cmd_pkt->timeout = cpu_to_le16(qla2x00_get_async_timeout(vha) + 2);
3503
3504        vha->bidi_stats.transfer_bytes += req_data_len;
3505        vha->bidi_stats.io_count++;
3506
3507        vha->qla_stats.output_bytes += req_data_len;
3508        vha->qla_stats.output_requests++;
3509
3510        /* Only one DSD is available in the bidirectional IOCB; the
3511         * remaining DSDs are bundled into continuation IOCBs.
3512         */
3513        avail_dsds = 1;
3514        cur_dsd = (uint32_t *)&cmd_pkt->fcp_data_dseg_address;
3515
3516        index = 0;
3517
3518        for_each_sg(bsg_job->request_payload.sg_list, sg,
3519                                bsg_job->request_payload.sg_cnt, index) {
3520                dma_addr_t sle_dma;
3521                cont_a64_entry_t *cont_pkt;
3522
3523                /* Allocate additional continuation packets */
3524                if (avail_dsds == 0) {
3525                        /* A Continuation Type 1 IOCB can accommodate
3526                         * five DSDs.
3527                         */
3528                        cont_pkt = qla2x00_prep_cont_type1_iocb(vha, vha->req);
3529                        cur_dsd = (uint32_t *) cont_pkt->dseg_0_address;
3530                        avail_dsds = 5;
3531                        entry_count++;
3532                }
3533                sle_dma = sg_dma_address(sg);
3534                *cur_dsd++   = cpu_to_le32(LSD(sle_dma));
3535                *cur_dsd++   = cpu_to_le32(MSD(sle_dma));
3536                *cur_dsd++   = cpu_to_le32(sg_dma_len(sg));
3537                avail_dsds--;
3538        }
3539        /* The DSDs for a read request always go to a continuation IOCB
3540         * and follow the write DSDs. If there is room in the current
3541         * IOCB they are added to it; otherwise a new continuation IOCB
3542         * is allocated.
3543         */
3544        for_each_sg(bsg_job->reply_payload.sg_list, sg,
3545                                bsg_job->reply_payload.sg_cnt, index) {
3546                dma_addr_t sle_dma;
3547                cont_a64_entry_t *cont_pkt;
3548
3549                /* Allocate additional continuation packets */
3550                if (avail_dsds == 0) {
3551                        /* A Continuation Type 1 IOCB can accommodate
3552                         * five DSDs.
3553                         */
3554                        cont_pkt = qla2x00_prep_cont_type1_iocb(vha, vha->req);
3555                        cur_dsd = (uint32_t *) cont_pkt->dseg_0_address;
3556                        avail_dsds = 5;
3557                        entry_count++;
3558                }
3559                sle_dma = sg_dma_address(sg);
3560                *cur_dsd++   = cpu_to_le32(LSD(sle_dma));
3561                *cur_dsd++   = cpu_to_le32(MSD(sle_dma));
3562                *cur_dsd++   = cpu_to_le32(sg_dma_len(sg));
3563                avail_dsds--;
3564        }
3565        /* This value must equal the number of IOCBs used for this command. */
3566        cmd_pkt->entry_count = entry_count;
3567}
3568
3569int
3570qla2x00_start_bidir(srb_t *sp, struct scsi_qla_host *vha, uint32_t tot_dsds)
3571{
3573        struct qla_hw_data *ha = vha->hw;
3574        unsigned long flags;
3575        uint32_t handle;
3576        uint32_t index;
3577        uint16_t req_cnt;
3578        uint16_t cnt;
3579        uint32_t *clr_ptr;
3580        struct cmd_bidir *cmd_pkt = NULL;
3581        struct rsp_que *rsp;
3582        struct req_que *req;
3583        int rval = EXT_STATUS_OK;
3586
3587        rsp = ha->rsp_q_map[0];
3588        req = vha->req;
3589
3590        /* Send marker if required */
3591        if (vha->marker_needed != 0) {
3592                if (qla2x00_marker(vha, req,
3593                        rsp, 0, 0, MK_SYNC_ALL) != QLA_SUCCESS)
3594                        return EXT_STATUS_MAILBOX;
3595                vha->marker_needed = 0;
3596        }
3597
3598        /* Acquire ring specific lock */
3599        spin_lock_irqsave(&ha->hardware_lock, flags);
3600
3601        /* Check for room in outstanding command list. */
3602        handle = req->current_outstanding_cmd;
3603        for (index = 1; index < req->num_outstanding_cmds; index++) {
3604                handle++;
3605                if (handle == req->num_outstanding_cmds)
3606                        handle = 1;
3607                if (!req->outstanding_cmds[handle])
3608                        break;
3609        }
3610
3611        if (index == req->num_outstanding_cmds) {
3612                rval = EXT_STATUS_BUSY;
3613                goto queuing_error;
3614        }
3615
3616        /* Calculate the number of IOCBs required. */
3617        req_cnt = qla24xx_calc_iocbs(vha, tot_dsds);
3618
3619        /* Check for room on request queue. */
3620        if (req->cnt < req_cnt + 2) {
3621                cnt = IS_SHADOW_REG_CAPABLE(ha) ? *req->out_ptr :
3622                    RD_REG_DWORD_RELAXED(req->req_q_out);
3623                if (req->ring_index < cnt)
3624                        req->cnt = cnt - req->ring_index;
3625                else
3626                        req->cnt = req->length -
3627                                (req->ring_index - cnt);
3628        }
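            /* Re-check after refreshing req->cnt from the consumer
             * index; two slots are always kept in reserve. */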
3629        if (req->cnt < req_cnt + 2) {
3630                rval = EXT_STATUS_BUSY;
3631                goto queuing_error;
3632        }
3633
3634        cmd_pkt = (struct cmd_bidir *)req->ring_ptr;
3635        cmd_pkt->handle = MAKE_HANDLE(req->id, handle);
3636
3637        /* Zero out remaining portion of packet. */
3638        /* tagged queuing modifier -- default is TSK_SIMPLE (0).*/
3639        clr_ptr = (uint32_t *)cmd_pkt + 2;
3640        memset(clr_ptr, 0, REQUEST_ENTRY_SIZE - 8);
3641
3642        /* Set NPORT-ID (of the vha). */
3643        cmd_pkt->nport_handle = cpu_to_le16(vha->self_login_loop_id);
3644        cmd_pkt->port_id[0] = vha->d_id.b.al_pa;
3645        cmd_pkt->port_id[1] = vha->d_id.b.area;
3646        cmd_pkt->port_id[2] = vha->d_id.b.domain;
3647
3648        qla25xx_build_bidir_iocb(sp, vha, cmd_pkt, tot_dsds);
3649        cmd_pkt->entry_status = (uint8_t) rsp->id;
3650        /* Build command packet. */
3651        req->current_outstanding_cmd = handle;
3652        req->outstanding_cmds[handle] = sp;
3653        sp->handle = handle;
3654        req->cnt -= req_cnt;
3655
3656        /* Send the command to the firmware */
3657        wmb();
3658        qla2x00_start_iocbs(vha, req);
3659queuing_error:
3660        spin_unlock_irqrestore(&ha->hardware_lock, flags);
3661        return rval;
3662}
3663