linux/drivers/scsi/qla2xxx/qla_iocb.c
/*
 * QLogic Fibre Channel HBA Driver
 * Copyright (c)  2003-2014 QLogic Corporation
 *
 * See LICENSE.qla2xxx for copyright and licensing details.
 */
#include "qla_def.h"
#include "qla_target.h"

#include <linux/blkdev.h>
#include <linux/delay.h>

#include <scsi/scsi_tcq.h>

/**
 * qla2x00_get_cmd_direction() - Determine control_flag data direction.
 * @sp: SCSI command
 *
 * Returns the proper CF_* direction based on CDB.
 */
static inline uint16_t
qla2x00_get_cmd_direction(srb_t *sp)
{
        uint16_t cflags;
        struct scsi_cmnd *cmd = GET_CMD_SP(sp);
        struct scsi_qla_host *vha = sp->vha;

        cflags = 0;

        /* Set transfer direction */
        if (cmd->sc_data_direction == DMA_TO_DEVICE) {
                cflags = CF_WRITE;
                vha->qla_stats.output_bytes += scsi_bufflen(cmd);
                vha->qla_stats.output_requests++;
        } else if (cmd->sc_data_direction == DMA_FROM_DEVICE) {
                cflags = CF_READ;
                vha->qla_stats.input_bytes += scsi_bufflen(cmd);
                vha->qla_stats.input_requests++;
        }
        return (cflags);
}

/**
 * qla2x00_calc_iocbs_32() - Determine number of Command Type 2 and
 * Continuation Type 0 IOCBs to allocate.
 *
 * @dsds: number of data segment descriptors needed
 *
 * Returns the number of IOCB entries needed to store @dsds.
 */
uint16_t
qla2x00_calc_iocbs_32(uint16_t dsds)
{
        uint16_t iocbs;

        iocbs = 1;
        if (dsds > 3) {
                iocbs += (dsds - 3) / 7;
                if ((dsds - 3) % 7)
                        iocbs++;
        }
        return (iocbs);
}
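
/*
 * Worked example (illustrative, not part of the original source): a
 * Command Type 2 IOCB carries 3 DSDs and each Continuation Type 0 IOCB
 * carries 7 more, so for dsds = 17:
 *
 *      iocbs = 1 + (17 - 3) / 7 = 3, with (17 - 3) % 7 == 0;
 *
 * dsds = 18 leaves a non-zero remainder and therefore needs a fourth entry.
 */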

/**
 * qla2x00_calc_iocbs_64() - Determine number of Command Type 3 and
 * Continuation Type 1 IOCBs to allocate.
 *
 * @dsds: number of data segment descriptors needed
 *
 * Returns the number of IOCB entries needed to store @dsds.
 */
uint16_t
qla2x00_calc_iocbs_64(uint16_t dsds)
{
        uint16_t iocbs;

        iocbs = 1;
        if (dsds > 2) {
                iocbs += (dsds - 2) / 5;
                if ((dsds - 2) % 5)
                        iocbs++;
        }
        return (iocbs);
}
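
/*
 * Worked example (illustrative): the 64-bit layout carries 2 DSDs in the
 * command IOCB and 5 per Continuation Type 1 IOCB, so dsds = 12 needs
 * 1 + (12 - 2) / 5 = 3 entries, while dsds = 13 leaves a remainder and
 * needs 4.
 */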

/**
 * qla2x00_prep_cont_type0_iocb() - Initialize a Continuation Type 0 IOCB.
 * @vha: HA context
 *
 * Returns a pointer to the Continuation Type 0 IOCB packet.
 */
static inline cont_entry_t *
qla2x00_prep_cont_type0_iocb(struct scsi_qla_host *vha)
{
        cont_entry_t *cont_pkt;
        struct req_que *req = vha->req;
        /* Adjust ring index. */
        req->ring_index++;
        if (req->ring_index == req->length) {
                req->ring_index = 0;
                req->ring_ptr = req->ring;
        } else {
                req->ring_ptr++;
        }

        cont_pkt = (cont_entry_t *)req->ring_ptr;

        /* Load packet defaults. */
        put_unaligned_le32(CONTINUE_TYPE, &cont_pkt->entry_type);

        return (cont_pkt);
}

/**
 * qla2x00_prep_cont_type1_iocb() - Initialize a Continuation Type 1 IOCB.
 * @vha: HA context
 * @req: request queue
 *
 * Returns a pointer to the continuation type 1 IOCB packet.
 */
static inline cont_a64_entry_t *
qla2x00_prep_cont_type1_iocb(scsi_qla_host_t *vha, struct req_que *req)
{
        cont_a64_entry_t *cont_pkt;

        /* Adjust ring index. */
        req->ring_index++;
        if (req->ring_index == req->length) {
                req->ring_index = 0;
                req->ring_ptr = req->ring;
        } else {
                req->ring_ptr++;
        }

        cont_pkt = (cont_a64_entry_t *)req->ring_ptr;

        /* Load packet defaults. */
        put_unaligned_le32(IS_QLAFX00(vha->hw) ? CONTINUE_A64_TYPE_FX00 :
                           CONTINUE_A64_TYPE, &cont_pkt->entry_type);

        return (cont_pkt);
}
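
/*
 * Illustrative invariant (an assumption sketched for clarity, not driver
 * code): after either prep helper advances the queue,
 *
 *      req->ring_ptr == &req->ring[req->ring_index]
 *
 * holds, i.e. the wrap back to slot 0 keeps the pointer and the index in
 * lock-step instead of incrementing past the end of the ring.
 */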

inline int
qla24xx_configure_prot_mode(srb_t *sp, uint16_t *fw_prot_opts)
{
        struct scsi_cmnd *cmd = GET_CMD_SP(sp);
        uint8_t guard = scsi_host_get_guard(cmd->device->host);

        /* We always use DIFF Bundling for best performance */
        *fw_prot_opts = 0;

        /* Translate SCSI opcode to a protection opcode */
        switch (scsi_get_prot_op(cmd)) {
        case SCSI_PROT_READ_STRIP:
                *fw_prot_opts |= PO_MODE_DIF_REMOVE;
                break;
        case SCSI_PROT_WRITE_INSERT:
                *fw_prot_opts |= PO_MODE_DIF_INSERT;
                break;
        case SCSI_PROT_READ_INSERT:
                *fw_prot_opts |= PO_MODE_DIF_INSERT;
                break;
        case SCSI_PROT_WRITE_STRIP:
                *fw_prot_opts |= PO_MODE_DIF_REMOVE;
                break;
        case SCSI_PROT_READ_PASS:
        case SCSI_PROT_WRITE_PASS:
                if (guard & SHOST_DIX_GUARD_IP)
                        *fw_prot_opts |= PO_MODE_DIF_TCP_CKSUM;
                else
                        *fw_prot_opts |= PO_MODE_DIF_PASS;
                break;
        default:        /* Normal Request */
                *fw_prot_opts |= PO_MODE_DIF_PASS;
                break;
        }

        return scsi_prot_sg_count(cmd);
}

/**
 * qla2x00_build_scsi_iocbs_32() - Build IOCB command utilizing 32bit
 * capable IOCB types.
 *
 * @sp: SRB command to process
 * @cmd_pkt: Command type 2 IOCB
 * @tot_dsds: Total number of segments to transfer
 */
void qla2x00_build_scsi_iocbs_32(srb_t *sp, cmd_entry_t *cmd_pkt,
    uint16_t tot_dsds)
{
        uint16_t        avail_dsds;
        struct dsd32    *cur_dsd;
        scsi_qla_host_t *vha;
        struct scsi_cmnd *cmd;
        struct scatterlist *sg;
        int i;

        cmd = GET_CMD_SP(sp);

        /* Update entry type to indicate Command Type 2 IOCB */
        put_unaligned_le32(COMMAND_TYPE, &cmd_pkt->entry_type);

        /* No data transfer */
        if (!scsi_bufflen(cmd) || cmd->sc_data_direction == DMA_NONE) {
                cmd_pkt->byte_count = cpu_to_le32(0);
                return;
        }

        vha = sp->vha;
        cmd_pkt->control_flags |= cpu_to_le16(qla2x00_get_cmd_direction(sp));

        /* Three DSDs are available in the Command Type 2 IOCB */
        avail_dsds = ARRAY_SIZE(cmd_pkt->dsd32);
        cur_dsd = cmd_pkt->dsd32;

        /* Load data segments */
        scsi_for_each_sg(cmd, sg, tot_dsds, i) {
                cont_entry_t *cont_pkt;

                /* Allocate additional continuation packets? */
                if (avail_dsds == 0) {
                        /*
                         * Seven DSDs are available in the Continuation
                         * Type 0 IOCB.
                         */
                        cont_pkt = qla2x00_prep_cont_type0_iocb(vha);
                        cur_dsd = cont_pkt->dsd;
                        avail_dsds = ARRAY_SIZE(cont_pkt->dsd);
                }

                append_dsd32(&cur_dsd, sg);
                avail_dsds--;
        }
}

/**
 * qla2x00_build_scsi_iocbs_64() - Build IOCB command utilizing 64bit
 * capable IOCB types.
 *
 * @sp: SRB command to process
 * @cmd_pkt: Command type 3 IOCB
 * @tot_dsds: Total number of segments to transfer
 */
void qla2x00_build_scsi_iocbs_64(srb_t *sp, cmd_entry_t *cmd_pkt,
    uint16_t tot_dsds)
{
        uint16_t        avail_dsds;
        struct dsd64    *cur_dsd;
        scsi_qla_host_t *vha;
        struct scsi_cmnd *cmd;
        struct scatterlist *sg;
        int i;

        cmd = GET_CMD_SP(sp);

        /* Update entry type to indicate Command Type 3 IOCB */
        put_unaligned_le32(COMMAND_A64_TYPE, &cmd_pkt->entry_type);

        /* No data transfer */
        if (!scsi_bufflen(cmd) || cmd->sc_data_direction == DMA_NONE) {
                cmd_pkt->byte_count = cpu_to_le32(0);
                return;
        }

        vha = sp->vha;
        cmd_pkt->control_flags |= cpu_to_le16(qla2x00_get_cmd_direction(sp));

        /* Two DSDs are available in the Command Type 3 IOCB */
        avail_dsds = ARRAY_SIZE(cmd_pkt->dsd64);
        cur_dsd = cmd_pkt->dsd64;

        /* Load data segments */
        scsi_for_each_sg(cmd, sg, tot_dsds, i) {
                cont_a64_entry_t *cont_pkt;

                /* Allocate additional continuation packets? */
                if (avail_dsds == 0) {
                        /*
                         * Five DSDs are available in the Continuation
                         * Type 1 IOCB.
                         */
                        cont_pkt = qla2x00_prep_cont_type1_iocb(vha, vha->req);
                        cur_dsd = cont_pkt->dsd;
                        avail_dsds = ARRAY_SIZE(cont_pkt->dsd);
                }

                append_dsd64(&cur_dsd, sg);
                avail_dsds--;
        }
}

/*
 * Find the first handle that is not in use, starting from
 * req->current_outstanding_cmd + 1. The caller must hold the lock that is
 * associated with @req.
 */
uint32_t qla2xxx_get_next_handle(struct req_que *req)
{
        uint32_t index, handle = req->current_outstanding_cmd;

        for (index = 1; index < req->num_outstanding_cmds; index++) {
                handle++;
                if (handle == req->num_outstanding_cmds)
                        handle = 1;
                if (!req->outstanding_cmds[handle])
                        return handle;
        }

        return 0;
}
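
/*
 * Standalone sketch of the search above (hypothetical helper names, for
 * illustration only, not driver code):
 *
 *      static u32 next_free_handle(void **slots, u32 num, u32 last)
 *      {
 *              u32 i, h = last;
 *
 *              for (i = 1; i < num; i++) {
 *                      if (++h == num)
 *                              h = 1;
 *                      if (!slots[h])
 *                              return h;
 *              }
 *              return 0;
 *      }
 *
 * Handle 0 is reserved to mean "no handle", so the scan covers handles
 * 1..num-1 beginning just after the most recently issued one.
 */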

/**
 * qla2x00_start_scsi() - Send a SCSI command to the ISP
 * @sp: command to send to the ISP
 *
 * Returns non-zero if a failure occurred, else zero.
 */
int
qla2x00_start_scsi(srb_t *sp)
{
        int             nseg;
        unsigned long   flags;
        scsi_qla_host_t *vha;
        struct scsi_cmnd *cmd;
        uint32_t        *clr_ptr;
        uint32_t        handle;
        cmd_entry_t     *cmd_pkt;
        uint16_t        cnt;
        uint16_t        req_cnt;
        uint16_t        tot_dsds;
        struct device_reg_2xxx __iomem *reg;
        struct qla_hw_data *ha;
        struct req_que *req;
        struct rsp_que *rsp;

        /* Setup device pointers. */
        vha = sp->vha;
        ha = vha->hw;
        reg = &ha->iobase->isp;
        cmd = GET_CMD_SP(sp);
        req = ha->req_q_map[0];
        rsp = ha->rsp_q_map[0];
        /* So we know we haven't pci_map'ed anything yet */
        tot_dsds = 0;

        /* Send marker if required */
        if (vha->marker_needed != 0) {
                if (qla2x00_marker(vha, ha->base_qpair, 0, 0, MK_SYNC_ALL) !=
                    QLA_SUCCESS) {
                        return (QLA_FUNCTION_FAILED);
                }
                vha->marker_needed = 0;
        }

        /* Acquire ring specific lock */
        spin_lock_irqsave(&ha->hardware_lock, flags);

        handle = qla2xxx_get_next_handle(req);
        if (handle == 0)
                goto queuing_error;

        /* Map the sg table so we have an accurate count of sg entries needed */
        if (scsi_sg_count(cmd)) {
                nseg = dma_map_sg(&ha->pdev->dev, scsi_sglist(cmd),
                    scsi_sg_count(cmd), cmd->sc_data_direction);
                if (unlikely(!nseg))
                        goto queuing_error;
        } else
                nseg = 0;

        tot_dsds = nseg;

        /* Calculate the number of request entries needed. */
        req_cnt = ha->isp_ops->calc_req_entries(tot_dsds);
        if (req->cnt < (req_cnt + 2)) {
                cnt = RD_REG_WORD_RELAXED(ISP_REQ_Q_OUT(ha, reg));
                if (req->ring_index < cnt)
                        req->cnt = cnt - req->ring_index;
                else
                        req->cnt = req->length -
                            (req->ring_index - cnt);
                /* If still no head room then bail out */
                if (req->cnt < (req_cnt + 2))
                        goto queuing_error;
        }

        /* Build command packet */
        req->current_outstanding_cmd = handle;
        req->outstanding_cmds[handle] = sp;
        sp->handle = handle;
        cmd->host_scribble = (unsigned char *)(unsigned long)handle;
        req->cnt -= req_cnt;

        cmd_pkt = (cmd_entry_t *)req->ring_ptr;
        cmd_pkt->handle = handle;
        /* Zero out remaining portion of packet. */
        clr_ptr = (uint32_t *)cmd_pkt + 2;
        memset(clr_ptr, 0, REQUEST_ENTRY_SIZE - 8);
        cmd_pkt->dseg_count = cpu_to_le16(tot_dsds);

        /* Set target ID and LUN number */
        SET_TARGET_ID(ha, cmd_pkt->target, sp->fcport->loop_id);
        cmd_pkt->lun = cpu_to_le16(cmd->device->lun);
        cmd_pkt->control_flags = cpu_to_le16(CF_SIMPLE_TAG);

        /* Load SCSI command packet. */
        memcpy(cmd_pkt->scsi_cdb, cmd->cmnd, cmd->cmd_len);
        cmd_pkt->byte_count = cpu_to_le32((uint32_t)scsi_bufflen(cmd));

        /* Build IOCB segments */
        ha->isp_ops->build_iocbs(sp, cmd_pkt, tot_dsds);

        /* Set total data segment count. */
        cmd_pkt->entry_count = (uint8_t)req_cnt;
        wmb();

        /* Adjust ring index. */
        req->ring_index++;
        if (req->ring_index == req->length) {
                req->ring_index = 0;
                req->ring_ptr = req->ring;
        } else
                req->ring_ptr++;

        sp->flags |= SRB_DMA_VALID;

        /* Set chip new ring index. */
        WRT_REG_WORD(ISP_REQ_Q_IN(ha, reg), req->ring_index);
        RD_REG_WORD_RELAXED(ISP_REQ_Q_IN(ha, reg));     /* PCI Posting. */

        /* Manage unprocessed RIO/ZIO commands in response queue. */
        if (vha->flags.process_response_queue &&
            rsp->ring_ptr->signature != RESPONSE_PROCESSED)
                qla2x00_process_response_queue(rsp);

        spin_unlock_irqrestore(&ha->hardware_lock, flags);
        return (QLA_SUCCESS);

queuing_error:
        if (tot_dsds)
                scsi_dma_unmap(cmd);

        spin_unlock_irqrestore(&ha->hardware_lock, flags);

        return (QLA_FUNCTION_FAILED);
}
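
/*
 * Worked example for the free-slot computation above (illustrative
 * numbers): with req->length = 128, req->ring_index = 120 and the chip's
 * out pointer cnt = 10, the producer is ahead of the consumer, so
 *
 *      req->cnt = req->length - (ring_index - cnt) = 128 - 110 = 18
 *
 * entries remain; had cnt been 125, req->cnt = cnt - ring_index = 5.
 * The "req_cnt + 2" headroom check keeps the ring from being driven
 * completely full, which would be indistinguishable from empty.
 */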

/**
 * qla2x00_start_iocbs() - Execute the IOCB command
 * @vha: HA context
 * @req: request queue
 */
void
qla2x00_start_iocbs(struct scsi_qla_host *vha, struct req_que *req)
{
        struct qla_hw_data *ha = vha->hw;
        device_reg_t *reg = ISP_QUE_REG(ha, req->id);

        if (IS_P3P_TYPE(ha)) {
                qla82xx_start_iocbs(vha);
        } else {
                /* Adjust ring index. */
                req->ring_index++;
                if (req->ring_index == req->length) {
                        req->ring_index = 0;
                        req->ring_ptr = req->ring;
                } else
                        req->ring_ptr++;

                /* Set chip new ring index. */
                if (ha->mqenable || IS_QLA27XX(ha) || IS_QLA28XX(ha)) {
                        WRT_REG_DWORD(req->req_q_in, req->ring_index);
                } else if (IS_QLA83XX(ha)) {
                        WRT_REG_DWORD(req->req_q_in, req->ring_index);
                        RD_REG_DWORD_RELAXED(&ha->iobase->isp24.hccr);
                } else if (IS_QLAFX00(ha)) {
                        WRT_REG_DWORD(&reg->ispfx00.req_q_in, req->ring_index);
                        RD_REG_DWORD_RELAXED(&reg->ispfx00.req_q_in);
                        QLAFX00_SET_HST_INTR(ha, ha->rqstq_intr_code);
                } else if (IS_FWI2_CAPABLE(ha)) {
                        WRT_REG_DWORD(&reg->isp24.req_q_in, req->ring_index);
                        RD_REG_DWORD_RELAXED(&reg->isp24.req_q_in);
                } else {
                        WRT_REG_WORD(ISP_REQ_Q_IN(ha, &reg->isp),
                                req->ring_index);
                        RD_REG_WORD_RELAXED(ISP_REQ_Q_IN(ha, &reg->isp));
                }
        }
}

/**
 * __qla2x00_marker() - Send a marker IOCB to the firmware.
 * @vha: HA context
 * @qpair: queue pair pointer
 * @loop_id: loop ID
 * @lun: LUN
 * @type: marker modifier
 *
 * Can be called from both normal and interrupt context.
 *
 * Returns non-zero if a failure occurred, else zero.
 */
static int
__qla2x00_marker(struct scsi_qla_host *vha, struct qla_qpair *qpair,
    uint16_t loop_id, uint64_t lun, uint8_t type)
{
        mrk_entry_t *mrk;
        struct mrk_entry_24xx *mrk24 = NULL;
        struct req_que *req = qpair->req;
        struct qla_hw_data *ha = vha->hw;
        scsi_qla_host_t *base_vha = pci_get_drvdata(ha->pdev);

        mrk = (mrk_entry_t *)__qla2x00_alloc_iocbs(qpair, NULL);
        if (mrk == NULL) {
                ql_log(ql_log_warn, base_vha, 0x3026,
                    "Failed to allocate Marker IOCB.\n");

                return (QLA_FUNCTION_FAILED);
        }

        mrk->entry_type = MARKER_TYPE;
        mrk->modifier = type;
        if (type != MK_SYNC_ALL) {
                if (IS_FWI2_CAPABLE(ha)) {
                        mrk24 = (struct mrk_entry_24xx *) mrk;
                        mrk24->nport_handle = cpu_to_le16(loop_id);
                        int_to_scsilun(lun, (struct scsi_lun *)&mrk24->lun);
                        host_to_fcp_swap(mrk24->lun, sizeof(mrk24->lun));
                        mrk24->vp_index = vha->vp_idx;
                        mrk24->handle = MAKE_HANDLE(req->id, mrk24->handle);
                } else {
                        SET_TARGET_ID(ha, mrk->target, loop_id);
                        mrk->lun = cpu_to_le16((uint16_t)lun);
                }
        }
        wmb();

        qla2x00_start_iocbs(vha, req);

        return (QLA_SUCCESS);
}

int
qla2x00_marker(struct scsi_qla_host *vha, struct qla_qpair *qpair,
    uint16_t loop_id, uint64_t lun, uint8_t type)
{
        int ret;
        unsigned long flags = 0;

        spin_lock_irqsave(qpair->qp_lock_ptr, flags);
        ret = __qla2x00_marker(vha, qpair, loop_id, lun, type);
        spin_unlock_irqrestore(qpair->qp_lock_ptr, flags);

        return (ret);
}

/*
 * qla2x00_issue_marker
 *
 * Issue a marker IOCB.
 * Caller CAN have the hardware lock held, as specified by the ha_locked
 * parameter. Might release it, then reacquire.
 */
int qla2x00_issue_marker(scsi_qla_host_t *vha, int ha_locked)
{
        if (ha_locked) {
                if (__qla2x00_marker(vha, vha->hw->base_qpair, 0, 0,
                                        MK_SYNC_ALL) != QLA_SUCCESS)
                        return QLA_FUNCTION_FAILED;
        } else {
                if (qla2x00_marker(vha, vha->hw->base_qpair, 0, 0,
                                        MK_SYNC_ALL) != QLA_SUCCESS)
                        return QLA_FUNCTION_FAILED;
        }
        vha->marker_needed = 0;

        return QLA_SUCCESS;
}

static inline int
qla24xx_build_scsi_type_6_iocbs(srb_t *sp, struct cmd_type_6 *cmd_pkt,
        uint16_t tot_dsds)
{
        struct dsd64 *cur_dsd = NULL, *next_dsd;
        scsi_qla_host_t *vha;
        struct qla_hw_data *ha;
        struct scsi_cmnd *cmd;
        struct scatterlist *cur_seg;
        uint8_t avail_dsds;
        uint8_t first_iocb = 1;
        uint32_t dsd_list_len;
        struct dsd_dma *dsd_ptr;
        struct ct6_dsd *ctx;

        cmd = GET_CMD_SP(sp);

        /* Update entry type to indicate Command Type 6 IOCB */
        put_unaligned_le32(COMMAND_TYPE_6, &cmd_pkt->entry_type);

        /* No data transfer */
        if (!scsi_bufflen(cmd) || cmd->sc_data_direction == DMA_NONE) {
                cmd_pkt->byte_count = cpu_to_le32(0);
                return 0;
        }

        vha = sp->vha;
        ha = vha->hw;

        /* Set transfer direction */
        if (cmd->sc_data_direction == DMA_TO_DEVICE) {
                cmd_pkt->control_flags = cpu_to_le16(CF_WRITE_DATA);
                vha->qla_stats.output_bytes += scsi_bufflen(cmd);
                vha->qla_stats.output_requests++;
        } else if (cmd->sc_data_direction == DMA_FROM_DEVICE) {
                cmd_pkt->control_flags = cpu_to_le16(CF_READ_DATA);
                vha->qla_stats.input_bytes += scsi_bufflen(cmd);
                vha->qla_stats.input_requests++;
        }

        cur_seg = scsi_sglist(cmd);
        ctx = sp->u.scmd.ct6_ctx;

        while (tot_dsds) {
                avail_dsds = (tot_dsds > QLA_DSDS_PER_IOCB) ?
                    QLA_DSDS_PER_IOCB : tot_dsds;
                tot_dsds -= avail_dsds;
                dsd_list_len = (avail_dsds + 1) * QLA_DSD_SIZE;

                dsd_ptr = list_first_entry(&ha->gbl_dsd_list,
                    struct dsd_dma, list);
                next_dsd = dsd_ptr->dsd_addr;
                list_del(&dsd_ptr->list);
                ha->gbl_dsd_avail--;
                list_add_tail(&dsd_ptr->list, &ctx->dsd_list);
                ctx->dsd_use_cnt++;
                ha->gbl_dsd_inuse++;

                if (first_iocb) {
                        first_iocb = 0;
                        put_unaligned_le64(dsd_ptr->dsd_list_dma,
                                           &cmd_pkt->fcp_dsd.address);
                        cmd_pkt->fcp_dsd.length = cpu_to_le32(dsd_list_len);
                } else {
                        put_unaligned_le64(dsd_ptr->dsd_list_dma,
                                           &cur_dsd->address);
                        cur_dsd->length = cpu_to_le32(dsd_list_len);
                        cur_dsd++;
                }
                cur_dsd = next_dsd;
                while (avail_dsds) {
                        append_dsd64(&cur_dsd, cur_seg);
                        cur_seg = sg_next(cur_seg);
                        avail_dsds--;
                }
        }

        /* Null termination */
        cur_dsd->address = 0;
        cur_dsd->length = 0;
        cur_dsd++;
        cmd_pkt->control_flags |= CF_DATA_SEG_DESCR_ENABLE;
        return 0;
}

/*
 * qla24xx_calc_dsd_lists() - Determine the number of DSD lists required
 * for Command Type 6.
 *
 * @dsds: number of data segment descriptors needed
 *
 * Returns the number of DSD lists needed to store @dsds.
 */
static inline uint16_t
qla24xx_calc_dsd_lists(uint16_t dsds)
{
        uint16_t dsd_lists = 0;

        dsd_lists = (dsds/QLA_DSDS_PER_IOCB);
        if (dsds % QLA_DSDS_PER_IOCB)
                dsd_lists++;
        return dsd_lists;
}
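
/*
 * Worked example (illustrative; QLA_DSDS_PER_IOCB is nominally 37 in this
 * driver): dsds = 74 fills exactly two lists (74 / 37 = 2, remainder 0),
 * while dsds = 75 leaves a remainder of 1 and therefore needs a third.
 */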

/**
 * qla24xx_build_scsi_iocbs() - Build IOCB command utilizing Command Type 7
 * IOCB types.
 *
 * @sp: SRB command to process
 * @cmd_pkt: Command type 7 IOCB
 * @tot_dsds: Total number of segments to transfer
 * @req: pointer to request queue
 */
inline void
qla24xx_build_scsi_iocbs(srb_t *sp, struct cmd_type_7 *cmd_pkt,
        uint16_t tot_dsds, struct req_que *req)
{
        uint16_t        avail_dsds;
        struct dsd64    *cur_dsd;
        scsi_qla_host_t *vha;
        struct scsi_cmnd *cmd;
        struct scatterlist *sg;
        int i;

        cmd = GET_CMD_SP(sp);

        /* Update entry type to indicate Command Type 7 IOCB */
        put_unaligned_le32(COMMAND_TYPE_7, &cmd_pkt->entry_type);

        /* No data transfer */
        if (!scsi_bufflen(cmd) || cmd->sc_data_direction == DMA_NONE) {
                cmd_pkt->byte_count = cpu_to_le32(0);
                return;
        }

        vha = sp->vha;

        /* Set transfer direction */
        if (cmd->sc_data_direction == DMA_TO_DEVICE) {
                cmd_pkt->task_mgmt_flags = cpu_to_le16(TMF_WRITE_DATA);
                vha->qla_stats.output_bytes += scsi_bufflen(cmd);
                vha->qla_stats.output_requests++;
        } else if (cmd->sc_data_direction == DMA_FROM_DEVICE) {
                cmd_pkt->task_mgmt_flags = cpu_to_le16(TMF_READ_DATA);
                vha->qla_stats.input_bytes += scsi_bufflen(cmd);
                vha->qla_stats.input_requests++;
        }

        /* One DSD is available in the Command Type 7 IOCB */
        avail_dsds = 1;
        cur_dsd = &cmd_pkt->dsd;

        /* Load data segments */
        scsi_for_each_sg(cmd, sg, tot_dsds, i) {
                cont_a64_entry_t *cont_pkt;

                /* Allocate additional continuation packets? */
                if (avail_dsds == 0) {
                        /*
                         * Five DSDs are available in the Continuation
                         * Type 1 IOCB.
                         */
                        cont_pkt = qla2x00_prep_cont_type1_iocb(vha, req);
                        cur_dsd = cont_pkt->dsd;
                        avail_dsds = ARRAY_SIZE(cont_pkt->dsd);
                }

                append_dsd64(&cur_dsd, sg);
                avail_dsds--;
        }
}

struct fw_dif_context {
        uint32_t ref_tag;
        uint16_t app_tag;
        uint8_t ref_tag_mask[4];        /* Validation/Replacement Mask */
        uint8_t app_tag_mask[2];        /* Validation/Replacement Mask */
};

/*
 * qla24xx_set_t10dif_tags() - Extract Ref and App tags from the SCSI command.
 */
static inline void
qla24xx_set_t10dif_tags(srb_t *sp, struct fw_dif_context *pkt,
    unsigned int protcnt)
{
        struct scsi_cmnd *cmd = GET_CMD_SP(sp);

        switch (scsi_get_prot_type(cmd)) {
        case SCSI_PROT_DIF_TYPE0:
                /*
                 * No check for ql2xenablehba_err_chk, as it would be an
                 * I/O error if hba tag generation is not done.
                 */
                pkt->ref_tag = cpu_to_le32((uint32_t)
                    (0xffffffff & scsi_get_lba(cmd)));

                if (!qla2x00_hba_err_chk_enabled(sp))
                        break;

                pkt->ref_tag_mask[0] = 0xff;
                pkt->ref_tag_mask[1] = 0xff;
                pkt->ref_tag_mask[2] = 0xff;
                pkt->ref_tag_mask[3] = 0xff;
                break;

        /*
         * For TYPE 2 protection: 16 bit GUARD + 32 bit REF tag has to
         * match LBA in CDB + N
         */
        case SCSI_PROT_DIF_TYPE2:
                pkt->app_tag = cpu_to_le16(0);
                pkt->app_tag_mask[0] = 0x0;
                pkt->app_tag_mask[1] = 0x0;

                pkt->ref_tag = cpu_to_le32((uint32_t)
                    (0xffffffff & scsi_get_lba(cmd)));

                if (!qla2x00_hba_err_chk_enabled(sp))
                        break;

                /* enable ALL bytes of the ref tag */
                pkt->ref_tag_mask[0] = 0xff;
                pkt->ref_tag_mask[1] = 0xff;
                pkt->ref_tag_mask[2] = 0xff;
                pkt->ref_tag_mask[3] = 0xff;
                break;

        /* For Type 3 protection: 16 bit GUARD only */
        case SCSI_PROT_DIF_TYPE3:
                pkt->ref_tag_mask[0] = pkt->ref_tag_mask[1] =
                        pkt->ref_tag_mask[2] = pkt->ref_tag_mask[3] =
                                                                0x00;
                break;

        /*
         * For Type 1 protection: 16 bit GUARD tag, 32 bit REF tag, and
         * 16 bit app tag.
         */
        case SCSI_PROT_DIF_TYPE1:
                pkt->ref_tag = cpu_to_le32((uint32_t)
                    (0xffffffff & scsi_get_lba(cmd)));
                pkt->app_tag = cpu_to_le16(0);
                pkt->app_tag_mask[0] = 0x0;
                pkt->app_tag_mask[1] = 0x0;

                if (!qla2x00_hba_err_chk_enabled(sp))
                        break;

                /* enable ALL bytes of the ref tag */
                pkt->ref_tag_mask[0] = 0xff;
                pkt->ref_tag_mask[1] = 0xff;
                pkt->ref_tag_mask[2] = 0xff;
                pkt->ref_tag_mask[3] = 0xff;
                break;
        }
}

int
qla24xx_get_one_block_sg(uint32_t blk_sz, struct qla2_sgx *sgx,
        uint32_t *partial)
{
        struct scatterlist *sg;
        uint32_t cumulative_partial, sg_len;
        dma_addr_t sg_dma_addr;

        if (sgx->num_bytes == sgx->tot_bytes)
                return 0;

        sg = sgx->cur_sg;
        cumulative_partial = sgx->tot_partial;

        sg_dma_addr = sg_dma_address(sg);
        sg_len = sg_dma_len(sg);

        sgx->dma_addr = sg_dma_addr + sgx->bytes_consumed;

        if ((cumulative_partial + (sg_len - sgx->bytes_consumed)) >= blk_sz) {
                sgx->dma_len = (blk_sz - cumulative_partial);
                sgx->tot_partial = 0;
                sgx->num_bytes += blk_sz;
                *partial = 0;
        } else {
                sgx->dma_len = sg_len - sgx->bytes_consumed;
                sgx->tot_partial += sgx->dma_len;
                *partial = 1;
        }

        sgx->bytes_consumed += sgx->dma_len;

        if (sg_len == sgx->bytes_consumed) {
                sg = sg_next(sg);
                sgx->num_sg++;
                sgx->cur_sg = sg;
                sgx->bytes_consumed = 0;
        }

        return 1;
}
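
/*
 * Illustrative walk-through (not from the original source): with
 * blk_sz = 512 and two SG entries of 700 and 324 bytes, successive calls
 * yield
 *
 *      call 1: dma_len = 512, *partial = 0   (first 512 bytes of SG 0)
 *      call 2: dma_len = 188, *partial = 1   (tail of SG 0, 188 carried)
 *      call 3: dma_len = 324, *partial = 0   (188 + 324 = 512, SG 1 done)
 *
 * so each *partial == 0 return marks a completed protection interval.
 */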

int
qla24xx_walk_and_build_sglist_no_difb(struct qla_hw_data *ha, srb_t *sp,
        struct dsd64 *dsd, uint16_t tot_dsds, struct qla_tc_param *tc)
{
        void *next_dsd;
        uint8_t avail_dsds = 0;
        uint32_t dsd_list_len;
        struct dsd_dma *dsd_ptr;
        struct scatterlist *sg_prot;
        struct dsd64 *cur_dsd = dsd;
        uint16_t        used_dsds = tot_dsds;
        uint32_t        prot_int; /* protection interval */
        uint32_t        partial;
        struct qla2_sgx sgx;
        dma_addr_t      sle_dma;
        uint32_t        sle_dma_len, tot_prot_dma_len = 0;
        struct scsi_cmnd *cmd;

        memset(&sgx, 0, sizeof(struct qla2_sgx));
        if (sp) {
                cmd = GET_CMD_SP(sp);
                prot_int = cmd->device->sector_size;

                sgx.tot_bytes = scsi_bufflen(cmd);
                sgx.cur_sg = scsi_sglist(cmd);
                sgx.sp = sp;

                sg_prot = scsi_prot_sglist(cmd);
        } else if (tc) {
                prot_int      = tc->blk_sz;
                sgx.tot_bytes = tc->bufflen;
                sgx.cur_sg    = tc->sg;
                sg_prot       = tc->prot_sg;
        } else {
                BUG();
                return 1;
        }

        while (qla24xx_get_one_block_sg(prot_int, &sgx, &partial)) {

                sle_dma = sgx.dma_addr;
                sle_dma_len = sgx.dma_len;
alloc_and_fill:
                /* Allocate additional continuation packets? */
                if (avail_dsds == 0) {
                        avail_dsds = (used_dsds > QLA_DSDS_PER_IOCB) ?
                                        QLA_DSDS_PER_IOCB : used_dsds;
                        dsd_list_len = (avail_dsds + 1) * 12;
                        used_dsds -= avail_dsds;

                        /* allocate tracking DS */
                        dsd_ptr = kzalloc(sizeof(struct dsd_dma), GFP_ATOMIC);
                        if (!dsd_ptr)
                                return 1;

                        /* allocate new list */
                        dsd_ptr->dsd_addr = next_dsd =
                            dma_pool_alloc(ha->dl_dma_pool, GFP_ATOMIC,
                                &dsd_ptr->dsd_list_dma);

                        if (!next_dsd) {
                                /*
                                 * Need to cleanup only this dsd_ptr, rest
                                 * will be done by sp_free_dma()
                                 */
                                kfree(dsd_ptr);
                                return 1;
                        }

                        if (sp) {
                                list_add_tail(&dsd_ptr->list,
                                              &sp->u.scmd.crc_ctx->dsd_list);

                                sp->flags |= SRB_CRC_CTX_DSD_VALID;
                        } else {
                                list_add_tail(&dsd_ptr->list,
                                    &(tc->ctx->dsd_list));
                                *tc->ctx_dsd_alloced = 1;
                        }

                        /* add new list to cmd iocb or last list */
                        put_unaligned_le64(dsd_ptr->dsd_list_dma,
                                           &cur_dsd->address);
                        cur_dsd->length = cpu_to_le32(dsd_list_len);
                        cur_dsd = next_dsd;
                }
                put_unaligned_le64(sle_dma, &cur_dsd->address);
                cur_dsd->length = cpu_to_le32(sle_dma_len);
                cur_dsd++;
                avail_dsds--;

                if (partial == 0) {
                        /* Got a full protection interval */
                        sle_dma = sg_dma_address(sg_prot) + tot_prot_dma_len;
                        sle_dma_len = 8;

                        tot_prot_dma_len += sle_dma_len;
                        if (tot_prot_dma_len == sg_dma_len(sg_prot)) {
                                tot_prot_dma_len = 0;
                                sg_prot = sg_next(sg_prot);
                        }

                        partial = 1; /* So as to not re-enter this block */
                        goto alloc_and_fill;
                }
        }
        /* Null termination */
        cur_dsd->address = 0;
        cur_dsd->length = 0;
        cur_dsd++;
        return 0;
}
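
/*
 * Layout note (illustrative): each allocated DSD list holds up to
 * QLA_DSDS_PER_IOCB 12-byte descriptors (8-byte address + 4-byte length)
 * plus one extra slot, hence the (avail_dsds + 1) * 12 length programmed
 * into the parent descriptor; the final slot of a full list carries either
 * the link to the next list or the zeroed null-termination entry written
 * above.
 */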

int
qla24xx_walk_and_build_sglist(struct qla_hw_data *ha, srb_t *sp,
        struct dsd64 *dsd, uint16_t tot_dsds, struct qla_tc_param *tc)
{
        void *next_dsd;
        uint8_t avail_dsds = 0;
        uint32_t dsd_list_len;
        struct dsd_dma *dsd_ptr;
        struct scatterlist *sg, *sgl;
        struct dsd64 *cur_dsd = dsd;
        int     i;
        uint16_t        used_dsds = tot_dsds;
        struct scsi_cmnd *cmd;

        if (sp) {
                cmd = GET_CMD_SP(sp);
                sgl = scsi_sglist(cmd);
        } else if (tc) {
                sgl = tc->sg;
        } else {
                BUG();
                return 1;
        }

        for_each_sg(sgl, sg, tot_dsds, i) {
                /* Allocate additional continuation packets? */
                if (avail_dsds == 0) {
                        avail_dsds = (used_dsds > QLA_DSDS_PER_IOCB) ?
                                        QLA_DSDS_PER_IOCB : used_dsds;
                        dsd_list_len = (avail_dsds + 1) * 12;
                        used_dsds -= avail_dsds;

                        /* allocate tracking DS */
                        dsd_ptr = kzalloc(sizeof(struct dsd_dma), GFP_ATOMIC);
                        if (!dsd_ptr)
                                return 1;

                        /* allocate new list */
                        dsd_ptr->dsd_addr = next_dsd =
                            dma_pool_alloc(ha->dl_dma_pool, GFP_ATOMIC,
                                &dsd_ptr->dsd_list_dma);

                        if (!next_dsd) {
                                /*
                                 * Need to cleanup only this dsd_ptr, rest
                                 * will be done by sp_free_dma()
                                 */
                                kfree(dsd_ptr);
                                return 1;
                        }

                        if (sp) {
                                list_add_tail(&dsd_ptr->list,
                                              &sp->u.scmd.crc_ctx->dsd_list);

                                sp->flags |= SRB_CRC_CTX_DSD_VALID;
                        } else {
                                list_add_tail(&dsd_ptr->list,
                                    &(tc->ctx->dsd_list));
                                *tc->ctx_dsd_alloced = 1;
                        }

                        /* add new list to cmd iocb or last list */
                        put_unaligned_le64(dsd_ptr->dsd_list_dma,
                                           &cur_dsd->address);
                        cur_dsd->length = cpu_to_le32(dsd_list_len);
                        cur_dsd = next_dsd;
                }
                append_dsd64(&cur_dsd, sg);
                avail_dsds--;

        }
        /* Null termination */
        cur_dsd->address = 0;
        cur_dsd->length = 0;
        cur_dsd++;
        return 0;
}

int
qla24xx_walk_and_build_prot_sglist(struct qla_hw_data *ha, srb_t *sp,
        struct dsd64 *cur_dsd, uint16_t tot_dsds, struct qla_tgt_cmd *tc)
{
        struct dsd_dma *dsd_ptr = NULL, *dif_dsd, *nxt_dsd;
        struct scatterlist *sg, *sgl;
        struct crc_context *difctx = NULL;
        struct scsi_qla_host *vha;
        uint dsd_list_len;
        uint avail_dsds = 0;
        uint used_dsds = tot_dsds;
        bool dif_local_dma_alloc = false;
        bool direction_to_device = false;
        int i;

        if (sp) {
                struct scsi_cmnd *cmd = GET_CMD_SP(sp);

                sgl = scsi_prot_sglist(cmd);
                vha = sp->vha;
                difctx = sp->u.scmd.crc_ctx;
                direction_to_device = cmd->sc_data_direction == DMA_TO_DEVICE;
                ql_dbg(ql_dbg_tgt + ql_dbg_verbose, vha, 0xe021,
                    "%s: scsi_cmnd: %p, crc_ctx: %p, sp: %p\n",
                    __func__, cmd, difctx, sp);
        } else if (tc) {
                vha = tc->vha;
                sgl = tc->prot_sg;
                difctx = tc->ctx;
                direction_to_device = tc->dma_data_direction == DMA_TO_DEVICE;
        } else {
                BUG();
                return 1;
        }

        ql_dbg(ql_dbg_tgt + ql_dbg_verbose, vha, 0xe021,
            "%s: enter (write=%u)\n", __func__, direction_to_device);

        /* if initiator doing write or target doing read */
        if (direction_to_device) {
                for_each_sg(sgl, sg, tot_dsds, i) {
                        u64 sle_phys = sg_phys(sg);

                        /* If SGE addr + len flips bits in upper 32-bits */
                        if (MSD(sle_phys + sg->length) ^ MSD(sle_phys)) {
                                ql_dbg(ql_dbg_tgt + ql_dbg_verbose, vha, 0xe022,
                                    "%s: page boundary crossing (phys=%llx len=%x)\n",
                                    __func__, sle_phys, sg->length);

                                if (difctx) {
                                        ha->dif_bundle_crossed_pages++;
                                        dif_local_dma_alloc = true;
                                } else {
                                        ql_dbg(ql_dbg_tgt + ql_dbg_verbose,
                                            vha, 0xe022,
                                            "%s: difctx pointer is NULL\n",
                                            __func__);
                                }
                                break;
                        }
                }
                ha->dif_bundle_writes++;
        } else {
                ha->dif_bundle_reads++;
        }

        if (ql2xdifbundlinginternalbuffers)
                dif_local_dma_alloc = direction_to_device;

        if (dif_local_dma_alloc) {
                u32 track_difbundl_buf = 0;
                u32 ldma_sg_len = 0;
                u8 ldma_needed = 1;

                difctx->no_dif_bundl = 0;
                difctx->dif_bundl_len = 0;

                /* Track DSD buffers */
                INIT_LIST_HEAD(&difctx->ldif_dsd_list);
                /* Track local DMA buffers */
                INIT_LIST_HEAD(&difctx->ldif_dma_hndl_list);

                for_each_sg(sgl, sg, tot_dsds, i) {
                        u32 sglen = sg_dma_len(sg);

                        ql_dbg(ql_dbg_tgt + ql_dbg_verbose, vha, 0xe023,
                            "%s: sg[%x] (phys=%llx sglen=%x) ldma_sg_len: %x dif_bundl_len: %x ldma_needed: %x\n",
                            __func__, i, (u64)sg_phys(sg), sglen, ldma_sg_len,
                            difctx->dif_bundl_len, ldma_needed);

                        while (sglen) {
                                u32 xfrlen = 0;

                                if (ldma_needed) {
                                        /*
                                         * Allocate list item to store
                                         * the DMA buffers
                                         */
                                        dsd_ptr = kzalloc(sizeof(*dsd_ptr),
                                            GFP_ATOMIC);
                                        if (!dsd_ptr) {
                                                ql_dbg(ql_dbg_tgt, vha, 0xe024,
                                                    "%s: failed alloc dsd_ptr\n",
                                                    __func__);
                                                return 1;
                                        }
                                        ha->dif_bundle_kallocs++;

                                        /* allocate dma buffer */
                                        dsd_ptr->dsd_addr = dma_pool_alloc
                                                (ha->dif_bundl_pool, GFP_ATOMIC,
                                                 &dsd_ptr->dsd_list_dma);
                                        if (!dsd_ptr->dsd_addr) {
                                                ql_dbg(ql_dbg_tgt, vha, 0xe024,
                                                    "%s: failed alloc ->dsd_ptr\n",
                                                    __func__);
                                                /*
                                                 * need to cleanup only this
                                                 * dsd_ptr rest will be done
                                                 * by sp_free_dma()
                                                 */
                                                kfree(dsd_ptr);
                                                ha->dif_bundle_kallocs--;
                                                return 1;
                                        }
                                        ha->dif_bundle_dma_allocs++;
                                        ldma_needed = 0;
                                        difctx->no_dif_bundl++;
                                        list_add_tail(&dsd_ptr->list,
                                            &difctx->ldif_dma_hndl_list);
                                }

                                /* xfrlen is min of dma pool size and sglen */
                                xfrlen = (sglen >
                                   (DIF_BUNDLING_DMA_POOL_SIZE - ldma_sg_len)) ?
                                    DIF_BUNDLING_DMA_POOL_SIZE - ldma_sg_len :
                                    sglen;

                                /* replace with local allocated dma buffer */
                                sg_pcopy_to_buffer(sgl, sg_nents(sgl),
                                    dsd_ptr->dsd_addr + ldma_sg_len, xfrlen,
                                    difctx->dif_bundl_len);
                                difctx->dif_bundl_len += xfrlen;
                                sglen -= xfrlen;
                                ldma_sg_len += xfrlen;
                                if (ldma_sg_len == DIF_BUNDLING_DMA_POOL_SIZE ||
                                    sg_is_last(sg)) {
                                        ldma_needed = 1;
                                        ldma_sg_len = 0;
                                }
                        }
                }

                track_difbundl_buf = used_dsds = difctx->no_dif_bundl;
                ql_dbg(ql_dbg_tgt + ql_dbg_verbose, vha, 0xe025,
                    "dif_bundl_len=%x, no_dif_bundl=%x track_difbundl_buf: %x\n",
                    difctx->dif_bundl_len, difctx->no_dif_bundl,
                    track_difbundl_buf);

                if (sp)
                        sp->flags |= SRB_DIF_BUNDL_DMA_VALID;
                else
                        tc->prot_flags = DIF_BUNDL_DMA_VALID;

                list_for_each_entry_safe(dif_dsd, nxt_dsd,
                    &difctx->ldif_dma_hndl_list, list) {
                        u32 sglen = (difctx->dif_bundl_len >
                            DIF_BUNDLING_DMA_POOL_SIZE) ?
                            DIF_BUNDLING_DMA_POOL_SIZE : difctx->dif_bundl_len;

                        BUG_ON(track_difbundl_buf == 0);

                        /* Allocate additional continuation packets? */
                        if (avail_dsds == 0) {
                                ql_dbg(ql_dbg_tgt + ql_dbg_verbose, vha,
                                    0xe024,
                                    "%s: adding continuation iocb's\n",
                                    __func__);
                                avail_dsds = (used_dsds > QLA_DSDS_PER_IOCB) ?
                                    QLA_DSDS_PER_IOCB : used_dsds;
                                dsd_list_len = (avail_dsds + 1) * 12;
                                used_dsds -= avail_dsds;

                                /* allocate tracking DS */
                                dsd_ptr = kzalloc(sizeof(*dsd_ptr), GFP_ATOMIC);
                                if (!dsd_ptr) {
                                        ql_dbg(ql_dbg_tgt, vha, 0xe026,
                                            "%s: failed alloc dsd_ptr\n",
                                            __func__);
                                        return 1;
                                }
                                ha->dif_bundle_kallocs++;

                                difctx->no_ldif_dsd++;
                                /* allocate new list */
                                dsd_ptr->dsd_addr =
                                    dma_pool_alloc(ha->dl_dma_pool, GFP_ATOMIC,
                                        &dsd_ptr->dsd_list_dma);
                                if (!dsd_ptr->dsd_addr) {
                                        ql_dbg(ql_dbg_tgt, vha, 0xe026,
                                            "%s: failed alloc ->dsd_addr\n",
                                            __func__);
                                        /*
                                         * need to cleanup only this dsd_ptr
                                         * rest will be done by sp_free_dma()
                                         */
                                        kfree(dsd_ptr);
                                        ha->dif_bundle_kallocs--;
                                        return 1;
                                }
                                ha->dif_bundle_dma_allocs++;

                                if (sp) {
                                        list_add_tail(&dsd_ptr->list,
                                            &difctx->ldif_dsd_list);
                                        sp->flags |= SRB_CRC_CTX_DSD_VALID;
                                } else {
                                        list_add_tail(&dsd_ptr->list,
                                            &difctx->ldif_dsd_list);
                                        tc->ctx_dsd_alloced = 1;
                                }

                                /* add new list to cmd iocb or last list */
                                put_unaligned_le64(dsd_ptr->dsd_list_dma,
                                                   &cur_dsd->address);
                                cur_dsd->length = cpu_to_le32(dsd_list_len);
                                cur_dsd = dsd_ptr->dsd_addr;
                        }
                        put_unaligned_le64(dif_dsd->dsd_list_dma,
                                           &cur_dsd->address);
                        cur_dsd->length = cpu_to_le32(sglen);
                        cur_dsd++;
                        avail_dsds--;
                        difctx->dif_bundl_len -= sglen;
                        track_difbundl_buf--;
                }

                ql_dbg(ql_dbg_tgt + ql_dbg_verbose, vha, 0xe026,
                    "%s: no_ldif_dsd:%x, no_dif_bundl:%x\n", __func__,
                    difctx->no_ldif_dsd, difctx->no_dif_bundl);
        } else {
                for_each_sg(sgl, sg, tot_dsds, i) {
                        /* Allocate additional continuation packets? */
                        if (avail_dsds == 0) {
                                avail_dsds = (used_dsds > QLA_DSDS_PER_IOCB) ?
                                    QLA_DSDS_PER_IOCB : used_dsds;
                                dsd_list_len = (avail_dsds + 1) * 12;
                                used_dsds -= avail_dsds;

                                /* allocate tracking DS */
                                dsd_ptr = kzalloc(sizeof(*dsd_ptr), GFP_ATOMIC);
                                if (!dsd_ptr) {
                                        ql_dbg(ql_dbg_tgt + ql_dbg_verbose,
                                            vha, 0xe027,
                                            "%s: failed alloc dsd_dma...\n",
                                            __func__);
                                        return 1;
                                }

                                /* allocate new list */
                                dsd_ptr->dsd_addr =
                                    dma_pool_alloc(ha->dl_dma_pool, GFP_ATOMIC,
                                        &dsd_ptr->dsd_list_dma);
                                if (!dsd_ptr->dsd_addr) {
                                        /* need to cleanup only this dsd_ptr */
                                        /* rest will be done by sp_free_dma() */
                                        kfree(dsd_ptr);
                                        return 1;
                                }

                                if (sp) {
                                        list_add_tail(&dsd_ptr->list,
                                            &difctx->dsd_list);
                                        sp->flags |= SRB_CRC_CTX_DSD_VALID;
                                } else {
                                        list_add_tail(&dsd_ptr->list,
                                            &difctx->dsd_list);
                                        tc->ctx_dsd_alloced = 1;
                                }

                                /* add new list to cmd iocb or last list */
                                put_unaligned_le64(dsd_ptr->dsd_list_dma,
                                                   &cur_dsd->address);
                                cur_dsd->length = cpu_to_le32(dsd_list_len);
                                cur_dsd = dsd_ptr->dsd_addr;
                        }
                        append_dsd64(&cur_dsd, sg);
                        avail_dsds--;
                }
        }
        /* Null termination */
        cur_dsd->address = 0;
        cur_dsd->length = 0;
        cur_dsd++;
        return 0;
}
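
/*
 * Worked example for the bundling copy above (illustrative): a protection
 * SG entry larger than the space left in the current local buffer is
 * split as xfrlen = min(sglen, DIF_BUNDLING_DMA_POOL_SIZE - ldma_sg_len).
 * Assuming a nominal pool size of 0x2000, an 0x3000-byte SGE is consumed
 * in two passes: the first copies 0x2000 bytes and fills the buffer
 * (ldma_needed is set again), the second allocates a fresh buffer and
 * copies the remaining 0x1000.
 */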
1376
1377/**
1378 * qla24xx_build_scsi_crc_2_iocbs() - Build IOCB command utilizing Command
1379 *                                                      Type 6 IOCB types.
1380 *
1381 * @sp: SRB command to process
1382 * @cmd_pkt: Command type 3 IOCB
1383 * @tot_dsds: Total number of segments to transfer
1384 * @tot_prot_dsds: Total number of segments with protection information
1385 * @fw_prot_opts: Protection options to be passed to firmware
1386 */
1387static inline int
1388qla24xx_build_scsi_crc_2_iocbs(srb_t *sp, struct cmd_type_crc_2 *cmd_pkt,
1389    uint16_t tot_dsds, uint16_t tot_prot_dsds, uint16_t fw_prot_opts)
1390{
1391        struct dsd64            *cur_dsd;
1392        uint32_t                *fcp_dl;
1393        scsi_qla_host_t         *vha;
1394        struct scsi_cmnd        *cmd;
1395        uint32_t                total_bytes = 0;
1396        uint32_t                data_bytes;
1397        uint32_t                dif_bytes;
1398        uint8_t                 bundling = 1;
1399        uint16_t                blk_size;
1400        struct crc_context      *crc_ctx_pkt = NULL;
1401        struct qla_hw_data      *ha;
1402        uint8_t                 additional_fcpcdb_len;
1403        uint16_t                fcp_cmnd_len;
1404        struct fcp_cmnd         *fcp_cmnd;
1405        dma_addr_t              crc_ctx_dma;
1406
1407        cmd = GET_CMD_SP(sp);
1408
1409        /* Update entry type to indicate Command Type CRC_2 IOCB */
1410        put_unaligned_le32(COMMAND_TYPE_CRC_2, &cmd_pkt->entry_type);
1411
1412        vha = sp->vha;
1413        ha = vha->hw;
1414
1415        /* No data transfer */
1416        data_bytes = scsi_bufflen(cmd);
1417        if (!data_bytes || cmd->sc_data_direction == DMA_NONE) {
1418                cmd_pkt->byte_count = cpu_to_le32(0);
1419                return QLA_SUCCESS;
1420        }
1421
1422        cmd_pkt->vp_index = sp->vha->vp_idx;
1423
1424        /* Set transfer direction */
1425        if (cmd->sc_data_direction == DMA_TO_DEVICE) {
1426                cmd_pkt->control_flags =
1427                    cpu_to_le16(CF_WRITE_DATA);
1428        } else if (cmd->sc_data_direction == DMA_FROM_DEVICE) {
1429                cmd_pkt->control_flags =
1430                    cpu_to_le16(CF_READ_DATA);
1431        }
1432
1433        if ((scsi_get_prot_op(cmd) == SCSI_PROT_READ_INSERT) ||
1434            (scsi_get_prot_op(cmd) == SCSI_PROT_WRITE_STRIP) ||
1435            (scsi_get_prot_op(cmd) == SCSI_PROT_READ_STRIP) ||
1436            (scsi_get_prot_op(cmd) == SCSI_PROT_WRITE_INSERT))
1437                bundling = 0;
1438
1439        /* Allocate CRC context from global pool */
1440        crc_ctx_pkt = sp->u.scmd.crc_ctx =
1441            dma_pool_zalloc(ha->dl_dma_pool, GFP_ATOMIC, &crc_ctx_dma);
1442
1443        if (!crc_ctx_pkt)
1444                goto crc_queuing_error;
1445
1446        crc_ctx_pkt->crc_ctx_dma = crc_ctx_dma;
1447
1448        sp->flags |= SRB_CRC_CTX_DMA_VALID;
1449
1450        /* Set handle */
1451        crc_ctx_pkt->handle = cmd_pkt->handle;
1452
1453        INIT_LIST_HEAD(&crc_ctx_pkt->dsd_list);
1454
1455        qla24xx_set_t10dif_tags(sp, (struct fw_dif_context *)
1456            &crc_ctx_pkt->ref_tag, tot_prot_dsds);
1457
1458        put_unaligned_le64(crc_ctx_dma, &cmd_pkt->crc_context_address);
1459        cmd_pkt->crc_context_len = CRC_CONTEXT_LEN_FW;
1460
1461        /* Determine SCSI command length -- align to 4-byte boundary */
1462        if (cmd->cmd_len > 16) {
1463                additional_fcpcdb_len = cmd->cmd_len - 16;
1464                if ((cmd->cmd_len % 4) != 0) {
1465                        /* SCSI cmds > 16 bytes must be a multiple of 4 */
1466                        goto crc_queuing_error;
1467                }
1468                fcp_cmnd_len = 12 + cmd->cmd_len + 4;
1469        } else {
1470                additional_fcpcdb_len = 0;
1471                fcp_cmnd_len = 12 + 16 + 4;
1472        }
1473
1474        fcp_cmnd = &crc_ctx_pkt->fcp_cmnd;
1475
1476        fcp_cmnd->additional_cdb_len = additional_fcpcdb_len;
1477        if (cmd->sc_data_direction == DMA_TO_DEVICE)
1478                fcp_cmnd->additional_cdb_len |= 1;
1479        else if (cmd->sc_data_direction == DMA_FROM_DEVICE)
1480                fcp_cmnd->additional_cdb_len |= 2;
1481
1482        int_to_scsilun(cmd->device->lun, &fcp_cmnd->lun);
1483        memcpy(fcp_cmnd->cdb, cmd->cmnd, cmd->cmd_len);
1484        cmd_pkt->fcp_cmnd_dseg_len = cpu_to_le16(fcp_cmnd_len);
1485        put_unaligned_le64(crc_ctx_dma + CRC_CONTEXT_FCPCMND_OFF,
1486                           &cmd_pkt->fcp_cmnd_dseg_address);
1487        fcp_cmnd->task_management = 0;
1488        fcp_cmnd->task_attribute = TSK_SIMPLE;
1489
1490        cmd_pkt->fcp_rsp_dseg_len = 0; /* Let response come in status iocb */
1491
1492        /* Compute DIF length and adjust data length to include protection */
1493        blk_size = cmd->device->sector_size;
1494        dif_bytes = (data_bytes / blk_size) * 8;
1496
1497        switch (scsi_get_prot_op(GET_CMD_SP(sp))) {
1498        case SCSI_PROT_READ_INSERT:
1499        case SCSI_PROT_WRITE_STRIP:
1500                total_bytes = data_bytes;
1501                data_bytes += dif_bytes;
1502                break;
1503
1504        case SCSI_PROT_READ_STRIP:
1505        case SCSI_PROT_WRITE_INSERT:
1506        case SCSI_PROT_READ_PASS:
1507        case SCSI_PROT_WRITE_PASS:
1508                total_bytes = data_bytes + dif_bytes;
1509                break;
1510        default:
1511                BUG();
1512        }
1513
1514        if (!qla2x00_hba_err_chk_enabled(sp))
1515                fw_prot_opts |= 0x10; /* Disable Guard tag checking */
1516        /* HBA error checking enabled */
1517        else if (IS_PI_UNINIT_CAPABLE(ha)) {
1518                if ((scsi_get_prot_type(GET_CMD_SP(sp)) == SCSI_PROT_DIF_TYPE1)
1519                    || (scsi_get_prot_type(GET_CMD_SP(sp)) ==
1520                        SCSI_PROT_DIF_TYPE2))
1521                        fw_prot_opts |= BIT_10;
1522                else if (scsi_get_prot_type(GET_CMD_SP(sp)) ==
1523                    SCSI_PROT_DIF_TYPE3)
1524                        fw_prot_opts |= BIT_11;
1525        }
1526
1527        if (!bundling) {
1528                cur_dsd = &crc_ctx_pkt->u.nobundling.data_dsd[0];
1529        } else {
1530                /*
1531                 * Configure bundling if protection data must be fetched
1532                 * with interleaving PCI accesses.
1533                 */
1534                fw_prot_opts |= PO_ENABLE_DIF_BUNDLING;
1535                crc_ctx_pkt->u.bundling.dif_byte_count = cpu_to_le32(dif_bytes);
1536                crc_ctx_pkt->u.bundling.dseg_count = cpu_to_le16(tot_dsds -
1537                                                        tot_prot_dsds);
1538                cur_dsd = &crc_ctx_pkt->u.bundling.data_dsd[0];
1539        }
1540
1541        /* Finish the common fields of CRC pkt */
1542        crc_ctx_pkt->blk_size = cpu_to_le16(blk_size);
1543        crc_ctx_pkt->prot_opts = cpu_to_le16(fw_prot_opts);
1544        crc_ctx_pkt->byte_count = cpu_to_le32(data_bytes);
1545        crc_ctx_pkt->guard_seed = cpu_to_le16(0);
1546        /* Fibre Channel byte count */
1547        cmd_pkt->byte_count = cpu_to_le32(total_bytes);
1548        fcp_dl = (uint32_t *)(crc_ctx_pkt->fcp_cmnd.cdb + 16 +
1549            additional_fcpcdb_len);
1550        *fcp_dl = htonl(total_bytes);
1551
1552        if (!data_bytes || cmd->sc_data_direction == DMA_NONE) {
1553                cmd_pkt->byte_count = cpu_to_le32(0);
1554                return QLA_SUCCESS;
1555        }
1556
1557        /* Walk data segments */
1558        cmd_pkt->control_flags |= cpu_to_le16(CF_DATA_SEG_DESCR_ENABLE);
1559
1560        if (!bundling && tot_prot_dsds) {
1561                if (qla24xx_walk_and_build_sglist_no_difb(ha, sp,
1562                        cur_dsd, tot_dsds, NULL))
1563                        goto crc_queuing_error;
1564        } else if (qla24xx_walk_and_build_sglist(ha, sp, cur_dsd,
1565                        (tot_dsds - tot_prot_dsds), NULL))
1566                goto crc_queuing_error;
1567
1568        if (bundling && tot_prot_dsds) {
1569                /* Walks dif segments */
1570                cmd_pkt->control_flags |= cpu_to_le16(CF_DIF_SEG_DESCR_ENABLE);
1571                cur_dsd = &crc_ctx_pkt->u.bundling.dif_dsd;
1572                if (qla24xx_walk_and_build_prot_sglist(ha, sp, cur_dsd,
1573                                tot_prot_dsds, NULL))
1574                        goto crc_queuing_error;
1575        }
1576        return QLA_SUCCESS;
1577
1578crc_queuing_error:
1579        /* Cleanup will be performed by the caller */
1580
1581        return QLA_FUNCTION_FAILED;
1582}
1583
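/*
 * Illustrative sketch, not part of the driver: two worked examples for
 * qla24xx_build_scsi_crc_2_iocbs() above.  Each logical block carries
 * an 8-byte protection tuple, so a 64 KiB transfer on a 512-byte-sector
 * device adds (65536 / 512) * 8 = 1024 DIF bytes.  Whether those bytes
 * travel on the wire depends on the protection op; the hypothetical
 * helpers below mirror the switch and length math above.
 */
static inline uint32_t example_total_wire_bytes(unsigned char prot_op,
    uint32_t data_bytes, uint16_t blk_size)
{
        uint32_t dif_bytes = (data_bytes / blk_size) * 8;

        switch (prot_op) {
        case SCSI_PROT_READ_INSERT:
        case SCSI_PROT_WRITE_STRIP:
                /* HBA adds/removes DIF host-side; wire carries data only. */
                return data_bytes;
        default:
                /* Pass-through and wire-side insert/strip ops. */
                return data_bytes + dif_bytes;
        }
}

/*
 * FCP_CMND descriptor length: 12 header bytes precede the CDB, the CDB
 * field occupies at least 16 bytes, and the 4-byte FCP_DL follows the
 * (4-byte aligned) CDB -- hence 12 + max(cdb_len, 16) + 4.
 */
static inline uint16_t example_fcp_cmnd_len(uint16_t cdb_len)
{
        return 12 + (cdb_len > 16 ? cdb_len : 16) + 4;
}
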
1584/**
1585 * qla24xx_start_scsi() - Send a SCSI command to the ISP
1586 * @sp: command to send to the ISP
1587 *
1588 * Returns non-zero if a failure occurred, else zero.
1589 */
1590int
1591qla24xx_start_scsi(srb_t *sp)
1592{
1593        int             nseg;
1594        unsigned long   flags;
1595        uint32_t        *clr_ptr;
1596        uint32_t        handle;
1597        struct cmd_type_7 *cmd_pkt;
1598        uint16_t        cnt;
1599        uint16_t        req_cnt;
1600        uint16_t        tot_dsds;
1601        struct req_que *req = NULL;
1602        struct scsi_cmnd *cmd = GET_CMD_SP(sp);
1603        struct scsi_qla_host *vha = sp->vha;
1604        struct qla_hw_data *ha = vha->hw;
1605
1606        /* Setup device pointers. */
1607        req = vha->req;
1608
1609        /* So we know we haven't pci_map'ed anything yet */
1610        tot_dsds = 0;
1611
1612        /* Send marker if required */
1613        if (vha->marker_needed != 0) {
1614                if (qla2x00_marker(vha, ha->base_qpair, 0, 0, MK_SYNC_ALL) !=
1615                    QLA_SUCCESS)
1616                        return QLA_FUNCTION_FAILED;
1617                vha->marker_needed = 0;
1618        }
1619
1620        /* Acquire ring specific lock */
1621        spin_lock_irqsave(&ha->hardware_lock, flags);
1622
1623        handle = qla2xxx_get_next_handle(req);
1624        if (handle == 0)
1625                goto queuing_error;
1626
1627        /* Map the sg table so we have an accurate count of sg entries needed */
1628        if (scsi_sg_count(cmd)) {
1629                nseg = dma_map_sg(&ha->pdev->dev, scsi_sglist(cmd),
1630                    scsi_sg_count(cmd), cmd->sc_data_direction);
1631                if (unlikely(!nseg))
1632                        goto queuing_error;
1633        } else
1634                nseg = 0;
1635
1636        tot_dsds = nseg;
1637        req_cnt = qla24xx_calc_iocbs(vha, tot_dsds);
1638        if (req->cnt < (req_cnt + 2)) {
1639                cnt = IS_SHADOW_REG_CAPABLE(ha) ? *req->out_ptr :
1640                    RD_REG_DWORD_RELAXED(req->req_q_out);
1641                if (req->ring_index < cnt)
1642                        req->cnt = cnt - req->ring_index;
1643                else
1644                        req->cnt = req->length -
1645                                (req->ring_index - cnt);
1646                if (req->cnt < (req_cnt + 2))
1647                        goto queuing_error;
1648        }
1649
1650        /* Build command packet. */
1651        req->current_outstanding_cmd = handle;
1652        req->outstanding_cmds[handle] = sp;
1653        sp->handle = handle;
1654        cmd->host_scribble = (unsigned char *)(unsigned long)handle;
1655        req->cnt -= req_cnt;
1656
1657        cmd_pkt = (struct cmd_type_7 *)req->ring_ptr;
1658        cmd_pkt->handle = MAKE_HANDLE(req->id, handle);
1659
1660        /* Zero out remaining portion of packet. */
1661        /*    tagged queuing modifier -- default is TSK_SIMPLE (0). */
1662        clr_ptr = (uint32_t *)cmd_pkt + 2;
1663        memset(clr_ptr, 0, REQUEST_ENTRY_SIZE - 8);
1664        cmd_pkt->dseg_count = cpu_to_le16(tot_dsds);
1665
1666        /* Set NPORT-ID and LUN number*/
1667        cmd_pkt->nport_handle = cpu_to_le16(sp->fcport->loop_id);
1668        cmd_pkt->port_id[0] = sp->fcport->d_id.b.al_pa;
1669        cmd_pkt->port_id[1] = sp->fcport->d_id.b.area;
1670        cmd_pkt->port_id[2] = sp->fcport->d_id.b.domain;
1671        cmd_pkt->vp_index = sp->vha->vp_idx;
1672
1673        int_to_scsilun(cmd->device->lun, &cmd_pkt->lun);
1674        host_to_fcp_swap((uint8_t *)&cmd_pkt->lun, sizeof(cmd_pkt->lun));
1675
1676        cmd_pkt->task = TSK_SIMPLE;
1677
1678        /* Load SCSI command packet. */
1679        memcpy(cmd_pkt->fcp_cdb, cmd->cmnd, cmd->cmd_len);
1680        host_to_fcp_swap(cmd_pkt->fcp_cdb, sizeof(cmd_pkt->fcp_cdb));
1681
1682        cmd_pkt->byte_count = cpu_to_le32((uint32_t)scsi_bufflen(cmd));
1683
1684        /* Build IOCB segments */
1685        qla24xx_build_scsi_iocbs(sp, cmd_pkt, tot_dsds, req);
1686
1687        /* Set total data segment count. */
1688        cmd_pkt->entry_count = (uint8_t)req_cnt;
1689        wmb();
1690        /* Adjust ring index. */
1691        req->ring_index++;
1692        if (req->ring_index == req->length) {
1693                req->ring_index = 0;
1694                req->ring_ptr = req->ring;
1695        } else
1696                req->ring_ptr++;
1697
1698        sp->flags |= SRB_DMA_VALID;
1699
1700        /* Set chip new ring index. */
1701        WRT_REG_DWORD(req->req_q_in, req->ring_index);
1702
1703        spin_unlock_irqrestore(&ha->hardware_lock, flags);
1704        return QLA_SUCCESS;
1705
1706queuing_error:
1707        if (tot_dsds)
1708                scsi_dma_unmap(cmd);
1709
1710        spin_unlock_irqrestore(&ha->hardware_lock, flags);
1711
1712        return QLA_FUNCTION_FAILED;
1713}
1714
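/*
 * Illustrative sketch, not part of the driver: the free-slot check in
 * qla24xx_start_scsi() treats the request ring as a circular buffer, so
 * when the producer index has wrapped past the consumer the free space
 * folds around the ring length.  A stand-alone model of that
 * computation (hypothetical helper):
 */
static inline uint16_t example_ring_space(uint16_t ring_len,
    uint16_t in_idx, uint16_t out_idx)
{
        if (in_idx < out_idx)
                return out_idx - in_idx;
        return ring_len - (in_idx - out_idx);
}
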
1715/**
1716 * qla24xx_dif_start_scsi() - Send a SCSI command to the ISP
1717 * @sp: command to send to the ISP
1718 *
1719 * Returns non-zero if a failure occurred, else zero.
1720 */
1721int
1722qla24xx_dif_start_scsi(srb_t *sp)
1723{
1724        int                     nseg;
1725        unsigned long           flags;
1726        uint32_t                *clr_ptr;
1727        uint32_t                handle;
1728        uint16_t                cnt;
1729        uint16_t                req_cnt = 0;
1730        uint16_t                tot_dsds;
1731        uint16_t                tot_prot_dsds;
1732        uint16_t                fw_prot_opts = 0;
1733        struct req_que          *req = NULL;
1734        struct rsp_que          *rsp = NULL;
1735        struct scsi_cmnd        *cmd = GET_CMD_SP(sp);
1736        struct scsi_qla_host    *vha = sp->vha;
1737        struct qla_hw_data      *ha = vha->hw;
1738        struct cmd_type_crc_2   *cmd_pkt;
1739        uint32_t                status = 0;
1740
1741#define QDSS_GOT_Q_SPACE        BIT_0
1742
1743        /* Only process protection ops or CDBs > 16 bytes in this routine */
1744        if (scsi_get_prot_op(cmd) == SCSI_PROT_NORMAL) {
1745                if (cmd->cmd_len <= 16)
1746                        return qla24xx_start_scsi(sp);
1747        }
1748
1749        /* Setup device pointers. */
1750        req = vha->req;
1751        rsp = req->rsp;
1752
1753        /* So we know we haven't pci_map'ed anything yet */
1754        tot_dsds = 0;
1755
1756        /* Send marker if required */
1757        if (vha->marker_needed != 0) {
1758                if (qla2x00_marker(vha, ha->base_qpair, 0, 0, MK_SYNC_ALL) !=
1759                    QLA_SUCCESS)
1760                        return QLA_FUNCTION_FAILED;
1761                vha->marker_needed = 0;
1762        }
1763
1764        /* Acquire ring specific lock */
1765        spin_lock_irqsave(&ha->hardware_lock, flags);
1766
1767        handle = qla2xxx_get_next_handle(req);
1768        if (handle == 0)
1769                goto queuing_error;
1770
1771        /* Compute number of required data segments */
1772        /* Map the sg table so we have an accurate count of sg entries needed */
1773        if (scsi_sg_count(cmd)) {
1774                nseg = dma_map_sg(&ha->pdev->dev, scsi_sglist(cmd),
1775                    scsi_sg_count(cmd), cmd->sc_data_direction);
1776                if (unlikely(!nseg))
1777                        goto queuing_error;
1778                else
1779                        sp->flags |= SRB_DMA_VALID;
1780
1781                if ((scsi_get_prot_op(cmd) == SCSI_PROT_READ_INSERT) ||
1782                    (scsi_get_prot_op(cmd) == SCSI_PROT_WRITE_STRIP)) {
1783                        struct qla2_sgx sgx;
1784                        uint32_t        partial;
1785
1786                        memset(&sgx, 0, sizeof(struct qla2_sgx));
1787                        sgx.tot_bytes = scsi_bufflen(cmd);
1788                        sgx.cur_sg = scsi_sglist(cmd);
1789                        sgx.sp = sp;
1790
1791                        nseg = 0;
1792                        while (qla24xx_get_one_block_sg(
1793                            cmd->device->sector_size, &sgx, &partial))
1794                                nseg++;
1795                }
1796        } else
1797                nseg = 0;
1798
1799        /* number of required data segments */
1800        tot_dsds = nseg;
1801
1802        /* Compute number of required protection segments */
1803        if (qla24xx_configure_prot_mode(sp, &fw_prot_opts)) {
1804                nseg = dma_map_sg(&ha->pdev->dev, scsi_prot_sglist(cmd),
1805                    scsi_prot_sg_count(cmd), cmd->sc_data_direction);
1806                if (unlikely(!nseg))
1807                        goto queuing_error;
1808                else
1809                        sp->flags |= SRB_CRC_PROT_DMA_VALID;
1810
1811                if ((scsi_get_prot_op(cmd) == SCSI_PROT_READ_INSERT) ||
1812                    (scsi_get_prot_op(cmd) == SCSI_PROT_WRITE_STRIP)) {
1813                        nseg = scsi_bufflen(cmd) / cmd->device->sector_size;
1814                }
1815        } else {
1816                nseg = 0;
1817        }
1818
1819        req_cnt = 1;
1820        /* Total Data and protection sg segment(s) */
1821        tot_prot_dsds = nseg;
1822        tot_dsds += nseg;
1823        if (req->cnt < (req_cnt + 2)) {
1824                cnt = IS_SHADOW_REG_CAPABLE(ha) ? *req->out_ptr :
1825                    RD_REG_DWORD_RELAXED(req->req_q_out);
1826                if (req->ring_index < cnt)
1827                        req->cnt = cnt - req->ring_index;
1828                else
1829                        req->cnt = req->length -
1830                                (req->ring_index - cnt);
1831                if (req->cnt < (req_cnt + 2))
1832                        goto queuing_error;
1833        }
1834
1835        status |= QDSS_GOT_Q_SPACE;
1836
1837        /* Build header part of command packet (excluding the OPCODE). */
1838        req->current_outstanding_cmd = handle;
1839        req->outstanding_cmds[handle] = sp;
1840        sp->handle = handle;
1841        cmd->host_scribble = (unsigned char *)(unsigned long)handle;
1842        req->cnt -= req_cnt;
1843
1844        /* Fill-in common area */
1845        cmd_pkt = (struct cmd_type_crc_2 *)req->ring_ptr;
1846        cmd_pkt->handle = MAKE_HANDLE(req->id, handle);
1847
1848        clr_ptr = (uint32_t *)cmd_pkt + 2;
1849        memset(clr_ptr, 0, REQUEST_ENTRY_SIZE - 8);
1850
1851        /* Set NPORT-ID and LUN number*/
1852        cmd_pkt->nport_handle = cpu_to_le16(sp->fcport->loop_id);
1853        cmd_pkt->port_id[0] = sp->fcport->d_id.b.al_pa;
1854        cmd_pkt->port_id[1] = sp->fcport->d_id.b.area;
1855        cmd_pkt->port_id[2] = sp->fcport->d_id.b.domain;
1856
1857        int_to_scsilun(cmd->device->lun, &cmd_pkt->lun);
1858        host_to_fcp_swap((uint8_t *)&cmd_pkt->lun, sizeof(cmd_pkt->lun));
1859
1860        /* Total Data and protection segment(s) */
1861        cmd_pkt->dseg_count = cpu_to_le16(tot_dsds);
1862
1863        /* Build IOCB segments and adjust for data protection segments */
1864        if (qla24xx_build_scsi_crc_2_iocbs(sp, (struct cmd_type_crc_2 *)
1865            req->ring_ptr, tot_dsds, tot_prot_dsds, fw_prot_opts) !=
1866                QLA_SUCCESS)
1867                goto queuing_error;
1868
1869        cmd_pkt->entry_count = (uint8_t)req_cnt;
1870        /* Specify response queue number where completion should happen */
1871        cmd_pkt->entry_status = (uint8_t) rsp->id;
1872        cmd_pkt->timeout = cpu_to_le16(0);
1873        wmb();
1874
1875        /* Adjust ring index. */
1876        req->ring_index++;
1877        if (req->ring_index == req->length) {
1878                req->ring_index = 0;
1879                req->ring_ptr = req->ring;
1880        } else
1881                req->ring_ptr++;
1882
1883        /* Set chip new ring index. */
1884        WRT_REG_DWORD(req->req_q_in, req->ring_index);
1885
1886        spin_unlock_irqrestore(&ha->hardware_lock, flags);
1887
1888        return QLA_SUCCESS;
1889
1890queuing_error:
1891        if (status & QDSS_GOT_Q_SPACE) {
1892                req->outstanding_cmds[handle] = NULL;
1893                req->cnt += req_cnt;
1894        }
1895        /* Cleanup will be performed by the caller (queuecommand) */
1896
1897        spin_unlock_irqrestore(&ha->hardware_lock, flags);
1898        return QLA_FUNCTION_FAILED;
1899}
1900
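/*
 * Illustrative sketch, not part of the driver: for READ_INSERT and
 * WRITE_STRIP the firmware is fed one data segment per logical block,
 * so qla24xx_dif_start_scsi() overrides the mapped sg count with
 * bufflen / sector_size.  A 32 KiB transfer with 512-byte sectors thus
 * yields 64 one-block segments, regardless of how few physical sg
 * entries the DMA mapping produced (hypothetical helper):
 */
static inline uint32_t example_one_block_segments(uint32_t bufflen,
    uint32_t sector_size)
{
        return bufflen / sector_size;
}
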
1901/**
1902 * qla2xxx_start_scsi_mq() - Send a SCSI command to the ISP
1903 * @sp: command to send to the ISP
1904 *
1905 * Returns non-zero if a failure occurred, else zero.
1906 */
1907static int
1908qla2xxx_start_scsi_mq(srb_t *sp)
1909{
1910        int             nseg;
1911        unsigned long   flags;
1912        uint32_t        *clr_ptr;
1913        uint32_t        handle;
1914        struct cmd_type_7 *cmd_pkt;
1915        uint16_t        cnt;
1916        uint16_t        req_cnt;
1917        uint16_t        tot_dsds;
1918        struct req_que *req = NULL;
1919        struct scsi_cmnd *cmd = GET_CMD_SP(sp);
1920        struct scsi_qla_host *vha = sp->fcport->vha;
1921        struct qla_hw_data *ha = vha->hw;
1922        struct qla_qpair *qpair = sp->qpair;
1923
1924        /* Acquire qpair specific lock */
1925        spin_lock_irqsave(&qpair->qp_lock, flags);
1926
1927        /* Setup qpair pointers */
1928        req = qpair->req;
1929
1930        /* So we know we haven't pci_map'ed anything yet */
1931        tot_dsds = 0;
1932
1933        /* Send marker if required */
1934        if (vha->marker_needed != 0) {
1935                if (__qla2x00_marker(vha, qpair, 0, 0, MK_SYNC_ALL) !=
1936                    QLA_SUCCESS) {
1937                        spin_unlock_irqrestore(&qpair->qp_lock, flags);
1938                        return QLA_FUNCTION_FAILED;
1939                }
1940                vha->marker_needed = 0;
1941        }
1942
1943        handle = qla2xxx_get_next_handle(req);
1944        if (handle == 0)
1945                goto queuing_error;
1946
1947        /* Map the sg table so we have an accurate count of sg entries needed */
1948        if (scsi_sg_count(cmd)) {
1949                nseg = dma_map_sg(&ha->pdev->dev, scsi_sglist(cmd),
1950                    scsi_sg_count(cmd), cmd->sc_data_direction);
1951                if (unlikely(!nseg))
1952                        goto queuing_error;
1953        } else
1954                nseg = 0;
1955
1956        tot_dsds = nseg;
1957        req_cnt = qla24xx_calc_iocbs(vha, tot_dsds);
1958        if (req->cnt < (req_cnt + 2)) {
1959                cnt = IS_SHADOW_REG_CAPABLE(ha) ? *req->out_ptr :
1960                    RD_REG_DWORD_RELAXED(req->req_q_out);
1961                if (req->ring_index < cnt)
1962                        req->cnt = cnt - req->ring_index;
1963                else
1964                        req->cnt = req->length -
1965                                (req->ring_index - cnt);
1966                if (req->cnt < (req_cnt + 2))
1967                        goto queuing_error;
1968        }
1969
1970        /* Build command packet. */
1971        req->current_outstanding_cmd = handle;
1972        req->outstanding_cmds[handle] = sp;
1973        sp->handle = handle;
1974        cmd->host_scribble = (unsigned char *)(unsigned long)handle;
1975        req->cnt -= req_cnt;
1976
1977        cmd_pkt = (struct cmd_type_7 *)req->ring_ptr;
1978        cmd_pkt->handle = MAKE_HANDLE(req->id, handle);
1979
1980        /* Zero out remaining portion of packet. */
1981        /*    tagged queuing modifier -- default is TSK_SIMPLE (0). */
1982        clr_ptr = (uint32_t *)cmd_pkt + 2;
1983        memset(clr_ptr, 0, REQUEST_ENTRY_SIZE - 8);
1984        cmd_pkt->dseg_count = cpu_to_le16(tot_dsds);
1985
1986        /* Set NPORT-ID and LUN number*/
1987        cmd_pkt->nport_handle = cpu_to_le16(sp->fcport->loop_id);
1988        cmd_pkt->port_id[0] = sp->fcport->d_id.b.al_pa;
1989        cmd_pkt->port_id[1] = sp->fcport->d_id.b.area;
1990        cmd_pkt->port_id[2] = sp->fcport->d_id.b.domain;
1991        cmd_pkt->vp_index = sp->fcport->vha->vp_idx;
1992
1993        int_to_scsilun(cmd->device->lun, &cmd_pkt->lun);
1994        host_to_fcp_swap((uint8_t *)&cmd_pkt->lun, sizeof(cmd_pkt->lun));
1995
1996        cmd_pkt->task = TSK_SIMPLE;
1997
1998        /* Load SCSI command packet. */
1999        memcpy(cmd_pkt->fcp_cdb, cmd->cmnd, cmd->cmd_len);
2000        host_to_fcp_swap(cmd_pkt->fcp_cdb, sizeof(cmd_pkt->fcp_cdb));
2001
2002        cmd_pkt->byte_count = cpu_to_le32((uint32_t)scsi_bufflen(cmd));
2003
2004        /* Build IOCB segments */
2005        qla24xx_build_scsi_iocbs(sp, cmd_pkt, tot_dsds, req);
2006
2007        /* Set total data segment count. */
2008        cmd_pkt->entry_count = (uint8_t)req_cnt;
2009        wmb();
2010        /* Adjust ring index. */
2011        req->ring_index++;
2012        if (req->ring_index == req->length) {
2013                req->ring_index = 0;
2014                req->ring_ptr = req->ring;
2015        } else
2016                req->ring_ptr++;
2017
2018        sp->flags |= SRB_DMA_VALID;
2019
2020        /* Set chip new ring index. */
2021        WRT_REG_DWORD(req->req_q_in, req->ring_index);
2022
2023        spin_unlock_irqrestore(&qpair->qp_lock, flags);
2024        return QLA_SUCCESS;
2025
2026queuing_error:
2027        if (tot_dsds)
2028                scsi_dma_unmap(cmd);
2029
2030        spin_unlock_irqrestore(&qpair->qp_lock, flags);
2031
2032        return QLA_FUNCTION_FAILED;
2033}
2034
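/*
 * Illustrative sketch, not part of the driver: completions are matched
 * back to commands through the packet handle, which the MAKE_HANDLE()
 * uses above pack from the request-queue id (upper 16 bits) and the
 * outstanding-command index (lower 16 bits).  A minimal model of that
 * packing (hypothetical helper):
 */
static inline uint32_t example_pack_handle(uint16_t que_id, uint16_t index)
{
        return ((uint32_t)que_id << 16) | index;
}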
2035
2036/**
2037 * qla2xxx_dif_start_scsi_mq() - Send a SCSI command to the ISP
2038 * @sp: command to send to the ISP
2039 *
2040 * Returns non-zero if a failure occurred, else zero.
2041 */
2042int
2043qla2xxx_dif_start_scsi_mq(srb_t *sp)
2044{
2045        int                     nseg;
2046        unsigned long           flags;
2047        uint32_t                *clr_ptr;
2048        uint32_t                handle;
2049        uint16_t                cnt;
2050        uint16_t                req_cnt = 0;
2051        uint16_t                tot_dsds;
2052        uint16_t                tot_prot_dsds;
2053        uint16_t                fw_prot_opts = 0;
2054        struct req_que          *req = NULL;
2055        struct rsp_que          *rsp = NULL;
2056        struct scsi_cmnd        *cmd = GET_CMD_SP(sp);
2057        struct scsi_qla_host    *vha = sp->fcport->vha;
2058        struct qla_hw_data      *ha = vha->hw;
2059        struct cmd_type_crc_2   *cmd_pkt;
2060        uint32_t                status = 0;
2061        struct qla_qpair        *qpair = sp->qpair;
2062
2063#define QDSS_GOT_Q_SPACE        BIT_0
2064
2065        /* Check for host side state */
2066        if (!qpair->online) {
2067                cmd->result = DID_NO_CONNECT << 16;
2068                return QLA_INTERFACE_ERROR;
2069        }
2070
2071        if (!qpair->difdix_supported &&
2072                scsi_get_prot_op(cmd) != SCSI_PROT_NORMAL) {
2073                cmd->result = DID_NO_CONNECT << 16;
2074                return QLA_INTERFACE_ERROR;
2075        }
2076
2077        /* Only process protection ops or CDBs > 16 bytes in this routine */
2078        if (scsi_get_prot_op(cmd) == SCSI_PROT_NORMAL) {
2079                if (cmd->cmd_len <= 16)
2080                        return qla2xxx_start_scsi_mq(sp);
2081        }
2082
2083        spin_lock_irqsave(&qpair->qp_lock, flags);
2084
2085        /* Setup qpair pointers */
2086        rsp = qpair->rsp;
2087        req = qpair->req;
2088
2089        /* So we know we haven't pci_map'ed anything yet */
2090        tot_dsds = 0;
2091
2092        /* Send marker if required */
2093        if (vha->marker_needed != 0) {
2094                if (__qla2x00_marker(vha, qpair, 0, 0, MK_SYNC_ALL) !=
2095                    QLA_SUCCESS) {
2096                        spin_unlock_irqrestore(&qpair->qp_lock, flags);
2097                        return QLA_FUNCTION_FAILED;
2098                }
2099                vha->marker_needed = 0;
2100        }
2101
2102        handle = qla2xxx_get_next_handle(req);
2103        if (handle == 0)
2104                goto queuing_error;
2105
2106        /* Compute number of required data segments */
2107        /* Map the sg table so we have an accurate count of sg entries needed */
2108        if (scsi_sg_count(cmd)) {
2109                nseg = dma_map_sg(&ha->pdev->dev, scsi_sglist(cmd),
2110                    scsi_sg_count(cmd), cmd->sc_data_direction);
2111                if (unlikely(!nseg))
2112                        goto queuing_error;
2113                else
2114                        sp->flags |= SRB_DMA_VALID;
2115
2116                if ((scsi_get_prot_op(cmd) == SCSI_PROT_READ_INSERT) ||
2117                    (scsi_get_prot_op(cmd) == SCSI_PROT_WRITE_STRIP)) {
2118                        struct qla2_sgx sgx;
2119                        uint32_t        partial;
2120
2121                        memset(&sgx, 0, sizeof(struct qla2_sgx));
2122                        sgx.tot_bytes = scsi_bufflen(cmd);
2123                        sgx.cur_sg = scsi_sglist(cmd);
2124                        sgx.sp = sp;
2125
2126                        nseg = 0;
2127                        while (qla24xx_get_one_block_sg(
2128                            cmd->device->sector_size, &sgx, &partial))
2129                                nseg++;
2130                }
2131        } else
2132                nseg = 0;
2133
2134        /* number of required data segments */
2135        tot_dsds = nseg;
2136
2137        /* Compute number of required protection segments */
2138        if (qla24xx_configure_prot_mode(sp, &fw_prot_opts)) {
2139                nseg = dma_map_sg(&ha->pdev->dev, scsi_prot_sglist(cmd),
2140                    scsi_prot_sg_count(cmd), cmd->sc_data_direction);
2141                if (unlikely(!nseg))
2142                        goto queuing_error;
2143                else
2144                        sp->flags |= SRB_CRC_PROT_DMA_VALID;
2145
2146                if ((scsi_get_prot_op(cmd) == SCSI_PROT_READ_INSERT) ||
2147                    (scsi_get_prot_op(cmd) == SCSI_PROT_WRITE_STRIP)) {
2148                        nseg = scsi_bufflen(cmd) / cmd->device->sector_size;
2149                }
2150        } else {
2151                nseg = 0;
2152        }
2153
2154        req_cnt = 1;
2155        /* Total Data and protection sg segment(s) */
2156        tot_prot_dsds = nseg;
2157        tot_dsds += nseg;
2158        if (req->cnt < (req_cnt + 2)) {
2159                cnt = IS_SHADOW_REG_CAPABLE(ha) ? *req->out_ptr :
2160                    RD_REG_DWORD_RELAXED(req->req_q_out);
2161                if (req->ring_index < cnt)
2162                        req->cnt = cnt - req->ring_index;
2163                else
2164                        req->cnt = req->length -
2165                                (req->ring_index - cnt);
2166                if (req->cnt < (req_cnt + 2))
2167                        goto queuing_error;
2168        }
2169
2170        status |= QDSS_GOT_Q_SPACE;
2171
2172        /* Build header part of command packet (excluding the OPCODE). */
2173        req->current_outstanding_cmd = handle;
2174        req->outstanding_cmds[handle] = sp;
2175        sp->handle = handle;
2176        cmd->host_scribble = (unsigned char *)(unsigned long)handle;
2177        req->cnt -= req_cnt;
2178
2179        /* Fill-in common area */
2180        cmd_pkt = (struct cmd_type_crc_2 *)req->ring_ptr;
2181        cmd_pkt->handle = MAKE_HANDLE(req->id, handle);
2182
2183        clr_ptr = (uint32_t *)cmd_pkt + 2;
2184        memset(clr_ptr, 0, REQUEST_ENTRY_SIZE - 8);
2185
2186        /* Set NPORT-ID and LUN number*/
2187        cmd_pkt->nport_handle = cpu_to_le16(sp->fcport->loop_id);
2188        cmd_pkt->port_id[0] = sp->fcport->d_id.b.al_pa;
2189        cmd_pkt->port_id[1] = sp->fcport->d_id.b.area;
2190        cmd_pkt->port_id[2] = sp->fcport->d_id.b.domain;
2191
2192        int_to_scsilun(cmd->device->lun, &cmd_pkt->lun);
2193        host_to_fcp_swap((uint8_t *)&cmd_pkt->lun, sizeof(cmd_pkt->lun));
2194
2195        /* Total Data and protection segment(s) */
2196        cmd_pkt->dseg_count = cpu_to_le16(tot_dsds);
2197
2198        /* Build IOCB segments and adjust for data protection segments */
2199        if (qla24xx_build_scsi_crc_2_iocbs(sp, (struct cmd_type_crc_2 *)
2200            req->ring_ptr, tot_dsds, tot_prot_dsds, fw_prot_opts) !=
2201                QLA_SUCCESS)
2202                goto queuing_error;
2203
2204        cmd_pkt->entry_count = (uint8_t)req_cnt;
2205        cmd_pkt->timeout = cpu_to_le16(0);
2206        wmb();
2207
2208        /* Adjust ring index. */
2209        req->ring_index++;
2210        if (req->ring_index == req->length) {
2211                req->ring_index = 0;
2212                req->ring_ptr = req->ring;
2213        } else
2214                req->ring_ptr++;
2215
2216        /* Set chip new ring index. */
2217        WRT_REG_DWORD(req->req_q_in, req->ring_index);
2218
2219        /* Manage unprocessed RIO/ZIO commands in response queue. */
2220        if (vha->flags.process_response_queue &&
2221            rsp->ring_ptr->signature != RESPONSE_PROCESSED)
2222                qla24xx_process_response_queue(vha, rsp);
2223
2224        spin_unlock_irqrestore(&qpair->qp_lock, flags);
2225
2226        return QLA_SUCCESS;
2227
2228queuing_error:
2229        if (status & QDSS_GOT_Q_SPACE) {
2230                req->outstanding_cmds[handle] = NULL;
2231                req->cnt += req_cnt;
2232        }
2233        /* Cleanup will be performed by the caller (queuecommand) */
2234
2235        spin_unlock_irqrestore(&qpair->qp_lock, flags);
2236        return QLA_FUNCTION_FAILED;
2237}
2238
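/*
 * Illustrative sketch, not part of the driver: the DIF start routines
 * accumulate progress bits (QDSS_GOT_Q_SPACE) so their queuing_error
 * paths undo only the steps that completed -- releasing the reserved
 * ring entries and the outstanding-command slot -- while DMA unmapping
 * is left to the caller.  The pattern in miniature (hypothetical
 * helper):
 */
static inline void example_unwind_queue_space(struct req_que *req,
    uint32_t handle, uint16_t req_cnt, uint32_t status)
{
        if (status & BIT_0) {   /* queue space had been reserved */
                req->outstanding_cmds[handle] = NULL;
                req->cnt += req_cnt;
        }
}
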
2239/* Generic Control-SRB manipulation functions. */
2240
2241/* hardware_lock assumed to be held. */
2242
2243void *
2244__qla2x00_alloc_iocbs(struct qla_qpair *qpair, srb_t *sp)
2245{
2246        scsi_qla_host_t *vha = qpair->vha;
2247        struct qla_hw_data *ha = vha->hw;
2248        struct req_que *req = qpair->req;
2249        device_reg_t *reg = ISP_QUE_REG(ha, req->id);
2250        uint32_t handle;
2251        request_t *pkt;
2252        uint16_t cnt, req_cnt;
2253
2254        pkt = NULL;
2255        req_cnt = 1;
2256        handle = 0;
2257
2258        if (sp && (sp->type != SRB_SCSI_CMD)) {
2259                /* Adjust entry-counts as needed. */
2260                req_cnt = sp->iocbs;
2261        }
2262
2263        /* Check for room on request queue. */
2264        if (req->cnt < req_cnt + 2) {
2265                if (qpair->use_shadow_reg)
2266                        cnt = *req->out_ptr;
2267                else if (ha->mqenable || IS_QLA83XX(ha) || IS_QLA27XX(ha) ||
2268                    IS_QLA28XX(ha))
2269                        cnt = RD_REG_DWORD(&reg->isp25mq.req_q_out);
2270                else if (IS_P3P_TYPE(ha))
2271                        cnt = RD_REG_DWORD(&reg->isp82.req_q_out);
2272                else if (IS_FWI2_CAPABLE(ha))
2273                        cnt = RD_REG_DWORD(&reg->isp24.req_q_out);
2274                else if (IS_QLAFX00(ha))
2275                        cnt = RD_REG_DWORD(&reg->ispfx00.req_q_out);
2276                else
2277                        cnt = qla2x00_debounce_register(
2278                            ISP_REQ_Q_OUT(ha, &reg->isp));
2279
2280                if (req->ring_index < cnt)
2281                        req->cnt = cnt - req->ring_index;
2282                else
2283                        req->cnt = req->length -
2284                            (req->ring_index - cnt);
2285        }
2286        if (req->cnt < req_cnt + 2)
2287                goto queuing_error;
2288
2289        if (sp) {
2290                handle = qla2xxx_get_next_handle(req);
2291                if (handle == 0) {
2292                        ql_log(ql_log_warn, vha, 0x700b,
2293                            "No room on outstanding cmd array.\n");
2294                        goto queuing_error;
2295                }
2296
2297                /* Prep command array. */
2298                req->current_outstanding_cmd = handle;
2299                req->outstanding_cmds[handle] = sp;
2300                sp->handle = handle;
2301        }
2302
2303        /* Prep packet */
2304        req->cnt -= req_cnt;
2305        pkt = req->ring_ptr;
2306        memset(pkt, 0, REQUEST_ENTRY_SIZE);
2307        if (IS_QLAFX00(ha)) {
2308                WRT_REG_BYTE((void __iomem *)&pkt->entry_count, req_cnt);
2309                WRT_REG_WORD((void __iomem *)&pkt->handle, handle);
2310        } else {
2311                pkt->entry_count = req_cnt;
2312                pkt->handle = handle;
2313        }
2314
2315        return pkt;
2316
2317queuing_error:
2318        qpair->tgt_counters.num_alloc_iocb_failed++;
2319        return pkt;
2320}
2321
2322void *
2323qla2x00_alloc_iocbs_ready(struct qla_qpair *qpair, srb_t *sp)
2324{
2325        scsi_qla_host_t *vha = qpair->vha;
2326
2327        if (qla2x00_reset_active(vha))
2328                return NULL;
2329
2330        return __qla2x00_alloc_iocbs(qpair, sp);
2331}
2332
2333void *
2334qla2x00_alloc_iocbs(struct scsi_qla_host *vha, srb_t *sp)
2335{
2336        return __qla2x00_alloc_iocbs(vha->hw->base_qpair, sp);
2337}
2338
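/*
 * Illustrative sketch, not part of the driver: a typical control-SRB
 * caller allocates a packet under the appropriate lock, fills in the
 * type-specific fields, and only then bumps the ring, e.g.
 * (hypothetical caller; the real entry point is qla2x00_start_sp()):
 *
 *      pkt = qla2x00_alloc_iocbs(vha, sp);
 *      if (!pkt)
 *              return QLA_FUNCTION_FAILED;
 *      qla24xx_login_iocb(sp, (struct logio_entry_24xx *)pkt);
 *      wmb();
 *      qla2x00_start_iocbs(vha, vha->req);
 */
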
2339static void
2340qla24xx_prli_iocb(srb_t *sp, struct logio_entry_24xx *logio)
2341{
2342        struct srb_iocb *lio = &sp->u.iocb_cmd;
2343
2344        logio->entry_type = LOGINOUT_PORT_IOCB_TYPE;
2345        logio->control_flags = cpu_to_le16(LCF_COMMAND_PRLI);
2346        if (lio->u.logio.flags & SRB_LOGIN_NVME_PRLI) {
2347                logio->control_flags |= LCF_NVME_PRLI;
2348                if (sp->vha->flags.nvme_first_burst)
2349                        logio->io_parameter[0] = NVME_PRLI_SP_FIRST_BURST;
2350        }
2351
2352        logio->nport_handle = cpu_to_le16(sp->fcport->loop_id);
2353        logio->port_id[0] = sp->fcport->d_id.b.al_pa;
2354        logio->port_id[1] = sp->fcport->d_id.b.area;
2355        logio->port_id[2] = sp->fcport->d_id.b.domain;
2356        logio->vp_index = sp->vha->vp_idx;
2357}
2358
2359static void
2360qla24xx_login_iocb(srb_t *sp, struct logio_entry_24xx *logio)
2361{
2362        struct srb_iocb *lio = &sp->u.iocb_cmd;
2363
2364        logio->entry_type = LOGINOUT_PORT_IOCB_TYPE;
2365        if (lio->u.logio.flags & SRB_LOGIN_PRLI_ONLY) {
2366                logio->control_flags = cpu_to_le16(LCF_COMMAND_PRLI);
2367        } else {
2368                logio->control_flags = cpu_to_le16(LCF_COMMAND_PLOGI);
2369                if (lio->u.logio.flags & SRB_LOGIN_COND_PLOGI)
2370                        logio->control_flags |= cpu_to_le16(LCF_COND_PLOGI);
2371                if (lio->u.logio.flags & SRB_LOGIN_SKIP_PRLI)
2372                        logio->control_flags |= cpu_to_le16(LCF_SKIP_PRLI);
2373        }
2374        logio->nport_handle = cpu_to_le16(sp->fcport->loop_id);
2375        logio->port_id[0] = sp->fcport->d_id.b.al_pa;
2376        logio->port_id[1] = sp->fcport->d_id.b.area;
2377        logio->port_id[2] = sp->fcport->d_id.b.domain;
2378        logio->vp_index = sp->vha->vp_idx;
2379}
2380
2381static void
2382qla2x00_login_iocb(srb_t *sp, struct mbx_entry *mbx)
2383{
2384        struct qla_hw_data *ha = sp->vha->hw;
2385        struct srb_iocb *lio = &sp->u.iocb_cmd;
2386        uint16_t opts;
2387
2388        mbx->entry_type = MBX_IOCB_TYPE;
2389        SET_TARGET_ID(ha, mbx->loop_id, sp->fcport->loop_id);
2390        mbx->mb0 = cpu_to_le16(MBC_LOGIN_FABRIC_PORT);
2391        opts = lio->u.logio.flags & SRB_LOGIN_COND_PLOGI ? BIT_0 : 0;
2392        opts |= lio->u.logio.flags & SRB_LOGIN_SKIP_PRLI ? BIT_1 : 0;
2393        if (HAS_EXTENDED_IDS(ha)) {
2394                mbx->mb1 = cpu_to_le16(sp->fcport->loop_id);
2395                mbx->mb10 = cpu_to_le16(opts);
2396        } else {
2397                mbx->mb1 = cpu_to_le16((sp->fcport->loop_id << 8) | opts);
2398        }
2399        mbx->mb2 = cpu_to_le16(sp->fcport->d_id.b.domain);
2400        mbx->mb3 = cpu_to_le16(sp->fcport->d_id.b.area << 8 |
2401            sp->fcport->d_id.b.al_pa);
2402        mbx->mb9 = cpu_to_le16(sp->vha->vp_idx);
2403}
2404
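/*
 * Illustrative sketch, not part of the driver: on adapters without
 * extended loop IDs the 8-bit loop id shares mailbox 1 with the option
 * bits (loop_id << 8 | opts), while extended-ID adapters carry the
 * options separately in mailbox 10, as in qla2x00_login_iocb() above.
 * The packing in isolation (hypothetical helper):
 */
static inline uint16_t example_pack_login_mb1(uint16_t loop_id,
    uint16_t opts, bool has_extended_ids)
{
        return has_extended_ids ? loop_id : (loop_id << 8) | opts;
}
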
2405static void
2406qla24xx_logout_iocb(srb_t *sp, struct logio_entry_24xx *logio)
2407{
2408        u16 control_flags = LCF_COMMAND_LOGO;
2409        logio->entry_type = LOGINOUT_PORT_IOCB_TYPE;
2410
2411        if (sp->fcport->explicit_logout) {
2412                control_flags |= LCF_EXPL_LOGO|LCF_FREE_NPORT;
2413        } else {
2414                control_flags |= LCF_IMPL_LOGO;
2415
2416                if (!sp->fcport->keep_nport_handle)
2417                        control_flags |= LCF_FREE_NPORT;
2418        }
2419
2420        logio->control_flags = cpu_to_le16(control_flags);
2421        logio->nport_handle = cpu_to_le16(sp->fcport->loop_id);
2422        logio->port_id[0] = sp->fcport->d_id.b.al_pa;
2423        logio->port_id[1] = sp->fcport->d_id.b.area;
2424        logio->port_id[2] = sp->fcport->d_id.b.domain;
2425        logio->vp_index = sp->vha->vp_idx;
2426}
2427
2428static void
2429qla2x00_logout_iocb(srb_t *sp, struct mbx_entry *mbx)
2430{
2431        struct qla_hw_data *ha = sp->vha->hw;
2432
2433        mbx->entry_type = MBX_IOCB_TYPE;
2434        SET_TARGET_ID(ha, mbx->loop_id, sp->fcport->loop_id);
2435        mbx->mb0 = cpu_to_le16(MBC_LOGOUT_FABRIC_PORT);
2436        mbx->mb1 = HAS_EXTENDED_IDS(ha) ?
2437            cpu_to_le16(sp->fcport->loop_id) :
2438            cpu_to_le16(sp->fcport->loop_id << 8);
2439        mbx->mb2 = cpu_to_le16(sp->fcport->d_id.b.domain);
2440        mbx->mb3 = cpu_to_le16(sp->fcport->d_id.b.area << 8 |
2441            sp->fcport->d_id.b.al_pa);
2442        mbx->mb9 = cpu_to_le16(sp->vha->vp_idx);
2443        /* Implicit: mbx->mb10 = 0. */
2444}
2445
2446static void
2447qla24xx_adisc_iocb(srb_t *sp, struct logio_entry_24xx *logio)
2448{
2449        logio->entry_type = LOGINOUT_PORT_IOCB_TYPE;
2450        logio->control_flags = cpu_to_le16(LCF_COMMAND_ADISC);
2451        logio->nport_handle = cpu_to_le16(sp->fcport->loop_id);
2452        logio->vp_index = sp->vha->vp_idx;
2453}
2454
2455static void
2456qla2x00_adisc_iocb(srb_t *sp, struct mbx_entry *mbx)
2457{
2458        struct qla_hw_data *ha = sp->vha->hw;
2459
2460        mbx->entry_type = MBX_IOCB_TYPE;
2461        SET_TARGET_ID(ha, mbx->loop_id, sp->fcport->loop_id);
2462        mbx->mb0 = cpu_to_le16(MBC_GET_PORT_DATABASE);
2463        if (HAS_EXTENDED_IDS(ha)) {
2464                mbx->mb1 = cpu_to_le16(sp->fcport->loop_id);
2465                mbx->mb10 = cpu_to_le16(BIT_0);
2466        } else {
2467                mbx->mb1 = cpu_to_le16((sp->fcport->loop_id << 8) | BIT_0);
2468        }
2469        mbx->mb2 = cpu_to_le16(MSW(ha->async_pd_dma));
2470        mbx->mb3 = cpu_to_le16(LSW(ha->async_pd_dma));
2471        mbx->mb6 = cpu_to_le16(MSW(MSD(ha->async_pd_dma)));
2472        mbx->mb7 = cpu_to_le16(LSW(MSD(ha->async_pd_dma)));
2473        mbx->mb9 = cpu_to_le16(sp->vha->vp_idx);
2474}
2475
2476static void
2477qla24xx_tm_iocb(srb_t *sp, struct tsk_mgmt_entry *tsk)
2478{
2479        uint32_t flags;
2480        uint64_t lun;
2481        struct fc_port *fcport = sp->fcport;
2482        scsi_qla_host_t *vha = fcport->vha;
2483        struct qla_hw_data *ha = vha->hw;
2484        struct srb_iocb *iocb = &sp->u.iocb_cmd;
2485        struct req_que *req = vha->req;
2486
2487        flags = iocb->u.tmf.flags;
2488        lun = iocb->u.tmf.lun;
2489
2490        tsk->entry_type = TSK_MGMT_IOCB_TYPE;
2491        tsk->entry_count = 1;
2492        tsk->handle = MAKE_HANDLE(req->id, tsk->handle);
2493        tsk->nport_handle = cpu_to_le16(fcport->loop_id);
2494        tsk->timeout = cpu_to_le16(ha->r_a_tov / 10 * 2);
2495        tsk->control_flags = cpu_to_le32(flags);
2496        tsk->port_id[0] = fcport->d_id.b.al_pa;
2497        tsk->port_id[1] = fcport->d_id.b.area;
2498        tsk->port_id[2] = fcport->d_id.b.domain;
2499        tsk->vp_index = fcport->vha->vp_idx;
2500
2501        if (flags == TCF_LUN_RESET) {
2502                int_to_scsilun(lun, &tsk->lun);
2503                host_to_fcp_swap((uint8_t *)&tsk->lun,
2504                        sizeof(tsk->lun));
2505        }
2506}
2507
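/*
 * Illustrative sketch, not part of the driver: the LUN handling in
 * qla24xx_tm_iocb() above (and in the SCSI start routines) always pairs
 * int_to_scsilun(), which produces the 8-byte SAM-format LUN, with
 * host_to_fcp_swap(), which swaps it into the byte order the firmware
 * expects.  Combined (hypothetical helper):
 */
static inline void example_set_fcp_lun(uint64_t lun, struct scsi_lun *flun)
{
        int_to_scsilun(lun, flun);
        host_to_fcp_swap((uint8_t *)flun, sizeof(*flun));
}
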
2508void qla2x00_init_timer(srb_t *sp, unsigned long tmo)
2509{
2510        timer_setup(&sp->u.iocb_cmd.timer, qla2x00_sp_timeout, 0);
2511        sp->u.iocb_cmd.timer.expires = jiffies + tmo * HZ;
2512        sp->free = qla2x00_sp_free;
2513        if (IS_QLAFX00(sp->vha->hw) && sp->type == SRB_FXIOCB_DCMD)
2514                init_completion(&sp->u.iocb_cmd.u.fxiocb.fxiocb_comp);
2515        sp->start_timer = 1;
2516}
2517
2518static void qla2x00_els_dcmd_sp_free(srb_t *sp)
2519{
2520        struct srb_iocb *elsio = &sp->u.iocb_cmd;
2521
2522        kfree(sp->fcport);
2523
2524        if (elsio->u.els_logo.els_logo_pyld)
2525                dma_free_coherent(&sp->vha->hw->pdev->dev, DMA_POOL_SIZE,
2526                    elsio->u.els_logo.els_logo_pyld,
2527                    elsio->u.els_logo.els_logo_pyld_dma);
2528
2529        del_timer(&elsio->timer);
2530        qla2x00_rel_sp(sp);
2531}
2532
2533static void
2534qla2x00_els_dcmd_iocb_timeout(void *data)
2535{
2536        srb_t *sp = data;
2537        fc_port_t *fcport = sp->fcport;
2538        struct scsi_qla_host *vha = sp->vha;
2539        struct srb_iocb *lio = &sp->u.iocb_cmd;
2540        unsigned long flags = 0;
2541        int res, h;
2542
2543        ql_dbg(ql_dbg_io, vha, 0x3069,
2544            "%s Timeout, hdl=%x, portid=%02x%02x%02x\n",
2545            sp->name, sp->handle, fcport->d_id.b.domain, fcport->d_id.b.area,
2546            fcport->d_id.b.al_pa);
2547
2548        /* Abort the exchange */
2549        res = qla24xx_async_abort_cmd(sp, false);
2550        if (res) {
2551                ql_dbg(ql_dbg_io, vha, 0x3070,
2552                    "mbx abort_command failed.\n");
2553                spin_lock_irqsave(sp->qpair->qp_lock_ptr, flags);
2554                for (h = 1; h < sp->qpair->req->num_outstanding_cmds; h++) {
2555                        if (sp->qpair->req->outstanding_cmds[h] == sp) {
2556                                sp->qpair->req->outstanding_cmds[h] = NULL;
2557                                break;
2558                        }
2559                }
2560                spin_unlock_irqrestore(sp->qpair->qp_lock_ptr, flags);
2561                complete(&lio->u.els_logo.comp);
2562        } else {
2563                ql_dbg(ql_dbg_io, vha, 0x3071,
2564                    "mbx abort_command success.\n");
2565        }
2566}
2567
2568static void qla2x00_els_dcmd_sp_done(srb_t *sp, int res)
2569{
2570        fc_port_t *fcport = sp->fcport;
2571        struct srb_iocb *lio = &sp->u.iocb_cmd;
2572        struct scsi_qla_host *vha = sp->vha;
2573
2574        ql_dbg(ql_dbg_io, vha, 0x3072,
2575            "%s hdl=%x, portid=%02x%02x%02x done\n",
2576            sp->name, sp->handle, fcport->d_id.b.domain,
2577            fcport->d_id.b.area, fcport->d_id.b.al_pa);
2578
2579        complete(&lio->u.els_logo.comp);
2580}
2581
2582int
2583qla24xx_els_dcmd_iocb(scsi_qla_host_t *vha, int els_opcode,
2584    port_id_t remote_did)
2585{
2586        srb_t *sp;
2587        fc_port_t *fcport = NULL;
2588        struct srb_iocb *elsio = NULL;
2589        struct qla_hw_data *ha = vha->hw;
2590        struct els_logo_payload logo_pyld;
2591        int rval = QLA_SUCCESS;
2592
2593        fcport = qla2x00_alloc_fcport(vha, GFP_KERNEL);
2594        if (!fcport) {
2595                ql_log(ql_log_info, vha, 0x70e5, "fcport allocation failed\n");
2596                return -ENOMEM;
2597        }
2598
2599        /* Alloc SRB structure */
2600        sp = qla2x00_get_sp(vha, fcport, GFP_KERNEL);
2601        if (!sp) {
2602                kfree(fcport);
2603                ql_log(ql_log_info, vha, 0x70e6,
2604                    "SRB allocation failed\n");
2605                return -ENOMEM;
2606        }
2607
2608        elsio = &sp->u.iocb_cmd;
2609        fcport->loop_id = 0xFFFF;
2610        fcport->d_id.b.domain = remote_did.b.domain;
2611        fcport->d_id.b.area = remote_did.b.area;
2612        fcport->d_id.b.al_pa = remote_did.b.al_pa;
2613
2614        ql_dbg(ql_dbg_io, vha, 0x3073, "portid=%02x%02x%02x\n",
2615            fcport->d_id.b.domain, fcport->d_id.b.area, fcport->d_id.b.al_pa);
2616
2617        sp->type = SRB_ELS_DCMD;
2618        sp->name = "ELS_DCMD";
2619        sp->fcport = fcport;
2620        elsio->timeout = qla2x00_els_dcmd_iocb_timeout;
2621        qla2x00_init_timer(sp, ELS_DCMD_TIMEOUT);
2622        init_completion(&sp->u.iocb_cmd.u.els_logo.comp);
2623        sp->done = qla2x00_els_dcmd_sp_done;
2624        sp->free = qla2x00_els_dcmd_sp_free;
2625
2626        elsio->u.els_logo.els_logo_pyld = dma_alloc_coherent(&ha->pdev->dev,
2627                            DMA_POOL_SIZE, &elsio->u.els_logo.els_logo_pyld_dma,
2628                            GFP_KERNEL);
2629
2630        if (!elsio->u.els_logo.els_logo_pyld) {
2631                sp->free(sp);
2632                return QLA_FUNCTION_FAILED;
2633        }
2634
2635        memset(&logo_pyld, 0, sizeof(struct els_logo_payload));
2636
2637        elsio->u.els_logo.els_cmd = els_opcode;
2638        logo_pyld.opcode = els_opcode;
2639        logo_pyld.s_id[0] = vha->d_id.b.al_pa;
2640        logo_pyld.s_id[1] = vha->d_id.b.area;
2641        logo_pyld.s_id[2] = vha->d_id.b.domain;
2642        host_to_fcp_swap(logo_pyld.s_id, sizeof(uint32_t));
2643        memcpy(&logo_pyld.wwpn, vha->port_name, WWN_SIZE);
2644
2645        memcpy(elsio->u.els_logo.els_logo_pyld, &logo_pyld,
2646            sizeof(struct els_logo_payload));
2647        ql_dbg(ql_dbg_disc + ql_dbg_buffer, vha, 0x3075, "LOGO buffer:");
2648        ql_dump_buffer(ql_dbg_disc + ql_dbg_buffer, vha, 0x010a,
2649                       elsio->u.els_logo.els_logo_pyld,
2650                       sizeof(*elsio->u.els_logo.els_logo_pyld));
2651
2652        rval = qla2x00_start_sp(sp);
2653        if (rval != QLA_SUCCESS) {
2654                sp->free(sp);
2655                return QLA_FUNCTION_FAILED;
2656        }
2657
2658        ql_dbg(ql_dbg_io, vha, 0x3074,
2659            "%s LOGO sent, hdl=%x, loopid=%x, portid=%02x%02x%02x.\n",
2660            sp->name, sp->handle, fcport->loop_id, fcport->d_id.b.domain,
2661            fcport->d_id.b.area, fcport->d_id.b.al_pa);
2662
2663        wait_for_completion(&elsio->u.els_logo.comp);
2664
2665        sp->free(sp);
2666        return rval;
2667}
2668
2669static void
2670qla24xx_els_logo_iocb(srb_t *sp, struct els_entry_24xx *els_iocb)
2671{
2672        scsi_qla_host_t *vha = sp->vha;
2673        struct srb_iocb *elsio = &sp->u.iocb_cmd;
2674
2675        els_iocb->entry_type = ELS_IOCB_TYPE;
2676        els_iocb->entry_count = 1;
2677        els_iocb->sys_define = 0;
2678        els_iocb->entry_status = 0;
2679        els_iocb->handle = sp->handle;
2680        els_iocb->nport_handle = cpu_to_le16(sp->fcport->loop_id);
2681        els_iocb->tx_dsd_count = 1;
2682        els_iocb->vp_index = vha->vp_idx;
2683        els_iocb->sof_type = EST_SOFI3;
2684        els_iocb->rx_dsd_count = 0;
2685        els_iocb->opcode = elsio->u.els_logo.els_cmd;
2686
2687        els_iocb->port_id[0] = sp->fcport->d_id.b.al_pa;
2688        els_iocb->port_id[1] = sp->fcport->d_id.b.area;
2689        els_iocb->port_id[2] = sp->fcport->d_id.b.domain;
2690        /* For S_ID the byte order is different from that of D_ID */
2691        els_iocb->s_id[1] = vha->d_id.b.al_pa;
2692        els_iocb->s_id[2] = vha->d_id.b.area;
2693        els_iocb->s_id[0] = vha->d_id.b.domain;
2694
2695        if (elsio->u.els_logo.els_cmd == ELS_DCMD_PLOGI) {
2696                els_iocb->control_flags = 0;
2697                els_iocb->tx_byte_count = els_iocb->tx_len =
2698                        cpu_to_le32(sizeof(struct els_plogi_payload));
2699                put_unaligned_le64(elsio->u.els_plogi.els_plogi_pyld_dma,
2700                                   &els_iocb->tx_address);
2701                els_iocb->rx_dsd_count = 1;
2702                els_iocb->rx_byte_count = els_iocb->rx_len =
2703                        cpu_to_le32(sizeof(struct els_plogi_payload));
2704                put_unaligned_le64(elsio->u.els_plogi.els_resp_pyld_dma,
2705                                   &els_iocb->rx_address);
2706
2707                ql_dbg(ql_dbg_io + ql_dbg_buffer, vha, 0x3073,
2708                    "PLOGI ELS IOCB:\n");
2709                ql_dump_buffer(ql_log_info, vha, 0x0109,
2710                    (uint8_t *)els_iocb,
2711                    sizeof(*els_iocb));
2712        } else {
2713                els_iocb->control_flags = 1 << 13;
2714                els_iocb->tx_byte_count =
2715                        cpu_to_le32(sizeof(struct els_logo_payload));
2716                put_unaligned_le64(elsio->u.els_logo.els_logo_pyld_dma,
2717                                   &els_iocb->tx_address);
2718                els_iocb->tx_len = cpu_to_le32(sizeof(struct els_logo_payload));
2719
2720                els_iocb->rx_byte_count = 0;
2721                els_iocb->rx_address = 0;
2722                els_iocb->rx_len = 0;
2723                ql_dbg(ql_dbg_io + ql_dbg_buffer, vha, 0x3076,
2724                       "LOGO ELS IOCB:");
2725                ql_dump_buffer(ql_log_info, vha, 0x010b,
2726                               els_iocb,
2727                               sizeof(*els_iocb));
2728        }
2729
2730        sp->vha->qla_stats.control_requests++;
2731}
2732
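/*
 * Illustrative sketch, not part of the driver: note the asymmetry in
 * qla24xx_els_logo_iocb() above -- port_id[] is filled
 * al_pa/area/domain, while s_id[] places the domain in byte 0 and
 * al_pa/area in bytes 1 and 2.  Both layouts from port_id_t values
 * (hypothetical helper):
 */
static inline void example_fill_els_ids(struct els_entry_24xx *els,
    port_id_t did, port_id_t sid)
{
        els->port_id[0] = did.b.al_pa;
        els->port_id[1] = did.b.area;
        els->port_id[2] = did.b.domain;

        /* For S_ID the byte order differs from D_ID. */
        els->s_id[0] = sid.b.domain;
        els->s_id[1] = sid.b.al_pa;
        els->s_id[2] = sid.b.area;
}
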
2733static void
2734qla2x00_els_dcmd2_iocb_timeout(void *data)
2735{
2736        srb_t *sp = data;
2737        fc_port_t *fcport = sp->fcport;
2738        struct scsi_qla_host *vha = sp->vha;
2739        unsigned long flags = 0;
2740        int res, h;
2741
2742        ql_dbg(ql_dbg_io + ql_dbg_disc, vha, 0x3069,
2743            "%s hdl=%x ELS Timeout, %8phC portid=%06x\n",
2744            sp->name, sp->handle, fcport->port_name, fcport->d_id.b24);
2745
2746        /* Abort the exchange */
2747        res = qla24xx_async_abort_cmd(sp, false);
2748        ql_dbg(ql_dbg_io, vha, 0x3070,
2749            "mbx abort_command %s\n",
2750            (res == QLA_SUCCESS) ? "successful" : "failed");
2751        if (res) {
2752                spin_lock_irqsave(sp->qpair->qp_lock_ptr, flags);
2753                for (h = 1; h < sp->qpair->req->num_outstanding_cmds; h++) {
2754                        if (sp->qpair->req->outstanding_cmds[h] == sp) {
2755                                sp->qpair->req->outstanding_cmds[h] = NULL;
2756                                break;
2757                        }
2758                }
2759                spin_unlock_irqrestore(sp->qpair->qp_lock_ptr, flags);
2760                sp->done(sp, QLA_FUNCTION_TIMEOUT);
2761        }
2762}
2763
2764void qla2x00_els_dcmd2_free(scsi_qla_host_t *vha, struct els_plogi *els_plogi)
2765{
2766        if (els_plogi->els_plogi_pyld)
2767                dma_free_coherent(&vha->hw->pdev->dev,
2768                                  els_plogi->tx_size,
2769                                  els_plogi->els_plogi_pyld,
2770                                  els_plogi->els_plogi_pyld_dma);
2771
2772        if (els_plogi->els_resp_pyld)
2773                dma_free_coherent(&vha->hw->pdev->dev,
2774                                  els_plogi->rx_size,
2775                                  els_plogi->els_resp_pyld,
2776                                  els_plogi->els_resp_pyld_dma);
2777}
2778
2779static void qla2x00_els_dcmd2_sp_done(srb_t *sp, int res)
2780{
2781        fc_port_t *fcport = sp->fcport;
2782        struct srb_iocb *lio = &sp->u.iocb_cmd;
2783        struct scsi_qla_host *vha = sp->vha;
2784        struct event_arg ea;
2785        struct qla_work_evt *e;
2786        struct fc_port *conflict_fcport;
2787        port_id_t cid;  /* conflict Nport id */
2788        u32 *fw_status = sp->u.iocb_cmd.u.els_plogi.fw_status;
2789        u16 lid;
2790
2791        ql_dbg(ql_dbg_disc, vha, 0x3072,
2792            "%s ELS done rc %d hdl=%x, portid=%06x %8phC\n",
2793            sp->name, res, sp->handle, fcport->d_id.b24, fcport->port_name);
2794
2795        fcport->flags &= ~(FCF_ASYNC_SENT|FCF_ASYNC_ACTIVE);
2796        del_timer(&sp->u.iocb_cmd.timer);
2797
2798        if (sp->flags & SRB_WAKEUP_ON_COMP)
2799                complete(&lio->u.els_plogi.comp);
2800        else {
2801                switch (fw_status[0]) {
2802                case CS_DATA_UNDERRUN:
2803                case CS_COMPLETE:
2804                        memset(&ea, 0, sizeof(ea));
2805                        ea.fcport = fcport;
2806                        ea.rc = res;
2807                        qla_handle_els_plogi_done(vha, &ea);
2808                        break;
2809
2810                case CS_IOCB_ERROR:
2811                        switch (fw_status[1]) {
2812                        case LSC_SCODE_PORTID_USED:
2813                                lid = fw_status[2] & 0xffff;
2814                                qlt_find_sess_invalidate_other(vha,
2815                                    wwn_to_u64(fcport->port_name),
2816                                    fcport->d_id, lid, &conflict_fcport);
2817                                if (conflict_fcport) {
2818                                        /*
2819                                         * Another fcport shares the same
2820                                         * loop_id & nport id; conflict
2821                                         * fcport needs to finish cleanup
2822                                         * before this fcport can proceed
2823                                         * to login.
2824                                         */
2825                                        conflict_fcport->conflict = fcport;
2826                                        fcport->login_pause = 1;
2827                                        ql_dbg(ql_dbg_disc, vha, 0x20ed,
2828                                            "%s %d %8phC pid %06x inuse with lid %#x post gidpn\n",
2829                                            __func__, __LINE__,
2830                                            fcport->port_name,
2831                                            fcport->d_id.b24, lid);
2832                                } else {
2833                                        ql_dbg(ql_dbg_disc, vha, 0x20ed,
2834                                            "%s %d %8phC pid %06x inuse with lid %#x sched del\n",
2835                                            __func__, __LINE__,
2836                                            fcport->port_name,
2837                                            fcport->d_id.b24, lid);
2838                                        qla2x00_clear_loop_id(fcport);
2839                                        set_bit(lid, vha->hw->loop_id_map);
2840                                        fcport->loop_id = lid;
2841                                        fcport->keep_nport_handle = 0;
2842                                        qlt_schedule_sess_for_deletion(fcport);
2843                                }
2844                                break;
2845
2846                        case LSC_SCODE_NPORT_USED:
2847                                cid.b.domain = (fw_status[2] >> 16) & 0xff;
2848                                cid.b.area   = (fw_status[2] >>  8) & 0xff;
2849                                cid.b.al_pa  = fw_status[2] & 0xff;
2850                                cid.b.rsvd_1 = 0;
2851
2852                                ql_dbg(ql_dbg_disc, vha, 0x20ec,
2853                                    "%s %d %8phC lid %#x in use with pid %06x post gnl\n",
2854                                    __func__, __LINE__, fcport->port_name,
2855                                    fcport->loop_id, cid.b24);
2856                                set_bit(fcport->loop_id,
2857                                    vha->hw->loop_id_map);
2858                                fcport->loop_id = FC_NO_LOOP_ID;
2859                                qla24xx_post_gnl_work(vha, fcport);
2860                                break;
2861
2862                        case LSC_SCODE_NOXCB:
2863                                vha->hw->exch_starvation++;
2864                                if (vha->hw->exch_starvation > 5) {
2865                                        ql_log(ql_log_warn, vha, 0xd046,
2866                                            "Exchange starvation. Resetting RISC\n");
2867                                        vha->hw->exch_starvation = 0;
2868                                        set_bit(ISP_ABORT_NEEDED,
2869                                            &vha->dpc_flags);
2870                                        qla2xxx_wake_dpc(vha);
2871                                }
2872                                /* fall through */
2873                        default:
2874                                ql_dbg(ql_dbg_disc, vha, 0x20eb,
2875                                    "%s %8phC cmd error fw_status 0x%x 0x%x 0x%x\n",
2876                                    __func__, sp->fcport->port_name,
2877                                    fw_status[0], fw_status[1], fw_status[2]);
2878
2879                                fcport->flags &= ~FCF_ASYNC_SENT;
2880                                qla2x00_set_fcport_disc_state(fcport,
2881                                    DSC_LOGIN_FAILED);
2882                                set_bit(RELOGIN_NEEDED, &vha->dpc_flags);
2883                                break;
2884                        }
2885                        break;
2886
2887                default:
2888                        ql_dbg(ql_dbg_disc, vha, 0x20eb,
2889                            "%s %8phC cmd error 2 fw_status 0x%x 0x%x 0x%x\n",
2890                            __func__, sp->fcport->port_name,
2891                            fw_status[0], fw_status[1], fw_status[2]);
2892
2893                        sp->fcport->flags &= ~FCF_ASYNC_SENT;
2894                        qla2x00_set_fcport_disc_state(fcport, DSC_LOGIN_FAILED);
2895                        set_bit(RELOGIN_NEEDED, &vha->dpc_flags);
2896                        break;
2897                }
2898
2899                e = qla2x00_alloc_work(vha, QLA_EVT_UNMAP);
2900                if (!e) {
2901                        struct srb_iocb *elsio = &sp->u.iocb_cmd;
2902
2903                        qla2x00_els_dcmd2_free(vha, &elsio->u.els_plogi);
2904                        sp->free(sp);
2905                        return;
2906                }
2907                e->u.iosb.sp = sp;
2908                qla2x00_post_work(vha, e);
2909        }
2910}
2911
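/*
 * Illustrative sketch, not part of the driver: the LSC_SCODE_NPORT_USED
 * branch above unpacks a 24-bit FC port id (domain.area.al_pa) from the
 * low three bytes of fw_status[2]. The same bit math in standalone form,
 * with hypothetical names:
 */
#include <stdint.h>

struct example_port_id {
	uint8_t domain;		/* bits 23:16 */
	uint8_t area;		/* bits 15:8  */
	uint8_t al_pa;		/* bits 7:0   */
};

static struct example_port_id example_unpack_port_id(uint32_t fw_word)
{
	struct example_port_id id;

	id.domain = (fw_word >> 16) & 0xff;
	id.area   = (fw_word >>  8) & 0xff;
	id.al_pa  = fw_word & 0xff;
	return id;
}

/* e.g. 0x010203 -> domain 0x01, area 0x02, al_pa 0x03 */
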
2912int
2913qla24xx_els_dcmd2_iocb(scsi_qla_host_t *vha, int els_opcode,
2914    fc_port_t *fcport, bool wait)
2915{
2916        srb_t *sp;
2917        struct srb_iocb *elsio = NULL;
2918        struct qla_hw_data *ha = vha->hw;
2919        int rval = QLA_SUCCESS;
2920        void    *ptr, *resp_ptr;
2921
2922        /* Alloc SRB structure */
2923        sp = qla2x00_get_sp(vha, fcport, GFP_KERNEL);
2924        if (!sp) {
 2925                ql_log(ql_log_info, vha, 0x70e6,
 2926                    "SRB allocation failed\n");
2927                fcport->flags &= ~FCF_ASYNC_ACTIVE;
2928                return -ENOMEM;
2929        }
2930
2931        fcport->flags |= FCF_ASYNC_SENT;
2932        qla2x00_set_fcport_disc_state(fcport, DSC_LOGIN_PEND);
2933        elsio = &sp->u.iocb_cmd;
2934        ql_dbg(ql_dbg_io, vha, 0x3073,
2935            "Enter: PLOGI portid=%06x\n", fcport->d_id.b24);
2936
2937        sp->type = SRB_ELS_DCMD;
2938        sp->name = "ELS_DCMD";
2939        sp->fcport = fcport;
2940
2941        elsio->timeout = qla2x00_els_dcmd2_iocb_timeout;
2942        init_completion(&elsio->u.els_plogi.comp);
2943        if (wait)
2944                sp->flags = SRB_WAKEUP_ON_COMP;
2945
2946        qla2x00_init_timer(sp, ELS_DCMD_TIMEOUT + 2);
2947
2948        sp->done = qla2x00_els_dcmd2_sp_done;
2949        elsio->u.els_plogi.tx_size = elsio->u.els_plogi.rx_size = DMA_POOL_SIZE;
2950
2951        ptr = elsio->u.els_plogi.els_plogi_pyld =
2952            dma_alloc_coherent(&ha->pdev->dev, DMA_POOL_SIZE,
2953                &elsio->u.els_plogi.els_plogi_pyld_dma, GFP_KERNEL);
2954
2955        if (!elsio->u.els_plogi.els_plogi_pyld) {
2956                rval = QLA_FUNCTION_FAILED;
2957                goto out;
2958        }
2959
2960        resp_ptr = elsio->u.els_plogi.els_resp_pyld =
2961            dma_alloc_coherent(&ha->pdev->dev, DMA_POOL_SIZE,
2962                &elsio->u.els_plogi.els_resp_pyld_dma, GFP_KERNEL);
2963
2964        if (!elsio->u.els_plogi.els_resp_pyld) {
2965                rval = QLA_FUNCTION_FAILED;
2966                goto out;
2967        }
2968
2969        ql_dbg(ql_dbg_io, vha, 0x3073, "PLOGI %p %p\n", ptr, resp_ptr);
2970
2971        memset(ptr, 0, sizeof(struct els_plogi_payload));
2972        memset(resp_ptr, 0, sizeof(struct els_plogi_payload));
2973        memcpy(elsio->u.els_plogi.els_plogi_pyld->data,
2974            &ha->plogi_els_payld.data,
2975            sizeof(elsio->u.els_plogi.els_plogi_pyld->data));
2976
2977        elsio->u.els_plogi.els_cmd = els_opcode;
2978        elsio->u.els_plogi.els_plogi_pyld->opcode = els_opcode;
2979
2980        ql_dbg(ql_dbg_disc + ql_dbg_buffer, vha, 0x3073, "PLOGI buffer:\n");
2981        ql_dump_buffer(ql_dbg_disc + ql_dbg_buffer, vha, 0x0109,
2982            (uint8_t *)elsio->u.els_plogi.els_plogi_pyld,
2983            sizeof(*elsio->u.els_plogi.els_plogi_pyld));
2984
2985        rval = qla2x00_start_sp(sp);
2986        if (rval != QLA_SUCCESS) {
2987                rval = QLA_FUNCTION_FAILED;
2988        } else {
2989                ql_dbg(ql_dbg_disc, vha, 0x3074,
2990                    "%s PLOGI sent, hdl=%x, loopid=%x, to port_id %06x from port_id %06x\n",
2991                    sp->name, sp->handle, fcport->loop_id,
2992                    fcport->d_id.b24, vha->d_id.b24);
2993        }
2994
2995        if (wait) {
2996                wait_for_completion(&elsio->u.els_plogi.comp);
2997
2998                if (elsio->u.els_plogi.comp_status != CS_COMPLETE)
2999                        rval = QLA_FUNCTION_FAILED;
3000        } else {
3001                goto done;
3002        }
3003
3004out:
3005        fcport->flags &= ~(FCF_ASYNC_SENT | FCF_ASYNC_ACTIVE);
3006        qla2x00_els_dcmd2_free(vha, &elsio->u.els_plogi);
3007        sp->free(sp);
3008done:
3009        return rval;
3010}
3011
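/*
 * Illustrative sketch, not part of the driver: qla24xx_els_dcmd2_iocb()
 * uses the classic goto-unwind pattern -- allocate the PLOGI payload and
 * response buffers, and on any failure jump to one cleanup label whose
 * free routine tolerates partially completed setup (each pointer is
 * NULL-checked). The same shape with plain calloc()/free():
 */
#include <stdlib.h>

struct example_ctx {
	void *tx_buf;
	void *rx_buf;
};

static void example_ctx_free(struct example_ctx *ctx)
{
	free(ctx->tx_buf);	/* free(NULL) is a no-op */
	free(ctx->rx_buf);
}

static int example_ctx_init(struct example_ctx *ctx, size_t size)
{
	ctx->tx_buf = NULL;	/* so cleanup is safe on partial setup */
	ctx->rx_buf = NULL;

	ctx->tx_buf = calloc(1, size);
	if (!ctx->tx_buf)
		goto out;
	ctx->rx_buf = calloc(1, size);
	if (!ctx->rx_buf)
		goto out;
	return 0;		/* success */
out:
	example_ctx_free(ctx);
	return -1;
}
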
3012static void
3013qla24xx_els_iocb(srb_t *sp, struct els_entry_24xx *els_iocb)
3014{
3015        struct bsg_job *bsg_job = sp->u.bsg_job;
3016        struct fc_bsg_request *bsg_request = bsg_job->request;
3017
3018        els_iocb->entry_type = ELS_IOCB_TYPE;
3019        els_iocb->entry_count = 1;
3020        els_iocb->sys_define = 0;
3021        els_iocb->entry_status = 0;
3022        els_iocb->handle = sp->handle;
3023        els_iocb->nport_handle = cpu_to_le16(sp->fcport->loop_id);
3024        els_iocb->tx_dsd_count = cpu_to_le16(bsg_job->request_payload.sg_cnt);
3025        els_iocb->vp_index = sp->vha->vp_idx;
3026        els_iocb->sof_type = EST_SOFI3;
3027        els_iocb->rx_dsd_count = cpu_to_le16(bsg_job->reply_payload.sg_cnt);
3028
3029        els_iocb->opcode =
3030            sp->type == SRB_ELS_CMD_RPT ?
3031            bsg_request->rqst_data.r_els.els_code :
3032            bsg_request->rqst_data.h_els.command_code;
3033        els_iocb->port_id[0] = sp->fcport->d_id.b.al_pa;
3034        els_iocb->port_id[1] = sp->fcport->d_id.b.area;
3035        els_iocb->port_id[2] = sp->fcport->d_id.b.domain;
3036        els_iocb->control_flags = 0;
3037        els_iocb->rx_byte_count =
3038            cpu_to_le32(bsg_job->reply_payload.payload_len);
3039        els_iocb->tx_byte_count =
3040            cpu_to_le32(bsg_job->request_payload.payload_len);
3041
3042        put_unaligned_le64(sg_dma_address(bsg_job->request_payload.sg_list),
3043                           &els_iocb->tx_address);
 3044        els_iocb->tx_len =
 3045            cpu_to_le32(sg_dma_len(bsg_job->request_payload.sg_list));
3046
3047        put_unaligned_le64(sg_dma_address(bsg_job->reply_payload.sg_list),
3048                           &els_iocb->rx_address);
 3049        els_iocb->rx_len =
 3050            cpu_to_le32(sg_dma_len(bsg_job->reply_payload.sg_list));
3051
3052        sp->vha->qla_stats.control_requests++;
3053}
3054
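/*
 * Illustrative sketch, not part of the driver: the ELS IOCB above stores
 * the 24-bit destination port id as three bytes with al_pa first, i.e.
 * little-endian byte order. Packing a numeric d_id the same way
 * (hypothetical helper):
 */
#include <stdint.h>

static void example_pack_port_id(uint8_t port_id[3], uint32_t d_id)
{
	port_id[0] = d_id & 0xff;		/* al_pa  */
	port_id[1] = (d_id >> 8) & 0xff;	/* area   */
	port_id[2] = (d_id >> 16) & 0xff;	/* domain */
}

/* e.g. d_id 0x010203 -> { 0x03, 0x02, 0x01 } */
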
3055static void
3056qla2x00_ct_iocb(srb_t *sp, ms_iocb_entry_t *ct_iocb)
3057{
3058        uint16_t        avail_dsds;
3059        struct dsd64    *cur_dsd;
3060        struct scatterlist *sg;
3061        int index;
3062        uint16_t tot_dsds;
3063        scsi_qla_host_t *vha = sp->vha;
3064        struct qla_hw_data *ha = vha->hw;
3065        struct bsg_job *bsg_job = sp->u.bsg_job;
3066        int entry_count = 1;
3067
3068        memset(ct_iocb, 0, sizeof(ms_iocb_entry_t));
3069        ct_iocb->entry_type = CT_IOCB_TYPE;
3070        ct_iocb->entry_status = 0;
3071        ct_iocb->handle1 = sp->handle;
3072        SET_TARGET_ID(ha, ct_iocb->loop_id, sp->fcport->loop_id);
3073        ct_iocb->status = cpu_to_le16(0);
3074        ct_iocb->control_flags = cpu_to_le16(0);
3075        ct_iocb->timeout = 0;
3076        ct_iocb->cmd_dsd_count =
3077            cpu_to_le16(bsg_job->request_payload.sg_cnt);
3078        ct_iocb->total_dsd_count =
3079            cpu_to_le16(bsg_job->request_payload.sg_cnt + 1);
3080        ct_iocb->req_bytecount =
3081            cpu_to_le32(bsg_job->request_payload.payload_len);
3082        ct_iocb->rsp_bytecount =
3083            cpu_to_le32(bsg_job->reply_payload.payload_len);
3084
3085        put_unaligned_le64(sg_dma_address(bsg_job->request_payload.sg_list),
3086                           &ct_iocb->req_dsd.address);
3087        ct_iocb->req_dsd.length = ct_iocb->req_bytecount;
3088
3089        put_unaligned_le64(sg_dma_address(bsg_job->reply_payload.sg_list),
3090                           &ct_iocb->rsp_dsd.address);
3091        ct_iocb->rsp_dsd.length = ct_iocb->rsp_bytecount;
3092
3093        avail_dsds = 1;
3094        cur_dsd = &ct_iocb->rsp_dsd;
3095        index = 0;
3096        tot_dsds = bsg_job->reply_payload.sg_cnt;
3097
3098        for_each_sg(bsg_job->reply_payload.sg_list, sg, tot_dsds, index) {
3099                cont_a64_entry_t *cont_pkt;
3100
3101                /* Allocate additional continuation packets? */
3102                if (avail_dsds == 0) {
 3103                        /*
 3104                         * Five DSDs are available in the Cont.
 3105                         * Type 1 IOCB.
 3106                         */
3107                        cont_pkt = qla2x00_prep_cont_type1_iocb(vha,
3108                            vha->hw->req_q_map[0]);
3109                        cur_dsd = cont_pkt->dsd;
3110                        avail_dsds = 5;
3111                        entry_count++;
3112                }
3113
3114                append_dsd64(&cur_dsd, sg);
3115                avail_dsds--;
3116        }
3117        ct_iocb->entry_count = entry_count;
3118
3119        sp->vha->qla_stats.control_requests++;
3120}
3121
3122static void
3123qla24xx_ct_iocb(srb_t *sp, struct ct_entry_24xx *ct_iocb)
3124{
3125        uint16_t        avail_dsds;
3126        struct dsd64    *cur_dsd;
3127        struct scatterlist *sg;
3128        int index;
3129        uint16_t cmd_dsds, rsp_dsds;
3130        scsi_qla_host_t *vha = sp->vha;
3131        struct qla_hw_data *ha = vha->hw;
3132        struct bsg_job *bsg_job = sp->u.bsg_job;
3133        int entry_count = 1;
3134        cont_a64_entry_t *cont_pkt = NULL;
3135
3136        ct_iocb->entry_type = CT_IOCB_TYPE;
3137        ct_iocb->entry_status = 0;
3138        ct_iocb->sys_define = 0;
3139        ct_iocb->handle = sp->handle;
3140
3141        ct_iocb->nport_handle = cpu_to_le16(sp->fcport->loop_id);
3142        ct_iocb->vp_index = sp->vha->vp_idx;
3143        ct_iocb->comp_status = cpu_to_le16(0);
3144
3145        cmd_dsds = bsg_job->request_payload.sg_cnt;
3146        rsp_dsds = bsg_job->reply_payload.sg_cnt;
3147
3148        ct_iocb->cmd_dsd_count = cpu_to_le16(cmd_dsds);
3149        ct_iocb->timeout = 0;
3150        ct_iocb->rsp_dsd_count = cpu_to_le16(rsp_dsds);
3151        ct_iocb->cmd_byte_count =
3152            cpu_to_le32(bsg_job->request_payload.payload_len);
3153
3154        avail_dsds = 2;
3155        cur_dsd = ct_iocb->dsd;
3156        index = 0;
3157
3158        for_each_sg(bsg_job->request_payload.sg_list, sg, cmd_dsds, index) {
3159                /* Allocate additional continuation packets? */
3160                if (avail_dsds == 0) {
3161                        /*
3162                         * Five DSDs are available in the Cont.
3163                         * Type 1 IOCB.
3164                         */
3165                        cont_pkt = qla2x00_prep_cont_type1_iocb(
3166                            vha, ha->req_q_map[0]);
3167                        cur_dsd = cont_pkt->dsd;
3168                        avail_dsds = 5;
3169                        entry_count++;
3170                }
3171
3172                append_dsd64(&cur_dsd, sg);
3173                avail_dsds--;
3174        }
3175
3176        index = 0;
3177
3178        for_each_sg(bsg_job->reply_payload.sg_list, sg, rsp_dsds, index) {
3179                /* Allocate additional continuation packets? */
3180                if (avail_dsds == 0) {
 3181                        /*
 3182                         * Five DSDs are available in the Cont.
 3183                         * Type 1 IOCB.
 3184                         */
3185                        cont_pkt = qla2x00_prep_cont_type1_iocb(vha,
3186                            ha->req_q_map[0]);
3187                        cur_dsd = cont_pkt->dsd;
3188                        avail_dsds = 5;
3189                        entry_count++;
3190                }
3191
3192                append_dsd64(&cur_dsd, sg);
3193                avail_dsds--;
3194        }
3195        ct_iocb->entry_count = entry_count;
3196}
3197
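/*
 * Illustrative sketch, not part of the driver: the DSD loops above imply
 * a closed form for entry_count. The base CT pass-through IOCB carries
 * two DSDs and each Continuation Type 1 IOCB carries five, so a
 * hypothetical helper and sanity check look like:
 */
#include <assert.h>
#include <stdint.h>

static uint16_t example_ct_entry_count(uint16_t tot_dsds)
{
	uint16_t iocbs = 1;

	if (tot_dsds > 2)
		iocbs += (tot_dsds - 2 + 4) / 5;	/* ceil((n - 2) / 5) */
	return iocbs;
}

static void example_ct_entry_count_check(void)
{
	assert(example_ct_entry_count(2) == 1);	/* fits in the base IOCB */
	assert(example_ct_entry_count(7) == 2);	/* one continuation */
	assert(example_ct_entry_count(8) == 3);	/* two continuations */
}
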
3198/**
3199 * qla82xx_start_scsi() - Send a SCSI command to the ISP
3200 * @sp: command to send to the ISP
3201 *
3202 * Returns non-zero if a failure occurred, else zero.
3203 */
3204int
3205qla82xx_start_scsi(srb_t *sp)
3206{
3207        int             nseg;
3208        unsigned long   flags;
3209        struct scsi_cmnd *cmd;
3210        uint32_t        *clr_ptr;
3211        uint32_t        handle;
3212        uint16_t        cnt;
3213        uint16_t        req_cnt;
3214        uint16_t        tot_dsds;
3215        struct device_reg_82xx __iomem *reg;
3216        uint32_t dbval;
3217        uint32_t *fcp_dl;
3218        uint8_t additional_cdb_len;
3219        struct ct6_dsd *ctx;
3220        struct scsi_qla_host *vha = sp->vha;
3221        struct qla_hw_data *ha = vha->hw;
3222        struct req_que *req = NULL;
3223        struct rsp_que *rsp = NULL;
3224
3225        /* Setup device pointers. */
3226        reg = &ha->iobase->isp82;
3227        cmd = GET_CMD_SP(sp);
3228        req = vha->req;
3229        rsp = ha->rsp_q_map[0];
3230
3231        /* So we know we haven't pci_map'ed anything yet */
3232        tot_dsds = 0;
3233
3234        dbval = 0x04 | (ha->portnum << 5);
3235
3236        /* Send marker if required */
3237        if (vha->marker_needed != 0) {
3238                if (qla2x00_marker(vha, ha->base_qpair,
3239                        0, 0, MK_SYNC_ALL) != QLA_SUCCESS) {
3240                        ql_log(ql_log_warn, vha, 0x300c,
3241                            "qla2x00_marker failed for cmd=%p.\n", cmd);
3242                        return QLA_FUNCTION_FAILED;
3243                }
3244                vha->marker_needed = 0;
3245        }
3246
3247        /* Acquire ring specific lock */
3248        spin_lock_irqsave(&ha->hardware_lock, flags);
3249
3250        handle = qla2xxx_get_next_handle(req);
3251        if (handle == 0)
3252                goto queuing_error;
3253
3254        /* Map the sg table so we have an accurate count of sg entries needed */
3255        if (scsi_sg_count(cmd)) {
3256                nseg = dma_map_sg(&ha->pdev->dev, scsi_sglist(cmd),
3257                    scsi_sg_count(cmd), cmd->sc_data_direction);
3258                if (unlikely(!nseg))
3259                        goto queuing_error;
3260        } else
3261                nseg = 0;
3262
3263        tot_dsds = nseg;
3264
3265        if (tot_dsds > ql2xshiftctondsd) {
3266                struct cmd_type_6 *cmd_pkt;
3267                uint16_t more_dsd_lists = 0;
3268                struct dsd_dma *dsd_ptr;
3269                uint16_t i;
3270
3271                more_dsd_lists = qla24xx_calc_dsd_lists(tot_dsds);
3272                if ((more_dsd_lists + ha->gbl_dsd_inuse) >= NUM_DSD_CHAIN) {
3273                        ql_dbg(ql_dbg_io, vha, 0x300d,
3274                            "Num of DSD lists %d exceeds %d for cmd=%p.\n",
3275                            more_dsd_lists + ha->gbl_dsd_inuse, NUM_DSD_CHAIN,
3276                            cmd);
3277                        goto queuing_error;
3278                }
3279
3280                if (more_dsd_lists <= ha->gbl_dsd_avail)
3281                        goto sufficient_dsds;
3282                else
3283                        more_dsd_lists -= ha->gbl_dsd_avail;
3284
3285                for (i = 0; i < more_dsd_lists; i++) {
3286                        dsd_ptr = kzalloc(sizeof(struct dsd_dma), GFP_ATOMIC);
3287                        if (!dsd_ptr) {
3288                                ql_log(ql_log_fatal, vha, 0x300e,
3289                                    "Failed to allocate memory for dsd_dma "
3290                                    "for cmd=%p.\n", cmd);
3291                                goto queuing_error;
3292                        }
3293
3294                        dsd_ptr->dsd_addr = dma_pool_alloc(ha->dl_dma_pool,
3295                                GFP_ATOMIC, &dsd_ptr->dsd_list_dma);
3296                        if (!dsd_ptr->dsd_addr) {
3297                                kfree(dsd_ptr);
3298                                ql_log(ql_log_fatal, vha, 0x300f,
3299                                    "Failed to allocate memory for dsd_addr "
3300                                    "for cmd=%p.\n", cmd);
3301                                goto queuing_error;
3302                        }
3303                        list_add_tail(&dsd_ptr->list, &ha->gbl_dsd_list);
3304                        ha->gbl_dsd_avail++;
3305                }
3306
3307sufficient_dsds:
3308                req_cnt = 1;
3309
3310                if (req->cnt < (req_cnt + 2)) {
3311                        cnt = (uint16_t)RD_REG_DWORD_RELAXED(
3312                                &reg->req_q_out[0]);
3313                        if (req->ring_index < cnt)
3314                                req->cnt = cnt - req->ring_index;
3315                        else
3316                                req->cnt = req->length -
3317                                        (req->ring_index - cnt);
3318                        if (req->cnt < (req_cnt + 2))
3319                                goto queuing_error;
3320                }
3321
3322                ctx = sp->u.scmd.ct6_ctx =
3323                    mempool_alloc(ha->ctx_mempool, GFP_ATOMIC);
3324                if (!ctx) {
3325                        ql_log(ql_log_fatal, vha, 0x3010,
3326                            "Failed to allocate ctx for cmd=%p.\n", cmd);
3327                        goto queuing_error;
3328                }
3329
3330                memset(ctx, 0, sizeof(struct ct6_dsd));
3331                ctx->fcp_cmnd = dma_pool_zalloc(ha->fcp_cmnd_dma_pool,
3332                        GFP_ATOMIC, &ctx->fcp_cmnd_dma);
3333                if (!ctx->fcp_cmnd) {
3334                        ql_log(ql_log_fatal, vha, 0x3011,
3335                            "Failed to allocate fcp_cmnd for cmd=%p.\n", cmd);
3336                        goto queuing_error;
3337                }
3338
3339                /* Initialize the DSD list and dma handle */
3340                INIT_LIST_HEAD(&ctx->dsd_list);
3341                ctx->dsd_use_cnt = 0;
3342
3343                if (cmd->cmd_len > 16) {
3344                        additional_cdb_len = cmd->cmd_len - 16;
3345                        if ((cmd->cmd_len % 4) != 0) {
 3346                                /* A SCSI command longer than 16 bytes
 3347                                 * must be a multiple of 4 bytes long.
 3348                                 */
3349                                ql_log(ql_log_warn, vha, 0x3012,
3350                                    "scsi cmd len %d not multiple of 4 "
3351                                    "for cmd=%p.\n", cmd->cmd_len, cmd);
3352                                goto queuing_error_fcp_cmnd;
3353                        }
3354                        ctx->fcp_cmnd_len = 12 + cmd->cmd_len + 4;
3355                } else {
3356                        additional_cdb_len = 0;
3357                        ctx->fcp_cmnd_len = 12 + 16 + 4;
3358                }
3359
3360                cmd_pkt = (struct cmd_type_6 *)req->ring_ptr;
3361                cmd_pkt->handle = MAKE_HANDLE(req->id, handle);
3362
3363                /* Zero out remaining portion of packet. */
 3364                /* Tagged queuing modifier -- default is TSK_SIMPLE (0). */
3365                clr_ptr = (uint32_t *)cmd_pkt + 2;
3366                memset(clr_ptr, 0, REQUEST_ENTRY_SIZE - 8);
3367                cmd_pkt->dseg_count = cpu_to_le16(tot_dsds);
3368
 3369                /* Set NPORT-ID and LUN number. */
3370                cmd_pkt->nport_handle = cpu_to_le16(sp->fcport->loop_id);
3371                cmd_pkt->port_id[0] = sp->fcport->d_id.b.al_pa;
3372                cmd_pkt->port_id[1] = sp->fcport->d_id.b.area;
3373                cmd_pkt->port_id[2] = sp->fcport->d_id.b.domain;
3374                cmd_pkt->vp_index = sp->vha->vp_idx;
3375
3376                /* Build IOCB segments */
3377                if (qla24xx_build_scsi_type_6_iocbs(sp, cmd_pkt, tot_dsds))
3378                        goto queuing_error_fcp_cmnd;
3379
3380                int_to_scsilun(cmd->device->lun, &cmd_pkt->lun);
3381                host_to_fcp_swap((uint8_t *)&cmd_pkt->lun, sizeof(cmd_pkt->lun));
3382
3383                /* build FCP_CMND IU */
3384                int_to_scsilun(cmd->device->lun, &ctx->fcp_cmnd->lun);
3385                ctx->fcp_cmnd->additional_cdb_len = additional_cdb_len;
3386
3387                if (cmd->sc_data_direction == DMA_TO_DEVICE)
3388                        ctx->fcp_cmnd->additional_cdb_len |= 1;
3389                else if (cmd->sc_data_direction == DMA_FROM_DEVICE)
3390                        ctx->fcp_cmnd->additional_cdb_len |= 2;
3391
3392                /* Populate the FCP_PRIO. */
3393                if (ha->flags.fcp_prio_enabled)
3394                        ctx->fcp_cmnd->task_attribute |=
3395                            sp->fcport->fcp_prio << 3;
3396
3397                memcpy(ctx->fcp_cmnd->cdb, cmd->cmnd, cmd->cmd_len);
3398
3399                fcp_dl = (uint32_t *)(ctx->fcp_cmnd->cdb + 16 +
3400                    additional_cdb_len);
3401                *fcp_dl = htonl((uint32_t)scsi_bufflen(cmd));
3402
3403                cmd_pkt->fcp_cmnd_dseg_len = cpu_to_le16(ctx->fcp_cmnd_len);
3404                put_unaligned_le64(ctx->fcp_cmnd_dma,
3405                                   &cmd_pkt->fcp_cmnd_dseg_address);
3406
3407                sp->flags |= SRB_FCP_CMND_DMA_VALID;
3408                cmd_pkt->byte_count = cpu_to_le32((uint32_t)scsi_bufflen(cmd));
3409                /* Set total data segment count. */
3410                cmd_pkt->entry_count = (uint8_t)req_cnt;
3411                /* Specify response queue number where
3412                 * completion should happen
3413                 */
3414                cmd_pkt->entry_status = (uint8_t) rsp->id;
3415        } else {
3416                struct cmd_type_7 *cmd_pkt;
3417
3418                req_cnt = qla24xx_calc_iocbs(vha, tot_dsds);
3419                if (req->cnt < (req_cnt + 2)) {
3420                        cnt = (uint16_t)RD_REG_DWORD_RELAXED(
3421                            &reg->req_q_out[0]);
3422                        if (req->ring_index < cnt)
3423                                req->cnt = cnt - req->ring_index;
3424                        else
3425                                req->cnt = req->length -
3426                                        (req->ring_index - cnt);
3427                }
3428                if (req->cnt < (req_cnt + 2))
3429                        goto queuing_error;
3430
3431                cmd_pkt = (struct cmd_type_7 *)req->ring_ptr;
3432                cmd_pkt->handle = MAKE_HANDLE(req->id, handle);
3433
3434                /* Zero out remaining portion of packet. */
 3435                /* Tagged queuing modifier -- default is TSK_SIMPLE (0). */
3436                clr_ptr = (uint32_t *)cmd_pkt + 2;
3437                memset(clr_ptr, 0, REQUEST_ENTRY_SIZE - 8);
3438                cmd_pkt->dseg_count = cpu_to_le16(tot_dsds);
3439
 3440                /* Set NPORT-ID and LUN number. */
3441                cmd_pkt->nport_handle = cpu_to_le16(sp->fcport->loop_id);
3442                cmd_pkt->port_id[0] = sp->fcport->d_id.b.al_pa;
3443                cmd_pkt->port_id[1] = sp->fcport->d_id.b.area;
3444                cmd_pkt->port_id[2] = sp->fcport->d_id.b.domain;
3445                cmd_pkt->vp_index = sp->vha->vp_idx;
3446
3447                int_to_scsilun(cmd->device->lun, &cmd_pkt->lun);
3448                host_to_fcp_swap((uint8_t *)&cmd_pkt->lun,
3449                    sizeof(cmd_pkt->lun));
3450
3451                /* Populate the FCP_PRIO. */
3452                if (ha->flags.fcp_prio_enabled)
3453                        cmd_pkt->task |= sp->fcport->fcp_prio << 3;
3454
3455                /* Load SCSI command packet. */
3456                memcpy(cmd_pkt->fcp_cdb, cmd->cmnd, cmd->cmd_len);
3457                host_to_fcp_swap(cmd_pkt->fcp_cdb, sizeof(cmd_pkt->fcp_cdb));
3458
3459                cmd_pkt->byte_count = cpu_to_le32((uint32_t)scsi_bufflen(cmd));
3460
3461                /* Build IOCB segments */
3462                qla24xx_build_scsi_iocbs(sp, cmd_pkt, tot_dsds, req);
3463
3464                /* Set total data segment count. */
3465                cmd_pkt->entry_count = (uint8_t)req_cnt;
3466                /* Specify response queue number where
3467                 * completion should happen.
3468                 */
3469                cmd_pkt->entry_status = (uint8_t) rsp->id;
3470
3471        }
 3472        /* Record the command as outstanding. */
3473        req->current_outstanding_cmd = handle;
3474        req->outstanding_cmds[handle] = sp;
3475        sp->handle = handle;
3476        cmd->host_scribble = (unsigned char *)(unsigned long)handle;
3477        req->cnt -= req_cnt;
3478        wmb();
3479
3480        /* Adjust ring index. */
3481        req->ring_index++;
3482        if (req->ring_index == req->length) {
3483                req->ring_index = 0;
3484                req->ring_ptr = req->ring;
3485        } else
3486                req->ring_ptr++;
3487
3488        sp->flags |= SRB_DMA_VALID;
3489
3490        /* Set chip new ring index. */
3491        /* write, read and verify logic */
3492        dbval = dbval | (req->id << 8) | (req->ring_index << 16);
3493        if (ql2xdbwr)
3494                qla82xx_wr_32(ha, (uintptr_t __force)ha->nxdb_wr_ptr, dbval);
3495        else {
3496                WRT_REG_DWORD(ha->nxdb_wr_ptr, dbval);
3497                wmb();
3498                while (RD_REG_DWORD(ha->nxdb_rd_ptr) != dbval) {
3499                        WRT_REG_DWORD(ha->nxdb_wr_ptr, dbval);
3500                        wmb();
3501                }
3502        }
3503
3504        /* Manage unprocessed RIO/ZIO commands in response queue. */
3505        if (vha->flags.process_response_queue &&
3506            rsp->ring_ptr->signature != RESPONSE_PROCESSED)
3507                qla24xx_process_response_queue(vha, rsp);
3508
3509        spin_unlock_irqrestore(&ha->hardware_lock, flags);
3510        return QLA_SUCCESS;
3511
3512queuing_error_fcp_cmnd:
3513        dma_pool_free(ha->fcp_cmnd_dma_pool, ctx->fcp_cmnd, ctx->fcp_cmnd_dma);
3514queuing_error:
3515        if (tot_dsds)
3516                scsi_dma_unmap(cmd);
3517
3518        if (sp->u.scmd.crc_ctx) {
3519                mempool_free(sp->u.scmd.crc_ctx, ha->ctx_mempool);
3520                sp->u.scmd.crc_ctx = NULL;
3521        }
3522        spin_unlock_irqrestore(&ha->hardware_lock, flags);
3523
3524        return QLA_FUNCTION_FAILED;
3525}
3526
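/*
 * Illustrative sketch, not part of the driver: the "room on request
 * queue" checks above recompute a circular ring's free slot count from
 * the consumer index (cnt, read back from the hardware out pointer) and
 * the producer index (ring_index). Standalone, with hypothetical names
 * (the driver additionally keeps two slots of slack via req_cnt + 2):
 */
#include <assert.h>
#include <stdint.h>

static uint16_t example_ring_free_slots(uint16_t length,
					uint16_t producer,
					uint16_t consumer)
{
	if (producer < consumer)
		return consumer - producer;
	return length - (producer - consumer);
}

static void example_ring_free_slots_check(void)
{
	/* 128-entry ring: producer 10, consumer 20 -> 10 free */
	assert(example_ring_free_slots(128, 10, 20) == 10);
	/* wrapped: producer 120, consumer 5 -> 13 free */
	assert(example_ring_free_slots(128, 120, 5) == 13);
}
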
3527static void
3528qla24xx_abort_iocb(srb_t *sp, struct abort_entry_24xx *abt_iocb)
3529{
3530        struct srb_iocb *aio = &sp->u.iocb_cmd;
3531        scsi_qla_host_t *vha = sp->vha;
3532        struct req_que *req = sp->qpair->req;
3533
3534        memset(abt_iocb, 0, sizeof(struct abort_entry_24xx));
3535        abt_iocb->entry_type = ABORT_IOCB_TYPE;
3536        abt_iocb->entry_count = 1;
3537        abt_iocb->handle = cpu_to_le32(MAKE_HANDLE(req->id, sp->handle));
3538        if (sp->fcport) {
3539                abt_iocb->nport_handle = cpu_to_le16(sp->fcport->loop_id);
3540                abt_iocb->port_id[0] = sp->fcport->d_id.b.al_pa;
3541                abt_iocb->port_id[1] = sp->fcport->d_id.b.area;
3542                abt_iocb->port_id[2] = sp->fcport->d_id.b.domain;
3543        }
3544        abt_iocb->handle_to_abort =
3545            cpu_to_le32(MAKE_HANDLE(aio->u.abt.req_que_no,
3546                                    aio->u.abt.cmd_hndl));
3547        abt_iocb->vp_index = vha->vp_idx;
3548        abt_iocb->req_que_no = cpu_to_le16(aio->u.abt.req_que_no);
3549        /* Send the command to the firmware */
3550        wmb();
3551}
3552
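/*
 * Illustrative sketch, not part of the driver: the abort IOCB above packs
 * a request-queue number and a per-queue handle index into one 32-bit
 * completion handle via MAKE_HANDLE(). Assuming the queue id occupies the
 * upper 16 bits (as the (que_no, handle) argument order suggests), a
 * pack/unpack pair would look like:
 */
#include <stdint.h>

static uint32_t example_make_handle(uint16_t que_no, uint16_t index)
{
	return ((uint32_t)que_no << 16) | index;
}

static void example_split_handle(uint32_t handle,
				 uint16_t *que_no, uint16_t *index)
{
	*que_no = handle >> 16;
	*index = handle & 0xffff;
}
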
3553static void
3554qla2x00_mb_iocb(srb_t *sp, struct mbx_24xx_entry *mbx)
3555{
3556        int i, sz;
3557
3558        mbx->entry_type = MBX_IOCB_TYPE;
3559        mbx->handle = sp->handle;
3560        sz = min(ARRAY_SIZE(mbx->mb), ARRAY_SIZE(sp->u.iocb_cmd.u.mbx.out_mb));
3561
3562        for (i = 0; i < sz; i++)
3563                mbx->mb[i] = cpu_to_le16(sp->u.iocb_cmd.u.mbx.out_mb[i]);
3564}
3565
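/*
 * Illustrative sketch, not part of the driver: qla2x00_mb_iocb() bounds
 * its copy by the smaller of the two array sizes, so neither side can
 * overflow if the structures ever diverge. The same guard standalone
 * (hypothetical names; the driver also byte-swaps each element):
 */
#include <stddef.h>
#include <stdint.h>

#define EXAMPLE_MIN(a, b) ((a) < (b) ? (a) : (b))

static void example_copy_mb(uint16_t *dst, size_t dst_n,
			    const uint16_t *src, size_t src_n)
{
	size_t i, n = EXAMPLE_MIN(dst_n, src_n);

	for (i = 0; i < n; i++)
		dst[i] = src[i];
}
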
3566static void
3567qla2x00_ctpthru_cmd_iocb(srb_t *sp, struct ct_entry_24xx *ct_pkt)
3568{
3569        sp->u.iocb_cmd.u.ctarg.iocb = ct_pkt;
3570        qla24xx_prep_ms_iocb(sp->vha, &sp->u.iocb_cmd.u.ctarg);
3571        ct_pkt->handle = sp->handle;
3572}
3573
3574static void qla2x00_send_notify_ack_iocb(srb_t *sp,
3575        struct nack_to_isp *nack)
3576{
3577        struct imm_ntfy_from_isp *ntfy = sp->u.iocb_cmd.u.nack.ntfy;
3578
3579        nack->entry_type = NOTIFY_ACK_TYPE;
3580        nack->entry_count = 1;
3581        nack->ox_id = ntfy->ox_id;
3582
3583        nack->u.isp24.handle = sp->handle;
3584        nack->u.isp24.nport_handle = ntfy->u.isp24.nport_handle;
3585        if (le16_to_cpu(ntfy->u.isp24.status) == IMM_NTFY_ELS) {
3586                nack->u.isp24.flags = ntfy->u.isp24.flags &
3587                        cpu_to_le32(NOTIFY24XX_FLAGS_PUREX_IOCB);
3588        }
3589        nack->u.isp24.srr_rx_id = ntfy->u.isp24.srr_rx_id;
3590        nack->u.isp24.status = ntfy->u.isp24.status;
3591        nack->u.isp24.status_subcode = ntfy->u.isp24.status_subcode;
3592        nack->u.isp24.fw_handle = ntfy->u.isp24.fw_handle;
3593        nack->u.isp24.exchange_address = ntfy->u.isp24.exchange_address;
3594        nack->u.isp24.srr_rel_offs = ntfy->u.isp24.srr_rel_offs;
3595        nack->u.isp24.srr_ui = ntfy->u.isp24.srr_ui;
3596        nack->u.isp24.srr_flags = 0;
3597        nack->u.isp24.srr_reject_code = 0;
3598        nack->u.isp24.srr_reject_code_expl = 0;
3599        nack->u.isp24.vp_index = ntfy->u.isp24.vp_index;
3600}
3601
3602/*
3603 * Build NVME LS request
3604 */
3605static int
3606qla_nvme_ls(srb_t *sp, struct pt_ls4_request *cmd_pkt)
3607{
3608        struct srb_iocb *nvme;
3609        int     rval = QLA_SUCCESS;
3610
3611        nvme = &sp->u.iocb_cmd;
3612        cmd_pkt->entry_type = PT_LS4_REQUEST;
3613        cmd_pkt->entry_count = 1;
3614        cmd_pkt->control_flags = CF_LS4_ORIGINATOR << CF_LS4_SHIFT;
3615
3616        cmd_pkt->timeout = cpu_to_le16(nvme->u.nvme.timeout_sec);
3617        cmd_pkt->nport_handle = cpu_to_le16(sp->fcport->loop_id);
3618        cmd_pkt->vp_index = sp->fcport->vha->vp_idx;
3619
3620        cmd_pkt->tx_dseg_count = 1;
3621        cmd_pkt->tx_byte_count = nvme->u.nvme.cmd_len;
3622        cmd_pkt->dsd[0].length = nvme->u.nvme.cmd_len;
3623        put_unaligned_le64(nvme->u.nvme.cmd_dma, &cmd_pkt->dsd[0].address);
3624
3625        cmd_pkt->rx_dseg_count = 1;
3626        cmd_pkt->rx_byte_count = nvme->u.nvme.rsp_len;
3627        cmd_pkt->dsd[1].length  = nvme->u.nvme.rsp_len;
3628        put_unaligned_le64(nvme->u.nvme.rsp_dma, &cmd_pkt->dsd[1].address);
3629
3630        return rval;
3631}
3632
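/*
 * Illustrative sketch, not part of the driver: put_unaligned_le64() above
 * stores a 64-bit DMA address into the descriptor in little-endian byte
 * order without assuming the destination is 8-byte aligned. A portable
 * byte-by-byte equivalent:
 */
#include <stdint.h>

static void example_put_unaligned_le64(uint64_t val, void *p)
{
	uint8_t *dst = p;
	int i;

	for (i = 0; i < 8; i++)
		dst[i] = (uint8_t)(val >> (8 * i));
}
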
3633static void
3634qla25xx_ctrlvp_iocb(srb_t *sp, struct vp_ctrl_entry_24xx *vce)
3635{
3636        int map, pos;
3637
3638        vce->entry_type = VP_CTRL_IOCB_TYPE;
3639        vce->handle = sp->handle;
3640        vce->entry_count = 1;
3641        vce->command = cpu_to_le16(sp->u.iocb_cmd.u.ctrlvp.cmd);
3642        vce->vp_count = cpu_to_le16(1);
3643
 3644        /*
 3645         * The index map in firmware starts at 1, so decrement the
 3646         * index here; this is safe because index 0 is never used.
 3647         */
3648        map = (sp->u.iocb_cmd.u.ctrlvp.vp_index - 1) / 8;
3649        pos = (sp->u.iocb_cmd.u.ctrlvp.vp_index - 1) & 7;
3650        vce->vp_idx_map[map] |= 1 << pos;
3651}
3652
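/*
 * Illustrative sketch, not part of the driver: the vp_idx_map math above
 * sets one bit per virtual port in a byte array, with firmware numbering
 * starting at 1. Standalone (hypothetical names):
 */
#include <stdint.h>

static void example_set_vp_bit(uint8_t *map, int vp_index)
{
	int byte = (vp_index - 1) / 8;
	int bit = (vp_index - 1) & 7;

	map[byte] |= 1 << bit;
}

/* e.g. vp_index 1 -> map[0] bit 0; vp_index 10 -> map[1] bit 1. */
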
3653static void
3654qla24xx_prlo_iocb(srb_t *sp, struct logio_entry_24xx *logio)
3655{
3656        logio->entry_type = LOGINOUT_PORT_IOCB_TYPE;
3657        logio->control_flags =
3658            cpu_to_le16(LCF_COMMAND_PRLO|LCF_IMPL_PRLO);
3659
3660        logio->nport_handle = cpu_to_le16(sp->fcport->loop_id);
3661        logio->port_id[0] = sp->fcport->d_id.b.al_pa;
3662        logio->port_id[1] = sp->fcport->d_id.b.area;
3663        logio->port_id[2] = sp->fcport->d_id.b.domain;
3664        logio->vp_index = sp->fcport->vha->vp_idx;
3665}
3666
3667int
3668qla2x00_start_sp(srb_t *sp)
3669{
3670        int rval = QLA_SUCCESS;
3671        scsi_qla_host_t *vha = sp->vha;
3672        struct qla_hw_data *ha = vha->hw;
3673        struct qla_qpair *qp = sp->qpair;
3674        void *pkt;
3675        unsigned long flags;
3676
3677        spin_lock_irqsave(qp->qp_lock_ptr, flags);
3678        pkt = __qla2x00_alloc_iocbs(sp->qpair, sp);
3679        if (!pkt) {
3680                rval = EAGAIN;
3681                ql_log(ql_log_warn, vha, 0x700c,
3682                    "qla2x00_alloc_iocbs failed.\n");
3683                goto done;
3684        }
3685
3686        switch (sp->type) {
3687        case SRB_LOGIN_CMD:
3688                IS_FWI2_CAPABLE(ha) ?
3689                    qla24xx_login_iocb(sp, pkt) :
3690                    qla2x00_login_iocb(sp, pkt);
3691                break;
3692        case SRB_PRLI_CMD:
3693                qla24xx_prli_iocb(sp, pkt);
3694                break;
3695        case SRB_LOGOUT_CMD:
3696                IS_FWI2_CAPABLE(ha) ?
3697                    qla24xx_logout_iocb(sp, pkt) :
3698                    qla2x00_logout_iocb(sp, pkt);
3699                break;
3700        case SRB_ELS_CMD_RPT:
3701        case SRB_ELS_CMD_HST:
3702                qla24xx_els_iocb(sp, pkt);
3703                break;
3704        case SRB_CT_CMD:
3705                IS_FWI2_CAPABLE(ha) ?
3706                    qla24xx_ct_iocb(sp, pkt) :
3707                    qla2x00_ct_iocb(sp, pkt);
3708                break;
3709        case SRB_ADISC_CMD:
3710                IS_FWI2_CAPABLE(ha) ?
3711                    qla24xx_adisc_iocb(sp, pkt) :
3712                    qla2x00_adisc_iocb(sp, pkt);
3713                break;
3714        case SRB_TM_CMD:
3715                IS_QLAFX00(ha) ?
3716                    qlafx00_tm_iocb(sp, pkt) :
3717                    qla24xx_tm_iocb(sp, pkt);
3718                break;
3719        case SRB_FXIOCB_DCMD:
3720        case SRB_FXIOCB_BCMD:
3721                qlafx00_fxdisc_iocb(sp, pkt);
3722                break;
3723        case SRB_NVME_LS:
3724                qla_nvme_ls(sp, pkt);
3725                break;
3726        case SRB_ABT_CMD:
3727                IS_QLAFX00(ha) ?
3728                        qlafx00_abort_iocb(sp, pkt) :
3729                        qla24xx_abort_iocb(sp, pkt);
3730                break;
3731        case SRB_ELS_DCMD:
3732                qla24xx_els_logo_iocb(sp, pkt);
3733                break;
3734        case SRB_CT_PTHRU_CMD:
3735                qla2x00_ctpthru_cmd_iocb(sp, pkt);
3736                break;
3737        case SRB_MB_IOCB:
3738                qla2x00_mb_iocb(sp, pkt);
3739                break;
3740        case SRB_NACK_PLOGI:
3741        case SRB_NACK_PRLI:
3742        case SRB_NACK_LOGO:
3743                qla2x00_send_notify_ack_iocb(sp, pkt);
3744                break;
3745        case SRB_CTRL_VP:
3746                qla25xx_ctrlvp_iocb(sp, pkt);
3747                break;
3748        case SRB_PRLO_CMD:
3749                qla24xx_prlo_iocb(sp, pkt);
3750                break;
3751        default:
3752                break;
3753        }
3754
3755        if (sp->start_timer)
3756                add_timer(&sp->u.iocb_cmd.timer);
3757
3758        wmb();
3759        qla2x00_start_iocbs(vha, qp->req);
3760done:
3761        spin_unlock_irqrestore(qp->qp_lock_ptr, flags);
3762        return rval;
3763}
3764
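/*
 * Illustrative sketch, not part of the driver: the dispatch switch in
 * qla2x00_start_sp() could equally be expressed as a table of builder
 * callbacks indexed by SRB type. A minimal standalone shape of that
 * design (hypothetical types and names); note the driver's switch also
 * picks between ISP generations per case, which a flat table would need
 * an extra level of indirection to express:
 */
#include <stddef.h>

enum example_srb_type { EX_SRB_LOGIN, EX_SRB_LOGOUT, EX_SRB_TYPE_MAX };

typedef void (*example_build_fn)(void *sp, void *pkt);

static void example_build_login(void *sp, void *pkt)  { /* fill login IOCB */ }
static void example_build_logout(void *sp, void *pkt) { /* fill logout IOCB */ }

static const example_build_fn example_builders[EX_SRB_TYPE_MAX] = {
	[EX_SRB_LOGIN]  = example_build_login,
	[EX_SRB_LOGOUT] = example_build_logout,
};

static void example_dispatch(enum example_srb_type type, void *sp, void *pkt)
{
	if (type < EX_SRB_TYPE_MAX && example_builders[type])
		example_builders[type](sp, pkt);
}
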
3765static void
3766qla25xx_build_bidir_iocb(srb_t *sp, struct scsi_qla_host *vha,
3767                                struct cmd_bidir *cmd_pkt, uint32_t tot_dsds)
3768{
3769        uint16_t avail_dsds;
3770        struct dsd64 *cur_dsd;
3771        uint32_t req_data_len = 0;
3772        uint32_t rsp_data_len = 0;
3773        struct scatterlist *sg;
3774        int index;
3775        int entry_count = 1;
3776        struct bsg_job *bsg_job = sp->u.bsg_job;
3777
 3778        /* Update entry type to indicate a bidirectional command. */
3779        put_unaligned_le32(COMMAND_BIDIRECTIONAL, &cmd_pkt->entry_type);
3780
 3781        /* Set the transfer direction; for wrap-back both the read and
 3782         * write flags are set. Also set the BD_WRAP_BACK flag; the
 3783         * firmware takes care of assigning DID=SID for outgoing pkts.
 3784         */
3785        cmd_pkt->wr_dseg_count = cpu_to_le16(bsg_job->request_payload.sg_cnt);
3786        cmd_pkt->rd_dseg_count = cpu_to_le16(bsg_job->reply_payload.sg_cnt);
3787        cmd_pkt->control_flags = cpu_to_le16(BD_WRITE_DATA | BD_READ_DATA |
3788                                                        BD_WRAP_BACK);
3789
3790        req_data_len = rsp_data_len = bsg_job->request_payload.payload_len;
3791        cmd_pkt->wr_byte_count = cpu_to_le32(req_data_len);
3792        cmd_pkt->rd_byte_count = cpu_to_le32(rsp_data_len);
3793        cmd_pkt->timeout = cpu_to_le16(qla2x00_get_async_timeout(vha) + 2);
3794
3795        vha->bidi_stats.transfer_bytes += req_data_len;
3796        vha->bidi_stats.io_count++;
3797
3798        vha->qla_stats.output_bytes += req_data_len;
3799        vha->qla_stats.output_requests++;
3800
 3801        /* Only one DSD is available in the bidirectional IOCB; the
 3802         * remaining DSDs are bundled in continuation IOCBs.
 3803         */
3804        avail_dsds = 1;
3805        cur_dsd = &cmd_pkt->fcp_dsd;
3806
3807        index = 0;
3808
3809        for_each_sg(bsg_job->request_payload.sg_list, sg,
3810                                bsg_job->request_payload.sg_cnt, index) {
3811                cont_a64_entry_t *cont_pkt;
3812
3813                /* Allocate additional continuation packets */
3814                if (avail_dsds == 0) {
 3815                        /* A Continuation Type 1 IOCB can accommodate
 3816                         * five DSDs.
 3817                         */
3818                        cont_pkt = qla2x00_prep_cont_type1_iocb(vha, vha->req);
3819                        cur_dsd = cont_pkt->dsd;
3820                        avail_dsds = 5;
3821                        entry_count++;
3822                }
3823                append_dsd64(&cur_dsd, sg);
3824                avail_dsds--;
3825        }
 3826        /* Read-request DSDs always go after the write DSDs, normally in
 3827         * a continuation IOCB. If there is room in the current IOCB the
 3828         * next DSD is added there; otherwise a new continuation IOCB is
 3829         * allocated.
 3830         */
3831        for_each_sg(bsg_job->reply_payload.sg_list, sg,
3832                                bsg_job->reply_payload.sg_cnt, index) {
3833                cont_a64_entry_t *cont_pkt;
3834
3835                /* Allocate additional continuation packets */
3836                if (avail_dsds == 0) {
 3837                        /* A Continuation Type 1 IOCB can accommodate
 3838                         * five DSDs.
 3839                         */
3840                        cont_pkt = qla2x00_prep_cont_type1_iocb(vha, vha->req);
3841                        cur_dsd = cont_pkt->dsd;
3842                        avail_dsds = 5;
3843                        entry_count++;
3844                }
3845                append_dsd64(&cur_dsd, sg);
3846                avail_dsds--;
3847        }
 3848        /* This value must equal the number of IOCBs used for this cmd. */
3849        cmd_pkt->entry_count = entry_count;
3850}
3851
3852int
3853qla2x00_start_bidir(srb_t *sp, struct scsi_qla_host *vha, uint32_t tot_dsds)
3854{
3855
3856        struct qla_hw_data *ha = vha->hw;
3857        unsigned long flags;
3858        uint32_t handle;
3859        uint16_t req_cnt;
3860        uint16_t cnt;
3861        uint32_t *clr_ptr;
3862        struct cmd_bidir *cmd_pkt = NULL;
3863        struct rsp_que *rsp;
3864        struct req_que *req;
 3865        int rval = EXT_STATUS_OK;
 3866
 3867        /* EXT_STATUS_OK and QLA_SUCCESS are both zero. */
3868
3869        rsp = ha->rsp_q_map[0];
3870        req = vha->req;
3871
3872        /* Send marker if required */
3873        if (vha->marker_needed != 0) {
3874                if (qla2x00_marker(vha, ha->base_qpair,
3875                        0, 0, MK_SYNC_ALL) != QLA_SUCCESS)
3876                        return EXT_STATUS_MAILBOX;
3877                vha->marker_needed = 0;
3878        }
3879
3880        /* Acquire ring specific lock */
3881        spin_lock_irqsave(&ha->hardware_lock, flags);
3882
3883        handle = qla2xxx_get_next_handle(req);
3884        if (handle == 0) {
3885                rval = EXT_STATUS_BUSY;
3886                goto queuing_error;
3887        }
3888
3889        /* Calculate number of IOCB required */
3890        req_cnt = qla24xx_calc_iocbs(vha, tot_dsds);
3891
3892        /* Check for room on request queue. */
3893        if (req->cnt < req_cnt + 2) {
3894                cnt = IS_SHADOW_REG_CAPABLE(ha) ? *req->out_ptr :
3895                    RD_REG_DWORD_RELAXED(req->req_q_out);
3896                if  (req->ring_index < cnt)
3897                        req->cnt = cnt - req->ring_index;
3898                else
3899                        req->cnt = req->length -
3900                                (req->ring_index - cnt);
3901        }
3902        if (req->cnt < req_cnt + 2) {
3903                rval = EXT_STATUS_BUSY;
3904                goto queuing_error;
3905        }
3906
3907        cmd_pkt = (struct cmd_bidir *)req->ring_ptr;
3908        cmd_pkt->handle = MAKE_HANDLE(req->id, handle);
3909
3910        /* Zero out remaining portion of packet. */
 3911        /* Tagged queuing modifier -- default is TSK_SIMPLE (0). */
3912        clr_ptr = (uint32_t *)cmd_pkt + 2;
3913        memset(clr_ptr, 0, REQUEST_ENTRY_SIZE - 8);
3914
 3915        /* Set NPORT-ID (of the vha). */
3916        cmd_pkt->nport_handle = cpu_to_le16(vha->self_login_loop_id);
3917        cmd_pkt->port_id[0] = vha->d_id.b.al_pa;
3918        cmd_pkt->port_id[1] = vha->d_id.b.area;
3919        cmd_pkt->port_id[2] = vha->d_id.b.domain;
3920
3921        qla25xx_build_bidir_iocb(sp, vha, cmd_pkt, tot_dsds);
3922        cmd_pkt->entry_status = (uint8_t) rsp->id;
 3923        /* Record the command as outstanding. */
3924        req->current_outstanding_cmd = handle;
3925        req->outstanding_cmds[handle] = sp;
3926        sp->handle = handle;
3927        req->cnt -= req_cnt;
3928
3929        /* Send the command to the firmware */
3930        wmb();
3931        qla2x00_start_iocbs(vha, req);
3932queuing_error:
3933        spin_unlock_irqrestore(&ha->hardware_lock, flags);
3934        return rval;
3935}
3936