linux/drivers/scsi/qla2xxx/qla_iocb.c
/*
 * QLogic Fibre Channel HBA Driver
 * Copyright (c)  2003-2014 QLogic Corporation
 *
 * See LICENSE.qla2xxx for copyright and licensing details.
 */
#include "qla_def.h"
#include "qla_target.h"

#include <linux/blkdev.h>
#include <linux/delay.h>

#include <scsi/scsi_tcq.h>

static void qla25xx_set_que(srb_t *, struct rsp_que **);

/**
 * qla2x00_get_cmd_direction() - Determine control_flag data direction.
 * @sp: SRB command to process
 *
 * Returns the proper CF_* direction based on the CDB.
 */
static inline uint16_t
qla2x00_get_cmd_direction(srb_t *sp)
{
        uint16_t cflags;
        struct scsi_cmnd *cmd = GET_CMD_SP(sp);
        struct scsi_qla_host *vha = sp->fcport->vha;

        cflags = 0;

        /* Set transfer direction */
        if (cmd->sc_data_direction == DMA_TO_DEVICE) {
                cflags = CF_WRITE;
                vha->qla_stats.output_bytes += scsi_bufflen(cmd);
                vha->qla_stats.output_requests++;
        } else if (cmd->sc_data_direction == DMA_FROM_DEVICE) {
                cflags = CF_READ;
                vha->qla_stats.input_bytes += scsi_bufflen(cmd);
                vha->qla_stats.input_requests++;
        }
        return (cflags);
}

/**
 * qla2x00_calc_iocbs_32() - Determine number of Command Type 2 and
 * Continuation Type 0 IOCBs to allocate.
 *
 * @dsds: number of data segment descriptors needed
 *
 * Returns the number of IOCB entries needed to store @dsds.
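 *
 * For example, with three DSDs in the command IOCB and seven per
 * continuation, @dsds = 10 fits in two entries while @dsds = 11 needs
 * three.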
 */
uint16_t
qla2x00_calc_iocbs_32(uint16_t dsds)
{
        uint16_t iocbs;

        iocbs = 1;
        if (dsds > 3) {
                iocbs += (dsds - 3) / 7;
                if ((dsds - 3) % 7)
                        iocbs++;
        }
        return (iocbs);
}

/**
 * qla2x00_calc_iocbs_64() - Determine number of Command Type 3 and
 * Continuation Type 1 IOCBs to allocate.
 *
 * @dsds: number of data segment descriptors needed
 *
 * Returns the number of IOCB entries needed to store @dsds.
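 *
 * For example, with two DSDs in the command IOCB and five per
 * continuation, @dsds = 7 fits in two entries while @dsds = 8 needs
 * three.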
 */
uint16_t
qla2x00_calc_iocbs_64(uint16_t dsds)
{
        uint16_t iocbs;

        iocbs = 1;
        if (dsds > 2) {
                iocbs += (dsds - 2) / 5;
                if ((dsds - 2) % 5)
                        iocbs++;
        }
        return (iocbs);
}

/**
 * qla2x00_prep_cont_type0_iocb() - Initialize a Continuation Type 0 IOCB.
 * @vha: HA context
 *
 * Returns a pointer to the Continuation Type 0 IOCB packet.
 */
static inline cont_entry_t *
qla2x00_prep_cont_type0_iocb(struct scsi_qla_host *vha)
{
        cont_entry_t *cont_pkt;
        struct req_que *req = vha->req;

        /* Adjust ring index. */
        req->ring_index++;
        if (req->ring_index == req->length) {
                req->ring_index = 0;
                req->ring_ptr = req->ring;
        } else {
                req->ring_ptr++;
        }

        cont_pkt = (cont_entry_t *)req->ring_ptr;

        /* Load packet defaults. */
        *((uint32_t *)(&cont_pkt->entry_type)) = cpu_to_le32(CONTINUE_TYPE);

        return (cont_pkt);
}

/**
 * qla2x00_prep_cont_type1_iocb() - Initialize a Continuation Type 1 IOCB.
 * @vha: HA context
 * @req: request queue
 *
 * Returns a pointer to the Continuation Type 1 IOCB packet.
 */
static inline cont_a64_entry_t *
qla2x00_prep_cont_type1_iocb(scsi_qla_host_t *vha, struct req_que *req)
{
        cont_a64_entry_t *cont_pkt;

        /* Adjust ring index. */
        req->ring_index++;
        if (req->ring_index == req->length) {
                req->ring_index = 0;
                req->ring_ptr = req->ring;
        } else {
                req->ring_ptr++;
        }

        cont_pkt = (cont_a64_entry_t *)req->ring_ptr;

        /* Load packet defaults. */
        *((uint32_t *)(&cont_pkt->entry_type)) = IS_QLAFX00(vha->hw) ?
            cpu_to_le32(CONTINUE_A64_TYPE_FX00) :
            cpu_to_le32(CONTINUE_A64_TYPE);

        return (cont_pkt);
}

static inline int
qla24xx_configure_prot_mode(srb_t *sp, uint16_t *fw_prot_opts)
{
        struct scsi_cmnd *cmd = GET_CMD_SP(sp);
        uint8_t guard = scsi_host_get_guard(cmd->device->host);

        /* We always use DIF bundling for best performance */
        *fw_prot_opts = 0;

        /* Translate SCSI opcode to a protection opcode */
        switch (scsi_get_prot_op(cmd)) {
        case SCSI_PROT_READ_STRIP:
        case SCSI_PROT_WRITE_STRIP:
                *fw_prot_opts |= PO_MODE_DIF_REMOVE;
                break;
        case SCSI_PROT_READ_INSERT:
        case SCSI_PROT_WRITE_INSERT:
                *fw_prot_opts |= PO_MODE_DIF_INSERT;
                break;
        case SCSI_PROT_READ_PASS:
        case SCSI_PROT_WRITE_PASS:
                if (guard & SHOST_DIX_GUARD_IP)
                        *fw_prot_opts |= PO_MODE_DIF_TCP_CKSUM;
                else
                        *fw_prot_opts |= PO_MODE_DIF_PASS;
                break;
        default:        /* Normal Request */
                *fw_prot_opts |= PO_MODE_DIF_PASS;
                break;
        }

        return scsi_prot_sg_count(cmd);
}

/**
 * qla2x00_build_scsi_iocbs_32() - Build IOCB command utilizing 32bit
 * capable IOCB types.
 *
 * @sp: SRB command to process
 * @cmd_pkt: Command type 2 IOCB
 * @tot_dsds: Total number of segments to transfer
 */
void qla2x00_build_scsi_iocbs_32(srb_t *sp, cmd_entry_t *cmd_pkt,
    uint16_t tot_dsds)
{
        uint16_t        avail_dsds;
        uint32_t        *cur_dsd;
        scsi_qla_host_t *vha;
        struct scsi_cmnd *cmd;
        struct scatterlist *sg;
        int i;

        cmd = GET_CMD_SP(sp);

        /* Update entry type to indicate Command Type 2 IOCB */
        *((uint32_t *)(&cmd_pkt->entry_type)) =
            cpu_to_le32(COMMAND_TYPE);

        /* No data transfer */
        if (!scsi_bufflen(cmd) || cmd->sc_data_direction == DMA_NONE) {
                cmd_pkt->byte_count = cpu_to_le32(0);
                return;
        }

        vha = sp->fcport->vha;
        cmd_pkt->control_flags |= cpu_to_le16(qla2x00_get_cmd_direction(sp));

        /* Three DSDs are available in the Command Type 2 IOCB */
        avail_dsds = 3;
        cur_dsd = (uint32_t *)&cmd_pkt->dseg_0_address;

        /* Load data segments */
        scsi_for_each_sg(cmd, sg, tot_dsds, i) {
                cont_entry_t *cont_pkt;

                /* Allocate additional continuation packets? */
                if (avail_dsds == 0) {
                        /*
                         * Seven DSDs are available in the Continuation
                         * Type 0 IOCB.
                         */
                        cont_pkt = qla2x00_prep_cont_type0_iocb(vha);
                        cur_dsd = (uint32_t *)&cont_pkt->dseg_0_address;
                        avail_dsds = 7;
                }

                *cur_dsd++ = cpu_to_le32(sg_dma_address(sg));
                *cur_dsd++ = cpu_to_le32(sg_dma_len(sg));
                avail_dsds--;
        }
}

/**
 * qla2x00_build_scsi_iocbs_64() - Build IOCB command utilizing 64bit
 * capable IOCB types.
 *
 * @sp: SRB command to process
 * @cmd_pkt: Command type 3 IOCB
 * @tot_dsds: Total number of segments to transfer
 */
void qla2x00_build_scsi_iocbs_64(srb_t *sp, cmd_entry_t *cmd_pkt,
    uint16_t tot_dsds)
{
        uint16_t        avail_dsds;
        uint32_t        *cur_dsd;
        scsi_qla_host_t *vha;
        struct scsi_cmnd *cmd;
        struct scatterlist *sg;
        int i;

        cmd = GET_CMD_SP(sp);

        /* Update entry type to indicate Command Type 3 IOCB */
        *((uint32_t *)(&cmd_pkt->entry_type)) = cpu_to_le32(COMMAND_A64_TYPE);

        /* No data transfer */
        if (!scsi_bufflen(cmd) || cmd->sc_data_direction == DMA_NONE) {
                cmd_pkt->byte_count = cpu_to_le32(0);
                return;
        }

        vha = sp->fcport->vha;
        cmd_pkt->control_flags |= cpu_to_le16(qla2x00_get_cmd_direction(sp));

        /* Two DSDs are available in the Command Type 3 IOCB */
        avail_dsds = 2;
        cur_dsd = (uint32_t *)&cmd_pkt->dseg_0_address;

        /* Load data segments */
        scsi_for_each_sg(cmd, sg, tot_dsds, i) {
                dma_addr_t      sle_dma;
                cont_a64_entry_t *cont_pkt;

                /* Allocate additional continuation packets? */
                if (avail_dsds == 0) {
                        /*
                         * Five DSDs are available in the Continuation
                         * Type 1 IOCB.
                         */
                        cont_pkt = qla2x00_prep_cont_type1_iocb(vha, vha->req);
                        cur_dsd = (uint32_t *)cont_pkt->dseg_0_address;
                        avail_dsds = 5;
                }

                sle_dma = sg_dma_address(sg);
                *cur_dsd++ = cpu_to_le32(LSD(sle_dma));
                *cur_dsd++ = cpu_to_le32(MSD(sle_dma));
                *cur_dsd++ = cpu_to_le32(sg_dma_len(sg));
                avail_dsds--;
        }
}

/**
 * qla2x00_start_scsi() - Send a SCSI command to the ISP
 * @sp: command to send to the ISP
 *
 * Returns non-zero if a failure occurred, else zero.
 */
int
qla2x00_start_scsi(srb_t *sp)
{
        int             nseg;
        unsigned long   flags;
        scsi_qla_host_t *vha;
        struct scsi_cmnd *cmd;
        uint32_t        *clr_ptr;
        uint32_t        index;
        uint32_t        handle;
        cmd_entry_t     *cmd_pkt;
        uint16_t        cnt;
        uint16_t        req_cnt;
        uint16_t        tot_dsds;
        struct device_reg_2xxx __iomem *reg;
        struct qla_hw_data *ha;
        struct req_que *req;
        struct rsp_que *rsp;

        /* Setup device pointers. */
        vha = sp->fcport->vha;
        ha = vha->hw;
        reg = &ha->iobase->isp;
        cmd = GET_CMD_SP(sp);
        req = ha->req_q_map[0];
        rsp = ha->rsp_q_map[0];

        /* So we know we haven't pci_map'ed anything yet */
        tot_dsds = 0;

        /* Send marker if required */
        if (vha->marker_needed != 0) {
                if (qla2x00_marker(vha, req, rsp, 0, 0, MK_SYNC_ALL) !=
                    QLA_SUCCESS) {
                        return (QLA_FUNCTION_FAILED);
                }
                vha->marker_needed = 0;
        }

        /* Acquire ring specific lock */
        spin_lock_irqsave(&ha->hardware_lock, flags);

        /* Check for room in outstanding command list. */
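        /*
         * Handle 0 is never handed out: the search runs circularly,
         * starting just past the handle used last time.
         */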
        handle = req->current_outstanding_cmd;
        for (index = 1; index < req->num_outstanding_cmds; index++) {
                handle++;
                if (handle == req->num_outstanding_cmds)
                        handle = 1;
                if (!req->outstanding_cmds[handle])
                        break;
        }
        if (index == req->num_outstanding_cmds)
                goto queuing_error;

        /* Map the sg table so we have an accurate count of sg entries needed */
        if (scsi_sg_count(cmd)) {
                nseg = dma_map_sg(&ha->pdev->dev, scsi_sglist(cmd),
                    scsi_sg_count(cmd), cmd->sc_data_direction);
                if (unlikely(!nseg))
                        goto queuing_error;
        } else
                nseg = 0;

        tot_dsds = nseg;

        /* Calculate the number of request entries needed. */
        req_cnt = ha->isp_ops->calc_req_entries(tot_dsds);
        if (req->cnt < (req_cnt + 2)) {
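                /*
                 * The cached free-entry count looks too small: refresh it
                 * from the chip's request-queue-out pointer before giving
                 * up.
                 */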
                cnt = RD_REG_WORD_RELAXED(ISP_REQ_Q_OUT(ha, reg));
                if (req->ring_index < cnt)
                        req->cnt = cnt - req->ring_index;
                else
                        req->cnt = req->length -
                            (req->ring_index - cnt);
                /* If still no headroom then bail out */
                if (req->cnt < (req_cnt + 2))
                        goto queuing_error;
        }

        /* Build command packet */
        req->current_outstanding_cmd = handle;
        req->outstanding_cmds[handle] = sp;
        sp->handle = handle;
        cmd->host_scribble = (unsigned char *)(unsigned long)handle;
        req->cnt -= req_cnt;

        cmd_pkt = (cmd_entry_t *)req->ring_ptr;
        cmd_pkt->handle = handle;
        /* Zero out remaining portion of packet. */
        clr_ptr = (uint32_t *)cmd_pkt + 2;
        memset(clr_ptr, 0, REQUEST_ENTRY_SIZE - 8);
        cmd_pkt->dseg_count = cpu_to_le16(tot_dsds);

        /* Set target ID and LUN number */
        SET_TARGET_ID(ha, cmd_pkt->target, sp->fcport->loop_id);
        cmd_pkt->lun = cpu_to_le16(cmd->device->lun);
        cmd_pkt->control_flags = cpu_to_le16(CF_SIMPLE_TAG);

        /* Load SCSI command packet. */
        memcpy(cmd_pkt->scsi_cdb, cmd->cmnd, cmd->cmd_len);
        cmd_pkt->byte_count = cpu_to_le32((uint32_t)scsi_bufflen(cmd));

        /* Build IOCB segments */
        ha->isp_ops->build_iocbs(sp, cmd_pkt, tot_dsds);

        /* Set total data segment count. */
        cmd_pkt->entry_count = (uint8_t)req_cnt;
        wmb();

        /* Adjust ring index. */
        req->ring_index++;
        if (req->ring_index == req->length) {
                req->ring_index = 0;
                req->ring_ptr = req->ring;
        } else
                req->ring_ptr++;

        sp->flags |= SRB_DMA_VALID;

        /* Set chip new ring index. */
        WRT_REG_WORD(ISP_REQ_Q_IN(ha, reg), req->ring_index);
        RD_REG_WORD_RELAXED(ISP_REQ_Q_IN(ha, reg));     /* PCI Posting. */

        /* Manage unprocessed RIO/ZIO commands in response queue. */
        if (vha->flags.process_response_queue &&
            rsp->ring_ptr->signature != RESPONSE_PROCESSED)
                qla2x00_process_response_queue(rsp);

        spin_unlock_irqrestore(&ha->hardware_lock, flags);
        return (QLA_SUCCESS);

queuing_error:
        if (tot_dsds)
                scsi_dma_unmap(cmd);

        spin_unlock_irqrestore(&ha->hardware_lock, flags);

        return (QLA_FUNCTION_FAILED);
}

/**
 * qla2x00_start_iocbs() - Execute the IOCB command
 * @vha: HA context
 * @req: request queue
 */
void
qla2x00_start_iocbs(struct scsi_qla_host *vha, struct req_que *req)
{
        struct qla_hw_data *ha = vha->hw;
        device_reg_t *reg = ISP_QUE_REG(ha, req->id);

        if (IS_P3P_TYPE(ha)) {
                qla82xx_start_iocbs(vha);
        } else {
                /* Adjust ring index. */
                req->ring_index++;
                if (req->ring_index == req->length) {
                        req->ring_index = 0;
                        req->ring_ptr = req->ring;
                } else
                        req->ring_ptr++;

                /* Set chip new ring index. */
                if (ha->mqenable || IS_QLA83XX(ha) || IS_QLA27XX(ha)) {
                        WRT_REG_DWORD(req->req_q_in, req->ring_index);
                        RD_REG_DWORD_RELAXED(&ha->iobase->isp24.hccr);
                } else if (IS_QLAFX00(ha)) {
                        WRT_REG_DWORD(&reg->ispfx00.req_q_in, req->ring_index);
                        RD_REG_DWORD_RELAXED(&reg->ispfx00.req_q_in);
                        QLAFX00_SET_HST_INTR(ha, ha->rqstq_intr_code);
                } else if (IS_FWI2_CAPABLE(ha)) {
                        WRT_REG_DWORD(&reg->isp24.req_q_in, req->ring_index);
                        RD_REG_DWORD_RELAXED(&reg->isp24.req_q_in);
                } else {
                        WRT_REG_WORD(ISP_REQ_Q_IN(ha, &reg->isp),
                                req->ring_index);
                        RD_REG_WORD_RELAXED(ISP_REQ_Q_IN(ha, &reg->isp));
                }
        }
}


/**
 * qla2x00_marker() - Send a marker IOCB to the firmware.
 * @vha: HA context
 * @req: request queue
 * @rsp: response queue
 * @loop_id: loop ID
 * @lun: LUN
 * @type: marker modifier
 *
 * Can be called from both normal and interrupt context.
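 * __qla2x00_marker() must be called with the hardware lock held;
 * qla2x00_marker() is the wrapper that acquires it.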
 *
 * Returns non-zero if a failure occurred, else zero.
 */
static int
__qla2x00_marker(struct scsi_qla_host *vha, struct req_que *req,
                        struct rsp_que *rsp, uint16_t loop_id,
                        uint64_t lun, uint8_t type)
{
        mrk_entry_t *mrk;
        struct mrk_entry_24xx *mrk24 = NULL;

        struct qla_hw_data *ha = vha->hw;
        scsi_qla_host_t *base_vha = pci_get_drvdata(ha->pdev);

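        /* Markers are always queued on the base request queue. */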
        req = ha->req_q_map[0];
        mrk = (mrk_entry_t *)qla2x00_alloc_iocbs(vha, NULL);
        if (mrk == NULL) {
                ql_log(ql_log_warn, base_vha, 0x3026,
                    "Failed to allocate Marker IOCB.\n");

                return (QLA_FUNCTION_FAILED);
        }

        mrk->entry_type = MARKER_TYPE;
        mrk->modifier = type;
        if (type != MK_SYNC_ALL) {
                if (IS_FWI2_CAPABLE(ha)) {
                        mrk24 = (struct mrk_entry_24xx *) mrk;
                        mrk24->nport_handle = cpu_to_le16(loop_id);
                        int_to_scsilun(lun, (struct scsi_lun *)&mrk24->lun);
                        host_to_fcp_swap(mrk24->lun, sizeof(mrk24->lun));
                        mrk24->vp_index = vha->vp_idx;
                        mrk24->handle = MAKE_HANDLE(req->id, mrk24->handle);
                } else {
                        SET_TARGET_ID(ha, mrk->target, loop_id);
                        mrk->lun = cpu_to_le16((uint16_t)lun);
                }
        }
        wmb();

        qla2x00_start_iocbs(vha, req);

        return (QLA_SUCCESS);
}

int
qla2x00_marker(struct scsi_qla_host *vha, struct req_que *req,
                struct rsp_que *rsp, uint16_t loop_id, uint64_t lun,
                uint8_t type)
{
        int ret;
        unsigned long flags = 0;

        spin_lock_irqsave(&vha->hw->hardware_lock, flags);
        ret = __qla2x00_marker(vha, req, rsp, loop_id, lun, type);
        spin_unlock_irqrestore(&vha->hw->hardware_lock, flags);

        return (ret);
}

/*
 * qla2x00_issue_marker
 *
 * Issue a marker.
 * Caller CAN have the hardware lock held, as specified by the ha_locked
 * parameter.  Might release it, then reacquire.
 */
int qla2x00_issue_marker(scsi_qla_host_t *vha, int ha_locked)
{
        if (ha_locked) {
                if (__qla2x00_marker(vha, vha->req, vha->req->rsp, 0, 0,
                                        MK_SYNC_ALL) != QLA_SUCCESS)
                        return QLA_FUNCTION_FAILED;
        } else {
                if (qla2x00_marker(vha, vha->req, vha->req->rsp, 0, 0,
                                        MK_SYNC_ALL) != QLA_SUCCESS)
                        return QLA_FUNCTION_FAILED;
        }
        vha->marker_needed = 0;

        return QLA_SUCCESS;
}

static inline int
qla24xx_build_scsi_type_6_iocbs(srb_t *sp, struct cmd_type_6 *cmd_pkt,
        uint16_t tot_dsds)
{
        uint32_t *cur_dsd = NULL;
        scsi_qla_host_t *vha;
        struct qla_hw_data *ha;
        struct scsi_cmnd *cmd;
        struct scatterlist *cur_seg;
        uint32_t *dsd_seg;
        void *next_dsd;
        uint8_t avail_dsds;
        uint8_t first_iocb = 1;
        uint32_t dsd_list_len;
        struct dsd_dma *dsd_ptr;
        struct ct6_dsd *ctx;

        cmd = GET_CMD_SP(sp);

        /* Update entry type to indicate Command Type 6 IOCB */
        *((uint32_t *)(&cmd_pkt->entry_type)) = cpu_to_le32(COMMAND_TYPE_6);

        /* No data transfer */
        if (!scsi_bufflen(cmd) || cmd->sc_data_direction == DMA_NONE) {
                cmd_pkt->byte_count = cpu_to_le32(0);
                return 0;
        }

        vha = sp->fcport->vha;
        ha = vha->hw;

        /* Set transfer direction */
        if (cmd->sc_data_direction == DMA_TO_DEVICE) {
                cmd_pkt->control_flags = cpu_to_le16(CF_WRITE_DATA);
                vha->qla_stats.output_bytes += scsi_bufflen(cmd);
                vha->qla_stats.output_requests++;
        } else if (cmd->sc_data_direction == DMA_FROM_DEVICE) {
                cmd_pkt->control_flags = cpu_to_le16(CF_READ_DATA);
                vha->qla_stats.input_bytes += scsi_bufflen(cmd);
                vha->qla_stats.input_requests++;
        }

        cur_seg = scsi_sglist(cmd);
        ctx = GET_CMD_CTX_SP(sp);

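        /* Carve DSD lists from the adapter-wide pre-allocated pool. */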
        while (tot_dsds) {
                avail_dsds = (tot_dsds > QLA_DSDS_PER_IOCB) ?
                    QLA_DSDS_PER_IOCB : tot_dsds;
                tot_dsds -= avail_dsds;
                dsd_list_len = (avail_dsds + 1) * QLA_DSD_SIZE;

                dsd_ptr = list_first_entry(&ha->gbl_dsd_list,
                    struct dsd_dma, list);
                next_dsd = dsd_ptr->dsd_addr;
                list_del(&dsd_ptr->list);
                ha->gbl_dsd_avail--;
                list_add_tail(&dsd_ptr->list, &ctx->dsd_list);
                ctx->dsd_use_cnt++;
                ha->gbl_dsd_inuse++;

                if (first_iocb) {
                        first_iocb = 0;
                        dsd_seg = (uint32_t *)&cmd_pkt->fcp_data_dseg_address;
                        *dsd_seg++ = cpu_to_le32(LSD(dsd_ptr->dsd_list_dma));
                        *dsd_seg++ = cpu_to_le32(MSD(dsd_ptr->dsd_list_dma));
                        cmd_pkt->fcp_data_dseg_len = cpu_to_le32(dsd_list_len);
                } else {
                        *cur_dsd++ = cpu_to_le32(LSD(dsd_ptr->dsd_list_dma));
                        *cur_dsd++ = cpu_to_le32(MSD(dsd_ptr->dsd_list_dma));
                        *cur_dsd++ = cpu_to_le32(dsd_list_len);
                }
                cur_dsd = (uint32_t *)next_dsd;
                while (avail_dsds) {
                        dma_addr_t      sle_dma;

                        sle_dma = sg_dma_address(cur_seg);
                        *cur_dsd++ = cpu_to_le32(LSD(sle_dma));
                        *cur_dsd++ = cpu_to_le32(MSD(sle_dma));
                        *cur_dsd++ = cpu_to_le32(sg_dma_len(cur_seg));
                        cur_seg = sg_next(cur_seg);
                        avail_dsds--;
                }
        }

        /* Null termination */
        *cur_dsd++ = 0;
        *cur_dsd++ = 0;
        *cur_dsd++ = 0;
        cmd_pkt->control_flags |= CF_DATA_SEG_DESCR_ENABLE;
        return 0;
}

/*
 * qla24xx_calc_dsd_lists() - Determine number of DSD lists required
 * for Command Type 6.
 *
 * @dsds: number of data segment descriptors needed
 *
 * Returns the number of DSD lists needed to store @dsds.
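 *
 * For example, @dsds = 2 * QLA_DSDS_PER_IOCB packs into exactly two
 * lists, while one more DSD adds a third.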
 */
static inline uint16_t
qla24xx_calc_dsd_lists(uint16_t dsds)
{
        uint16_t dsd_lists = 0;

        dsd_lists = (dsds / QLA_DSDS_PER_IOCB);
        if (dsds % QLA_DSDS_PER_IOCB)
                dsd_lists++;
        return dsd_lists;
}

/**
 * qla24xx_build_scsi_iocbs() - Build IOCB command utilizing Command Type 7
 * IOCB types.
 *
 * @sp: SRB command to process
 * @cmd_pkt: Command type 7 IOCB
 * @tot_dsds: Total number of segments to transfer
 */
static inline void
qla24xx_build_scsi_iocbs(srb_t *sp, struct cmd_type_7 *cmd_pkt,
    uint16_t tot_dsds)
{
        uint16_t        avail_dsds;
        uint32_t        *cur_dsd;
        scsi_qla_host_t *vha;
        struct scsi_cmnd *cmd;
        struct scatterlist *sg;
        int i;

        cmd = GET_CMD_SP(sp);

        /* Update entry type to indicate Command Type 7 IOCB */
        *((uint32_t *)(&cmd_pkt->entry_type)) = cpu_to_le32(COMMAND_TYPE_7);

        /* No data transfer */
        if (!scsi_bufflen(cmd) || cmd->sc_data_direction == DMA_NONE) {
                cmd_pkt->byte_count = cpu_to_le32(0);
                return;
        }

        vha = sp->fcport->vha;

        /* Set transfer direction */
        if (cmd->sc_data_direction == DMA_TO_DEVICE) {
                cmd_pkt->task_mgmt_flags = cpu_to_le16(TMF_WRITE_DATA);
                vha->qla_stats.output_bytes += scsi_bufflen(cmd);
                vha->qla_stats.output_requests++;
        } else if (cmd->sc_data_direction == DMA_FROM_DEVICE) {
                cmd_pkt->task_mgmt_flags = cpu_to_le16(TMF_READ_DATA);
                vha->qla_stats.input_bytes += scsi_bufflen(cmd);
                vha->qla_stats.input_requests++;
        }

        /* One DSD is available in the Command Type 7 IOCB */
        avail_dsds = 1;
        cur_dsd = (uint32_t *)&cmd_pkt->dseg_0_address;

        /* Load data segments */
        scsi_for_each_sg(cmd, sg, tot_dsds, i) {
                dma_addr_t      sle_dma;
                cont_a64_entry_t *cont_pkt;

                /* Allocate additional continuation packets? */
                if (avail_dsds == 0) {
                        /*
                         * Five DSDs are available in the Continuation
                         * Type 1 IOCB.
                         */
                        cont_pkt = qla2x00_prep_cont_type1_iocb(vha, vha->req);
                        cur_dsd = (uint32_t *)cont_pkt->dseg_0_address;
                        avail_dsds = 5;
                }

                sle_dma = sg_dma_address(sg);
                *cur_dsd++ = cpu_to_le32(LSD(sle_dma));
                *cur_dsd++ = cpu_to_le32(MSD(sle_dma));
                *cur_dsd++ = cpu_to_le32(sg_dma_len(sg));
                avail_dsds--;
        }
}

struct fw_dif_context {
        uint32_t ref_tag;
        uint16_t app_tag;
        uint8_t ref_tag_mask[4];        /* Validation/Replacement Mask */
        uint8_t app_tag_mask[2];        /* Validation/Replacement Mask */
};

/*
 * qla24xx_set_t10dif_tags() - Extract Ref and App tags from the SCSI command.
 */
static inline void
qla24xx_set_t10dif_tags(srb_t *sp, struct fw_dif_context *pkt,
    unsigned int protcnt)
{
        struct scsi_cmnd *cmd = GET_CMD_SP(sp);

        switch (scsi_get_prot_type(cmd)) {
        case SCSI_PROT_DIF_TYPE0:
                /*
                 * No check for ql2xenablehba_err_chk, as it would be an
                 * I/O error if hba tag generation is not done.
                 */
                pkt->ref_tag = cpu_to_le32((uint32_t)
                    (0xffffffff & scsi_get_lba(cmd)));

                if (!qla2x00_hba_err_chk_enabled(sp))
                        break;

                pkt->ref_tag_mask[0] = 0xff;
                pkt->ref_tag_mask[1] = 0xff;
                pkt->ref_tag_mask[2] = 0xff;
                pkt->ref_tag_mask[3] = 0xff;
                break;

        /*
         * For Type 2 protection: 16 bit GUARD + 32 bit REF tag has to
         * match LBA in CDB + N
         */
        case SCSI_PROT_DIF_TYPE2:
                pkt->app_tag = cpu_to_le16(0);
                pkt->app_tag_mask[0] = 0x0;
                pkt->app_tag_mask[1] = 0x0;

                pkt->ref_tag = cpu_to_le32((uint32_t)
                    (0xffffffff & scsi_get_lba(cmd)));

                if (!qla2x00_hba_err_chk_enabled(sp))
                        break;

                /* enable ALL bytes of the ref tag */
                pkt->ref_tag_mask[0] = 0xff;
                pkt->ref_tag_mask[1] = 0xff;
                pkt->ref_tag_mask[2] = 0xff;
                pkt->ref_tag_mask[3] = 0xff;
                break;

        /* For Type 3 protection: 16 bit GUARD only */
        case SCSI_PROT_DIF_TYPE3:
                pkt->ref_tag_mask[0] = pkt->ref_tag_mask[1] =
                        pkt->ref_tag_mask[2] = pkt->ref_tag_mask[3] =
                                                                0x00;
                break;

        /*
         * For Type 1 protection: 16 bit GUARD tag, 32 bit REF tag, and
         * 16 bit app tag.
         */
        case SCSI_PROT_DIF_TYPE1:
                pkt->ref_tag = cpu_to_le32((uint32_t)
                    (0xffffffff & scsi_get_lba(cmd)));
                pkt->app_tag = cpu_to_le16(0);
                pkt->app_tag_mask[0] = 0x0;
                pkt->app_tag_mask[1] = 0x0;

                if (!qla2x00_hba_err_chk_enabled(sp))
                        break;

                /* enable ALL bytes of the ref tag */
                pkt->ref_tag_mask[0] = 0xff;
                pkt->ref_tag_mask[1] = 0xff;
                pkt->ref_tag_mask[2] = 0xff;
                pkt->ref_tag_mask[3] = 0xff;
                break;
        }
}

struct qla2_sgx {
        dma_addr_t              dma_addr;       /* OUT */
        uint32_t                dma_len;        /* OUT */

        uint32_t                tot_bytes;      /* IN */
        struct scatterlist      *cur_sg;        /* IN */

        /* for bookkeeping, bzero on initial invocation */
        uint32_t                bytes_consumed;
        uint32_t                num_bytes;
        uint32_t                tot_partial;

        /* for debugging */
        uint32_t                num_sg;
        srb_t                   *sp;
};

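/*
 * qla24xx_get_one_block_sg() - Carve the next chunk, at most one
 * protection interval (@blk_sz bytes), out of the data scatterlist.
 *
 * On return, @sgx->dma_addr and @sgx->dma_len describe the chunk, and
 * *@partial is set when a scatterlist element ran out before the
 * interval boundary.  Returns 0 once all bytes have been consumed.
 */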
static int
qla24xx_get_one_block_sg(uint32_t blk_sz, struct qla2_sgx *sgx,
        uint32_t *partial)
{
        struct scatterlist *sg;
        uint32_t cumulative_partial, sg_len;
        dma_addr_t sg_dma_addr;

        if (sgx->num_bytes == sgx->tot_bytes)
                return 0;

        sg = sgx->cur_sg;
        cumulative_partial = sgx->tot_partial;

        sg_dma_addr = sg_dma_address(sg);
        sg_len = sg_dma_len(sg);

        sgx->dma_addr = sg_dma_addr + sgx->bytes_consumed;

        if ((cumulative_partial + (sg_len - sgx->bytes_consumed)) >= blk_sz) {
                sgx->dma_len = (blk_sz - cumulative_partial);
                sgx->tot_partial = 0;
                sgx->num_bytes += blk_sz;
                *partial = 0;
        } else {
                sgx->dma_len = sg_len - sgx->bytes_consumed;
                sgx->tot_partial += sgx->dma_len;
                *partial = 1;
        }

        sgx->bytes_consumed += sgx->dma_len;

        if (sg_len == sgx->bytes_consumed) {
                sg = sg_next(sg);
                sgx->num_sg++;
                sgx->cur_sg = sg;
                sgx->bytes_consumed = 0;
        }

        return 1;
}

int
qla24xx_walk_and_build_sglist_no_difb(struct qla_hw_data *ha, srb_t *sp,
        uint32_t *dsd, uint16_t tot_dsds, struct qla_tgt_cmd *tc)
{
        void *next_dsd;
        uint8_t avail_dsds = 0;
        uint32_t dsd_list_len;
        struct dsd_dma *dsd_ptr;
        struct scatterlist *sg_prot;
        uint32_t *cur_dsd = dsd;
        uint16_t        used_dsds = tot_dsds;

        uint32_t        prot_int; /* protection interval */
        uint32_t        partial;
        struct qla2_sgx sgx;
        dma_addr_t      sle_dma;
        uint32_t        sle_dma_len, tot_prot_dma_len = 0;
        struct scsi_cmnd *cmd;

        memset(&sgx, 0, sizeof(struct qla2_sgx));
        if (sp) {
                cmd = GET_CMD_SP(sp);
                prot_int = cmd->device->sector_size;

                sgx.tot_bytes = scsi_bufflen(cmd);
                sgx.cur_sg = scsi_sglist(cmd);
                sgx.sp = sp;

                sg_prot = scsi_prot_sglist(cmd);
        } else if (tc) {
                prot_int      = tc->blk_sz;
                sgx.tot_bytes = tc->bufflen;
                sgx.cur_sg    = tc->sg;
                sg_prot       = tc->prot_sg;
        } else {
                BUG();
                return 1;
        }

        while (qla24xx_get_one_block_sg(prot_int, &sgx, &partial)) {

                sle_dma = sgx.dma_addr;
                sle_dma_len = sgx.dma_len;
alloc_and_fill:
                /* Allocate additional continuation packets? */
                if (avail_dsds == 0) {
                        avail_dsds = (used_dsds > QLA_DSDS_PER_IOCB) ?
                                        QLA_DSDS_PER_IOCB : used_dsds;
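                        /*
                         * Each DSD is 12 bytes (address low/high plus
                         * length); the extra slot holds the chain or
                         * terminator entry written below.
                         */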
                        dsd_list_len = (avail_dsds + 1) * 12;
                        used_dsds -= avail_dsds;

                        /* allocate tracking DS */
                        dsd_ptr = kzalloc(sizeof(struct dsd_dma), GFP_ATOMIC);
                        if (!dsd_ptr)
                                return 1;

                        /* allocate new list */
                        dsd_ptr->dsd_addr = next_dsd =
                            dma_pool_alloc(ha->dl_dma_pool, GFP_ATOMIC,
                                &dsd_ptr->dsd_list_dma);

                        if (!next_dsd) {
                                /*
                                 * Need to cleanup only this dsd_ptr, rest
                                 * will be done by sp_free_dma()
                                 */
                                kfree(dsd_ptr);
                                return 1;
                        }

                        if (sp) {
                                list_add_tail(&dsd_ptr->list,
                                    &((struct crc_context *)
                                            sp->u.scmd.ctx)->dsd_list);

                                sp->flags |= SRB_CRC_CTX_DSD_VALID;
                        } else {
                                list_add_tail(&dsd_ptr->list,
                                    &(tc->ctx->dsd_list));
                                tc->ctx_dsd_alloced = 1;
                        }

                        /* add new list to cmd iocb or last list */
                        *cur_dsd++ = cpu_to_le32(LSD(dsd_ptr->dsd_list_dma));
                        *cur_dsd++ = cpu_to_le32(MSD(dsd_ptr->dsd_list_dma));
                        *cur_dsd++ = cpu_to_le32(dsd_list_len);
                        cur_dsd = (uint32_t *)next_dsd;
                }
                *cur_dsd++ = cpu_to_le32(LSD(sle_dma));
                *cur_dsd++ = cpu_to_le32(MSD(sle_dma));
                *cur_dsd++ = cpu_to_le32(sle_dma_len);
                avail_dsds--;

                if (partial == 0) {
                        /* Got a full protection interval */
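                        /* Queue its 8-byte DIF tuple from the protection SG list. */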
                        sle_dma = sg_dma_address(sg_prot) + tot_prot_dma_len;
                        sle_dma_len = 8;

                        tot_prot_dma_len += sle_dma_len;
                        if (tot_prot_dma_len == sg_dma_len(sg_prot)) {
                                tot_prot_dma_len = 0;
                                sg_prot = sg_next(sg_prot);
                        }

                        partial = 1; /* So as to not re-enter this block */
                        goto alloc_and_fill;
                }
        }
        /* Null termination */
        *cur_dsd++ = 0;
        *cur_dsd++ = 0;
        *cur_dsd++ = 0;
        return 0;
}

int
qla24xx_walk_and_build_sglist(struct qla_hw_data *ha, srb_t *sp, uint32_t *dsd,
        uint16_t tot_dsds, struct qla_tgt_cmd *tc)
{
        void *next_dsd;
        uint8_t avail_dsds = 0;
        uint32_t dsd_list_len;
        struct dsd_dma *dsd_ptr;
        struct scatterlist *sg, *sgl;
        uint32_t *cur_dsd = dsd;
        int     i;
        uint16_t        used_dsds = tot_dsds;
        struct scsi_cmnd *cmd;

        if (sp) {
                cmd = GET_CMD_SP(sp);
                sgl = scsi_sglist(cmd);
        } else if (tc) {
                sgl = tc->sg;
        } else {
                BUG();
                return 1;
        }

        for_each_sg(sgl, sg, tot_dsds, i) {
                dma_addr_t      sle_dma;

                /* Allocate additional continuation packets? */
                if (avail_dsds == 0) {
                        avail_dsds = (used_dsds > QLA_DSDS_PER_IOCB) ?
                                        QLA_DSDS_PER_IOCB : used_dsds;
                        dsd_list_len = (avail_dsds + 1) * 12;
                        used_dsds -= avail_dsds;

                        /* allocate tracking DS */
                        dsd_ptr = kzalloc(sizeof(struct dsd_dma), GFP_ATOMIC);
                        if (!dsd_ptr)
                                return 1;

                        /* allocate new list */
                        dsd_ptr->dsd_addr = next_dsd =
                            dma_pool_alloc(ha->dl_dma_pool, GFP_ATOMIC,
                                &dsd_ptr->dsd_list_dma);

                        if (!next_dsd) {
                                /*
                                 * Need to cleanup only this dsd_ptr, rest
                                 * will be done by sp_free_dma()
                                 */
                                kfree(dsd_ptr);
                                return 1;
                        }

                        if (sp) {
                                list_add_tail(&dsd_ptr->list,
                                    &((struct crc_context *)
                                            sp->u.scmd.ctx)->dsd_list);

                                sp->flags |= SRB_CRC_CTX_DSD_VALID;
                        } else {
                                list_add_tail(&dsd_ptr->list,
                                    &(tc->ctx->dsd_list));
                                tc->ctx_dsd_alloced = 1;
                        }

                        /* add new list to cmd iocb or last list */
                        *cur_dsd++ = cpu_to_le32(LSD(dsd_ptr->dsd_list_dma));
                        *cur_dsd++ = cpu_to_le32(MSD(dsd_ptr->dsd_list_dma));
                        *cur_dsd++ = cpu_to_le32(dsd_list_len);
                        cur_dsd = (uint32_t *)next_dsd;
                }
                sle_dma = sg_dma_address(sg);

                *cur_dsd++ = cpu_to_le32(LSD(sle_dma));
                *cur_dsd++ = cpu_to_le32(MSD(sle_dma));
                *cur_dsd++ = cpu_to_le32(sg_dma_len(sg));
                avail_dsds--;
        }
        /* Null termination */
        *cur_dsd++ = 0;
        *cur_dsd++ = 0;
        *cur_dsd++ = 0;
        return 0;
}

int
qla24xx_walk_and_build_prot_sglist(struct qla_hw_data *ha, srb_t *sp,
        uint32_t *dsd, uint16_t tot_dsds, struct qla_tgt_cmd *tc)
{
        void *next_dsd;
        uint8_t avail_dsds = 0;
        uint32_t dsd_list_len;
        struct dsd_dma *dsd_ptr;
        struct scatterlist *sg, *sgl;
        int     i;
        struct scsi_cmnd *cmd;
        uint32_t *cur_dsd = dsd;
        uint16_t used_dsds = tot_dsds;
        struct scsi_qla_host *vha;

        if (sp) {
                cmd = GET_CMD_SP(sp);
                sgl = scsi_prot_sglist(cmd);
                vha = sp->fcport->vha;
        } else if (tc) {
                vha = tc->vha;
                sgl = tc->prot_sg;
        } else {
                BUG();
                return 1;
        }

        ql_dbg(ql_dbg_tgt, vha, 0xe021,
                "%s: enter\n", __func__);

        for_each_sg(sgl, sg, tot_dsds, i) {
                dma_addr_t      sle_dma;

                /* Allocate additional continuation packets? */
                if (avail_dsds == 0) {
                        avail_dsds = (used_dsds > QLA_DSDS_PER_IOCB) ?
                                                QLA_DSDS_PER_IOCB : used_dsds;
                        dsd_list_len = (avail_dsds + 1) * 12;
                        used_dsds -= avail_dsds;

                        /* allocate tracking DS */
                        dsd_ptr = kzalloc(sizeof(struct dsd_dma), GFP_ATOMIC);
                        if (!dsd_ptr)
                                return 1;

                        /* allocate new list */
                        dsd_ptr->dsd_addr = next_dsd =
                            dma_pool_alloc(ha->dl_dma_pool, GFP_ATOMIC,
                                &dsd_ptr->dsd_list_dma);

                        if (!next_dsd) {
                                /*
                                 * Need to cleanup only this dsd_ptr, rest
                                 * will be done by sp_free_dma()
                                 */
                                kfree(dsd_ptr);
                                return 1;
                        }

                        if (sp) {
                                list_add_tail(&dsd_ptr->list,
                                    &((struct crc_context *)
                                            sp->u.scmd.ctx)->dsd_list);

                                sp->flags |= SRB_CRC_CTX_DSD_VALID;
                        } else {
                                list_add_tail(&dsd_ptr->list,
                                    &(tc->ctx->dsd_list));
                                tc->ctx_dsd_alloced = 1;
                        }

                        /* add new list to cmd iocb or last list */
                        *cur_dsd++ = cpu_to_le32(LSD(dsd_ptr->dsd_list_dma));
                        *cur_dsd++ = cpu_to_le32(MSD(dsd_ptr->dsd_list_dma));
                        *cur_dsd++ = cpu_to_le32(dsd_list_len);
                        cur_dsd = (uint32_t *)next_dsd;
                }
                sle_dma = sg_dma_address(sg);

                *cur_dsd++ = cpu_to_le32(LSD(sle_dma));
                *cur_dsd++ = cpu_to_le32(MSD(sle_dma));
                *cur_dsd++ = cpu_to_le32(sg_dma_len(sg));

                avail_dsds--;
        }
        /* Null termination */
        *cur_dsd++ = 0;
        *cur_dsd++ = 0;
        *cur_dsd++ = 0;
        return 0;
}

/**
 * qla24xx_build_scsi_crc_2_iocbs() - Build IOCB command utilizing Command
 * Type CRC_2 IOCB types.
 *
 * @sp: SRB command to process
 * @cmd_pkt: Command type CRC_2 IOCB
 * @tot_dsds: Total number of segments to transfer
 * @tot_prot_dsds: Total number of protection segments to transfer
 * @fw_prot_opts: Protection options to pass to the firmware
 */
static inline int
qla24xx_build_scsi_crc_2_iocbs(srb_t *sp, struct cmd_type_crc_2 *cmd_pkt,
    uint16_t tot_dsds, uint16_t tot_prot_dsds, uint16_t fw_prot_opts)
{
        uint32_t                *cur_dsd, *fcp_dl;
        scsi_qla_host_t         *vha;
        struct scsi_cmnd        *cmd;
        uint32_t                total_bytes = 0;
        uint32_t                data_bytes;
        uint32_t                dif_bytes;
        uint8_t                 bundling = 1;
        uint16_t                blk_size;
        uint8_t                 *clr_ptr;
        struct crc_context      *crc_ctx_pkt = NULL;
        struct qla_hw_data      *ha;
        uint8_t                 additional_fcpcdb_len;
        uint16_t                fcp_cmnd_len;
        struct fcp_cmnd         *fcp_cmnd;
        dma_addr_t              crc_ctx_dma;

        cmd = GET_CMD_SP(sp);

        /* Update entry type to indicate Command Type CRC_2 IOCB */
        *((uint32_t *)(&cmd_pkt->entry_type)) = cpu_to_le32(COMMAND_TYPE_CRC_2);

        vha = sp->fcport->vha;
        ha = vha->hw;

        /* No data transfer */
        data_bytes = scsi_bufflen(cmd);
        if (!data_bytes || cmd->sc_data_direction == DMA_NONE) {
                cmd_pkt->byte_count = cpu_to_le32(0);
                return QLA_SUCCESS;
        }

        cmd_pkt->vp_index = sp->fcport->vha->vp_idx;

        /* Set transfer direction */
        if (cmd->sc_data_direction == DMA_TO_DEVICE) {
                cmd_pkt->control_flags =
                    cpu_to_le16(CF_WRITE_DATA);
        } else if (cmd->sc_data_direction == DMA_FROM_DEVICE) {
                cmd_pkt->control_flags =
                    cpu_to_le16(CF_READ_DATA);
        }

        if ((scsi_get_prot_op(cmd) == SCSI_PROT_READ_INSERT) ||
            (scsi_get_prot_op(cmd) == SCSI_PROT_WRITE_STRIP) ||
            (scsi_get_prot_op(cmd) == SCSI_PROT_READ_STRIP) ||
            (scsi_get_prot_op(cmd) == SCSI_PROT_WRITE_INSERT))
                bundling = 0;

        /* Allocate CRC context from global pool */
        crc_ctx_pkt = sp->u.scmd.ctx =
            dma_pool_alloc(ha->dl_dma_pool, GFP_ATOMIC, &crc_ctx_dma);

        if (!crc_ctx_pkt)
                goto crc_queuing_error;

        /* Zero out CTX area. */
        clr_ptr = (uint8_t *)crc_ctx_pkt;
        memset(clr_ptr, 0, sizeof(*crc_ctx_pkt));

        crc_ctx_pkt->crc_ctx_dma = crc_ctx_dma;

        sp->flags |= SRB_CRC_CTX_DMA_VALID;

        /* Set handle */
        crc_ctx_pkt->handle = cmd_pkt->handle;

        INIT_LIST_HEAD(&crc_ctx_pkt->dsd_list);

        qla24xx_set_t10dif_tags(sp, (struct fw_dif_context *)
            &crc_ctx_pkt->ref_tag, tot_prot_dsds);

        cmd_pkt->crc_context_address[0] = cpu_to_le32(LSD(crc_ctx_dma));
        cmd_pkt->crc_context_address[1] = cpu_to_le32(MSD(crc_ctx_dma));
        cmd_pkt->crc_context_len = CRC_CONTEXT_LEN_FW;

        /* Determine SCSI command length -- align to 4 byte boundary */
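        /*
         * FCP_CMND layout: an 8-byte LUN plus 4 bytes of task-code fields
         * precede the CDB, and the 4-byte FCP_DL follows it -- hence the
         * 12 and 4 below.
         */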
        if (cmd->cmd_len > 16) {
                additional_fcpcdb_len = cmd->cmd_len - 16;
                if ((cmd->cmd_len % 4) != 0) {
                        /* SCSI cmd > 16 bytes must be multiple of 4 */
                        goto crc_queuing_error;
                }
                fcp_cmnd_len = 12 + cmd->cmd_len + 4;
        } else {
                additional_fcpcdb_len = 0;
                fcp_cmnd_len = 12 + 16 + 4;
        }

        fcp_cmnd = &crc_ctx_pkt->fcp_cmnd;

        fcp_cmnd->additional_cdb_len = additional_fcpcdb_len;
        if (cmd->sc_data_direction == DMA_TO_DEVICE)
                fcp_cmnd->additional_cdb_len |= 1;
        else if (cmd->sc_data_direction == DMA_FROM_DEVICE)
                fcp_cmnd->additional_cdb_len |= 2;

        int_to_scsilun(cmd->device->lun, &fcp_cmnd->lun);
        memcpy(fcp_cmnd->cdb, cmd->cmnd, cmd->cmd_len);
        cmd_pkt->fcp_cmnd_dseg_len = cpu_to_le16(fcp_cmnd_len);
        cmd_pkt->fcp_cmnd_dseg_address[0] = cpu_to_le32(
            LSD(crc_ctx_dma + CRC_CONTEXT_FCPCMND_OFF));
        cmd_pkt->fcp_cmnd_dseg_address[1] = cpu_to_le32(
            MSD(crc_ctx_dma + CRC_CONTEXT_FCPCMND_OFF));
        fcp_cmnd->task_management = 0;
        fcp_cmnd->task_attribute = TSK_SIMPLE;

        cmd_pkt->fcp_rsp_dseg_len = 0; /* Let response come in status iocb */

        /* Compute dif len and adjust data len to include protection */
        dif_bytes = 0;
        blk_size = cmd->device->sector_size;
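        /* Each logical block carries an 8-byte DIF tuple. */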
        dif_bytes = (data_bytes / blk_size) * 8;

        switch (scsi_get_prot_op(GET_CMD_SP(sp))) {
        case SCSI_PROT_READ_INSERT:
        case SCSI_PROT_WRITE_STRIP:
            total_bytes = data_bytes;
            data_bytes += dif_bytes;
            break;

        case SCSI_PROT_READ_STRIP:
        case SCSI_PROT_WRITE_INSERT:
        case SCSI_PROT_READ_PASS:
        case SCSI_PROT_WRITE_PASS:
            total_bytes = data_bytes + dif_bytes;
            break;
        default:
            BUG();
        }

        if (!qla2x00_hba_err_chk_enabled(sp))
                fw_prot_opts |= 0x10; /* Disable Guard tag checking */
        /* HBA error checking enabled */
        else if (IS_PI_UNINIT_CAPABLE(ha)) {
                if ((scsi_get_prot_type(GET_CMD_SP(sp)) == SCSI_PROT_DIF_TYPE1)
                    || (scsi_get_prot_type(GET_CMD_SP(sp)) ==
                        SCSI_PROT_DIF_TYPE2))
                        fw_prot_opts |= BIT_10;
                else if (scsi_get_prot_type(GET_CMD_SP(sp)) ==
                    SCSI_PROT_DIF_TYPE3)
                        fw_prot_opts |= BIT_11;
        }

        if (!bundling) {
                cur_dsd = (uint32_t *) &crc_ctx_pkt->u.nobundling.data_address;
        } else {
                /*
                 * Configure bundling: protection segments are fetched
                 * with interleaving PCI accesses.
                 */
                fw_prot_opts |= PO_ENABLE_DIF_BUNDLING;
                crc_ctx_pkt->u.bundling.dif_byte_count = cpu_to_le32(dif_bytes);
                crc_ctx_pkt->u.bundling.dseg_count = cpu_to_le16(tot_dsds -
                                                        tot_prot_dsds);
                cur_dsd = (uint32_t *) &crc_ctx_pkt->u.bundling.data_address;
        }

        /* Finish the common fields of CRC pkt */
        crc_ctx_pkt->blk_size = cpu_to_le16(blk_size);
        crc_ctx_pkt->prot_opts = cpu_to_le16(fw_prot_opts);
        crc_ctx_pkt->byte_count = cpu_to_le32(data_bytes);
        crc_ctx_pkt->guard_seed = cpu_to_le16(0);
        /* Fibre channel byte count */
        cmd_pkt->byte_count = cpu_to_le32(total_bytes);
        fcp_dl = (uint32_t *)(crc_ctx_pkt->fcp_cmnd.cdb + 16 +
            additional_fcpcdb_len);
        *fcp_dl = htonl(total_bytes);

        if (!data_bytes || cmd->sc_data_direction == DMA_NONE) {
                cmd_pkt->byte_count = cpu_to_le32(0);
                return QLA_SUCCESS;
        }

        /* Walk the data segments */
        cmd_pkt->control_flags |= cpu_to_le16(CF_DATA_SEG_DESCR_ENABLE);

        if (!bundling && tot_prot_dsds) {
                if (qla24xx_walk_and_build_sglist_no_difb(ha, sp,
                        cur_dsd, tot_dsds, NULL))
                        goto crc_queuing_error;
        } else if (qla24xx_walk_and_build_sglist(ha, sp, cur_dsd,
                        (tot_dsds - tot_prot_dsds), NULL))
                goto crc_queuing_error;

        if (bundling && tot_prot_dsds) {
                /* Walk the dif segments */
                cmd_pkt->control_flags |= cpu_to_le16(CF_DIF_SEG_DESCR_ENABLE);
                cur_dsd = (uint32_t *) &crc_ctx_pkt->u.bundling.dif_address;
                if (qla24xx_walk_and_build_prot_sglist(ha, sp, cur_dsd,
                                tot_prot_dsds, NULL))
                        goto crc_queuing_error;
        }
        return QLA_SUCCESS;

crc_queuing_error:
        /* Cleanup will be performed by the caller */

        return QLA_FUNCTION_FAILED;
}

/**
 * qla24xx_start_scsi() - Send a SCSI command to the ISP
 * @sp: command to send to the ISP
 *
 * Returns non-zero if a failure occurred, else zero.
 */
int
qla24xx_start_scsi(srb_t *sp)
{
        int             nseg;
        unsigned long   flags;
        uint32_t        *clr_ptr;
        uint32_t        index;
        uint32_t        handle;
        struct cmd_type_7 *cmd_pkt;
        uint16_t        cnt;
        uint16_t        req_cnt;
        uint16_t        tot_dsds;
        struct req_que *req = NULL;
        struct rsp_que *rsp = NULL;
        struct scsi_cmnd *cmd = GET_CMD_SP(sp);
        struct scsi_qla_host *vha = sp->fcport->vha;
        struct qla_hw_data *ha = vha->hw;

        /* Setup device pointers. */
        qla25xx_set_que(sp, &rsp);
        req = vha->req;

        /* So we know we haven't pci_map'ed anything yet */
        tot_dsds = 0;

        /* Send marker if required */
        if (vha->marker_needed != 0) {
                if (qla2x00_marker(vha, req, rsp, 0, 0, MK_SYNC_ALL) !=
                    QLA_SUCCESS)
                        return QLA_FUNCTION_FAILED;
                vha->marker_needed = 0;
        }

        /* Acquire ring specific lock */
        spin_lock_irqsave(&ha->hardware_lock, flags);

        /* Check for room in outstanding command list. */
        handle = req->current_outstanding_cmd;
        for (index = 1; index < req->num_outstanding_cmds; index++) {
                handle++;
                if (handle == req->num_outstanding_cmds)
                        handle = 1;
                if (!req->outstanding_cmds[handle])
                        break;
        }
        if (index == req->num_outstanding_cmds)
                goto queuing_error;

        /* Map the sg table so we have an accurate count of sg entries needed */
        if (scsi_sg_count(cmd)) {
                nseg = dma_map_sg(&ha->pdev->dev, scsi_sglist(cmd),
                    scsi_sg_count(cmd), cmd->sc_data_direction);
                if (unlikely(!nseg))
                        goto queuing_error;
        } else
                nseg = 0;

        tot_dsds = nseg;
        req_cnt = qla24xx_calc_iocbs(vha, tot_dsds);
        if (req->cnt < (req_cnt + 2)) {
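                /*
                 * Refresh the free-entry count.  Shadow-register capable
                 * ISPs export the request-queue-out index in host memory
                 * (*req->out_ptr), avoiding a slow register read here.
                 */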
1480                cnt = IS_SHADOW_REG_CAPABLE(ha) ? *req->out_ptr :
1481                    RD_REG_DWORD_RELAXED(req->req_q_out);
1482                if (req->ring_index < cnt)
1483                        req->cnt = cnt - req->ring_index;
1484                else
1485                        req->cnt = req->length -
1486                                (req->ring_index - cnt);
1487                if (req->cnt < (req_cnt + 2))
1488                        goto queuing_error;
1489        }
1490
1491        /* Build command packet. */
1492        req->current_outstanding_cmd = handle;
1493        req->outstanding_cmds[handle] = sp;
1494        sp->handle = handle;
1495        cmd->host_scribble = (unsigned char *)(unsigned long)handle;
1496        req->cnt -= req_cnt;
1497
1498        cmd_pkt = (struct cmd_type_7 *)req->ring_ptr;
1499        cmd_pkt->handle = MAKE_HANDLE(req->id, handle);
1500
1501        /* Zero out remaining portion of packet. */
1502        /*    tagged queuing modifier -- default is TSK_SIMPLE (0). */
1503        clr_ptr = (uint32_t *)cmd_pkt + 2;
1504        memset(clr_ptr, 0, REQUEST_ENTRY_SIZE - 8);
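            /* Skip the 4-byte entry header and the handle written above. */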
1505        cmd_pkt->dseg_count = cpu_to_le16(tot_dsds);
1506
1507        /* Set NPORT-ID and LUN number*/
1508        cmd_pkt->nport_handle = cpu_to_le16(sp->fcport->loop_id);
1509        cmd_pkt->port_id[0] = sp->fcport->d_id.b.al_pa;
1510        cmd_pkt->port_id[1] = sp->fcport->d_id.b.area;
1511        cmd_pkt->port_id[2] = sp->fcport->d_id.b.domain;
1512        cmd_pkt->vp_index = sp->fcport->vha->vp_idx;
1513
1514        int_to_scsilun(cmd->device->lun, &cmd_pkt->lun);
1515        host_to_fcp_swap((uint8_t *)&cmd_pkt->lun, sizeof(cmd_pkt->lun));
1516
1517        cmd_pkt->task = TSK_SIMPLE;
1518
1519        /* Load SCSI command packet. */
1520        memcpy(cmd_pkt->fcp_cdb, cmd->cmnd, cmd->cmd_len);
1521        host_to_fcp_swap(cmd_pkt->fcp_cdb, sizeof(cmd_pkt->fcp_cdb));
1522
1523        cmd_pkt->byte_count = cpu_to_le32((uint32_t)scsi_bufflen(cmd));
1524
1525        /* Build IOCB segments */
1526        qla24xx_build_scsi_iocbs(sp, cmd_pkt, tot_dsds);
1527
1528        /* Set total data segment count. */
1529        cmd_pkt->entry_count = (uint8_t)req_cnt;
1530        /* Specify response queue number where completion should happen */
1531        cmd_pkt->entry_status = (uint8_t) rsp->id;
1532        wmb();
1533        /* Adjust ring index. */
1534        req->ring_index++;
1535        if (req->ring_index == req->length) {
1536                req->ring_index = 0;
1537                req->ring_ptr = req->ring;
1538        } else
1539                req->ring_ptr++;
1540
1541        sp->flags |= SRB_DMA_VALID;
1542
1543        /* Set chip new ring index. */
1544        WRT_REG_DWORD(req->req_q_in, req->ring_index);
1545        RD_REG_DWORD_RELAXED(&ha->iobase->isp24.hccr);
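            /* The HCCR read back flushes the posted PCI write above. */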
1546
1547        /* Manage unprocessed RIO/ZIO commands in response queue. */
1548        if (vha->flags.process_response_queue &&
1549                rsp->ring_ptr->signature != RESPONSE_PROCESSED)
1550                qla24xx_process_response_queue(vha, rsp);
1551
1552        spin_unlock_irqrestore(&ha->hardware_lock, flags);
1553        return QLA_SUCCESS;
1554
1555queuing_error:
1556        if (tot_dsds)
1557                scsi_dma_unmap(cmd);
1558
1559        spin_unlock_irqrestore(&ha->hardware_lock, flags);
1560
1561        return QLA_FUNCTION_FAILED;
1562}
1563
1564/**
1565 * qla24xx_dif_start_scsi() - Send a SCSI command to the ISP
1566 * @sp: command to send to the ISP
1567 *
1568 * Returns non-zero if a failure occurred, else zero.
1569 */
1570int
1571qla24xx_dif_start_scsi(srb_t *sp)
1572{
1573        int                     nseg;
1574        unsigned long           flags;
1575        uint32_t                *clr_ptr;
1576        uint32_t                index;
1577        uint32_t                handle;
1578        uint16_t                cnt;
1579        uint16_t                req_cnt = 0;
1580        uint16_t                tot_dsds;
1581        uint16_t                tot_prot_dsds;
1582        uint16_t                fw_prot_opts = 0;
1583        struct req_que          *req = NULL;
1584        struct rsp_que          *rsp = NULL;
1585        struct scsi_cmnd        *cmd = GET_CMD_SP(sp);
1586        struct scsi_qla_host    *vha = sp->fcport->vha;
1587        struct qla_hw_data      *ha = vha->hw;
1588        struct cmd_type_crc_2   *cmd_pkt;
1589        uint32_t                status = 0;
1590
1591#define QDSS_GOT_Q_SPACE        BIT_0
1592
1593        /* Only process protection ops or CDBs longer than 16 bytes here. */
1594        if (scsi_get_prot_op(cmd) == SCSI_PROT_NORMAL) {
1595                if (cmd->cmd_len <= 16)
1596                        return qla24xx_start_scsi(sp);
1597        }
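            /*
             * Everything else -- DIF/DIX protection I/O or a CDB longer
             * than 16 bytes -- is carried in a Command Type CRC_2 IOCB,
             * built below.
             */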
1598
1599        /* Setup device pointers. */
1600
1601        qla25xx_set_que(sp, &rsp);
1602        req = vha->req;
1603
1604        /* So we know we haven't pci_map'ed anything yet */
1605        tot_dsds = 0;
1606
1607        /* Send marker if required */
1608        if (vha->marker_needed != 0) {
1609                if (qla2x00_marker(vha, req, rsp, 0, 0, MK_SYNC_ALL) !=
1610                    QLA_SUCCESS)
1611                        return QLA_FUNCTION_FAILED;
1612                vha->marker_needed = 0;
1613        }
1614
1615        /* Acquire ring specific lock */
1616        spin_lock_irqsave(&ha->hardware_lock, flags);
1617
1618        /* Check for room in outstanding command list. */
1619        handle = req->current_outstanding_cmd;
1620        for (index = 1; index < req->num_outstanding_cmds; index++) {
1621                handle++;
1622                if (handle == req->num_outstanding_cmds)
1623                        handle = 1;
1624                if (!req->outstanding_cmds[handle])
1625                        break;
1626        }
1627
1628        if (index == req->num_outstanding_cmds)
1629                goto queuing_error;
1630
1631        /* Compute number of required data segments */
1632        /* Map the sg table so we have an accurate count of sg entries needed */
1633        if (scsi_sg_count(cmd)) {
1634                nseg = dma_map_sg(&ha->pdev->dev, scsi_sglist(cmd),
1635                    scsi_sg_count(cmd), cmd->sc_data_direction);
1636                if (unlikely(!nseg))
1637                        goto queuing_error;
1638                else
1639                        sp->flags |= SRB_DMA_VALID;
1640
1641                if ((scsi_get_prot_op(cmd) == SCSI_PROT_READ_INSERT) ||
1642                    (scsi_get_prot_op(cmd) == SCSI_PROT_WRITE_STRIP)) {
1643                        struct qla2_sgx sgx;
1644                        uint32_t        partial;
1645
1646                        memset(&sgx, 0, sizeof(struct qla2_sgx));
1647                        sgx.tot_bytes = scsi_bufflen(cmd);
1648                        sgx.cur_sg = scsi_sglist(cmd);
1649                        sgx.sp = sp;
1650
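                            /*
                             * Re-count segments one logical block at a
                             * time: when the HBA inserts or strips
                             * protection data, each block needs its own
                             * data segment descriptor.
                             */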
1651                        nseg = 0;
1652                        while (qla24xx_get_one_block_sg(
1653                            cmd->device->sector_size, &sgx, &partial))
1654                                nseg++;
1655                }
1656        } else
1657                nseg = 0;
1658
1659        /* number of required data segments */
1660        tot_dsds = nseg;
1661
1662        /* Compute number of required protection segments */
1663        if (qla24xx_configure_prot_mode(sp, &fw_prot_opts)) {
1664                nseg = dma_map_sg(&ha->pdev->dev, scsi_prot_sglist(cmd),
1665                    scsi_prot_sg_count(cmd), cmd->sc_data_direction);
1666                if (unlikely(!nseg))
1667                        goto queuing_error;
1668                else
1669                        sp->flags |= SRB_CRC_PROT_DMA_VALID;
1670
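                    /*
                     * For READ_INSERT/WRITE_STRIP the protection data is
                     * generated or consumed by the HBA itself; assuming
                     * one DIF tuple per logical block, the protection
                     * segment count is simply the number of blocks.
                     */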
1671                if ((scsi_get_prot_op(cmd) == SCSI_PROT_READ_INSERT) ||
1672                    (scsi_get_prot_op(cmd) == SCSI_PROT_WRITE_STRIP)) {
1673                        nseg = scsi_bufflen(cmd) / cmd->device->sector_size;
1674                }
1675        } else {
1676                nseg = 0;
1677        }
1678
1679        req_cnt = 1;
1680        /* Total Data and protection sg segment(s) */
1681        tot_prot_dsds = nseg;
1682        tot_dsds += nseg;
1683        if (req->cnt < (req_cnt + 2)) {
1684                cnt = IS_SHADOW_REG_CAPABLE(ha) ? *req->out_ptr :
1685                    RD_REG_DWORD_RELAXED(req->req_q_out);
1686                if (req->ring_index < cnt)
1687                        req->cnt = cnt - req->ring_index;
1688                else
1689                        req->cnt = req->length -
1690                                (req->ring_index - cnt);
1691                if (req->cnt < (req_cnt + 2))
1692                        goto queuing_error;
1693        }
1694
1695        status |= QDSS_GOT_Q_SPACE;
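            /* From here on, the error path must return the ring space. */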
1696
1697        /* Build header part of command packet (excluding the OPCODE). */
1698        req->current_outstanding_cmd = handle;
1699        req->outstanding_cmds[handle] = sp;
1700        sp->handle = handle;
1701        cmd->host_scribble = (unsigned char *)(unsigned long)handle;
1702        req->cnt -= req_cnt;
1703
1704        /* Fill-in common area */
1705        cmd_pkt = (struct cmd_type_crc_2 *)req->ring_ptr;
1706        cmd_pkt->handle = MAKE_HANDLE(req->id, handle);
1707
1708        clr_ptr = (uint32_t *)cmd_pkt + 2;
1709        memset(clr_ptr, 0, REQUEST_ENTRY_SIZE - 8);
1710
1711        /* Set NPORT-ID and LUN number*/
1712        cmd_pkt->nport_handle = cpu_to_le16(sp->fcport->loop_id);
1713        cmd_pkt->port_id[0] = sp->fcport->d_id.b.al_pa;
1714        cmd_pkt->port_id[1] = sp->fcport->d_id.b.area;
1715        cmd_pkt->port_id[2] = sp->fcport->d_id.b.domain;
1716
1717        int_to_scsilun(cmd->device->lun, &cmd_pkt->lun);
1718        host_to_fcp_swap((uint8_t *)&cmd_pkt->lun, sizeof(cmd_pkt->lun));
1719
1720        /* Total Data and protection segment(s) */
1721        cmd_pkt->dseg_count = cpu_to_le16(tot_dsds);
1722
1723        /* Build IOCB segments and adjust for data protection segments */
1724        if (qla24xx_build_scsi_crc_2_iocbs(sp, (struct cmd_type_crc_2 *)
1725            req->ring_ptr, tot_dsds, tot_prot_dsds, fw_prot_opts) !=
1726                QLA_SUCCESS)
1727                goto queuing_error;
1728
1729        cmd_pkt->entry_count = (uint8_t)req_cnt;
1730        /* Specify response queue number where completion should happen */
1731        cmd_pkt->entry_status = (uint8_t) rsp->id;
1732        cmd_pkt->timeout = cpu_to_le16(0);
1733        wmb();
1734
1735        /* Adjust ring index. */
1736        req->ring_index++;
1737        if (req->ring_index == req->length) {
1738                req->ring_index = 0;
1739                req->ring_ptr = req->ring;
1740        } else
1741                req->ring_ptr++;
1742
1743        /* Set chip new ring index. */
1744        WRT_REG_DWORD(req->req_q_in, req->ring_index);
1745        RD_REG_DWORD_RELAXED(&ha->iobase->isp24.hccr);
1746
1747        /* Manage unprocessed RIO/ZIO commands in response queue. */
1748        if (vha->flags.process_response_queue &&
1749            rsp->ring_ptr->signature != RESPONSE_PROCESSED)
1750                qla24xx_process_response_queue(vha, rsp);
1751
1752        spin_unlock_irqrestore(&ha->hardware_lock, flags);
1753
1754        return QLA_SUCCESS;
1755
1756queuing_error:
1757        if (status & QDSS_GOT_Q_SPACE) {
1758                req->outstanding_cmds[handle] = NULL;
1759                req->cnt += req_cnt;
1760        }
1761        /* Cleanup will be performed by the caller (queuecommand) */
1762
1763        spin_unlock_irqrestore(&ha->hardware_lock, flags);
1764        return QLA_FUNCTION_FAILED;
1765}
1766
1767
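    /*
     * Select the response queue on which this command should complete:
     * with CPU affinity enabled, the submitting CPU maps to
     * rsp_q_map[cpu + 1]; otherwise the default queue 0 is used.
     */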
1768static void qla25xx_set_que(srb_t *sp, struct rsp_que **rsp)
1769{
1770        struct scsi_cmnd *cmd = GET_CMD_SP(sp);
1771        struct qla_hw_data *ha = sp->fcport->vha->hw;
1772        int affinity = cmd->request->cpu;
1773
1774        if (ha->flags.cpu_affinity_enabled && affinity >= 0 &&
1775                affinity < ha->max_rsp_queues - 1)
1776                *rsp = ha->rsp_q_map[affinity + 1];
1777        else
1778                *rsp = ha->rsp_q_map[0];
1779}
1780
1781/* Generic Control-SRB manipulation functions. */
1782
1783/* hardware_lock assumed to be held. */
1784void *
1785qla2x00_alloc_iocbs_ready(scsi_qla_host_t *vha, srb_t *sp)
1786{
1787        if (qla2x00_reset_active(vha))
1788                return NULL;
1789
1790        return qla2x00_alloc_iocbs(vha, sp);
1791}
1792
1793void *
1794qla2x00_alloc_iocbs(scsi_qla_host_t *vha, srb_t *sp)
1795{
1796        struct qla_hw_data *ha = vha->hw;
1797        struct req_que *req = ha->req_q_map[0];
1798        device_reg_t *reg = ISP_QUE_REG(ha, req->id);
1799        uint32_t index, handle;
1800        request_t *pkt;
1801        uint16_t cnt, req_cnt;
1802
1803        pkt = NULL;
1804        req_cnt = 1;
1805        handle = 0;
1806
1807        if (!sp)
1808                goto skip_cmd_array;
1809
1810        /* Check for room in outstanding command list. */
1811        handle = req->current_outstanding_cmd;
1812        for (index = 1; index < req->num_outstanding_cmds; index++) {
1813                handle++;
1814                if (handle == req->num_outstanding_cmds)
1815                        handle = 1;
1816                if (!req->outstanding_cmds[handle])
1817                        break;
1818        }
1819        if (index == req->num_outstanding_cmds) {
1820                ql_log(ql_log_warn, vha, 0x700b,
1821                    "No room on outstanding cmd array.\n");
1822                goto queuing_error;
1823        }
1824
1825        /* Prep command array. */
1826        req->current_outstanding_cmd = handle;
1827        req->outstanding_cmds[handle] = sp;
1828        sp->handle = handle;
1829
1830        /* Adjust entry-counts as needed. */
1831        if (sp->type != SRB_SCSI_CMD)
1832                req_cnt = sp->iocbs;
1833
1834skip_cmd_array:
1835        /* Check for room on request queue. */
1836        if (req->cnt < req_cnt + 2) {
1837                if (ha->mqenable || IS_QLA83XX(ha) || IS_QLA27XX(ha))
1838                        cnt = RD_REG_DWORD(&reg->isp25mq.req_q_out);
1839                else if (IS_P3P_TYPE(ha))
1840                        cnt = RD_REG_DWORD(&reg->isp82.req_q_out);
1841                else if (IS_FWI2_CAPABLE(ha))
1842                        cnt = RD_REG_DWORD(&reg->isp24.req_q_out);
1843                else if (IS_QLAFX00(ha))
1844                        cnt = RD_REG_DWORD(&reg->ispfx00.req_q_out);
1845                else
1846                        cnt = qla2x00_debounce_register(
1847                            ISP_REQ_Q_OUT(ha, &reg->isp));
1848
1849                if  (req->ring_index < cnt)
1850                        req->cnt = cnt - req->ring_index;
1851                else
1852                        req->cnt = req->length -
1853                            (req->ring_index - cnt);
1854        }
1855        if (req->cnt < req_cnt + 2)
1856                goto queuing_error;
1857
1858        /* Prep packet */
1859        req->cnt -= req_cnt;
1860        pkt = req->ring_ptr;
1861        memset(pkt, 0, REQUEST_ENTRY_SIZE);
1862        if (IS_QLAFX00(ha)) {
1863                WRT_REG_BYTE((void __iomem *)&pkt->entry_count, req_cnt);
1864                WRT_REG_WORD((void __iomem *)&pkt->handle, handle);
1865        } else {
1866                pkt->entry_count = req_cnt;
1867                pkt->handle = handle;
1868        }
1869        return pkt;     /* success -- don't fall into the failure path */
1870queuing_error:
1871        vha->tgt_counters.num_alloc_iocb_failed++;
1872        return pkt;
1873}
1874
1875static void
1876qla24xx_login_iocb(srb_t *sp, struct logio_entry_24xx *logio)
1877{
1878        struct srb_iocb *lio = &sp->u.iocb_cmd;
1879
1880        logio->entry_type = LOGINOUT_PORT_IOCB_TYPE;
1881        logio->control_flags = cpu_to_le16(LCF_COMMAND_PLOGI);
1882        if (lio->u.logio.flags & SRB_LOGIN_COND_PLOGI)
1883                logio->control_flags |= cpu_to_le16(LCF_COND_PLOGI);
1884        if (lio->u.logio.flags & SRB_LOGIN_SKIP_PRLI)
1885                logio->control_flags |= cpu_to_le16(LCF_SKIP_PRLI);
1886        logio->nport_handle = cpu_to_le16(sp->fcport->loop_id);
1887        logio->port_id[0] = sp->fcport->d_id.b.al_pa;
1888        logio->port_id[1] = sp->fcport->d_id.b.area;
1889        logio->port_id[2] = sp->fcport->d_id.b.domain;
1890        logio->vp_index = sp->fcport->vha->vp_idx;
1891}
1892
1893static void
1894qla2x00_login_iocb(srb_t *sp, struct mbx_entry *mbx)
1895{
1896        struct qla_hw_data *ha = sp->fcport->vha->hw;
1897        struct srb_iocb *lio = &sp->u.iocb_cmd;
1898        uint16_t opts;
1899
1900        mbx->entry_type = MBX_IOCB_TYPE;
1901        SET_TARGET_ID(ha, mbx->loop_id, sp->fcport->loop_id);
1902        mbx->mb0 = cpu_to_le16(MBC_LOGIN_FABRIC_PORT);
1903        opts = lio->u.logio.flags & SRB_LOGIN_COND_PLOGI ? BIT_0 : 0;
1904        opts |= lio->u.logio.flags & SRB_LOGIN_SKIP_PRLI ? BIT_1 : 0;
1905        if (HAS_EXTENDED_IDS(ha)) {
1906                mbx->mb1 = cpu_to_le16(sp->fcport->loop_id);
1907                mbx->mb10 = cpu_to_le16(opts);
1908        } else {
1909                mbx->mb1 = cpu_to_le16((sp->fcport->loop_id << 8) | opts);
1910        }
1911        mbx->mb2 = cpu_to_le16(sp->fcport->d_id.b.domain);
1912        mbx->mb3 = cpu_to_le16(sp->fcport->d_id.b.area << 8 |
1913            sp->fcport->d_id.b.al_pa);
1914        mbx->mb9 = cpu_to_le16(sp->fcport->vha->vp_idx);
1915}
1916
1917static void
1918qla24xx_logout_iocb(srb_t *sp, struct logio_entry_24xx *logio)
1919{
1920        logio->entry_type = LOGINOUT_PORT_IOCB_TYPE;
1921        logio->control_flags =
1922            cpu_to_le16(LCF_COMMAND_LOGO|LCF_IMPL_LOGO);
1923        if (!sp->fcport->tgt_session ||
1924            !sp->fcport->tgt_session->keep_nport_handle)
1925                logio->control_flags |= cpu_to_le16(LCF_FREE_NPORT);
1926        logio->nport_handle = cpu_to_le16(sp->fcport->loop_id);
1927        logio->port_id[0] = sp->fcport->d_id.b.al_pa;
1928        logio->port_id[1] = sp->fcport->d_id.b.area;
1929        logio->port_id[2] = sp->fcport->d_id.b.domain;
1930        logio->vp_index = sp->fcport->vha->vp_idx;
1931}
1932
1933static void
1934qla2x00_logout_iocb(srb_t *sp, struct mbx_entry *mbx)
1935{
1936        struct qla_hw_data *ha = sp->fcport->vha->hw;
1937
1938        mbx->entry_type = MBX_IOCB_TYPE;
1939        SET_TARGET_ID(ha, mbx->loop_id, sp->fcport->loop_id);
1940        mbx->mb0 = cpu_to_le16(MBC_LOGOUT_FABRIC_PORT);
1941        mbx->mb1 = HAS_EXTENDED_IDS(ha) ?
1942            cpu_to_le16(sp->fcport->loop_id):
1943            cpu_to_le16(sp->fcport->loop_id << 8);
1944        mbx->mb2 = cpu_to_le16(sp->fcport->d_id.b.domain);
1945        mbx->mb3 = cpu_to_le16(sp->fcport->d_id.b.area << 8 |
1946            sp->fcport->d_id.b.al_pa);
1947        mbx->mb9 = cpu_to_le16(sp->fcport->vha->vp_idx);
1948        /* Implicit: mbx->mbx10 = 0. */
1949}
1950
1951static void
1952qla24xx_adisc_iocb(srb_t *sp, struct logio_entry_24xx *logio)
1953{
1954        logio->entry_type = LOGINOUT_PORT_IOCB_TYPE;
1955        logio->control_flags = cpu_to_le16(LCF_COMMAND_ADISC);
1956        logio->nport_handle = cpu_to_le16(sp->fcport->loop_id);
1957        logio->vp_index = sp->fcport->vha->vp_idx;
1958}
1959
1960static void
1961qla2x00_adisc_iocb(srb_t *sp, struct mbx_entry *mbx)
1962{
1963        struct qla_hw_data *ha = sp->fcport->vha->hw;
1964
1965        mbx->entry_type = MBX_IOCB_TYPE;
1966        SET_TARGET_ID(ha, mbx->loop_id, sp->fcport->loop_id);
1967        mbx->mb0 = cpu_to_le16(MBC_GET_PORT_DATABASE);
1968        if (HAS_EXTENDED_IDS(ha)) {
1969                mbx->mb1 = cpu_to_le16(sp->fcport->loop_id);
1970                mbx->mb10 = cpu_to_le16(BIT_0);
1971        } else {
1972                mbx->mb1 = cpu_to_le16((sp->fcport->loop_id << 8) | BIT_0);
1973        }
1974        mbx->mb2 = cpu_to_le16(MSW(ha->async_pd_dma));
1975        mbx->mb3 = cpu_to_le16(LSW(ha->async_pd_dma));
1976        mbx->mb6 = cpu_to_le16(MSW(MSD(ha->async_pd_dma)));
1977        mbx->mb7 = cpu_to_le16(LSW(MSD(ha->async_pd_dma)));
1978        mbx->mb9 = cpu_to_le16(sp->fcport->vha->vp_idx);
1979}
1980
1981static void
1982qla24xx_tm_iocb(srb_t *sp, struct tsk_mgmt_entry *tsk)
1983{
1984        uint32_t flags;
1985        uint64_t lun;
1986        struct fc_port *fcport = sp->fcport;
1987        scsi_qla_host_t *vha = fcport->vha;
1988        struct qla_hw_data *ha = vha->hw;
1989        struct srb_iocb *iocb = &sp->u.iocb_cmd;
1990        struct req_que *req = vha->req;
1991
1992        flags = iocb->u.tmf.flags;
1993        lun = iocb->u.tmf.lun;
1994
1995        tsk->entry_type = TSK_MGMT_IOCB_TYPE;
1996        tsk->entry_count = 1;
1997        tsk->handle = MAKE_HANDLE(req->id, tsk->handle);
1998        tsk->nport_handle = cpu_to_le16(fcport->loop_id);
1999        tsk->timeout = cpu_to_le16(ha->r_a_tov / 10 * 2);
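            /*
             * Firmware timeout of 2 * R_A_TOV; r_a_tov is presumably kept
             * in 100 ms units, so dividing by 10 yields seconds.
             */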
2000        tsk->control_flags = cpu_to_le32(flags);
2001        tsk->port_id[0] = fcport->d_id.b.al_pa;
2002        tsk->port_id[1] = fcport->d_id.b.area;
2003        tsk->port_id[2] = fcport->d_id.b.domain;
2004        tsk->vp_index = fcport->vha->vp_idx;
2005
2006        if (flags == TCF_LUN_RESET) {
2007                int_to_scsilun(lun, &tsk->lun);
2008                host_to_fcp_swap((uint8_t *)&tsk->lun,
2009                        sizeof(tsk->lun));
2010        }
2011}
2012
2013static void
2014qla2x00_els_dcmd_sp_free(void *ptr, void *data)
2015{
2016        struct scsi_qla_host *vha = (scsi_qla_host_t *)ptr;
2017        struct qla_hw_data *ha = vha->hw;
2018        srb_t *sp = (srb_t *)data;
2019        struct srb_iocb *elsio = &sp->u.iocb_cmd;
2020
2021        kfree(sp->fcport);
2022
2023        if (elsio->u.els_logo.els_logo_pyld)
2024                dma_free_coherent(&ha->pdev->dev, DMA_POOL_SIZE,
2025                    elsio->u.els_logo.els_logo_pyld,
2026                    elsio->u.els_logo.els_logo_pyld_dma);
2027
2028        del_timer(&elsio->timer);
2029        qla2x00_rel_sp(vha, sp);
2030}
2031
2032static void
2033qla2x00_els_dcmd_iocb_timeout(void *data)
2034{
2035        srb_t *sp = (srb_t *)data;
2036        struct srb_iocb *lio = &sp->u.iocb_cmd;
2037        fc_port_t *fcport = sp->fcport;
2038        struct scsi_qla_host *vha = fcport->vha;
2039        struct qla_hw_data *ha = vha->hw;
2040        unsigned long flags = 0;
2041
2042        ql_dbg(ql_dbg_io, vha, 0x3069,
2043            "%s Timeout, hdl=%x, portid=%02x%02x%02x\n",
2044            sp->name, sp->handle, fcport->d_id.b.domain, fcport->d_id.b.area,
2045            fcport->d_id.b.al_pa);
2046
2047        /* Abort the exchange */
2048        spin_lock_irqsave(&ha->hardware_lock, flags);
2049        if (ha->isp_ops->abort_command(sp)) {
2050                ql_dbg(ql_dbg_io, vha, 0x3070,
2051                    "mbx abort_command failed.\n");
2052        } else {
2053                ql_dbg(ql_dbg_io, vha, 0x3071,
2054                    "mbx abort_command success.\n");
2055        }
2056        spin_unlock_irqrestore(&ha->hardware_lock, flags);
2057
2058        complete(&lio->u.els_logo.comp);
2059}
2060
2061static void
2062qla2x00_els_dcmd_sp_done(void *data, void *ptr, int res)
2063{
2064        srb_t *sp = (srb_t *)ptr;
2065        fc_port_t *fcport = sp->fcport;
2066        struct srb_iocb *lio = &sp->u.iocb_cmd;
2067        struct scsi_qla_host *vha = fcport->vha;
2068
2069        ql_dbg(ql_dbg_io, vha, 0x3072,
2070            "%s hdl=%x, portid=%02x%02x%02x done\n",
2071            sp->name, sp->handle, fcport->d_id.b.domain,
2072            fcport->d_id.b.area, fcport->d_id.b.al_pa);
2073
2074        complete(&lio->u.els_logo.comp);
2075}
2076
2077int
2078qla24xx_els_dcmd_iocb(scsi_qla_host_t *vha, int els_opcode,
2079    port_id_t remote_did)
2080{
2081        srb_t *sp;
2082        fc_port_t *fcport = NULL;
2083        struct srb_iocb *elsio = NULL;
2084        struct qla_hw_data *ha = vha->hw;
2085        struct els_logo_payload logo_pyld;
2086        int rval = QLA_SUCCESS;
2087
2088        fcport = qla2x00_alloc_fcport(vha, GFP_KERNEL);
2089        if (!fcport) {
2090                ql_log(ql_log_info, vha, 0x70e5, "fcport allocation failed\n");
2091                return -ENOMEM;
2092        }
2093
2094        /* Alloc SRB structure */
2095        sp = qla2x00_get_sp(vha, fcport, GFP_KERNEL);
2096        if (!sp) {
2097                kfree(fcport);
2098                ql_log(ql_log_info, vha, 0x70e6,
2099                    "SRB allocation failed\n");
2100                return -ENOMEM;
2101        }
2102
2103        elsio = &sp->u.iocb_cmd;
2104        fcport->loop_id = 0xFFFF;
2105        fcport->d_id.b.domain = remote_did.b.domain;
2106        fcport->d_id.b.area = remote_did.b.area;
2107        fcport->d_id.b.al_pa = remote_did.b.al_pa;
2108
2109        ql_dbg(ql_dbg_io, vha, 0x3073, "portid=%02x%02x%02x done\n",
2110            fcport->d_id.b.domain, fcport->d_id.b.area, fcport->d_id.b.al_pa);
2111
2112        sp->type = SRB_ELS_DCMD;
2113        sp->name = "ELS_DCMD";
2114        sp->fcport = fcport;
2115        qla2x00_init_timer(sp, ELS_DCMD_TIMEOUT);
2116        elsio->timeout = qla2x00_els_dcmd_iocb_timeout;
2117        sp->done = qla2x00_els_dcmd_sp_done;
2118        sp->free = qla2x00_els_dcmd_sp_free;
2119
2120        elsio->u.els_logo.els_logo_pyld = dma_alloc_coherent(&ha->pdev->dev,
2121                            DMA_POOL_SIZE, &elsio->u.els_logo.els_logo_pyld_dma,
2122                            GFP_KERNEL);
2123
2124        if (!elsio->u.els_logo.els_logo_pyld) {
2125                sp->free(vha, sp);
2126                return QLA_FUNCTION_FAILED;
2127        }
2128
2129        memset(&logo_pyld, 0, sizeof(struct els_logo_payload));
2130
2131        elsio->u.els_logo.els_cmd = els_opcode;
2132        logo_pyld.opcode = els_opcode;
2133        logo_pyld.s_id[0] = vha->d_id.b.al_pa;
2134        logo_pyld.s_id[1] = vha->d_id.b.area;
2135        logo_pyld.s_id[2] = vha->d_id.b.domain;
2136        host_to_fcp_swap(logo_pyld.s_id, sizeof(uint32_t));
2137        memcpy(&logo_pyld.wwpn, vha->port_name, WWN_SIZE);
2138
2139        memcpy(elsio->u.els_logo.els_logo_pyld, &logo_pyld,
2140            sizeof(struct els_logo_payload));
2141
2142        rval = qla2x00_start_sp(sp);
2143        if (rval != QLA_SUCCESS) {
2144                sp->free(vha, sp);
2145                return QLA_FUNCTION_FAILED;
2146        }
2147
2148        ql_dbg(ql_dbg_io, vha, 0x3074,
2149            "%s LOGO sent, hdl=%x, loopid=%x, portid=%02x%02x%02x.\n",
2150            sp->name, sp->handle, fcport->loop_id, fcport->d_id.b.domain,
2151            fcport->d_id.b.area, fcport->d_id.b.al_pa);
2152
2153        wait_for_completion(&elsio->u.els_logo.comp);
2154
2155        sp->free(vha, sp);
2156        return rval;
2157}
2158
2159static void
2160qla24xx_els_logo_iocb(srb_t *sp, struct els_entry_24xx *els_iocb)
2161{
2162        scsi_qla_host_t *vha = sp->fcport->vha;
2163        struct srb_iocb *elsio = &sp->u.iocb_cmd;
2164
2165        els_iocb->entry_type = ELS_IOCB_TYPE;
2166        els_iocb->entry_count = 1;
2167        els_iocb->sys_define = 0;
2168        els_iocb->entry_status = 0;
2169        els_iocb->handle = sp->handle;
2170        els_iocb->nport_handle = cpu_to_le16(sp->fcport->loop_id);
2171        els_iocb->tx_dsd_count = cpu_to_le16(1);
2172        els_iocb->vp_index = vha->vp_idx;
2173        els_iocb->sof_type = EST_SOFI3;
2174        els_iocb->rx_dsd_count = 0;
2175        els_iocb->opcode = elsio->u.els_logo.els_cmd;
2176
2177        els_iocb->port_id[0] = sp->fcport->d_id.b.al_pa;
2178        els_iocb->port_id[1] = sp->fcport->d_id.b.area;
2179        els_iocb->port_id[2] = sp->fcport->d_id.b.domain;
2180        els_iocb->control_flags = 0;
2181
2182        els_iocb->tx_byte_count = cpu_to_le32(sizeof(struct els_logo_payload));
2183        els_iocb->tx_address[0] =
2184            cpu_to_le32(LSD(elsio->u.els_logo.els_logo_pyld_dma));
2185        els_iocb->tx_address[1] =
2186            cpu_to_le32(MSD(elsio->u.els_logo.els_logo_pyld_dma));
2187        els_iocb->tx_len = cpu_to_le32(sizeof(struct els_logo_payload));
2188
2189        els_iocb->rx_byte_count = 0;
2190        els_iocb->rx_address[0] = 0;
2191        els_iocb->rx_address[1] = 0;
2192        els_iocb->rx_len = 0;
2193
2194        sp->fcport->vha->qla_stats.control_requests++;
2195}
2196
2197static void
2198qla24xx_els_iocb(srb_t *sp, struct els_entry_24xx *els_iocb)
2199{
2200        struct fc_bsg_job *bsg_job = sp->u.bsg_job;
2201
2202        els_iocb->entry_type = ELS_IOCB_TYPE;
2203        els_iocb->entry_count = 1;
2204        els_iocb->sys_define = 0;
2205        els_iocb->entry_status = 0;
2206        els_iocb->handle = sp->handle;
2207        els_iocb->nport_handle = cpu_to_le16(sp->fcport->loop_id);
2208        els_iocb->tx_dsd_count = cpu_to_le16(bsg_job->request_payload.sg_cnt);
2209        els_iocb->vp_index = sp->fcport->vha->vp_idx;
2210        els_iocb->sof_type = EST_SOFI3;
2211        els_iocb->rx_dsd_count = cpu_to_le16(bsg_job->reply_payload.sg_cnt);
2212
2213        els_iocb->opcode =
2214            sp->type == SRB_ELS_CMD_RPT ?
2215            bsg_job->request->rqst_data.r_els.els_code :
2216            bsg_job->request->rqst_data.h_els.command_code;
2217        els_iocb->port_id[0] = sp->fcport->d_id.b.al_pa;
2218        els_iocb->port_id[1] = sp->fcport->d_id.b.area;
2219        els_iocb->port_id[2] = sp->fcport->d_id.b.domain;
2220        els_iocb->control_flags = 0;
2221        els_iocb->rx_byte_count =
2222            cpu_to_le32(bsg_job->reply_payload.payload_len);
2223        els_iocb->tx_byte_count =
2224            cpu_to_le32(bsg_job->request_payload.payload_len);
2225
2226        els_iocb->tx_address[0] = cpu_to_le32(LSD(sg_dma_address
2227            (bsg_job->request_payload.sg_list)));
2228        els_iocb->tx_address[1] = cpu_to_le32(MSD(sg_dma_address
2229            (bsg_job->request_payload.sg_list)));
2230        els_iocb->tx_len = cpu_to_le32(sg_dma_len
2231            (bsg_job->request_payload.sg_list));
2232
2233        els_iocb->rx_address[0] = cpu_to_le32(LSD(sg_dma_address
2234            (bsg_job->reply_payload.sg_list)));
2235        els_iocb->rx_address[1] = cpu_to_le32(MSD(sg_dma_address
2236            (bsg_job->reply_payload.sg_list)));
2237        els_iocb->rx_len = cpu_to_le32(sg_dma_len
2238            (bsg_job->reply_payload.sg_list));
2239
2240        sp->fcport->vha->qla_stats.control_requests++;
2241}
2242
2243static void
2244qla2x00_ct_iocb(srb_t *sp, ms_iocb_entry_t *ct_iocb)
2245{
2246        uint16_t        avail_dsds;
2247        uint32_t        *cur_dsd;
2248        struct scatterlist *sg;
2249        int index;
2250        uint16_t tot_dsds;
2251        scsi_qla_host_t *vha = sp->fcport->vha;
2252        struct qla_hw_data *ha = vha->hw;
2253        struct fc_bsg_job *bsg_job = sp->u.bsg_job;
2254        int loop_iteration = 0;
2255        int entry_count = 1;
2256
2257        memset(ct_iocb, 0, sizeof(ms_iocb_entry_t));
2258        ct_iocb->entry_type = CT_IOCB_TYPE;
2259        ct_iocb->entry_status = 0;
2260        ct_iocb->handle1 = sp->handle;
2261        SET_TARGET_ID(ha, ct_iocb->loop_id, sp->fcport->loop_id);
2262        ct_iocb->status = cpu_to_le16(0);
2263        ct_iocb->control_flags = cpu_to_le16(0);
2264        ct_iocb->timeout = 0;
2265        ct_iocb->cmd_dsd_count =
2266            cpu_to_le16(bsg_job->request_payload.sg_cnt);
2267        ct_iocb->total_dsd_count =
2268            cpu_to_le16(bsg_job->request_payload.sg_cnt + 1);
2269        ct_iocb->req_bytecount =
2270            cpu_to_le32(bsg_job->request_payload.payload_len);
2271        ct_iocb->rsp_bytecount =
2272            cpu_to_le32(bsg_job->reply_payload.payload_len);
2273
2274        ct_iocb->dseg_req_address[0] = cpu_to_le32(LSD(sg_dma_address
2275            (bsg_job->request_payload.sg_list)));
2276        ct_iocb->dseg_req_address[1] = cpu_to_le32(MSD(sg_dma_address
2277            (bsg_job->request_payload.sg_list)));
2278        ct_iocb->dseg_req_length = ct_iocb->req_bytecount;
2279
2280        ct_iocb->dseg_rsp_address[0] = cpu_to_le32(LSD(sg_dma_address
2281            (bsg_job->reply_payload.sg_list)));
2282        ct_iocb->dseg_rsp_address[1] = cpu_to_le32(MSD(sg_dma_address
2283            (bsg_job->reply_payload.sg_list)));
2284        ct_iocb->dseg_rsp_length = ct_iocb->rsp_bytecount;
2285
2286        avail_dsds = 1;
2287        cur_dsd = (uint32_t *)ct_iocb->dseg_rsp_address;
2288        index = 0;
2289        tot_dsds = bsg_job->reply_payload.sg_cnt;
2290
2291        for_each_sg(bsg_job->reply_payload.sg_list, sg, tot_dsds, index) {
2292                dma_addr_t       sle_dma;
2293                cont_a64_entry_t *cont_pkt;
2294
2295                /* Allocate additional continuation packets? */
2296                if (avail_dsds == 0) {
2297                        /*
2298                         * Five DSDs are available in the Cont.
2299                         * Type 1 IOCB.
2300                         */
2301                        cont_pkt = qla2x00_prep_cont_type1_iocb(vha,
2302                            vha->hw->req_q_map[0]);
2303                        cur_dsd = (uint32_t *) cont_pkt->dseg_0_address;
2304                        avail_dsds = 5;
2305                        entry_count++;
2306                }
2307
2308                sle_dma = sg_dma_address(sg);
2309                *cur_dsd++   = cpu_to_le32(LSD(sle_dma));
2310                *cur_dsd++   = cpu_to_le32(MSD(sle_dma));
2311                *cur_dsd++   = cpu_to_le32(sg_dma_len(sg));
2312                loop_iteration++;
2313                avail_dsds--;
2314        }
2315        ct_iocb->entry_count = entry_count;
2316
2317        sp->fcport->vha->qla_stats.control_requests++;
2318}
2319
2320static void
2321qla24xx_ct_iocb(srb_t *sp, struct ct_entry_24xx *ct_iocb)
2322{
2323        uint16_t        avail_dsds;
2324        uint32_t        *cur_dsd;
2325        struct scatterlist *sg;
2326        int index;
2327        uint16_t tot_dsds;
2328        scsi_qla_host_t *vha = sp->fcport->vha;
2329        struct qla_hw_data *ha = vha->hw;
2330        struct fc_bsg_job *bsg_job = sp->u.bsg_job;
2331        int loop_iteration = 0;
2332        int entry_count = 1;
2333
2334        ct_iocb->entry_type = CT_IOCB_TYPE;
2335        ct_iocb->entry_status = 0;
2336        ct_iocb->sys_define = 0;
2337        ct_iocb->handle = sp->handle;
2338
2339        ct_iocb->nport_handle = cpu_to_le16(sp->fcport->loop_id);
2340        ct_iocb->vp_index = sp->fcport->vha->vp_idx;
2341        ct_iocb->comp_status = cpu_to_le16(0);
2342
2343        ct_iocb->cmd_dsd_count =
2344                cpu_to_le16(bsg_job->request_payload.sg_cnt);
2345        ct_iocb->timeout = 0;
2346        ct_iocb->rsp_dsd_count =
2347                cpu_to_le16(bsg_job->reply_payload.sg_cnt);
2348        ct_iocb->rsp_byte_count =
2349            cpu_to_le32(bsg_job->reply_payload.payload_len);
2350        ct_iocb->cmd_byte_count =
2351            cpu_to_le32(bsg_job->request_payload.payload_len);
2352        ct_iocb->dseg_0_address[0] = cpu_to_le32(LSD(sg_dma_address
2353            (bsg_job->request_payload.sg_list)));
2354        ct_iocb->dseg_0_address[1] = cpu_to_le32(MSD(sg_dma_address
2355           (bsg_job->request_payload.sg_list)));
2356        ct_iocb->dseg_0_len = cpu_to_le32(sg_dma_len
2357            (bsg_job->request_payload.sg_list));
2358
2359        avail_dsds = 1;
2360        cur_dsd = (uint32_t *)ct_iocb->dseg_1_address;
2361        index = 0;
2362        tot_dsds = bsg_job->reply_payload.sg_cnt;
2363
2364        for_each_sg(bsg_job->reply_payload.sg_list, sg, tot_dsds, index) {
2365                dma_addr_t       sle_dma;
2366                cont_a64_entry_t *cont_pkt;
2367
2368                /* Allocate additional continuation packets? */
2369                if (avail_dsds == 0) {
2370                        /*
2371                         * Five DSDs are available in the Cont.
2372                         * Type 1 IOCB.
2373                         */
2374                        cont_pkt = qla2x00_prep_cont_type1_iocb(vha,
2375                            ha->req_q_map[0]);
2376                        cur_dsd = (uint32_t *) cont_pkt->dseg_0_address;
2377                        avail_dsds = 5;
2378                        entry_count++;
2379                }
2380
2381                sle_dma = sg_dma_address(sg);
2382                *cur_dsd++   = cpu_to_le32(LSD(sle_dma));
2383                *cur_dsd++   = cpu_to_le32(MSD(sle_dma));
2384                *cur_dsd++   = cpu_to_le32(sg_dma_len(sg));
2385                loop_iteration++;
2386                avail_dsds--;
2387        }
2388        ct_iocb->entry_count = entry_count;
2389}
2390
2391/**
2392 * qla82xx_start_scsi() - Send a SCSI command to the ISP
2393 * @sp: command to send to the ISP
2394 *
2395 * Returns non-zero if a failure occurred, else zero.
2396 */
2397int
2398qla82xx_start_scsi(srb_t *sp)
2399{
2400        int             nseg;
2401        unsigned long   flags;
2402        struct scsi_cmnd *cmd;
2403        uint32_t        *clr_ptr;
2404        uint32_t        index;
2405        uint32_t        handle;
2406        uint16_t        cnt;
2407        uint16_t        req_cnt;
2408        uint16_t        tot_dsds;
2409        struct device_reg_82xx __iomem *reg;
2410        uint32_t dbval;
2411        uint32_t *fcp_dl;
2412        uint8_t additional_cdb_len;
2413        struct ct6_dsd *ctx;
2414        struct scsi_qla_host *vha = sp->fcport->vha;
2415        struct qla_hw_data *ha = vha->hw;
2416        struct req_que *req = NULL;
2417        struct rsp_que *rsp = NULL;
2418
2419        /* Setup device pointers. */
2420        reg = &ha->iobase->isp82;
2421        cmd = GET_CMD_SP(sp);
2422        req = vha->req;
2423        rsp = ha->rsp_q_map[0];
2424
2425        /* So we know we haven't pci_map'ed anything yet */
2426        tot_dsds = 0;
2427
2428        dbval = 0x04 | (ha->portnum << 5);
2429
2430        /* Send marker if required */
2431        if (vha->marker_needed != 0) {
2432                if (qla2x00_marker(vha, req,
2433                        rsp, 0, 0, MK_SYNC_ALL) != QLA_SUCCESS) {
2434                        ql_log(ql_log_warn, vha, 0x300c,
2435                            "qla2x00_marker failed for cmd=%p.\n", cmd);
2436                        return QLA_FUNCTION_FAILED;
2437                }
2438                vha->marker_needed = 0;
2439        }
2440
2441        /* Acquire ring specific lock */
2442        spin_lock_irqsave(&ha->hardware_lock, flags);
2443
2444        /* Check for room in outstanding command list. */
2445        handle = req->current_outstanding_cmd;
2446        for (index = 1; index < req->num_outstanding_cmds; index++) {
2447                handle++;
2448                if (handle == req->num_outstanding_cmds)
2449                        handle = 1;
2450                if (!req->outstanding_cmds[handle])
2451                        break;
2452        }
2453        if (index == req->num_outstanding_cmds)
2454                goto queuing_error;
2455
2456        /* Map the sg table so we have an accurate count of sg entries needed */
2457        if (scsi_sg_count(cmd)) {
2458                nseg = dma_map_sg(&ha->pdev->dev, scsi_sglist(cmd),
2459                    scsi_sg_count(cmd), cmd->sc_data_direction);
2460                if (unlikely(!nseg))
2461                        goto queuing_error;
2462        } else
2463                nseg = 0;
2464
2465        tot_dsds = nseg;
2466
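            /*
             * Large transfers use Command Type 6, which chains DSD lists
             * in host memory and carries the FCP_CMND IU in a separate
             * buffer; smaller transfers fall through to the inline
             * Command Type 7 path below.  ql2xshiftctondsd is the
             * module-parameter threshold.
             */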
2467        if (tot_dsds > ql2xshiftctondsd) {
2468                struct cmd_type_6 *cmd_pkt;
2469                uint16_t more_dsd_lists = 0;
2470                struct dsd_dma *dsd_ptr;
2471                uint16_t i;
2472
2473                more_dsd_lists = qla24xx_calc_dsd_lists(tot_dsds);
2474                if ((more_dsd_lists + ha->gbl_dsd_inuse) >= NUM_DSD_CHAIN) {
2475                        ql_dbg(ql_dbg_io, vha, 0x300d,
2476                            "Num of DSD lists %d is more than %d for cmd=%p.\n",
2477                            more_dsd_lists + ha->gbl_dsd_inuse, NUM_DSD_CHAIN,
2478                            cmd);
2479                        goto queuing_error;
2480                }
2481
2482                if (more_dsd_lists <= ha->gbl_dsd_avail)
2483                        goto sufficient_dsds;
2484                else
2485                        more_dsd_lists -= ha->gbl_dsd_avail;
2486
2487                for (i = 0; i < more_dsd_lists; i++) {
2488                        dsd_ptr = kzalloc(sizeof(struct dsd_dma), GFP_ATOMIC);
2489                        if (!dsd_ptr) {
2490                                ql_log(ql_log_fatal, vha, 0x300e,
2491                                    "Failed to allocate memory for dsd_dma "
2492                                    "for cmd=%p.\n", cmd);
2493                                goto queuing_error;
2494                        }
2495
2496                        dsd_ptr->dsd_addr = dma_pool_alloc(ha->dl_dma_pool,
2497                                GFP_ATOMIC, &dsd_ptr->dsd_list_dma);
2498                        if (!dsd_ptr->dsd_addr) {
2499                                kfree(dsd_ptr);
2500                                ql_log(ql_log_fatal, vha, 0x300f,
2501                                    "Failed to allocate memory for dsd_addr "
2502                                    "for cmd=%p.\n", cmd);
2503                                goto queuing_error;
2504                        }
2505                        list_add_tail(&dsd_ptr->list, &ha->gbl_dsd_list);
2506                        ha->gbl_dsd_avail++;
2507                }
2508
2509sufficient_dsds:
2510                req_cnt = 1;
2511
2512                if (req->cnt < (req_cnt + 2)) {
2513                        cnt = (uint16_t)RD_REG_DWORD_RELAXED(
2514                                &reg->req_q_out[0]);
2515                        if (req->ring_index < cnt)
2516                                req->cnt = cnt - req->ring_index;
2517                        else
2518                                req->cnt = req->length -
2519                                        (req->ring_index - cnt);
2520                        if (req->cnt < (req_cnt + 2))
2521                                goto queuing_error;
2522                }
2523
2524                ctx = sp->u.scmd.ctx =
2525                    mempool_alloc(ha->ctx_mempool, GFP_ATOMIC);
2526                if (!ctx) {
2527                        ql_log(ql_log_fatal, vha, 0x3010,
2528                            "Failed to allocate ctx for cmd=%p.\n", cmd);
2529                        goto queuing_error;
2530                }
2531
2532                memset(ctx, 0, sizeof(struct ct6_dsd));
2533                ctx->fcp_cmnd = dma_pool_alloc(ha->fcp_cmnd_dma_pool,
2534                        GFP_ATOMIC, &ctx->fcp_cmnd_dma);
2535                if (!ctx->fcp_cmnd) {
2536                        ql_log(ql_log_fatal, vha, 0x3011,
2537                            "Failed to allocate fcp_cmnd for cmd=%p.\n", cmd);
2538                        goto queuing_error;
2539                }
2540
2541                /* Initialize the DSD list and dma handle */
2542                INIT_LIST_HEAD(&ctx->dsd_list);
2543                ctx->dsd_use_cnt = 0;
2544
2545                if (cmd->cmd_len > 16) {
2546                        additional_cdb_len = cmd->cmd_len - 16;
2547                        if ((cmd->cmd_len % 4) != 0) {
2548                                /* SCSI command bigger than 16 bytes must be
2549                                 * multiple of 4
2550                                 */
2551                                ql_log(ql_log_warn, vha, 0x3012,
2552                                    "scsi cmd len %d not multiple of 4 "
2553                                    "for cmd=%p.\n", cmd->cmd_len, cmd);
2554                                goto queuing_error_fcp_cmnd;
2555                        }
2556                        ctx->fcp_cmnd_len = 12 + cmd->cmd_len + 4;
2557                } else {
2558                        additional_cdb_len = 0;
2559                        ctx->fcp_cmnd_len = 12 + 16 + 4;
2560                }
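                    /*
                     * FCP_CMND length: 8-byte LUN plus 4 control bytes,
                     * the CDB (16 bytes plus any additional), and the
                     * 4-byte FCP_DL appended after the CDB (see below).
                     */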
2561
2562                cmd_pkt = (struct cmd_type_6 *)req->ring_ptr;
2563                cmd_pkt->handle = MAKE_HANDLE(req->id, handle);
2564
2565                /* Zero out remaining portion of packet. */
2566                /*    tagged queuing modifier -- default is TSK_SIMPLE (0). */
2567                clr_ptr = (uint32_t *)cmd_pkt + 2;
2568                memset(clr_ptr, 0, REQUEST_ENTRY_SIZE - 8);
2569                cmd_pkt->dseg_count = cpu_to_le16(tot_dsds);
2570
2571                /* Set NPORT-ID and LUN number*/
2572                cmd_pkt->nport_handle = cpu_to_le16(sp->fcport->loop_id);
2573                cmd_pkt->port_id[0] = sp->fcport->d_id.b.al_pa;
2574                cmd_pkt->port_id[1] = sp->fcport->d_id.b.area;
2575                cmd_pkt->port_id[2] = sp->fcport->d_id.b.domain;
2576                cmd_pkt->vp_index = sp->fcport->vha->vp_idx;
2577
2578                /* Build IOCB segments */
2579                if (qla24xx_build_scsi_type_6_iocbs(sp, cmd_pkt, tot_dsds))
2580                        goto queuing_error_fcp_cmnd;
2581
2582                int_to_scsilun(cmd->device->lun, &cmd_pkt->lun);
2583                host_to_fcp_swap((uint8_t *)&cmd_pkt->lun, sizeof(cmd_pkt->lun));
2584
2585                /* build FCP_CMND IU */
2586                memset(ctx->fcp_cmnd, 0, sizeof(struct fcp_cmnd));
2587                int_to_scsilun(cmd->device->lun, &ctx->fcp_cmnd->lun);
2588                ctx->fcp_cmnd->additional_cdb_len = additional_cdb_len;
2589
2590                if (cmd->sc_data_direction == DMA_TO_DEVICE)
2591                        ctx->fcp_cmnd->additional_cdb_len |= 1;
2592                else if (cmd->sc_data_direction == DMA_FROM_DEVICE)
2593                        ctx->fcp_cmnd->additional_cdb_len |= 2;
2594
2595                /* Populate the FCP_PRIO. */
2596                if (ha->flags.fcp_prio_enabled)
2597                        ctx->fcp_cmnd->task_attribute |=
2598                            sp->fcport->fcp_prio << 3;
2599
2600                memcpy(ctx->fcp_cmnd->cdb, cmd->cmnd, cmd->cmd_len);
2601
2602                fcp_dl = (uint32_t *)(ctx->fcp_cmnd->cdb + 16 +
2603                    additional_cdb_len);
2604                *fcp_dl = htonl((uint32_t)scsi_bufflen(cmd));
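                    /* FCP_DL is big-endian on the wire, hence htonl(). */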
2605
2606                cmd_pkt->fcp_cmnd_dseg_len = cpu_to_le16(ctx->fcp_cmnd_len);
2607                cmd_pkt->fcp_cmnd_dseg_address[0] =
2608                    cpu_to_le32(LSD(ctx->fcp_cmnd_dma));
2609                cmd_pkt->fcp_cmnd_dseg_address[1] =
2610                    cpu_to_le32(MSD(ctx->fcp_cmnd_dma));
2611
2612                sp->flags |= SRB_FCP_CMND_DMA_VALID;
2613                cmd_pkt->byte_count = cpu_to_le32((uint32_t)scsi_bufflen(cmd));
2614                /* Set total data segment count. */
2615                cmd_pkt->entry_count = (uint8_t)req_cnt;
2616                /* Specify response queue number where
2617                 * completion should happen
2618                 */
2619                cmd_pkt->entry_status = (uint8_t) rsp->id;
2620        } else {
2621                struct cmd_type_7 *cmd_pkt;
2622                req_cnt = qla24xx_calc_iocbs(vha, tot_dsds);
2623                if (req->cnt < (req_cnt + 2)) {
2624                        cnt = (uint16_t)RD_REG_DWORD_RELAXED(
2625                            &reg->req_q_out[0]);
2626                        if (req->ring_index < cnt)
2627                                req->cnt = cnt - req->ring_index;
2628                        else
2629                                req->cnt = req->length -
2630                                        (req->ring_index - cnt);
2631                }
2632                if (req->cnt < (req_cnt + 2))
2633                        goto queuing_error;
2634
2635                cmd_pkt = (struct cmd_type_7 *)req->ring_ptr;
2636                cmd_pkt->handle = MAKE_HANDLE(req->id, handle);
2637
2638                /* Zero out remaining portion of packet. */
2639                /* tagged queuing modifier -- default is TSK_SIMPLE (0).*/
2640                clr_ptr = (uint32_t *)cmd_pkt + 2;
2641                memset(clr_ptr, 0, REQUEST_ENTRY_SIZE - 8);
2642                cmd_pkt->dseg_count = cpu_to_le16(tot_dsds);
2643
2644                /* Set NPORT-ID and LUN number*/
2645                cmd_pkt->nport_handle = cpu_to_le16(sp->fcport->loop_id);
2646                cmd_pkt->port_id[0] = sp->fcport->d_id.b.al_pa;
2647                cmd_pkt->port_id[1] = sp->fcport->d_id.b.area;
2648                cmd_pkt->port_id[2] = sp->fcport->d_id.b.domain;
2649                cmd_pkt->vp_index = sp->fcport->vha->vp_idx;
2650
2651                int_to_scsilun(cmd->device->lun, &cmd_pkt->lun);
2652                host_to_fcp_swap((uint8_t *)&cmd_pkt->lun,
2653                    sizeof(cmd_pkt->lun));
2654
2655                /* Populate the FCP_PRIO. */
2656                if (ha->flags.fcp_prio_enabled)
2657                        cmd_pkt->task |= sp->fcport->fcp_prio << 3;
2658
2659                /* Load SCSI command packet. */
2660                memcpy(cmd_pkt->fcp_cdb, cmd->cmnd, cmd->cmd_len);
2661                host_to_fcp_swap(cmd_pkt->fcp_cdb, sizeof(cmd_pkt->fcp_cdb));
2662
2663                cmd_pkt->byte_count = cpu_to_le32((uint32_t)scsi_bufflen(cmd));
2664
2665                /* Build IOCB segments */
2666                qla24xx_build_scsi_iocbs(sp, cmd_pkt, tot_dsds);
2667
2668                /* Set total data segment count. */
2669                cmd_pkt->entry_count = (uint8_t)req_cnt;
2670                /* Specify response queue number where
2671                 * completion should happen.
2672                 */
2673                cmd_pkt->entry_status = (uint8_t) rsp->id;
2674
2675        }
2676        /* Build command packet. */
2677        req->current_outstanding_cmd = handle;
2678        req->outstanding_cmds[handle] = sp;
2679        sp->handle = handle;
2680        cmd->host_scribble = (unsigned char *)(unsigned long)handle;
2681        req->cnt -= req_cnt;
2682        wmb();
2683
2684        /* Adjust ring index. */
2685        req->ring_index++;
2686        if (req->ring_index == req->length) {
2687                req->ring_index = 0;
2688                req->ring_ptr = req->ring;
2689        } else
2690                req->ring_ptr++;
2691
2692        sp->flags |= SRB_DMA_VALID;
2693
2694        /* Set chip new ring index. */
2695        /* Write the doorbell; read it back until the chip has latched it. */
2696        dbval = dbval | (req->id << 8) | (req->ring_index << 16);
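            /*
             * The doorbell value packs the command (0x04), port number,
             * request queue id, and new ring index into one 32-bit write.
             */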
2697        if (ql2xdbwr)
2698                qla82xx_wr_32(ha, (uintptr_t __force)ha->nxdb_wr_ptr, dbval);
2699        else {
2700                WRT_REG_DWORD(ha->nxdb_wr_ptr, dbval);
2701                wmb();
2702                while (RD_REG_DWORD(ha->nxdb_rd_ptr) != dbval) {
2703                        WRT_REG_DWORD(ha->nxdb_wr_ptr, dbval);
2704                        wmb();
2705                }
2706        }
2707
2708        /* Manage unprocessed RIO/ZIO commands in response queue. */
2709        if (vha->flags.process_response_queue &&
2710            rsp->ring_ptr->signature != RESPONSE_PROCESSED)
2711                qla24xx_process_response_queue(vha, rsp);
2712
2713        spin_unlock_irqrestore(&ha->hardware_lock, flags);
2714        return QLA_SUCCESS;
2715
2716queuing_error_fcp_cmnd:
2717        dma_pool_free(ha->fcp_cmnd_dma_pool, ctx->fcp_cmnd, ctx->fcp_cmnd_dma);
2718queuing_error:
2719        if (tot_dsds)
2720                scsi_dma_unmap(cmd);
2721
2722        if (sp->u.scmd.ctx) {
2723                mempool_free(sp->u.scmd.ctx, ha->ctx_mempool);
2724                sp->u.scmd.ctx = NULL;
2725        }
2726        spin_unlock_irqrestore(&ha->hardware_lock, flags);
2727
2728        return QLA_FUNCTION_FAILED;
2729}
2730
2731static void
2732qla24xx_abort_iocb(srb_t *sp, struct abort_entry_24xx *abt_iocb)
2733{
2734        struct srb_iocb *aio = &sp->u.iocb_cmd;
2735        scsi_qla_host_t *vha = sp->fcport->vha;
2736        struct req_que *req = vha->req;
2737
2738        memset(abt_iocb, 0, sizeof(struct abort_entry_24xx));
2739        abt_iocb->entry_type = ABORT_IOCB_TYPE;
2740        abt_iocb->entry_count = 1;
2741        abt_iocb->handle = cpu_to_le32(MAKE_HANDLE(req->id, sp->handle));
2742        abt_iocb->nport_handle = cpu_to_le16(sp->fcport->loop_id);
2743        abt_iocb->handle_to_abort =
2744            cpu_to_le32(MAKE_HANDLE(req->id, aio->u.abt.cmd_hndl));
2745        abt_iocb->port_id[0] = sp->fcport->d_id.b.al_pa;
2746        abt_iocb->port_id[1] = sp->fcport->d_id.b.area;
2747        abt_iocb->port_id[2] = sp->fcport->d_id.b.domain;
2748        abt_iocb->vp_index = vha->vp_idx;
2749        abt_iocb->req_que_no = cpu_to_le16(req->id);
2750        /* Order the IOCB writes before the caller rings the doorbell. */
2751        wmb();
2752}
2753
2754int
2755qla2x00_start_sp(srb_t *sp)
2756{
2757        int rval;
2758        struct qla_hw_data *ha = sp->fcport->vha->hw;
2759        void *pkt;
2760        unsigned long flags;
2761
2762        rval = QLA_FUNCTION_FAILED;
2763        spin_lock_irqsave(&ha->hardware_lock, flags);
2764        pkt = qla2x00_alloc_iocbs(sp->fcport->vha, sp);
2765        if (!pkt) {
2766                ql_log(ql_log_warn, sp->fcport->vha, 0x700c,
2767                    "qla2x00_alloc_iocbs failed.\n");
2768                goto done;
2769        }
2770
2771        rval = QLA_SUCCESS;
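            /*
             * Dispatch on SRB type: each case fills the freshly allocated
             * IOCB via the matching builder, selecting the FWI-2 (24xx
             * and later) or legacy variant at runtime.
             */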
2772        switch (sp->type) {
2773        case SRB_LOGIN_CMD:
2774                IS_FWI2_CAPABLE(ha) ?
2775                    qla24xx_login_iocb(sp, pkt) :
2776                    qla2x00_login_iocb(sp, pkt);
2777                break;
2778        case SRB_LOGOUT_CMD:
2779                IS_FWI2_CAPABLE(ha) ?
2780                    qla24xx_logout_iocb(sp, pkt) :
2781                    qla2x00_logout_iocb(sp, pkt);
2782                break;
2783        case SRB_ELS_CMD_RPT:
2784        case SRB_ELS_CMD_HST:
2785                qla24xx_els_iocb(sp, pkt);
2786                break;
2787        case SRB_CT_CMD:
2788                IS_FWI2_CAPABLE(ha) ?
2789                    qla24xx_ct_iocb(sp, pkt) :
2790                    qla2x00_ct_iocb(sp, pkt);
2791                break;
2792        case SRB_ADISC_CMD:
2793                IS_FWI2_CAPABLE(ha) ?
2794                    qla24xx_adisc_iocb(sp, pkt) :
2795                    qla2x00_adisc_iocb(sp, pkt);
2796                break;
2797        case SRB_TM_CMD:
2798                IS_QLAFX00(ha) ?
2799                    qlafx00_tm_iocb(sp, pkt) :
2800                    qla24xx_tm_iocb(sp, pkt);
2801                break;
2802        case SRB_FXIOCB_DCMD:
2803        case SRB_FXIOCB_BCMD:
2804                qlafx00_fxdisc_iocb(sp, pkt);
2805                break;
2806        case SRB_ABT_CMD:
2807                IS_QLAFX00(ha) ?
2808                        qlafx00_abort_iocb(sp, pkt) :
2809                        qla24xx_abort_iocb(sp, pkt);
2810                break;
2811        case SRB_ELS_DCMD:
2812                qla24xx_els_logo_iocb(sp, pkt);
2813                break;
2814        default:
2815                break;
2816        }
2817
2818        wmb();
2819        qla2x00_start_iocbs(sp->fcport->vha, ha->req_q_map[0]);
2820done:
2821        spin_unlock_irqrestore(&ha->hardware_lock, flags);
2822        return rval;
2823}
2824
2825static void
2826qla25xx_build_bidir_iocb(srb_t *sp, struct scsi_qla_host *vha,
2827                                struct cmd_bidir *cmd_pkt, uint32_t tot_dsds)
2828{
2829        uint16_t avail_dsds;
2830        uint32_t *cur_dsd;
2831        uint32_t req_data_len = 0;
2832        uint32_t rsp_data_len = 0;
2833        struct scatterlist *sg;
2834        int index;
2835        int entry_count = 1;
2836        struct fc_bsg_job *bsg_job = sp->u.bsg_job;
2837
2838        /* Update entry type to indicate bidir command. */
2839        *((uint32_t *)(&cmd_pkt->entry_type)) =
2840                cpu_to_le32(COMMAND_BIDIRECTIONAL);
2841
2842        /* Set the transfer direction; in this case set both flags.
2843         * Also set the BD_WRAP_BACK flag; the firmware takes care of
2844         * assigning DID=SID for outgoing pkts.
2845         */
2846        cmd_pkt->wr_dseg_count = cpu_to_le16(bsg_job->request_payload.sg_cnt);
2847        cmd_pkt->rd_dseg_count = cpu_to_le16(bsg_job->reply_payload.sg_cnt);
2848        cmd_pkt->control_flags = cpu_to_le16(BD_WRITE_DATA | BD_READ_DATA |
2849                                                        BD_WRAP_BACK);
2850
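            /*
             * With BD_WRAP_BACK the firmware loops the written data back
             * to the reader, which is presumably why both byte counts are
             * programmed from the request payload length.
             */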
2851        req_data_len = rsp_data_len = bsg_job->request_payload.payload_len;
2852        cmd_pkt->wr_byte_count = cpu_to_le32(req_data_len);
2853        cmd_pkt->rd_byte_count = cpu_to_le32(rsp_data_len);
2854        cmd_pkt->timeout = cpu_to_le16(qla2x00_get_async_timeout(vha) + 2);
2855
2856        vha->bidi_stats.transfer_bytes += req_data_len;
2857        vha->bidi_stats.io_count++;
2858
2859        vha->qla_stats.output_bytes += req_data_len;
2860        vha->qla_stats.output_requests++;
2861
2862        /* Only one DSD is available in the bidirectional IOCB itself; the
2863         * remaining DSDs are bundled into continuation IOCBs.
2864         */
2865        avail_dsds = 1;
2866        cur_dsd = (uint32_t *)&cmd_pkt->fcp_data_dseg_address;
2867
2868        index = 0;
2869
2870        for_each_sg(bsg_job->request_payload.sg_list, sg,
2871                                bsg_job->request_payload.sg_cnt, index) {
2872                dma_addr_t sle_dma;
2873                cont_a64_entry_t *cont_pkt;
2874
2875                /* Allocate additional continuation packets */
2876                if (avail_dsds == 0) {
2877                        /* A Continuation Type 1 IOCB can accommodate
2878                         * 5 DSDs.
2879                         */
2880                        cont_pkt = qla2x00_prep_cont_type1_iocb(vha, vha->req);
2881                        cur_dsd = (uint32_t *) cont_pkt->dseg_0_address;
2882                        avail_dsds = 5;
2883                        entry_count++;
2884                }
2885                sle_dma = sg_dma_address(sg);
2886                *cur_dsd++   = cpu_to_le32(LSD(sle_dma));
2887                *cur_dsd++   = cpu_to_le32(MSD(sle_dma));
2888                *cur_dsd++   = cpu_to_le32(sg_dma_len(sg));
2889                avail_dsds--;
2890        }
2891        /* The DSDs for the read request always go to a continuation IOCB,
2892         * following the write DSDs.  If there is room in the current IOCB
2893         * they are added there; otherwise a new continuation IOCB is
2894         * allocated.
2895         */
2896        for_each_sg(bsg_job->reply_payload.sg_list, sg,
2897                                bsg_job->reply_payload.sg_cnt, index) {
2898                dma_addr_t sle_dma;
2899                cont_a64_entry_t *cont_pkt;
2900
2901                /* Allocate additional continuation packets */
2902                if (avail_dsds == 0) {
2903                        /* A Continuation Type 1 IOCB can accommodate
2904                         * 5 DSDs.
2905                         */
2906                        cont_pkt = qla2x00_prep_cont_type1_iocb(vha, vha->req);
2907                        cur_dsd = (uint32_t *) cont_pkt->dseg_0_address;
2908                        avail_dsds = 5;
2909                        entry_count++;
2910                }
2911                sle_dma = sg_dma_address(sg);
2912                *cur_dsd++   = cpu_to_le32(LSD(sle_dma));
2913                *cur_dsd++   = cpu_to_le32(MSD(sle_dma));
2914                *cur_dsd++   = cpu_to_le32(sg_dma_len(sg));
2915                avail_dsds--;
2916        }
2917        /* This value should equal the number of IOCBs used for this command. */
2918        cmd_pkt->entry_count = entry_count;
2919}
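
/*
 * Editorial sketch, not driver code: with one DSD in the bidirectional
 * command IOCB and five per Continuation Type 1 IOCB, the entry_count
 * accumulated by the two loops above works out to
 * 1 + ceil((tot_dsds - 1) / 5) for tot_dsds >= 1.  A hypothetical
 * helper (qla25xx_bidir_entry_count is an illustrative name) making
 * that arithmetic explicit:
 */
#if 0   /* illustrative only */
static uint16_t qla25xx_bidir_entry_count(uint16_t tot_dsds)
{
        uint16_t entries = 1;           /* the command IOCB itself */

        if (tot_dsds > 1)
                entries += DIV_ROUND_UP(tot_dsds - 1, 5);
        return entries;
}
/* e.g. 1 DSD -> 1 entry, 6 DSDs -> 2 entries, 11 DSDs -> 3 entries. */
#endif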
2920
2921int
2922qla2x00_start_bidir(srb_t *sp, struct scsi_qla_host *vha, uint32_t tot_dsds)
2923{
2924
2925        struct qla_hw_data *ha = vha->hw;
2926        unsigned long flags;
2927        uint32_t handle;
2928        uint32_t index;
2929        uint16_t req_cnt;
2930        uint16_t cnt;
2931        uint32_t *clr_ptr;
2932        struct cmd_bidir *cmd_pkt = NULL;
2933        struct rsp_que *rsp;
2934        struct req_que *req;
2935        int rval = EXT_STATUS_OK;
2936
2937        rval = QLA_SUCCESS;     /* same value (0) as EXT_STATUS_OK */
2938
2939        rsp = ha->rsp_q_map[0];
2940        req = vha->req;
2941
2942        /* Send marker if required */
2943        if (vha->marker_needed != 0) {
2944                if (qla2x00_marker(vha, req,
2945                        rsp, 0, 0, MK_SYNC_ALL) != QLA_SUCCESS)
2946                        return EXT_STATUS_MAILBOX;
2947                vha->marker_needed = 0;
2948        }
2949
2950        /* Acquire ring specific lock */
2951        spin_lock_irqsave(&ha->hardware_lock, flags);
2952
2953        /* Check for room in outstanding command list. */
2954        handle = req->current_outstanding_cmd;
2955        for (index = 1; index < req->num_outstanding_cmds; index++) {
2956                handle++;
2957                if (handle == req->num_outstanding_cmds)
2958                        handle = 1;
2959                if (!req->outstanding_cmds[handle])
2960                        break;
2961        }
2962
2963        if (index == req->num_outstanding_cmds) {
2964                rval = EXT_STATUS_BUSY;
2965                goto queuing_error;
2966        }
2967
2968        /* Calculate the number of IOCBs required. */
2969        req_cnt = qla24xx_calc_iocbs(vha, tot_dsds);
2970
2971        /* Check for room on request queue. */
2972        if (req->cnt < req_cnt + 2) {
2973                cnt = IS_SHADOW_REG_CAPABLE(ha) ? *req->out_ptr :
2974                    RD_REG_DWORD_RELAXED(req->req_q_out);
2975                if (req->ring_index < cnt)
2976                        req->cnt = cnt - req->ring_index;
2977                else
2978                        req->cnt = req->length -
2979                                (req->ring_index - cnt);
2980        }
2981        if (req->cnt < req_cnt + 2) {
2982                rval = EXT_STATUS_BUSY;
2983                goto queuing_error;
2984        }
2985
2986        cmd_pkt = (struct cmd_bidir *)req->ring_ptr;
2987        cmd_pkt->handle = MAKE_HANDLE(req->id, handle);
2988
2989        /* Zero out remaining portion of packet. */
2990        /* tagged queuing modifier -- default is TSK_SIMPLE (0).*/
2991        clr_ptr = (uint32_t *)cmd_pkt + 2;
2992        memset(clr_ptr, 0, REQUEST_ENTRY_SIZE - 8);
2993
2994        /* Set the NPORT-ID (of the vha). */
2995        cmd_pkt->nport_handle = cpu_to_le16(vha->self_login_loop_id);
2996        cmd_pkt->port_id[0] = vha->d_id.b.al_pa;
2997        cmd_pkt->port_id[1] = vha->d_id.b.area;
2998        cmd_pkt->port_id[2] = vha->d_id.b.domain;
2999
3000        qla25xx_build_bidir_iocb(sp, vha, cmd_pkt, tot_dsds);
3001        cmd_pkt->entry_status = (uint8_t) rsp->id;
3002        /* Record the command in the outstanding command list. */
3003        req->current_outstanding_cmd = handle;
3004        req->outstanding_cmds[handle] = sp;
3005        sp->handle = handle;
3006        req->cnt -= req_cnt;
3007
3008        /* Send the command to the firmware */
3009        wmb();
3010        qla2x00_start_iocbs(vha, req);
3011queuing_error:
3012        spin_unlock_irqrestore(&ha->hardware_lock, flags);
3013        return rval;
3014}
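
/*
 * Editorial sketch, not driver code: the free-space refresh in
 * qla2x00_start_bidir() is the standard circular-ring computation --
 * when the consumer (out) index read back from the hardware is ahead
 * of the producer (ring_index), the free span is out - in; otherwise
 * it wraps around the end of the ring.  As a hypothetical helper
 * (ring_free_entries is an illustrative name):
 */
#if 0   /* illustrative only */
static uint16_t ring_free_entries(uint16_t in, uint16_t out, uint16_t length)
{
        return (in < out) ? (out - in) : (length - (in - out));
}
/* The code above then insists on req_cnt + 2 free entries, keeping two
 * entries of slack so the producer never catches the consumer. */
#endif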
3015