linux/drivers/scsi/qla2xxx/qla_iocb.c
/*
 * QLogic Fibre Channel HBA Driver
 * Copyright (c)  2003-2014 QLogic Corporation
 *
 * See LICENSE.qla2xxx for copyright and licensing details.
 */
#include "qla_def.h"
#include "qla_target.h"

#include <linux/blkdev.h>
#include <linux/delay.h>

#include <scsi/scsi_tcq.h>

/**
 * qla2x00_get_cmd_direction() - Determine control_flag data direction.
 * @sp: SCSI command
 *
 * Returns the proper CF_* direction based on CDB.
 */
static inline uint16_t
qla2x00_get_cmd_direction(srb_t *sp)
{
        uint16_t cflags;
        struct scsi_cmnd *cmd = GET_CMD_SP(sp);
        struct scsi_qla_host *vha = sp->vha;

        cflags = 0;

        /* Set transfer direction */
        if (cmd->sc_data_direction == DMA_TO_DEVICE) {
                cflags = CF_WRITE;
                vha->qla_stats.output_bytes += scsi_bufflen(cmd);
                vha->qla_stats.output_requests++;
        } else if (cmd->sc_data_direction == DMA_FROM_DEVICE) {
                cflags = CF_READ;
                vha->qla_stats.input_bytes += scsi_bufflen(cmd);
                vha->qla_stats.input_requests++;
        }
        return (cflags);
}

/**
 * qla2x00_calc_iocbs_32() - Determine number of Command Type 2 and
 * Continuation Type 0 IOCBs to allocate.
 *
 * @dsds: number of data segment descriptors needed
 *
 * Returns the number of IOCB entries needed to store @dsds.
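 * For example, 17 DSDs fit as three in the command entry plus two
 * Continuation Type 0 entries of seven each, i.e. three IOCBs.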
 */
uint16_t
qla2x00_calc_iocbs_32(uint16_t dsds)
{
        uint16_t iocbs;

        iocbs = 1;
        if (dsds > 3) {
                iocbs += (dsds - 3) / 7;
                if ((dsds - 3) % 7)
                        iocbs++;
        }
        return (iocbs);
}

/**
 * qla2x00_calc_iocbs_64() - Determine number of Command Type 3 and
 * Continuation Type 1 IOCBs to allocate.
 *
 * @dsds: number of data segment descriptors needed
 *
 * Returns the number of IOCB entries needed to store @dsds.
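 * For example, 12 DSDs fit as two in the command entry plus two
 * Continuation Type 1 entries of five each, i.e. three IOCBs.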
 */
uint16_t
qla2x00_calc_iocbs_64(uint16_t dsds)
{
        uint16_t iocbs;

        iocbs = 1;
        if (dsds > 2) {
                iocbs += (dsds - 2) / 5;
                if ((dsds - 2) % 5)
                        iocbs++;
        }
        return (iocbs);
}

/**
 * qla2x00_prep_cont_type0_iocb() - Initialize a Continuation Type 0 IOCB.
 * @vha: HA context
 *
 * Returns a pointer to the Continuation Type 0 IOCB packet.
 */
static inline cont_entry_t *
qla2x00_prep_cont_type0_iocb(struct scsi_qla_host *vha)
{
        cont_entry_t *cont_pkt;
        struct req_que *req = vha->req;
        /* Adjust ring index. */
        req->ring_index++;
        if (req->ring_index == req->length) {
                req->ring_index = 0;
                req->ring_ptr = req->ring;
        } else {
                req->ring_ptr++;
        }

        cont_pkt = (cont_entry_t *)req->ring_ptr;

        /* Load packet defaults. */
        put_unaligned_le32(CONTINUE_TYPE, &cont_pkt->entry_type);

        return (cont_pkt);
}

/**
 * qla2x00_prep_cont_type1_iocb() - Initialize a Continuation Type 1 IOCB.
 * @vha: HA context
 * @req: request queue
 *
 * Returns a pointer to the Continuation Type 1 IOCB packet.
 */
cont_a64_entry_t *
qla2x00_prep_cont_type1_iocb(scsi_qla_host_t *vha, struct req_que *req)
{
        cont_a64_entry_t *cont_pkt;

        /* Adjust ring index. */
        req->ring_index++;
        if (req->ring_index == req->length) {
                req->ring_index = 0;
                req->ring_ptr = req->ring;
        } else {
                req->ring_ptr++;
        }

        cont_pkt = (cont_a64_entry_t *)req->ring_ptr;

        /* Load packet defaults. */
        put_unaligned_le32(IS_QLAFX00(vha->hw) ? CONTINUE_A64_TYPE_FX00 :
                           CONTINUE_A64_TYPE, &cont_pkt->entry_type);

        return (cont_pkt);
}

inline int
qla24xx_configure_prot_mode(srb_t *sp, uint16_t *fw_prot_opts)
{
        struct scsi_cmnd *cmd = GET_CMD_SP(sp);
        uint8_t guard = scsi_host_get_guard(cmd->device->host);

        /* We always use DIF bundling for best performance */
        *fw_prot_opts = 0;

        /* Translate SCSI opcode to a protection opcode */
        switch (scsi_get_prot_op(cmd)) {
        case SCSI_PROT_READ_STRIP:
                *fw_prot_opts |= PO_MODE_DIF_REMOVE;
                break;
        case SCSI_PROT_WRITE_INSERT:
                *fw_prot_opts |= PO_MODE_DIF_INSERT;
                break;
        case SCSI_PROT_READ_INSERT:
                *fw_prot_opts |= PO_MODE_DIF_INSERT;
                break;
        case SCSI_PROT_WRITE_STRIP:
                *fw_prot_opts |= PO_MODE_DIF_REMOVE;
                break;
        case SCSI_PROT_READ_PASS:
        case SCSI_PROT_WRITE_PASS:
                if (guard & SHOST_DIX_GUARD_IP)
                        *fw_prot_opts |= PO_MODE_DIF_TCP_CKSUM;
                else
                        *fw_prot_opts |= PO_MODE_DIF_PASS;
                break;
        default:        /* Normal Request */
                *fw_prot_opts |= PO_MODE_DIF_PASS;
                break;
        }

        return scsi_prot_sg_count(cmd);
}

/**
 * qla2x00_build_scsi_iocbs_32() - Build IOCB command utilizing 32bit
 * capable IOCB types.
 *
 * @sp: SRB command to process
 * @cmd_pkt: Command type 2 IOCB
 * @tot_dsds: Total number of segments to transfer
 */
void qla2x00_build_scsi_iocbs_32(srb_t *sp, cmd_entry_t *cmd_pkt,
    uint16_t tot_dsds)
{
        uint16_t        avail_dsds;
        struct dsd32    *cur_dsd;
        scsi_qla_host_t *vha;
        struct scsi_cmnd *cmd;
        struct scatterlist *sg;
        int i;

        cmd = GET_CMD_SP(sp);

        /* Update entry type to indicate Command Type 2 IOCB */
        put_unaligned_le32(COMMAND_TYPE, &cmd_pkt->entry_type);

        /* No data transfer */
        if (!scsi_bufflen(cmd) || cmd->sc_data_direction == DMA_NONE) {
                cmd_pkt->byte_count = cpu_to_le32(0);
                return;
        }

        vha = sp->vha;
        cmd_pkt->control_flags |= cpu_to_le16(qla2x00_get_cmd_direction(sp));

        /* Three DSDs are available in the Command Type 2 IOCB */
        avail_dsds = ARRAY_SIZE(cmd_pkt->dsd32);
        cur_dsd = cmd_pkt->dsd32;

        /* Load data segments */
        scsi_for_each_sg(cmd, sg, tot_dsds, i) {
                cont_entry_t *cont_pkt;

                /* Allocate additional continuation packets? */
                if (avail_dsds == 0) {
                        /*
                         * Seven DSDs are available in the Continuation
                         * Type 0 IOCB.
                         */
                        cont_pkt = qla2x00_prep_cont_type0_iocb(vha);
                        cur_dsd = cont_pkt->dsd;
                        avail_dsds = ARRAY_SIZE(cont_pkt->dsd);
                }

                append_dsd32(&cur_dsd, sg);
                avail_dsds--;
        }
}

/**
 * qla2x00_build_scsi_iocbs_64() - Build IOCB command utilizing 64bit
 * capable IOCB types.
 *
 * @sp: SRB command to process
 * @cmd_pkt: Command type 3 IOCB
 * @tot_dsds: Total number of segments to transfer
 */
void qla2x00_build_scsi_iocbs_64(srb_t *sp, cmd_entry_t *cmd_pkt,
    uint16_t tot_dsds)
{
        uint16_t        avail_dsds;
        struct dsd64    *cur_dsd;
        scsi_qla_host_t *vha;
        struct scsi_cmnd *cmd;
        struct scatterlist *sg;
        int i;

        cmd = GET_CMD_SP(sp);

        /* Update entry type to indicate Command Type 3 IOCB */
        put_unaligned_le32(COMMAND_A64_TYPE, &cmd_pkt->entry_type);

        /* No data transfer */
        if (!scsi_bufflen(cmd) || cmd->sc_data_direction == DMA_NONE) {
                cmd_pkt->byte_count = cpu_to_le32(0);
                return;
        }

        vha = sp->vha;
        cmd_pkt->control_flags |= cpu_to_le16(qla2x00_get_cmd_direction(sp));

        /* Two DSDs are available in the Command Type 3 IOCB */
        avail_dsds = ARRAY_SIZE(cmd_pkt->dsd64);
        cur_dsd = cmd_pkt->dsd64;

        /* Load data segments */
        scsi_for_each_sg(cmd, sg, tot_dsds, i) {
                cont_a64_entry_t *cont_pkt;

                /* Allocate additional continuation packets? */
                if (avail_dsds == 0) {
                        /*
                         * Five DSDs are available in the Continuation
                         * Type 1 IOCB.
                         */
                        cont_pkt = qla2x00_prep_cont_type1_iocb(vha, vha->req);
                        cur_dsd = cont_pkt->dsd;
                        avail_dsds = ARRAY_SIZE(cont_pkt->dsd);
                }

                append_dsd64(&cur_dsd, sg);
                avail_dsds--;
        }
}

/*
 * Find the first handle that is not in use, starting from
 * req->current_outstanding_cmd + 1. The caller must hold the lock that is
 * associated with @req.
 */
uint32_t qla2xxx_get_next_handle(struct req_que *req)
{
        uint32_t index, handle = req->current_outstanding_cmd;

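        /*
         * Handle 0 is reserved as the "no handle" sentinel, so the scan
         * wraps back to 1 and a return value of 0 means no free handle.
         */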
        for (index = 1; index < req->num_outstanding_cmds; index++) {
                handle++;
                if (handle == req->num_outstanding_cmds)
                        handle = 1;
                if (!req->outstanding_cmds[handle])
                        return handle;
        }

        return 0;
}

/**
 * qla2x00_start_scsi() - Send a SCSI command to the ISP
 * @sp: command to send to the ISP
 *
 * Returns non-zero if a failure occurred, else zero.
 */
int
qla2x00_start_scsi(srb_t *sp)
{
        int             nseg;
        unsigned long   flags;
        scsi_qla_host_t *vha;
        struct scsi_cmnd *cmd;
        uint32_t        *clr_ptr;
        uint32_t        handle;
        cmd_entry_t     *cmd_pkt;
        uint16_t        cnt;
        uint16_t        req_cnt;
        uint16_t        tot_dsds;
        struct device_reg_2xxx __iomem *reg;
        struct qla_hw_data *ha;
        struct req_que *req;
        struct rsp_que *rsp;

        /* Setup device pointers. */
        vha = sp->vha;
        ha = vha->hw;
        reg = &ha->iobase->isp;
        cmd = GET_CMD_SP(sp);
        req = ha->req_q_map[0];
        rsp = ha->rsp_q_map[0];
        /* So we know we haven't pci_map'ed anything yet */
        tot_dsds = 0;

        /* Send marker if required */
        if (vha->marker_needed != 0) {
                if (qla2x00_marker(vha, ha->base_qpair, 0, 0, MK_SYNC_ALL) !=
                    QLA_SUCCESS) {
                        return (QLA_FUNCTION_FAILED);
                }
                vha->marker_needed = 0;
        }

        /* Acquire ring specific lock */
        spin_lock_irqsave(&ha->hardware_lock, flags);

        handle = qla2xxx_get_next_handle(req);
        if (handle == 0)
                goto queuing_error;

        /* Map the sg table so we have an accurate count of sg entries needed */
        if (scsi_sg_count(cmd)) {
                nseg = dma_map_sg(&ha->pdev->dev, scsi_sglist(cmd),
                    scsi_sg_count(cmd), cmd->sc_data_direction);
                if (unlikely(!nseg))
                        goto queuing_error;
        } else
                nseg = 0;

        tot_dsds = nseg;

        /* Calculate the number of request entries needed. */
        req_cnt = ha->isp_ops->calc_req_entries(tot_dsds);
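        /*
         * If the ring looks full, re-read the firmware's out pointer and
         * recompute the free entry count; the two extra entries of
         * headroom keep a completely full ring distinguishable from an
         * empty one.
         */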
        if (req->cnt < (req_cnt + 2)) {
                cnt = rd_reg_word_relaxed(ISP_REQ_Q_OUT(ha, reg));
                if (req->ring_index < cnt)
                        req->cnt = cnt - req->ring_index;
                else
                        req->cnt = req->length -
                            (req->ring_index - cnt);
                /* If still no headroom then bail out */
                if (req->cnt < (req_cnt + 2))
                        goto queuing_error;
        }

        /* Build command packet */
        req->current_outstanding_cmd = handle;
        req->outstanding_cmds[handle] = sp;
        sp->handle = handle;
        cmd->host_scribble = (unsigned char *)(unsigned long)handle;
        req->cnt -= req_cnt;

        cmd_pkt = (cmd_entry_t *)req->ring_ptr;
        cmd_pkt->handle = handle;
        /* Zero out remaining portion of packet. */
        clr_ptr = (uint32_t *)cmd_pkt + 2;
        memset(clr_ptr, 0, REQUEST_ENTRY_SIZE - 8);
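        /*
         * The skipped 8 bytes hold the entry header and the handle
         * written above.
         */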
        cmd_pkt->dseg_count = cpu_to_le16(tot_dsds);

        /* Set target ID and LUN number */
        SET_TARGET_ID(ha, cmd_pkt->target, sp->fcport->loop_id);
        cmd_pkt->lun = cpu_to_le16(cmd->device->lun);
        cmd_pkt->control_flags = cpu_to_le16(CF_SIMPLE_TAG);

        /* Load SCSI command packet. */
        memcpy(cmd_pkt->scsi_cdb, cmd->cmnd, cmd->cmd_len);
        cmd_pkt->byte_count = cpu_to_le32((uint32_t)scsi_bufflen(cmd));

        /* Build IOCB segments */
        ha->isp_ops->build_iocbs(sp, cmd_pkt, tot_dsds);

        /* Set total data segment count. */
        cmd_pkt->entry_count = (uint8_t)req_cnt;
        wmb();

        /* Adjust ring index. */
        req->ring_index++;
        if (req->ring_index == req->length) {
                req->ring_index = 0;
                req->ring_ptr = req->ring;
        } else
                req->ring_ptr++;

        sp->flags |= SRB_DMA_VALID;

        /* Set chip new ring index. */
        wrt_reg_word(ISP_REQ_Q_IN(ha, reg), req->ring_index);
        rd_reg_word_relaxed(ISP_REQ_Q_IN(ha, reg));     /* PCI Posting. */

        /* Manage unprocessed RIO/ZIO commands in response queue. */
        if (vha->flags.process_response_queue &&
            rsp->ring_ptr->signature != RESPONSE_PROCESSED)
                qla2x00_process_response_queue(rsp);

        spin_unlock_irqrestore(&ha->hardware_lock, flags);
        return (QLA_SUCCESS);

queuing_error:
        if (tot_dsds)
                scsi_dma_unmap(cmd);

        spin_unlock_irqrestore(&ha->hardware_lock, flags);

        return (QLA_FUNCTION_FAILED);
}

/**
 * qla2x00_start_iocbs() - Execute the IOCB command
 * @vha: HA context
 * @req: request queue
 */
void
qla2x00_start_iocbs(struct scsi_qla_host *vha, struct req_que *req)
{
        struct qla_hw_data *ha = vha->hw;
        device_reg_t *reg = ISP_QUE_REG(ha, req->id);

        if (IS_P3P_TYPE(ha)) {
                qla82xx_start_iocbs(vha);
        } else {
                /* Adjust ring index. */
                req->ring_index++;
                if (req->ring_index == req->length) {
                        req->ring_index = 0;
                        req->ring_ptr = req->ring;
                } else
                        req->ring_ptr++;

                /* Set chip new ring index. */
                if (ha->mqenable || IS_QLA27XX(ha) || IS_QLA28XX(ha)) {
                        wrt_reg_dword(req->req_q_in, req->ring_index);
                } else if (IS_QLA83XX(ha)) {
                        wrt_reg_dword(req->req_q_in, req->ring_index);
                        rd_reg_dword_relaxed(&ha->iobase->isp24.hccr);
                } else if (IS_QLAFX00(ha)) {
                        wrt_reg_dword(&reg->ispfx00.req_q_in, req->ring_index);
                        rd_reg_dword_relaxed(&reg->ispfx00.req_q_in);
                        QLAFX00_SET_HST_INTR(ha, ha->rqstq_intr_code);
                } else if (IS_FWI2_CAPABLE(ha)) {
                        wrt_reg_dword(&reg->isp24.req_q_in, req->ring_index);
                        rd_reg_dword_relaxed(&reg->isp24.req_q_in);
                } else {
                        wrt_reg_word(ISP_REQ_Q_IN(ha, &reg->isp),
                                req->ring_index);
                        rd_reg_word_relaxed(ISP_REQ_Q_IN(ha, &reg->isp));
                }
        }
}

/**
 * __qla2x00_marker() - Send a marker IOCB to the firmware.
 * @vha: HA context
 * @qpair: queue pair pointer
 * @loop_id: loop ID
 * @lun: LUN
 * @type: marker modifier
 *
 * Can be called from both normal and interrupt context.
 *
 * Returns non-zero if a failure occurred, else zero.
 */
static int
__qla2x00_marker(struct scsi_qla_host *vha, struct qla_qpair *qpair,
    uint16_t loop_id, uint64_t lun, uint8_t type)
{
        mrk_entry_t *mrk;
        struct mrk_entry_24xx *mrk24 = NULL;
        struct req_que *req = qpair->req;
        struct qla_hw_data *ha = vha->hw;
        scsi_qla_host_t *base_vha = pci_get_drvdata(ha->pdev);

        mrk = (mrk_entry_t *)__qla2x00_alloc_iocbs(qpair, NULL);
        if (mrk == NULL) {
                ql_log(ql_log_warn, base_vha, 0x3026,
                    "Failed to allocate Marker IOCB.\n");

                return (QLA_FUNCTION_FAILED);
        }

        mrk->entry_type = MARKER_TYPE;
        mrk->modifier = type;
        if (type != MK_SYNC_ALL) {
                if (IS_FWI2_CAPABLE(ha)) {
                        mrk24 = (struct mrk_entry_24xx *) mrk;
                        mrk24->nport_handle = cpu_to_le16(loop_id);
                        int_to_scsilun(lun, (struct scsi_lun *)&mrk24->lun);
                        host_to_fcp_swap(mrk24->lun, sizeof(mrk24->lun));
                        mrk24->vp_index = vha->vp_idx;
                        mrk24->handle = make_handle(req->id, mrk24->handle);
                } else {
                        SET_TARGET_ID(ha, mrk->target, loop_id);
                        mrk->lun = cpu_to_le16((uint16_t)lun);
                }
        }
        wmb();

        qla2x00_start_iocbs(vha, req);

        return (QLA_SUCCESS);
}

int
qla2x00_marker(struct scsi_qla_host *vha, struct qla_qpair *qpair,
    uint16_t loop_id, uint64_t lun, uint8_t type)
{
        int ret;
        unsigned long flags = 0;

        spin_lock_irqsave(qpair->qp_lock_ptr, flags);
        ret = __qla2x00_marker(vha, qpair, loop_id, lun, type);
        spin_unlock_irqrestore(qpair->qp_lock_ptr, flags);

        return (ret);
}

/*
 * qla2x00_issue_marker
 *
 * Issue marker
 * Caller CAN have hardware lock held as specified by ha_locked parameter.
 * Might release it, then reacquire.
 */
int qla2x00_issue_marker(scsi_qla_host_t *vha, int ha_locked)
{
        if (ha_locked) {
                if (__qla2x00_marker(vha, vha->hw->base_qpair, 0, 0,
                                        MK_SYNC_ALL) != QLA_SUCCESS)
                        return QLA_FUNCTION_FAILED;
        } else {
                if (qla2x00_marker(vha, vha->hw->base_qpair, 0, 0,
                                        MK_SYNC_ALL) != QLA_SUCCESS)
                        return QLA_FUNCTION_FAILED;
        }
        vha->marker_needed = 0;

        return QLA_SUCCESS;
}

static inline int
qla24xx_build_scsi_type_6_iocbs(srb_t *sp, struct cmd_type_6 *cmd_pkt,
        uint16_t tot_dsds)
{
        struct dsd64 *cur_dsd = NULL, *next_dsd;
        scsi_qla_host_t *vha;
        struct qla_hw_data *ha;
        struct scsi_cmnd *cmd;
        struct scatterlist *cur_seg;
        uint8_t avail_dsds;
        uint8_t first_iocb = 1;
        uint32_t dsd_list_len;
        struct dsd_dma *dsd_ptr;
        struct ct6_dsd *ctx;
        struct qla_qpair *qpair = sp->qpair;

        cmd = GET_CMD_SP(sp);

        /* Update entry type to indicate Command Type 6 IOCB */
        put_unaligned_le32(COMMAND_TYPE_6, &cmd_pkt->entry_type);

        /* No data transfer */
        if (!scsi_bufflen(cmd) || cmd->sc_data_direction == DMA_NONE) {
                cmd_pkt->byte_count = cpu_to_le32(0);
                return 0;
        }

        vha = sp->vha;
        ha = vha->hw;

        /* Set transfer direction */
        if (cmd->sc_data_direction == DMA_TO_DEVICE) {
                cmd_pkt->control_flags = cpu_to_le16(CF_WRITE_DATA);
                qpair->counters.output_bytes += scsi_bufflen(cmd);
                qpair->counters.output_requests++;
        } else if (cmd->sc_data_direction == DMA_FROM_DEVICE) {
                cmd_pkt->control_flags = cpu_to_le16(CF_READ_DATA);
                qpair->counters.input_bytes += scsi_bufflen(cmd);
                qpair->counters.input_requests++;
        }

        cur_seg = scsi_sglist(cmd);
        ctx = sp->u.scmd.ct6_ctx;

        while (tot_dsds) {
                avail_dsds = (tot_dsds > QLA_DSDS_PER_IOCB) ?
                    QLA_DSDS_PER_IOCB : tot_dsds;
                tot_dsds -= avail_dsds;
                dsd_list_len = (avail_dsds + 1) * QLA_DSD_SIZE;
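                /*
                 * Each list is one entry larger than the data it holds;
                 * the extra slot either chains to the next list or takes
                 * the null terminator.
                 */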

                dsd_ptr = list_first_entry(&ha->gbl_dsd_list,
                    struct dsd_dma, list);
                next_dsd = dsd_ptr->dsd_addr;
                list_del(&dsd_ptr->list);
                ha->gbl_dsd_avail--;
                list_add_tail(&dsd_ptr->list, &ctx->dsd_list);
                ctx->dsd_use_cnt++;
                ha->gbl_dsd_inuse++;

                if (first_iocb) {
                        first_iocb = 0;
                        put_unaligned_le64(dsd_ptr->dsd_list_dma,
                                           &cmd_pkt->fcp_dsd.address);
                        cmd_pkt->fcp_dsd.length = cpu_to_le32(dsd_list_len);
                } else {
                        put_unaligned_le64(dsd_ptr->dsd_list_dma,
                                           &cur_dsd->address);
                        cur_dsd->length = cpu_to_le32(dsd_list_len);
                        cur_dsd++;
                }
                cur_dsd = next_dsd;
                while (avail_dsds) {
                        append_dsd64(&cur_dsd, cur_seg);
                        cur_seg = sg_next(cur_seg);
                        avail_dsds--;
                }
        }

        /* Null termination */
        cur_dsd->address = 0;
        cur_dsd->length = 0;
        cur_dsd++;
        cmd_pkt->control_flags |= cpu_to_le16(CF_DATA_SEG_DESCR_ENABLE);
        return 0;
}

/*
 * qla24xx_calc_dsd_lists() - Determine number of DSD lists required
 * for Command Type 6.
 *
 * @dsds: number of data segment descriptors needed
 *
 * Returns the number of DSD lists needed to store @dsds.
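 * This is simply DIV_ROUND_UP(@dsds, QLA_DSDS_PER_IOCB).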
 */
static inline uint16_t
qla24xx_calc_dsd_lists(uint16_t dsds)
{
        uint16_t dsd_lists = 0;

        dsd_lists = (dsds/QLA_DSDS_PER_IOCB);
        if (dsds % QLA_DSDS_PER_IOCB)
                dsd_lists++;
        return dsd_lists;
}

/**
 * qla24xx_build_scsi_iocbs() - Build IOCB command utilizing Command Type 7
 * IOCB types.
 *
 * @sp: SRB command to process
 * @cmd_pkt: Command Type 7 IOCB
 * @tot_dsds: Total number of segments to transfer
 * @req: pointer to request queue
 */
inline void
qla24xx_build_scsi_iocbs(srb_t *sp, struct cmd_type_7 *cmd_pkt,
        uint16_t tot_dsds, struct req_que *req)
{
        uint16_t        avail_dsds;
        struct dsd64    *cur_dsd;
        scsi_qla_host_t *vha;
        struct scsi_cmnd *cmd;
        struct scatterlist *sg;
        int i;
        struct qla_qpair *qpair = sp->qpair;

        cmd = GET_CMD_SP(sp);

        /* Update entry type to indicate Command Type 7 IOCB */
        put_unaligned_le32(COMMAND_TYPE_7, &cmd_pkt->entry_type);

        /* No data transfer */
        if (!scsi_bufflen(cmd) || cmd->sc_data_direction == DMA_NONE) {
                cmd_pkt->byte_count = cpu_to_le32(0);
                return;
        }

        vha = sp->vha;

        /* Set transfer direction */
        if (cmd->sc_data_direction == DMA_TO_DEVICE) {
                cmd_pkt->task_mgmt_flags = cpu_to_le16(TMF_WRITE_DATA);
                qpair->counters.output_bytes += scsi_bufflen(cmd);
                qpair->counters.output_requests++;
        } else if (cmd->sc_data_direction == DMA_FROM_DEVICE) {
                cmd_pkt->task_mgmt_flags = cpu_to_le16(TMF_READ_DATA);
                qpair->counters.input_bytes += scsi_bufflen(cmd);
                qpair->counters.input_requests++;
        }

        /* One DSD is available in the Command Type 7 IOCB */
        avail_dsds = 1;
        cur_dsd = &cmd_pkt->dsd;

        /* Load data segments */
        scsi_for_each_sg(cmd, sg, tot_dsds, i) {
                cont_a64_entry_t *cont_pkt;

                /* Allocate additional continuation packets? */
                if (avail_dsds == 0) {
                        /*
                         * Five DSDs are available in the Continuation
                         * Type 1 IOCB.
                         */
                        cont_pkt = qla2x00_prep_cont_type1_iocb(vha, req);
                        cur_dsd = cont_pkt->dsd;
                        avail_dsds = ARRAY_SIZE(cont_pkt->dsd);
                }

                append_dsd64(&cur_dsd, sg);
                avail_dsds--;
        }
}

struct fw_dif_context {
        __le32  ref_tag;
        __le16  app_tag;
        uint8_t ref_tag_mask[4];        /* Validation/Replacement Mask */
        uint8_t app_tag_mask[2];        /* Validation/Replacement Mask */
};

/*
 * qla24xx_set_t10dif_tags - Extract ref and app tags from SCSI command.
 */
static inline void
qla24xx_set_t10dif_tags(srb_t *sp, struct fw_dif_context *pkt,
    unsigned int protcnt)
{
        struct scsi_cmnd *cmd = GET_CMD_SP(sp);

        switch (scsi_get_prot_type(cmd)) {
        case SCSI_PROT_DIF_TYPE0:
                /*
                 * No check for ql2xenablehba_err_chk, as it would be an
                 * I/O error if hba tag generation is not done.
                 */
                pkt->ref_tag = cpu_to_le32((uint32_t)
                    (0xffffffff & scsi_get_lba(cmd)));

                if (!qla2x00_hba_err_chk_enabled(sp))
                        break;

                pkt->ref_tag_mask[0] = 0xff;
                pkt->ref_tag_mask[1] = 0xff;
                pkt->ref_tag_mask[2] = 0xff;
                pkt->ref_tag_mask[3] = 0xff;
                break;

        /*
         * For TYPE 2 protection: 16 bit GUARD + 32 bit REF tag has to
         * match LBA in CDB + N
         */
        case SCSI_PROT_DIF_TYPE2:
                pkt->app_tag = cpu_to_le16(0);
                pkt->app_tag_mask[0] = 0x0;
                pkt->app_tag_mask[1] = 0x0;

                pkt->ref_tag = cpu_to_le32((uint32_t)
                    (0xffffffff & scsi_get_lba(cmd)));

                if (!qla2x00_hba_err_chk_enabled(sp))
                        break;

                /* enable ALL bytes of the ref tag */
                pkt->ref_tag_mask[0] = 0xff;
                pkt->ref_tag_mask[1] = 0xff;
                pkt->ref_tag_mask[2] = 0xff;
                pkt->ref_tag_mask[3] = 0xff;
                break;

        /* For Type 3 protection: 16 bit GUARD only */
        case SCSI_PROT_DIF_TYPE3:
                pkt->ref_tag_mask[0] = pkt->ref_tag_mask[1] =
                        pkt->ref_tag_mask[2] = pkt->ref_tag_mask[3] =
                                                                0x00;
                break;

        /*
         * For Type 1 protection: 16 bit GUARD tag, 32 bit REF tag, and
         * 16 bit app tag.
         */
        case SCSI_PROT_DIF_TYPE1:
                pkt->ref_tag = cpu_to_le32((uint32_t)
                    (0xffffffff & scsi_get_lba(cmd)));
                pkt->app_tag = cpu_to_le16(0);
                pkt->app_tag_mask[0] = 0x0;
                pkt->app_tag_mask[1] = 0x0;

                if (!qla2x00_hba_err_chk_enabled(sp))
                        break;

                /* enable ALL bytes of the ref tag */
                pkt->ref_tag_mask[0] = 0xff;
                pkt->ref_tag_mask[1] = 0xff;
                pkt->ref_tag_mask[2] = 0xff;
                pkt->ref_tag_mask[3] = 0xff;
                break;
        }
}

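/*
 * Carve the next protection-interval-sized chunk (blk_sz bytes) out of the
 * scatter/gather list.  Returns 1 while data remains and 0 once
 * sgx->tot_bytes has been consumed; *partial is set when the chunk ends
 * before a full interval, i.e. the interval spans SG elements.
 */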
int
qla24xx_get_one_block_sg(uint32_t blk_sz, struct qla2_sgx *sgx,
        uint32_t *partial)
{
        struct scatterlist *sg;
        uint32_t cumulative_partial, sg_len;
        dma_addr_t sg_dma_addr;

        if (sgx->num_bytes == sgx->tot_bytes)
                return 0;

        sg = sgx->cur_sg;
        cumulative_partial = sgx->tot_partial;

        sg_dma_addr = sg_dma_address(sg);
        sg_len = sg_dma_len(sg);

        sgx->dma_addr = sg_dma_addr + sgx->bytes_consumed;

        if ((cumulative_partial + (sg_len - sgx->bytes_consumed)) >= blk_sz) {
                sgx->dma_len = (blk_sz - cumulative_partial);
                sgx->tot_partial = 0;
                sgx->num_bytes += blk_sz;
                *partial = 0;
        } else {
                sgx->dma_len = sg_len - sgx->bytes_consumed;
                sgx->tot_partial += sgx->dma_len;
                *partial = 1;
        }

        sgx->bytes_consumed += sgx->dma_len;

        if (sg_len == sgx->bytes_consumed) {
                sg = sg_next(sg);
                sgx->num_sg++;
                sgx->cur_sg = sg;
                sgx->bytes_consumed = 0;
        }

        return 1;
}

int
qla24xx_walk_and_build_sglist_no_difb(struct qla_hw_data *ha, srb_t *sp,
        struct dsd64 *dsd, uint16_t tot_dsds, struct qla_tc_param *tc)
{
        void *next_dsd;
        uint8_t avail_dsds = 0;
        uint32_t dsd_list_len;
        struct dsd_dma *dsd_ptr;
        struct scatterlist *sg_prot;
        struct dsd64 *cur_dsd = dsd;
        uint16_t        used_dsds = tot_dsds;
        uint32_t        prot_int; /* protection interval */
        uint32_t        partial;
        struct qla2_sgx sgx;
        dma_addr_t      sle_dma;
        uint32_t        sle_dma_len, tot_prot_dma_len = 0;
        struct scsi_cmnd *cmd;

        memset(&sgx, 0, sizeof(struct qla2_sgx));
        if (sp) {
                cmd = GET_CMD_SP(sp);
                prot_int = cmd->device->sector_size;

                sgx.tot_bytes = scsi_bufflen(cmd);
                sgx.cur_sg = scsi_sglist(cmd);
                sgx.sp = sp;

                sg_prot = scsi_prot_sglist(cmd);
        } else if (tc) {
                prot_int      = tc->blk_sz;
                sgx.tot_bytes = tc->bufflen;
                sgx.cur_sg    = tc->sg;
                sg_prot       = tc->prot_sg;
        } else {
                BUG();
                return 1;
        }

        while (qla24xx_get_one_block_sg(prot_int, &sgx, &partial)) {

                sle_dma = sgx.dma_addr;
                sle_dma_len = sgx.dma_len;
alloc_and_fill:
                /* Allocate additional continuation packets? */
                if (avail_dsds == 0) {
                        avail_dsds = (used_dsds > QLA_DSDS_PER_IOCB) ?
                                        QLA_DSDS_PER_IOCB : used_dsds;
                        dsd_list_len = (avail_dsds + 1) * 12;
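                        /*
                         * 12 bytes per DSD (64-bit address + 32-bit
                         * length); the +1 slot chains the lists together
                         * or carries the null terminator.
                         */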
                        used_dsds -= avail_dsds;

                        /* allocate tracking DS */
                        dsd_ptr = kzalloc(sizeof(struct dsd_dma), GFP_ATOMIC);
                        if (!dsd_ptr)
                                return 1;

                        /* allocate new list */
                        dsd_ptr->dsd_addr = next_dsd =
                            dma_pool_alloc(ha->dl_dma_pool, GFP_ATOMIC,
                                &dsd_ptr->dsd_list_dma);

                        if (!next_dsd) {
                                /*
                                 * Need to cleanup only this dsd_ptr, rest
                                 * will be done by sp_free_dma()
                                 */
                                kfree(dsd_ptr);
                                return 1;
                        }

                        if (sp) {
                                list_add_tail(&dsd_ptr->list,
                                              &sp->u.scmd.crc_ctx->dsd_list);

                                sp->flags |= SRB_CRC_CTX_DSD_VALID;
                        } else {
                                list_add_tail(&dsd_ptr->list,
                                    &(tc->ctx->dsd_list));
                                *tc->ctx_dsd_alloced = 1;
                        }

                        /* add new list to cmd iocb or last list */
                        put_unaligned_le64(dsd_ptr->dsd_list_dma,
                                           &cur_dsd->address);
                        cur_dsd->length = cpu_to_le32(dsd_list_len);
                        cur_dsd = next_dsd;
                }
                put_unaligned_le64(sle_dma, &cur_dsd->address);
                cur_dsd->length = cpu_to_le32(sle_dma_len);
                cur_dsd++;
                avail_dsds--;

                if (partial == 0) {
                        /* Got a full protection interval */
                        sle_dma = sg_dma_address(sg_prot) + tot_prot_dma_len;
                        sle_dma_len = 8;

                        tot_prot_dma_len += sle_dma_len;
                        if (tot_prot_dma_len == sg_dma_len(sg_prot)) {
                                tot_prot_dma_len = 0;
                                sg_prot = sg_next(sg_prot);
                        }

                        partial = 1; /* So as to not re-enter this block */
                        goto alloc_and_fill;
                }
        }
        /* Null termination */
        cur_dsd->address = 0;
        cur_dsd->length = 0;
        cur_dsd++;
        return 0;
}

int
qla24xx_walk_and_build_sglist(struct qla_hw_data *ha, srb_t *sp,
        struct dsd64 *dsd, uint16_t tot_dsds, struct qla_tc_param *tc)
{
        void *next_dsd;
        uint8_t avail_dsds = 0;
        uint32_t dsd_list_len;
        struct dsd_dma *dsd_ptr;
        struct scatterlist *sg, *sgl;
        struct dsd64 *cur_dsd = dsd;
        int     i;
        uint16_t        used_dsds = tot_dsds;
        struct scsi_cmnd *cmd;

        if (sp) {
                cmd = GET_CMD_SP(sp);
                sgl = scsi_sglist(cmd);
        } else if (tc) {
                sgl = tc->sg;
        } else {
                BUG();
                return 1;
        }

        for_each_sg(sgl, sg, tot_dsds, i) {
                /* Allocate additional continuation packets? */
                if (avail_dsds == 0) {
                        avail_dsds = (used_dsds > QLA_DSDS_PER_IOCB) ?
                                        QLA_DSDS_PER_IOCB : used_dsds;
                        dsd_list_len = (avail_dsds + 1) * 12;
                        used_dsds -= avail_dsds;

                        /* allocate tracking DS */
                        dsd_ptr = kzalloc(sizeof(struct dsd_dma), GFP_ATOMIC);
                        if (!dsd_ptr)
                                return 1;

                        /* allocate new list */
                        dsd_ptr->dsd_addr = next_dsd =
                            dma_pool_alloc(ha->dl_dma_pool, GFP_ATOMIC,
                                &dsd_ptr->dsd_list_dma);

                        if (!next_dsd) {
                                /*
                                 * Need to cleanup only this dsd_ptr, rest
                                 * will be done by sp_free_dma()
                                 */
                                kfree(dsd_ptr);
                                return 1;
                        }

                        if (sp) {
                                list_add_tail(&dsd_ptr->list,
                                              &sp->u.scmd.crc_ctx->dsd_list);

                                sp->flags |= SRB_CRC_CTX_DSD_VALID;
                        } else {
                                list_add_tail(&dsd_ptr->list,
                                    &(tc->ctx->dsd_list));
                                *tc->ctx_dsd_alloced = 1;
                        }

                        /* add new list to cmd iocb or last list */
                        put_unaligned_le64(dsd_ptr->dsd_list_dma,
                                           &cur_dsd->address);
                        cur_dsd->length = cpu_to_le32(dsd_list_len);
                        cur_dsd = next_dsd;
                }
                append_dsd64(&cur_dsd, sg);
                avail_dsds--;
        }
        /* Null termination */
        cur_dsd->address = 0;
        cur_dsd->length = 0;
        cur_dsd++;
        return 0;
}

int
qla24xx_walk_and_build_prot_sglist(struct qla_hw_data *ha, srb_t *sp,
        struct dsd64 *cur_dsd, uint16_t tot_dsds, struct qla_tgt_cmd *tc)
{
        struct dsd_dma *dsd_ptr = NULL, *dif_dsd, *nxt_dsd;
        struct scatterlist *sg, *sgl;
        struct crc_context *difctx = NULL;
        struct scsi_qla_host *vha;
        uint dsd_list_len;
        uint avail_dsds = 0;
        uint used_dsds = tot_dsds;
        bool dif_local_dma_alloc = false;
        bool direction_to_device = false;
        int i;

        if (sp) {
                struct scsi_cmnd *cmd = GET_CMD_SP(sp);

                sgl = scsi_prot_sglist(cmd);
                vha = sp->vha;
                difctx = sp->u.scmd.crc_ctx;
                direction_to_device = cmd->sc_data_direction == DMA_TO_DEVICE;
                ql_dbg(ql_dbg_tgt + ql_dbg_verbose, vha, 0xe021,
                    "%s: scsi_cmnd: %p, crc_ctx: %p, sp: %p\n",
                    __func__, cmd, difctx, sp);
        } else if (tc) {
                vha = tc->vha;
                sgl = tc->prot_sg;
                difctx = tc->ctx;
                direction_to_device = tc->dma_data_direction == DMA_TO_DEVICE;
        } else {
                BUG();
                return 1;
        }

        ql_dbg(ql_dbg_tgt + ql_dbg_verbose, vha, 0xe021,
            "%s: enter (write=%u)\n", __func__, direction_to_device);

        /* if initiator doing write or target doing read */
        if (direction_to_device) {
                for_each_sg(sgl, sg, tot_dsds, i) {
                        u64 sle_phys = sg_phys(sg);

                        /* If SGE addr + len flips bits in upper 32-bits */
                        if (MSD(sle_phys + sg->length) ^ MSD(sle_phys)) {
                                ql_dbg(ql_dbg_tgt + ql_dbg_verbose, vha, 0xe022,
                                    "%s: page boundary crossing (phys=%llx len=%x)\n",
                                    __func__, sle_phys, sg->length);

                                if (difctx) {
                                        ha->dif_bundle_crossed_pages++;
                                        dif_local_dma_alloc = true;
                                } else {
                                        ql_dbg(ql_dbg_tgt + ql_dbg_verbose,
                                            vha, 0xe022,
                                            "%s: difctx pointer is NULL\n",
                                            __func__);
                                }
                                break;
                        }
                }
                ha->dif_bundle_writes++;
        } else {
                ha->dif_bundle_reads++;
        }

        if (ql2xdifbundlinginternalbuffers)
                dif_local_dma_alloc = direction_to_device;

        if (dif_local_dma_alloc) {
                u32 track_difbundl_buf = 0;
                u32 ldma_sg_len = 0;
                u8 ldma_needed = 1;
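                /*
                 * Bounce the protection data through driver-owned DMA
                 * buffers of DIF_BUNDLING_DMA_POOL_SIZE bytes each and
                 * build the DSD lists over those buffers rather than the
                 * caller's SG elements.
                 */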

                difctx->no_dif_bundl = 0;
                difctx->dif_bundl_len = 0;

                /* Track DSD buffers */
                INIT_LIST_HEAD(&difctx->ldif_dsd_list);
                /* Track local DMA buffers */
                INIT_LIST_HEAD(&difctx->ldif_dma_hndl_list);

                for_each_sg(sgl, sg, tot_dsds, i) {
                        u32 sglen = sg_dma_len(sg);

                        ql_dbg(ql_dbg_tgt + ql_dbg_verbose, vha, 0xe023,
                            "%s: sg[%x] (phys=%llx sglen=%x) ldma_sg_len: %x dif_bundl_len: %x ldma_needed: %x\n",
                            __func__, i, (u64)sg_phys(sg), sglen, ldma_sg_len,
                            difctx->dif_bundl_len, ldma_needed);

                        while (sglen) {
                                u32 xfrlen = 0;

                                if (ldma_needed) {
                                        /*
                                         * Allocate list item to store
                                         * the DMA buffers
                                         */
                                        dsd_ptr = kzalloc(sizeof(*dsd_ptr),
                                            GFP_ATOMIC);
                                        if (!dsd_ptr) {
                                                ql_dbg(ql_dbg_tgt, vha, 0xe024,
                                                    "%s: failed alloc dsd_ptr\n",
                                                    __func__);
                                                return 1;
                                        }
                                        ha->dif_bundle_kallocs++;

                                        /* allocate dma buffer */
                                        dsd_ptr->dsd_addr = dma_pool_alloc
                                                (ha->dif_bundl_pool, GFP_ATOMIC,
                                                 &dsd_ptr->dsd_list_dma);
                                        if (!dsd_ptr->dsd_addr) {
                                                ql_dbg(ql_dbg_tgt, vha, 0xe024,
                                                    "%s: failed alloc ->dsd_ptr\n",
                                                    __func__);
                                                /*
                                                 * need to cleanup only this
                                                 * dsd_ptr rest will be done
                                                 * by sp_free_dma()
                                                 */
                                                kfree(dsd_ptr);
                                                ha->dif_bundle_kallocs--;
                                                return 1;
                                        }
                                        ha->dif_bundle_dma_allocs++;
                                        ldma_needed = 0;
                                        difctx->no_dif_bundl++;
                                        list_add_tail(&dsd_ptr->list,
                                            &difctx->ldif_dma_hndl_list);
                                }

                                /* xfrlen is min of dma pool size and sglen */
                                xfrlen = (sglen >
                                   (DIF_BUNDLING_DMA_POOL_SIZE - ldma_sg_len)) ?
                                    DIF_BUNDLING_DMA_POOL_SIZE - ldma_sg_len :
                                    sglen;

                                /* replace with local allocated dma buffer */
                                sg_pcopy_to_buffer(sgl, sg_nents(sgl),
                                    dsd_ptr->dsd_addr + ldma_sg_len, xfrlen,
                                    difctx->dif_bundl_len);
                                difctx->dif_bundl_len += xfrlen;
                                sglen -= xfrlen;
                                ldma_sg_len += xfrlen;
                                if (ldma_sg_len == DIF_BUNDLING_DMA_POOL_SIZE ||
                                    sg_is_last(sg)) {
                                        ldma_needed = 1;
                                        ldma_sg_len = 0;
                                }
                        }
                }

                track_difbundl_buf = used_dsds = difctx->no_dif_bundl;
                ql_dbg(ql_dbg_tgt + ql_dbg_verbose, vha, 0xe025,
                    "dif_bundl_len=%x, no_dif_bundl=%x track_difbundl_buf: %x\n",
                    difctx->dif_bundl_len, difctx->no_dif_bundl,
                    track_difbundl_buf);

                if (sp)
                        sp->flags |= SRB_DIF_BUNDL_DMA_VALID;
                else
                        tc->prot_flags = DIF_BUNDL_DMA_VALID;

                list_for_each_entry_safe(dif_dsd, nxt_dsd,
                    &difctx->ldif_dma_hndl_list, list) {
                        u32 sglen = (difctx->dif_bundl_len >
                            DIF_BUNDLING_DMA_POOL_SIZE) ?
                            DIF_BUNDLING_DMA_POOL_SIZE : difctx->dif_bundl_len;

                        BUG_ON(track_difbundl_buf == 0);

                        /* Allocate additional continuation packets? */
                        if (avail_dsds == 0) {
                                ql_dbg(ql_dbg_tgt + ql_dbg_verbose, vha,
                                    0xe024,
                                    "%s: adding continuation iocb's\n",
                                    __func__);
                                avail_dsds = (used_dsds > QLA_DSDS_PER_IOCB) ?
                                    QLA_DSDS_PER_IOCB : used_dsds;
                                dsd_list_len = (avail_dsds + 1) * 12;
                                used_dsds -= avail_dsds;

                                /* allocate tracking DS */
                                dsd_ptr = kzalloc(sizeof(*dsd_ptr), GFP_ATOMIC);
                                if (!dsd_ptr) {
                                        ql_dbg(ql_dbg_tgt, vha, 0xe026,
                                            "%s: failed alloc dsd_ptr\n",
                                            __func__);
                                        return 1;
                                }
                                ha->dif_bundle_kallocs++;

                                difctx->no_ldif_dsd++;
                                /* allocate new list */
                                dsd_ptr->dsd_addr =
                                    dma_pool_alloc(ha->dl_dma_pool, GFP_ATOMIC,
                                        &dsd_ptr->dsd_list_dma);
                                if (!dsd_ptr->dsd_addr) {
                                        ql_dbg(ql_dbg_tgt, vha, 0xe026,
                                            "%s: failed alloc ->dsd_addr\n",
                                            __func__);
                                        /*
                                         * need to cleanup only this dsd_ptr
                                         * rest will be done by sp_free_dma()
                                         */
                                        kfree(dsd_ptr);
                                        ha->dif_bundle_kallocs--;
                                        return 1;
                                }
                                ha->dif_bundle_dma_allocs++;

                                if (sp) {
                                        list_add_tail(&dsd_ptr->list,
                                            &difctx->ldif_dsd_list);
                                        sp->flags |= SRB_CRC_CTX_DSD_VALID;
                                } else {
                                        list_add_tail(&dsd_ptr->list,
                                            &difctx->ldif_dsd_list);
                                        tc->ctx_dsd_alloced = 1;
                                }

                                /* add new list to cmd iocb or last list */
                                put_unaligned_le64(dsd_ptr->dsd_list_dma,
                                                   &cur_dsd->address);
                                cur_dsd->length = cpu_to_le32(dsd_list_len);
                                cur_dsd = dsd_ptr->dsd_addr;
                        }
                        put_unaligned_le64(dif_dsd->dsd_list_dma,
                                           &cur_dsd->address);
                        cur_dsd->length = cpu_to_le32(sglen);
                        cur_dsd++;
                        avail_dsds--;
                        difctx->dif_bundl_len -= sglen;
                        track_difbundl_buf--;
                }

                ql_dbg(ql_dbg_tgt + ql_dbg_verbose, vha, 0xe026,
                    "%s: no_ldif_dsd:%x, no_dif_bundl:%x\n", __func__,
                    difctx->no_ldif_dsd, difctx->no_dif_bundl);
        } else {
                for_each_sg(sgl, sg, tot_dsds, i) {
                        /* Allocate additional continuation packets? */
                        if (avail_dsds == 0) {
                                avail_dsds = (used_dsds > QLA_DSDS_PER_IOCB) ?
                                    QLA_DSDS_PER_IOCB : used_dsds;
                                dsd_list_len = (avail_dsds + 1) * 12;
                                used_dsds -= avail_dsds;

                                /* allocate tracking DS */
                                dsd_ptr = kzalloc(sizeof(*dsd_ptr), GFP_ATOMIC);
                                if (!dsd_ptr) {
                                        ql_dbg(ql_dbg_tgt + ql_dbg_verbose,
                                            vha, 0xe027,
                                            "%s: failed alloc dsd_dma...\n",
                                            __func__);
                                        return 1;
                                }

                                /* allocate new list */
                                dsd_ptr->dsd_addr =
                                    dma_pool_alloc(ha->dl_dma_pool, GFP_ATOMIC,
                                        &dsd_ptr->dsd_list_dma);
                                if (!dsd_ptr->dsd_addr) {
                                        /*
                                         * Need to cleanup only this dsd_ptr;
                                         * the rest will be done by
                                         * sp_free_dma().
                                         */
                                        kfree(dsd_ptr);
                                        return 1;
                                }

                                if (sp) {
                                        list_add_tail(&dsd_ptr->list,
                                            &difctx->dsd_list);
                                        sp->flags |= SRB_CRC_CTX_DSD_VALID;
                                } else {
                                        list_add_tail(&dsd_ptr->list,
                                            &difctx->dsd_list);
                                        tc->ctx_dsd_alloced = 1;
                                }

                                /* add new list to cmd iocb or last list */
                                put_unaligned_le64(dsd_ptr->dsd_list_dma,
                                                   &cur_dsd->address);
                                cur_dsd->length = cpu_to_le32(dsd_list_len);
                                cur_dsd = dsd_ptr->dsd_addr;
                        }
                        append_dsd64(&cur_dsd, sg);
                        avail_dsds--;
                }
        }
        /* Null termination */
        cur_dsd->address = 0;
        cur_dsd->length = 0;
        cur_dsd++;
        return 0;
}

/**
 * qla24xx_build_scsi_crc_2_iocbs() - Build IOCB command utilizing Command
 *                                                      Type CRC_2 IOCB types.
 *
 * @sp: SRB command to process
 * @cmd_pkt: Command Type CRC_2 IOCB
 * @tot_dsds: Total number of segments to transfer
 * @tot_prot_dsds: Total number of segments with protection information
 * @fw_prot_opts: Protection options to be passed to firmware
 */
1389static inline int
1390qla24xx_build_scsi_crc_2_iocbs(srb_t *sp, struct cmd_type_crc_2 *cmd_pkt,
1391    uint16_t tot_dsds, uint16_t tot_prot_dsds, uint16_t fw_prot_opts)
1392{
1393        struct dsd64            *cur_dsd;
1394        __be32                  *fcp_dl;
1395        scsi_qla_host_t         *vha;
1396        struct scsi_cmnd        *cmd;
1397        uint32_t                total_bytes = 0;
1398        uint32_t                data_bytes;
1399        uint32_t                dif_bytes;
1400        uint8_t                 bundling = 1;
1401        uint16_t                blk_size;
1402        struct crc_context      *crc_ctx_pkt = NULL;
1403        struct qla_hw_data      *ha;
1404        uint8_t                 additional_fcpcdb_len;
1405        uint16_t                fcp_cmnd_len;
1406        struct fcp_cmnd         *fcp_cmnd;
1407        dma_addr_t              crc_ctx_dma;
1408
1409        cmd = GET_CMD_SP(sp);
1410
1411        /* Update entry type to indicate Command Type CRC_2 IOCB */
1412        put_unaligned_le32(COMMAND_TYPE_CRC_2, &cmd_pkt->entry_type);
1413
1414        vha = sp->vha;
1415        ha = vha->hw;
1416
1417        /* No data transfer */
1418        data_bytes = scsi_bufflen(cmd);
1419        if (!data_bytes || cmd->sc_data_direction == DMA_NONE) {
1420                cmd_pkt->byte_count = cpu_to_le32(0);
1421                return QLA_SUCCESS;
1422        }
1423
1424        cmd_pkt->vp_index = sp->vha->vp_idx;
1425
1426        /* Set transfer direction */
1427        if (cmd->sc_data_direction == DMA_TO_DEVICE) {
1428                cmd_pkt->control_flags =
1429                    cpu_to_le16(CF_WRITE_DATA);
1430        } else if (cmd->sc_data_direction == DMA_FROM_DEVICE) {
1431                cmd_pkt->control_flags =
1432                    cpu_to_le16(CF_READ_DATA);
1433        }
1434
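            /*
             * For pure INSERT/STRIP operations only one side of the
             * exchange carries protection data, so no separate DIF
             * scatter list is needed and bundling can stay off.
             */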
1435        if ((scsi_get_prot_op(cmd) == SCSI_PROT_READ_INSERT) ||
1436            (scsi_get_prot_op(cmd) == SCSI_PROT_WRITE_STRIP) ||
1437            (scsi_get_prot_op(cmd) == SCSI_PROT_READ_STRIP) ||
1438            (scsi_get_prot_op(cmd) == SCSI_PROT_WRITE_INSERT))
1439                bundling = 0;
1440
1441        /* Allocate CRC context from global pool */
1442        crc_ctx_pkt = sp->u.scmd.crc_ctx =
1443            dma_pool_zalloc(ha->dl_dma_pool, GFP_ATOMIC, &crc_ctx_dma);
1444
1445        if (!crc_ctx_pkt)
1446                goto crc_queuing_error;
1447
1448        crc_ctx_pkt->crc_ctx_dma = crc_ctx_dma;
1449
1450        sp->flags |= SRB_CRC_CTX_DMA_VALID;
1451
1452        /* Set handle */
1453        crc_ctx_pkt->handle = cmd_pkt->handle;
1454
1455        INIT_LIST_HEAD(&crc_ctx_pkt->dsd_list);
1456
1457        qla24xx_set_t10dif_tags(sp, (struct fw_dif_context *)
1458            &crc_ctx_pkt->ref_tag, tot_prot_dsds);
1459
1460        put_unaligned_le64(crc_ctx_dma, &cmd_pkt->crc_context_address);
1461        cmd_pkt->crc_context_len = cpu_to_le16(CRC_CONTEXT_LEN_FW);
1462
1463        /* Determine SCSI command length -- align to 4-byte boundary */
1464        if (cmd->cmd_len > 16) {
1465                additional_fcpcdb_len = cmd->cmd_len - 16;
1466                if ((cmd->cmd_len % 4) != 0) {
1467                        /* SCSI cmd > 16 bytes must be multiple of 4 */
1468                        goto crc_queuing_error;
1469                }
1470                fcp_cmnd_len = 12 + cmd->cmd_len + 4;
1471        } else {
1472                additional_fcpcdb_len = 0;
1473                fcp_cmnd_len = 12 + 16 + 4;
1474        }
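            /*
             * FCP_CMND layout: 8-byte LUN, 4 bytes of task/control
             * fields, the CDB, then a trailing 4-byte FCP_DL -- hence
             * 12 + cdb_len + 4 (e.g. 48 bytes for a 32-byte CDB).
             */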
1475
1476        fcp_cmnd = &crc_ctx_pkt->fcp_cmnd;
1477
1478        fcp_cmnd->additional_cdb_len = additional_fcpcdb_len;
1479        if (cmd->sc_data_direction == DMA_TO_DEVICE)
1480                fcp_cmnd->additional_cdb_len |= 1;
1481        else if (cmd->sc_data_direction == DMA_FROM_DEVICE)
1482                fcp_cmnd->additional_cdb_len |= 2;
1483
1484        int_to_scsilun(cmd->device->lun, &fcp_cmnd->lun);
1485        memcpy(fcp_cmnd->cdb, cmd->cmnd, cmd->cmd_len);
1486        cmd_pkt->fcp_cmnd_dseg_len = cpu_to_le16(fcp_cmnd_len);
1487        put_unaligned_le64(crc_ctx_dma + CRC_CONTEXT_FCPCMND_OFF,
1488                           &cmd_pkt->fcp_cmnd_dseg_address);
1489        fcp_cmnd->task_management = 0;
1490        fcp_cmnd->task_attribute = TSK_SIMPLE;
1491
1492        cmd_pkt->fcp_rsp_dseg_len = 0; /* Let response come in status iocb */
1493
1494        /* Compute dif len and adjust data len to include protection */
1495        dif_bytes = 0;
1496        blk_size = cmd->device->sector_size;
1497        dif_bytes = (data_bytes / blk_size) * 8;
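            /*
             * Each logical block carries an 8-byte DIF tuple (2-byte
             * guard, 2-byte app tag, 4-byte ref tag), e.g. a 64K
             * transfer with 512-byte sectors adds 128 * 8 = 1024
             * protection bytes.
             */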
1498
1499        switch (scsi_get_prot_op(GET_CMD_SP(sp))) {
1500        case SCSI_PROT_READ_INSERT:
1501        case SCSI_PROT_WRITE_STRIP:
1502                total_bytes = data_bytes;
1503                data_bytes += dif_bytes;
1504                break;
1505
1506        case SCSI_PROT_READ_STRIP:
1507        case SCSI_PROT_WRITE_INSERT:
1508        case SCSI_PROT_READ_PASS:
1509        case SCSI_PROT_WRITE_PASS:
1510                total_bytes = data_bytes + dif_bytes;
1511                break;
1512        default:
1513                BUG();
1514        }
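            /*
             * total_bytes is what moves on the wire (and becomes FCP_DL).
             * For INSERT on reads / STRIP on writes the DIF tuples exist
             * only on the host side, so they count toward the host DMA
             * length (data_bytes) but not the wire length.
             */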
1515
1516        if (!qla2x00_hba_err_chk_enabled(sp))
1517                fw_prot_opts |= 0x10; /* Disable Guard tag checking */
1518        /* HBA error checking enabled */
1519        else if (IS_PI_UNINIT_CAPABLE(ha)) {
1520                if ((scsi_get_prot_type(GET_CMD_SP(sp)) == SCSI_PROT_DIF_TYPE1)
1521                    || (scsi_get_prot_type(GET_CMD_SP(sp)) ==
1522                        SCSI_PROT_DIF_TYPE2))
1523                        fw_prot_opts |= BIT_10;
1524                else if (scsi_get_prot_type(GET_CMD_SP(sp)) ==
1525                    SCSI_PROT_DIF_TYPE3)
1526                        fw_prot_opts |= BIT_11;
1527        }
1528
1529        if (!bundling) {
1530                cur_dsd = &crc_ctx_pkt->u.nobundling.data_dsd[0];
1531        } else {
1532                /*
1533                 * Configure bundling so data and interleaved protection
1534                 * segments can be fetched together over PCI.
1535                 */
1536                fw_prot_opts |= PO_ENABLE_DIF_BUNDLING;
1537                crc_ctx_pkt->u.bundling.dif_byte_count = cpu_to_le32(dif_bytes);
1538                crc_ctx_pkt->u.bundling.dseg_count = cpu_to_le16(tot_dsds -
1539                                                        tot_prot_dsds);
1540                cur_dsd = &crc_ctx_pkt->u.bundling.data_dsd[0];
1541        }
1542
1543        /* Finish the common fields of CRC pkt */
1544        crc_ctx_pkt->blk_size = cpu_to_le16(blk_size);
1545        crc_ctx_pkt->prot_opts = cpu_to_le16(fw_prot_opts);
1546        crc_ctx_pkt->byte_count = cpu_to_le32(data_bytes);
1547        crc_ctx_pkt->guard_seed = cpu_to_le16(0);
1548        /* Fibre channel byte count */
1549        cmd_pkt->byte_count = cpu_to_le32(total_bytes);
1550        fcp_dl = (__be32 *)(crc_ctx_pkt->fcp_cmnd.cdb + 16 +
1551            additional_fcpcdb_len);
1552        *fcp_dl = htonl(total_bytes);
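            /* FCP_DL is big-endian per the FCP spec, hence htonl() here. */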
1553
1554        if (!data_bytes || cmd->sc_data_direction == DMA_NONE) {
1555                cmd_pkt->byte_count = cpu_to_le32(0);
1556                return QLA_SUCCESS;
1557        }
1558        /* Walks data segments */
1559
1560        cmd_pkt->control_flags |= cpu_to_le16(CF_DATA_SEG_DESCR_ENABLE);
1561
1562        if (!bundling && tot_prot_dsds) {
1563                if (qla24xx_walk_and_build_sglist_no_difb(ha, sp,
1564                        cur_dsd, tot_dsds, NULL))
1565                        goto crc_queuing_error;
1566        } else if (qla24xx_walk_and_build_sglist(ha, sp, cur_dsd,
1567                        (tot_dsds - tot_prot_dsds), NULL))
1568                goto crc_queuing_error;
1569
1570        if (bundling && tot_prot_dsds) {
1571                /* Walks dif segments */
1572                cmd_pkt->control_flags |= cpu_to_le16(CF_DIF_SEG_DESCR_ENABLE);
1573                cur_dsd = &crc_ctx_pkt->u.bundling.dif_dsd;
1574                if (qla24xx_walk_and_build_prot_sglist(ha, sp, cur_dsd,
1575                                tot_prot_dsds, NULL))
1576                        goto crc_queuing_error;
1577        }
1578        return QLA_SUCCESS;
1579
1580crc_queuing_error:
1581        /* Cleanup will be performed by the caller */
1582
1583        return QLA_FUNCTION_FAILED;
1584}
1585
1586/**
1587 * qla24xx_start_scsi() - Send a SCSI command to the ISP
1588 * @sp: command to send to the ISP
1589 *
1590 * Returns non-zero if a failure occurred, else zero.
1591 */
1592int
1593qla24xx_start_scsi(srb_t *sp)
1594{
1595        int             nseg;
1596        unsigned long   flags;
1597        uint32_t        *clr_ptr;
1598        uint32_t        handle;
1599        struct cmd_type_7 *cmd_pkt;
1600        uint16_t        cnt;
1601        uint16_t        req_cnt;
1602        uint16_t        tot_dsds;
1603        struct req_que *req = NULL;
1604        struct rsp_que *rsp;
1605        struct scsi_cmnd *cmd = GET_CMD_SP(sp);
1606        struct scsi_qla_host *vha = sp->vha;
1607        struct qla_hw_data *ha = vha->hw;
1608
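            /* FC-SP (EDIF) sessions must take the encryption-aware I/O path. */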
1609        if (sp->fcport->edif.enable && (sp->fcport->flags & FCF_FCSP_DEVICE))
1610                return qla28xx_start_scsi_edif(sp);
1611
1612        /* Setup device pointers. */
1613        req = vha->req;
1614        rsp = req->rsp;
1615
1616        /* So we know we haven't pci_map'ed anything yet */
1617        tot_dsds = 0;
1618
1619        /* Send marker if required */
1620        if (vha->marker_needed != 0) {
1621                if (qla2x00_marker(vha, ha->base_qpair, 0, 0, MK_SYNC_ALL) !=
1622                    QLA_SUCCESS)
1623                        return QLA_FUNCTION_FAILED;
1624                vha->marker_needed = 0;
1625        }
1626
1627        /* Acquire ring specific lock */
1628        spin_lock_irqsave(&ha->hardware_lock, flags);
1629
1630        handle = qla2xxx_get_next_handle(req);
1631        if (handle == 0)
1632                goto queuing_error;
1633
1634        /* Map the sg table so we have an accurate count of sg entries needed */
1635        if (scsi_sg_count(cmd)) {
1636                nseg = dma_map_sg(&ha->pdev->dev, scsi_sglist(cmd),
1637                    scsi_sg_count(cmd), cmd->sc_data_direction);
1638                if (unlikely(!nseg))
1639                        goto queuing_error;
1640        } else
1641                nseg = 0;
1642
1643        tot_dsds = nseg;
1644        req_cnt = qla24xx_calc_iocbs(vha, tot_dsds);
1645
1646        sp->iores.res_type = RESOURCE_INI;
1647        sp->iores.iocb_cnt = req_cnt;
1648        if (qla_get_iocbs(sp->qpair, &sp->iores))
1649                goto queuing_error;
1650
1651        if (req->cnt < (req_cnt + 2)) {
1652                if (IS_SHADOW_REG_CAPABLE(ha)) {
1653                        cnt = *req->out_ptr;
1654                } else {
1655                        cnt = rd_reg_dword_relaxed(req->req_q_out);
1656                        if (qla2x00_check_reg16_for_disconnect(vha, cnt))
1657                                goto queuing_error;
1658                }
1659
1660                if (req->ring_index < cnt)
1661                        req->cnt = cnt - req->ring_index;
1662                else
1663                        req->cnt = req->length -
1664                                (req->ring_index - cnt);
1665                if (req->cnt < (req_cnt + 2))
1666                        goto queuing_error;
1667        }
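            /*
             * req->cnt now holds the free request-ring entries, computed
             * from the firmware consumer index with wraparound handled
             * (e.g. length=2048, ring_index=10, cnt=5 gives
             * 2048 - (10 - 5) = 2043 free entries); the +2 keeps a gap
             * so the producer never overruns the consumer.
             */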
1668
1669        /* Build command packet. */
1670        req->current_outstanding_cmd = handle;
1671        req->outstanding_cmds[handle] = sp;
1672        sp->handle = handle;
1673        cmd->host_scribble = (unsigned char *)(unsigned long)handle;
1674        req->cnt -= req_cnt;
1675
1676        cmd_pkt = (struct cmd_type_7 *)req->ring_ptr;
1677        cmd_pkt->handle = make_handle(req->id, handle);
1678
1679        /* Zero out remaining portion of packet. */
1680        /* Tagged queuing modifier -- default is TSK_SIMPLE (0). */
1681        clr_ptr = (uint32_t *)cmd_pkt + 2;
1682        memset(clr_ptr, 0, REQUEST_ENTRY_SIZE - 8);
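            /*
             * The memset skipped the first two dwords: the handle was
             * just written, and the entry header is filled in further
             * below.
             */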
1683        cmd_pkt->dseg_count = cpu_to_le16(tot_dsds);
1684
1685        /* Set NPORT-ID and LUN number*/
1686        cmd_pkt->nport_handle = cpu_to_le16(sp->fcport->loop_id);
1687        cmd_pkt->port_id[0] = sp->fcport->d_id.b.al_pa;
1688        cmd_pkt->port_id[1] = sp->fcport->d_id.b.area;
1689        cmd_pkt->port_id[2] = sp->fcport->d_id.b.domain;
1690        cmd_pkt->vp_index = sp->vha->vp_idx;
1691
1692        int_to_scsilun(cmd->device->lun, &cmd_pkt->lun);
1693        host_to_fcp_swap((uint8_t *)&cmd_pkt->lun, sizeof(cmd_pkt->lun));
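            /* The FCP LUN field is big-endian on the wire, hence the swap. */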
1694
1695        cmd_pkt->task = TSK_SIMPLE;
1696
1697        /* Load SCSI command packet. */
1698        memcpy(cmd_pkt->fcp_cdb, cmd->cmnd, cmd->cmd_len);
1699        host_to_fcp_swap(cmd_pkt->fcp_cdb, sizeof(cmd_pkt->fcp_cdb));
1700
1701        cmd_pkt->byte_count = cpu_to_le32((uint32_t)scsi_bufflen(cmd));
1702
1703        /* Build IOCB segments */
1704        qla24xx_build_scsi_iocbs(sp, cmd_pkt, tot_dsds, req);
1705
1706        /* Set total data segment count. */
1707        cmd_pkt->entry_count = (uint8_t)req_cnt;
1708        wmb();
1709        /* Adjust ring index. */
1710        req->ring_index++;
1711        if (req->ring_index == req->length) {
1712                req->ring_index = 0;
1713                req->ring_ptr = req->ring;
1714        } else
1715                req->ring_ptr++;
1716
1717        sp->qpair->cmd_cnt++;
1718        sp->flags |= SRB_DMA_VALID;
1719
1720        /* Set chip new ring index. */
1721        wrt_reg_dword(req->req_q_in, req->ring_index);
1722
1723        /* Manage unprocessed RIO/ZIO commands in response queue. */
1724        if (vha->flags.process_response_queue &&
1725            rsp->ring_ptr->signature != RESPONSE_PROCESSED)
1726                qla24xx_process_response_queue(vha, rsp);
1727
1728        spin_unlock_irqrestore(&ha->hardware_lock, flags);
1729        return QLA_SUCCESS;
1730
1731queuing_error:
1732        if (tot_dsds)
1733                scsi_dma_unmap(cmd);
1734
1735        qla_put_iocbs(sp->qpair, &sp->iores);
1736        spin_unlock_irqrestore(&ha->hardware_lock, flags);
1737
1738        return QLA_FUNCTION_FAILED;
1739}
1740
1741/**
1742 * qla24xx_dif_start_scsi() - Send a SCSI command to the ISP
1743 * @sp: command to send to the ISP
1744 *
1745 * Returns non-zero if a failure occurred, else zero.
1746 */
1747int
1748qla24xx_dif_start_scsi(srb_t *sp)
1749{
1750        int                     nseg;
1751        unsigned long           flags;
1752        uint32_t                *clr_ptr;
1753        uint32_t                handle;
1754        uint16_t                cnt;
1755        uint16_t                req_cnt = 0;
1756        uint16_t                tot_dsds;
1757        uint16_t                tot_prot_dsds;
1758        uint16_t                fw_prot_opts = 0;
1759        struct req_que          *req = NULL;
1760        struct rsp_que          *rsp = NULL;
1761        struct scsi_cmnd        *cmd = GET_CMD_SP(sp);
1762        struct scsi_qla_host    *vha = sp->vha;
1763        struct qla_hw_data      *ha = vha->hw;
1764        struct cmd_type_crc_2   *cmd_pkt;
1765        uint32_t                status = 0;
1766
1767#define QDSS_GOT_Q_SPACE        BIT_0
1768
1769        /* Only process protection I/O or CDBs longer than 16 bytes here */
1770        if (scsi_get_prot_op(cmd) == SCSI_PROT_NORMAL) {
1771                if (cmd->cmd_len <= 16)
1772                        return qla24xx_start_scsi(sp);
1773        }
1774
1775        /* Setup device pointers. */
1776        req = vha->req;
1777        rsp = req->rsp;
1778
1779        /* So we know we haven't pci_map'ed anything yet */
1780        tot_dsds = 0;
1781
1782        /* Send marker if required */
1783        if (vha->marker_needed != 0) {
1784                if (qla2x00_marker(vha, ha->base_qpair, 0, 0, MK_SYNC_ALL) !=
1785                    QLA_SUCCESS)
1786                        return QLA_FUNCTION_FAILED;
1787                vha->marker_needed = 0;
1788        }
1789
1790        /* Acquire ring specific lock */
1791        spin_lock_irqsave(&ha->hardware_lock, flags);
1792
1793        handle = qla2xxx_get_next_handle(req);
1794        if (handle == 0)
1795                goto queuing_error;
1796
1797        /* Compute number of required data segments */
1798        /* Map the sg table so we have an accurate count of sg entries needed */
1799        if (scsi_sg_count(cmd)) {
1800                nseg = dma_map_sg(&ha->pdev->dev, scsi_sglist(cmd),
1801                    scsi_sg_count(cmd), cmd->sc_data_direction);
1802                if (unlikely(!nseg))
1803                        goto queuing_error;
1804                else
1805                        sp->flags |= SRB_DMA_VALID;
1806
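                    /*
                     * For INSERT/STRIP operations the firmware handles
                     * protection per logical block, so recount the data
                     * segments by walking the sg list one sector_size
                     * block at a time.
                     */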
1807                if ((scsi_get_prot_op(cmd) == SCSI_PROT_READ_INSERT) ||
1808                    (scsi_get_prot_op(cmd) == SCSI_PROT_WRITE_STRIP)) {
1809                        struct qla2_sgx sgx;
1810                        uint32_t        partial;
1811
1812                        memset(&sgx, 0, sizeof(struct qla2_sgx));
1813                        sgx.tot_bytes = scsi_bufflen(cmd);
1814                        sgx.cur_sg = scsi_sglist(cmd);
1815                        sgx.sp = sp;
1816
1817                        nseg = 0;
1818                        while (qla24xx_get_one_block_sg(
1819                            cmd->device->sector_size, &sgx, &partial))
1820                                nseg++;
1821                }
1822        } else
1823                nseg = 0;
1824
1825        /* number of required data segments */
1826        tot_dsds = nseg;
1827
1828        /* Compute number of required protection segments */
1829        if (qla24xx_configure_prot_mode(sp, &fw_prot_opts)) {
1830                nseg = dma_map_sg(&ha->pdev->dev, scsi_prot_sglist(cmd),
1831                    scsi_prot_sg_count(cmd), cmd->sc_data_direction);
1832                if (unlikely(!nseg))
1833                        goto queuing_error;
1834                else
1835                        sp->flags |= SRB_CRC_PROT_DMA_VALID;
1836
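                    /*
                     * With INSERT on reads / STRIP on writes the HBA
                     * produces or consumes exactly one PI tuple per
                     * block, so the protection-segment count is the
                     * number of blocks, not the mapped sg count.
                     */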
1837                if ((scsi_get_prot_op(cmd) == SCSI_PROT_READ_INSERT) ||
1838                    (scsi_get_prot_op(cmd) == SCSI_PROT_WRITE_STRIP)) {
1839                        nseg = scsi_bufflen(cmd) / cmd->device->sector_size;
1840                }
1841        } else {
1842                nseg = 0;
1843        }
1844
1845        req_cnt = 1;
1846        /* Total Data and protection sg segment(s) */
1847        tot_prot_dsds = nseg;
1848        tot_dsds += nseg;
1849
1850        sp->iores.res_type = RESOURCE_INI;
1851        sp->iores.iocb_cnt = qla24xx_calc_iocbs(vha, tot_dsds);
1852        if (qla_get_iocbs(sp->qpair, &sp->iores))
1853                goto queuing_error;
1854
1855        if (req->cnt < (req_cnt + 2)) {
1856                if (IS_SHADOW_REG_CAPABLE(ha)) {
1857                        cnt = *req->out_ptr;
1858                } else {
1859                        cnt = rd_reg_dword_relaxed(req->req_q_out);
1860                        if (qla2x00_check_reg16_for_disconnect(vha, cnt))
1861                                goto queuing_error;
1862                }
1863                if (req->ring_index < cnt)
1864                        req->cnt = cnt - req->ring_index;
1865                else
1866                        req->cnt = req->length -
1867                                (req->ring_index - cnt);
1868                if (req->cnt < (req_cnt + 2))
1869                        goto queuing_error;
1870        }
1871
1872        status |= QDSS_GOT_Q_SPACE;
1873
1874        /* Build header part of command packet (excluding the OPCODE). */
1875        req->current_outstanding_cmd = handle;
1876        req->outstanding_cmds[handle] = sp;
1877        sp->handle = handle;
1878        cmd->host_scribble = (unsigned char *)(unsigned long)handle;
1879        req->cnt -= req_cnt;
1880
1881        /* Fill-in common area */
1882        cmd_pkt = (struct cmd_type_crc_2 *)req->ring_ptr;
1883        cmd_pkt->handle = make_handle(req->id, handle);
1884
1885        clr_ptr = (uint32_t *)cmd_pkt + 2;
1886        memset(clr_ptr, 0, REQUEST_ENTRY_SIZE - 8);
1887
1888        /* Set NPORT-ID and LUN number*/
1889        cmd_pkt->nport_handle = cpu_to_le16(sp->fcport->loop_id);
1890        cmd_pkt->port_id[0] = sp->fcport->d_id.b.al_pa;
1891        cmd_pkt->port_id[1] = sp->fcport->d_id.b.area;
1892        cmd_pkt->port_id[2] = sp->fcport->d_id.b.domain;
1893
1894        int_to_scsilun(cmd->device->lun, &cmd_pkt->lun);
1895        host_to_fcp_swap((uint8_t *)&cmd_pkt->lun, sizeof(cmd_pkt->lun));
1896
1897        /* Total Data and protection segment(s) */
1898        cmd_pkt->dseg_count = cpu_to_le16(tot_dsds);
1899
1900        /* Build IOCB segments and adjust for data protection segments */
1901        if (qla24xx_build_scsi_crc_2_iocbs(sp, (struct cmd_type_crc_2 *)
1902            req->ring_ptr, tot_dsds, tot_prot_dsds, fw_prot_opts) !=
1903                QLA_SUCCESS)
1904                goto queuing_error;
1905
1906        cmd_pkt->entry_count = (uint8_t)req_cnt;
1907        /* Specify response queue number where completion should happen */
1908        cmd_pkt->entry_status = (uint8_t) rsp->id;
1909        cmd_pkt->timeout = cpu_to_le16(0);
1910        wmb();
1911
1912        /* Adjust ring index. */
1913        req->ring_index++;
1914        if (req->ring_index == req->length) {
1915                req->ring_index = 0;
1916                req->ring_ptr = req->ring;
1917        } else
1918                req->ring_ptr++;
1919
1920        sp->qpair->cmd_cnt++;
1921        /* Set chip new ring index. */
1922        wrt_reg_dword(req->req_q_in, req->ring_index);
1923
1924        /* Manage unprocessed RIO/ZIO commands in response queue. */
1925        if (vha->flags.process_response_queue &&
1926            rsp->ring_ptr->signature != RESPONSE_PROCESSED)
1927                qla24xx_process_response_queue(vha, rsp);
1928
1929        spin_unlock_irqrestore(&ha->hardware_lock, flags);
1930
1931        return QLA_SUCCESS;
1932
1933queuing_error:
1934        if (status & QDSS_GOT_Q_SPACE) {
1935                req->outstanding_cmds[handle] = NULL;
1936                req->cnt += req_cnt;
1937        }
1938        /* Cleanup will be performed by the caller (queuecommand) */
1939
1940        qla_put_iocbs(sp->qpair, &sp->iores);
1941        spin_unlock_irqrestore(&ha->hardware_lock, flags);
1942
1943        return QLA_FUNCTION_FAILED;
1944}
1945
1946/**
1947 * qla2xxx_start_scsi_mq() - Send a SCSI command to the ISP
1948 * @sp: command to send to the ISP
1949 *
1950 * Returns non-zero if a failure occurred, else zero.
1951 */
1952static int
1953qla2xxx_start_scsi_mq(srb_t *sp)
1954{
1955        int             nseg;
1956        unsigned long   flags;
1957        uint32_t        *clr_ptr;
1958        uint32_t        handle;
1959        struct cmd_type_7 *cmd_pkt;
1960        uint16_t        cnt;
1961        uint16_t        req_cnt;
1962        uint16_t        tot_dsds;
1963        struct req_que *req = NULL;
1964        struct rsp_que *rsp;
1965        struct scsi_cmnd *cmd = GET_CMD_SP(sp);
1966        struct scsi_qla_host *vha = sp->fcport->vha;
1967        struct qla_hw_data *ha = vha->hw;
1968        struct qla_qpair *qpair = sp->qpair;
1969
1970        if (sp->fcport->edif.enable && (sp->fcport->flags & FCF_FCSP_DEVICE))
1971                return qla28xx_start_scsi_edif(sp);
1972
1973        /* Acquire qpair specific lock */
1974        spin_lock_irqsave(&qpair->qp_lock, flags);
1975
1976        /* Setup qpair pointers */
1977        req = qpair->req;
1978        rsp = qpair->rsp;
1979
1980        /* So we know we haven't pci_map'ed anything yet */
1981        tot_dsds = 0;
1982
1983        /* Send marker if required */
1984        if (vha->marker_needed != 0) {
1985                if (__qla2x00_marker(vha, qpair, 0, 0, MK_SYNC_ALL) !=
1986                    QLA_SUCCESS) {
1987                        spin_unlock_irqrestore(&qpair->qp_lock, flags);
1988                        return QLA_FUNCTION_FAILED;
1989                }
1990                vha->marker_needed = 0;
1991        }
1992
1993        handle = qla2xxx_get_next_handle(req);
1994        if (handle == 0)
1995                goto queuing_error;
1996
1997        /* Map the sg table so we have an accurate count of sg entries needed */
1998        if (scsi_sg_count(cmd)) {
1999                nseg = dma_map_sg(&ha->pdev->dev, scsi_sglist(cmd),
2000                    scsi_sg_count(cmd), cmd->sc_data_direction);
2001                if (unlikely(!nseg))
2002                        goto queuing_error;
2003        } else
2004                nseg = 0;
2005
2006        tot_dsds = nseg;
2007        req_cnt = qla24xx_calc_iocbs(vha, tot_dsds);
2008
2009        sp->iores.res_type = RESOURCE_INI;
2010        sp->iores.iocb_cnt = req_cnt;
2011        if (qla_get_iocbs(sp->qpair, &sp->iores))
2012                goto queuing_error;
2013
2014        if (req->cnt < (req_cnt + 2)) {
2015                if (IS_SHADOW_REG_CAPABLE(ha)) {
2016                        cnt = *req->out_ptr;
2017                } else {
2018                        cnt = rd_reg_dword_relaxed(req->req_q_out);
2019                        if (qla2x00_check_reg16_for_disconnect(vha, cnt))
2020                                goto queuing_error;
2021                }
2022
2023                if (req->ring_index < cnt)
2024                        req->cnt = cnt - req->ring_index;
2025                else
2026                        req->cnt = req->length -
2027                                (req->ring_index - cnt);
2028                if (req->cnt < (req_cnt + 2))
2029                        goto queuing_error;
2030        }
2031
2032        /* Build command packet. */
2033        req->current_outstanding_cmd = handle;
2034        req->outstanding_cmds[handle] = sp;
2035        sp->handle = handle;
2036        cmd->host_scribble = (unsigned char *)(unsigned long)handle;
2037        req->cnt -= req_cnt;
2038
2039        cmd_pkt = (struct cmd_type_7 *)req->ring_ptr;
2040        cmd_pkt->handle = make_handle(req->id, handle);
2041
2042        /* Zero out remaining portion of packet. */
2043        /* Tagged queuing modifier -- default is TSK_SIMPLE (0). */
2044        clr_ptr = (uint32_t *)cmd_pkt + 2;
2045        memset(clr_ptr, 0, REQUEST_ENTRY_SIZE - 8);
2046        cmd_pkt->dseg_count = cpu_to_le16(tot_dsds);
2047
2048        /* Set NPORT-ID and LUN number*/
2049        cmd_pkt->nport_handle = cpu_to_le16(sp->fcport->loop_id);
2050        cmd_pkt->port_id[0] = sp->fcport->d_id.b.al_pa;
2051        cmd_pkt->port_id[1] = sp->fcport->d_id.b.area;
2052        cmd_pkt->port_id[2] = sp->fcport->d_id.b.domain;
2053        cmd_pkt->vp_index = sp->fcport->vha->vp_idx;
2054
2055        int_to_scsilun(cmd->device->lun, &cmd_pkt->lun);
2056        host_to_fcp_swap((uint8_t *)&cmd_pkt->lun, sizeof(cmd_pkt->lun));
2057
2058        cmd_pkt->task = TSK_SIMPLE;
2059
2060        /* Load SCSI command packet. */
2061        memcpy(cmd_pkt->fcp_cdb, cmd->cmnd, cmd->cmd_len);
2062        host_to_fcp_swap(cmd_pkt->fcp_cdb, sizeof(cmd_pkt->fcp_cdb));
2063
2064        cmd_pkt->byte_count = cpu_to_le32((uint32_t)scsi_bufflen(cmd));
2065
2066        /* Build IOCB segments */
2067        qla24xx_build_scsi_iocbs(sp, cmd_pkt, tot_dsds, req);
2068
2069        /* Set total data segment count. */
2070        cmd_pkt->entry_count = (uint8_t)req_cnt;
2071        wmb();
2072        /* Adjust ring index. */
2073        req->ring_index++;
2074        if (req->ring_index == req->length) {
2075                req->ring_index = 0;
2076                req->ring_ptr = req->ring;
2077        } else
2078                req->ring_ptr++;
2079
2080        sp->qpair->cmd_cnt++;
2081        sp->flags |= SRB_DMA_VALID;
2082
2083        /* Set chip new ring index. */
2084        wrt_reg_dword(req->req_q_in, req->ring_index);
2085
2086        /* Manage unprocessed RIO/ZIO commands in response queue. */
2087        if (vha->flags.process_response_queue &&
2088            rsp->ring_ptr->signature != RESPONSE_PROCESSED)
2089                qla24xx_process_response_queue(vha, rsp);
2090
2091        spin_unlock_irqrestore(&qpair->qp_lock, flags);
2092        return QLA_SUCCESS;
2093
2094queuing_error:
2095        if (tot_dsds)
2096                scsi_dma_unmap(cmd);
2097
2098        qla_put_iocbs(sp->qpair, &sp->iores);
2099        spin_unlock_irqrestore(&qpair->qp_lock, flags);
2100
2101        return QLA_FUNCTION_FAILED;
2102}
2103
2104
2105/**
2106 * qla2xxx_dif_start_scsi_mq() - Send a SCSI command to the ISP
2107 * @sp: command to send to the ISP
2108 *
2109 * Returns non-zero if a failure occurred, else zero.
2110 */
2111int
2112qla2xxx_dif_start_scsi_mq(srb_t *sp)
2113{
2114        int                     nseg;
2115        unsigned long           flags;
2116        uint32_t                *clr_ptr;
2117        uint32_t                handle;
2118        uint16_t                cnt;
2119        uint16_t                req_cnt = 0;
2120        uint16_t                tot_dsds;
2121        uint16_t                tot_prot_dsds;
2122        uint16_t                fw_prot_opts = 0;
2123        struct req_que          *req = NULL;
2124        struct rsp_que          *rsp = NULL;
2125        struct scsi_cmnd        *cmd = GET_CMD_SP(sp);
2126        struct scsi_qla_host    *vha = sp->fcport->vha;
2127        struct qla_hw_data      *ha = vha->hw;
2128        struct cmd_type_crc_2   *cmd_pkt;
2129        uint32_t                status = 0;
2130        struct qla_qpair        *qpair = sp->qpair;
2131
2132#define QDSS_GOT_Q_SPACE        BIT_0
2133
2134        /* Check host-side state */
2135        if (!qpair->online) {
2136                cmd->result = DID_NO_CONNECT << 16;
2137                return QLA_INTERFACE_ERROR;
2138        }
2139
2140        if (!qpair->difdix_supported &&
2141                scsi_get_prot_op(cmd) != SCSI_PROT_NORMAL) {
2142                cmd->result = DID_NO_CONNECT << 16;
2143                return QLA_INTERFACE_ERROR;
2144        }
2145
2146        /* Only process protection I/O or CDBs longer than 16 bytes here */
2147        if (scsi_get_prot_op(cmd) == SCSI_PROT_NORMAL) {
2148                if (cmd->cmd_len <= 16)
2149                        return qla2xxx_start_scsi_mq(sp);
2150        }
2151
2152        spin_lock_irqsave(&qpair->qp_lock, flags);
2153
2154        /* Setup qpair pointers */
2155        rsp = qpair->rsp;
2156        req = qpair->req;
2157
2158        /* So we know we haven't pci_map'ed anything yet */
2159        tot_dsds = 0;
2160
2161        /* Send marker if required */
2162        if (vha->marker_needed != 0) {
2163                if (__qla2x00_marker(vha, qpair, 0, 0, MK_SYNC_ALL) !=
2164                    QLA_SUCCESS) {
2165                        spin_unlock_irqrestore(&qpair->qp_lock, flags);
2166                        return QLA_FUNCTION_FAILED;
2167                }
2168                vha->marker_needed = 0;
2169        }
2170
2171        handle = qla2xxx_get_next_handle(req);
2172        if (handle == 0)
2173                goto queuing_error;
2174
2175        /* Compute number of required data segments */
2176        /* Map the sg table so we have an accurate count of sg entries needed */
2177        if (scsi_sg_count(cmd)) {
2178                nseg = dma_map_sg(&ha->pdev->dev, scsi_sglist(cmd),
2179                    scsi_sg_count(cmd), cmd->sc_data_direction);
2180                if (unlikely(!nseg))
2181                        goto queuing_error;
2182                else
2183                        sp->flags |= SRB_DMA_VALID;
2184
2185                if ((scsi_get_prot_op(cmd) == SCSI_PROT_READ_INSERT) ||
2186                    (scsi_get_prot_op(cmd) == SCSI_PROT_WRITE_STRIP)) {
2187                        struct qla2_sgx sgx;
2188                        uint32_t        partial;
2189
2190                        memset(&sgx, 0, sizeof(struct qla2_sgx));
2191                        sgx.tot_bytes = scsi_bufflen(cmd);
2192                        sgx.cur_sg = scsi_sglist(cmd);
2193                        sgx.sp = sp;
2194
2195                        nseg = 0;
2196                        while (qla24xx_get_one_block_sg(
2197                            cmd->device->sector_size, &sgx, &partial))
2198                                nseg++;
2199                }
2200        } else
2201                nseg = 0;
2202
2203        /* number of required data segments */
2204        tot_dsds = nseg;
2205
2206        /* Compute number of required protection segments */
2207        if (qla24xx_configure_prot_mode(sp, &fw_prot_opts)) {
2208                nseg = dma_map_sg(&ha->pdev->dev, scsi_prot_sglist(cmd),
2209                    scsi_prot_sg_count(cmd), cmd->sc_data_direction);
2210                if (unlikely(!nseg))
2211                        goto queuing_error;
2212                else
2213                        sp->flags |= SRB_CRC_PROT_DMA_VALID;
2214
2215                if ((scsi_get_prot_op(cmd) == SCSI_PROT_READ_INSERT) ||
2216                    (scsi_get_prot_op(cmd) == SCSI_PROT_WRITE_STRIP)) {
2217                        nseg = scsi_bufflen(cmd) / cmd->device->sector_size;
2218                }
2219        } else {
2220                nseg = 0;
2221        }
2222
2223        req_cnt = 1;
2224        /* Total Data and protection sg segment(s) */
2225        tot_prot_dsds = nseg;
2226        tot_dsds += nseg;
2227
2228        sp->iores.res_type = RESOURCE_INI;
2229        sp->iores.iocb_cnt = qla24xx_calc_iocbs(vha, tot_dsds);
2230        if (qla_get_iocbs(sp->qpair, &sp->iores))
2231                goto queuing_error;
2232
2233        if (req->cnt < (req_cnt + 2)) {
2234                if (IS_SHADOW_REG_CAPABLE(ha)) {
2235                        cnt = *req->out_ptr;
2236                } else {
2237                        cnt = rd_reg_dword_relaxed(req->req_q_out);
2238                        if (qla2x00_check_reg16_for_disconnect(vha, cnt))
2239                                goto queuing_error;
2240                }
2241
2242                if (req->ring_index < cnt)
2243                        req->cnt = cnt - req->ring_index;
2244                else
2245                        req->cnt = req->length -
2246                                (req->ring_index - cnt);
2247                if (req->cnt < (req_cnt + 2))
2248                        goto queuing_error;
2249        }
2250
2251        status |= QDSS_GOT_Q_SPACE;
2252
2253        /* Build header part of command packet (excluding the OPCODE). */
2254        req->current_outstanding_cmd = handle;
2255        req->outstanding_cmds[handle] = sp;
2256        sp->handle = handle;
2257        cmd->host_scribble = (unsigned char *)(unsigned long)handle;
2258        req->cnt -= req_cnt;
2259
2260        /* Fill-in common area */
2261        cmd_pkt = (struct cmd_type_crc_2 *)req->ring_ptr;
2262        cmd_pkt->handle = make_handle(req->id, handle);
2263
2264        clr_ptr = (uint32_t *)cmd_pkt + 2;
2265        memset(clr_ptr, 0, REQUEST_ENTRY_SIZE - 8);
2266
2267        /* Set NPORT-ID and LUN number*/
2268        cmd_pkt->nport_handle = cpu_to_le16(sp->fcport->loop_id);
2269        cmd_pkt->port_id[0] = sp->fcport->d_id.b.al_pa;
2270        cmd_pkt->port_id[1] = sp->fcport->d_id.b.area;
2271        cmd_pkt->port_id[2] = sp->fcport->d_id.b.domain;
2272
2273        int_to_scsilun(cmd->device->lun, &cmd_pkt->lun);
2274        host_to_fcp_swap((uint8_t *)&cmd_pkt->lun, sizeof(cmd_pkt->lun));
2275
2276        /* Total Data and protection segment(s) */
2277        cmd_pkt->dseg_count = cpu_to_le16(tot_dsds);
2278
2279        /* Build IOCB segments and adjust for data protection segments */
2280        if (qla24xx_build_scsi_crc_2_iocbs(sp, (struct cmd_type_crc_2 *)
2281            req->ring_ptr, tot_dsds, tot_prot_dsds, fw_prot_opts) !=
2282                QLA_SUCCESS)
2283                goto queuing_error;
2284
2285        cmd_pkt->entry_count = (uint8_t)req_cnt;
2286        cmd_pkt->timeout = cpu_to_le16(0);
2287        wmb();
2288
2289        /* Adjust ring index. */
2290        req->ring_index++;
2291        if (req->ring_index == req->length) {
2292                req->ring_index = 0;
2293                req->ring_ptr = req->ring;
2294        } else
2295                req->ring_ptr++;
2296
2297        sp->qpair->cmd_cnt++;
2298        /* Set chip new ring index. */
2299        wrt_reg_dword(req->req_q_in, req->ring_index);
2300
2301        /* Manage unprocessed RIO/ZIO commands in response queue. */
2302        if (vha->flags.process_response_queue &&
2303            rsp->ring_ptr->signature != RESPONSE_PROCESSED)
2304                qla24xx_process_response_queue(vha, rsp);
2305
2306        spin_unlock_irqrestore(&qpair->qp_lock, flags);
2307
2308        return QLA_SUCCESS;
2309
2310queuing_error:
2311        if (status & QDSS_GOT_Q_SPACE) {
2312                req->outstanding_cmds[handle] = NULL;
2313                req->cnt += req_cnt;
2314        }
2315        /* Cleanup will be performed by the caller (queuecommand) */
2316
2317        qla_put_iocbs(sp->qpair, &sp->iores);
2318        spin_unlock_irqrestore(&qpair->qp_lock, flags);
2319
2320        return QLA_FUNCTION_FAILED;
2321}
2322
2323/* Generic Control-SRB manipulation functions. */
2324
2325/* hardware_lock assumed to be held. */
2326
2327void *
2328__qla2x00_alloc_iocbs(struct qla_qpair *qpair, srb_t *sp)
2329{
2330        scsi_qla_host_t *vha = qpair->vha;
2331        struct qla_hw_data *ha = vha->hw;
2332        struct req_que *req = qpair->req;
2333        device_reg_t *reg = ISP_QUE_REG(ha, req->id);
2334        uint32_t handle;
2335        request_t *pkt;
2336        uint16_t cnt, req_cnt;
2337
2338        pkt = NULL;
2339        req_cnt = 1;
2340        handle = 0;
2341
2342        if (sp && (sp->type != SRB_SCSI_CMD)) {
2343                /* Adjust entry-counts as needed. */
2344                req_cnt = sp->iocbs;
2345        }
2346
2347        /* Check for room on request queue. */
2348        if (req->cnt < req_cnt + 2) {
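                    /*
                     * Each ISP family exposes the request-queue consumer
                     * index at a different register; shadow registers let
                     * the firmware DMA the index into host memory so no
                     * MMIO read is needed here.
                     */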
2349                if (qpair->use_shadow_reg)
2350                        cnt = *req->out_ptr;
2351                else if (ha->mqenable || IS_QLA83XX(ha) || IS_QLA27XX(ha) ||
2352                    IS_QLA28XX(ha))
2353                        cnt = rd_reg_dword(&reg->isp25mq.req_q_out);
2354                else if (IS_P3P_TYPE(ha))
2355                        cnt = rd_reg_dword(reg->isp82.req_q_out);
2356                else if (IS_FWI2_CAPABLE(ha))
2357                        cnt = rd_reg_dword(&reg->isp24.req_q_out);
2358                else if (IS_QLAFX00(ha))
2359                        cnt = rd_reg_dword(&reg->ispfx00.req_q_out);
2360                else
2361                        cnt = qla2x00_debounce_register(
2362                            ISP_REQ_Q_OUT(ha, &reg->isp));
2363
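                    /*
                     * An all-ones readback (ISP_REG16_DISCONNECT) is
                     * treated as the device having dropped off the bus;
                     * punt to the EEH worker for recovery.
                     */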
2364                if (!qpair->use_shadow_reg && cnt == ISP_REG16_DISCONNECT) {
2365                        qla_schedule_eeh_work(vha);
2366                        return NULL;
2367                }
2368
2369                if  (req->ring_index < cnt)
2370                        req->cnt = cnt - req->ring_index;
2371                else
2372                        req->cnt = req->length -
2373                            (req->ring_index - cnt);
2374        }
2375        if (req->cnt < req_cnt + 2)
2376                goto queuing_error;
2377
2378        if (sp) {
2379                handle = qla2xxx_get_next_handle(req);
2380                if (handle == 0) {
2381                        ql_log(ql_log_warn, vha, 0x700b,
2382                            "No room on outstanding cmd array.\n");
2383                        goto queuing_error;
2384                }
2385
2386                /* Prep command array. */
2387                req->current_outstanding_cmd = handle;
2388                req->outstanding_cmds[handle] = sp;
2389                sp->handle = handle;
2390        }
2391
2392        /* Prep packet */
2393        req->cnt -= req_cnt;
2394        pkt = req->ring_ptr;
2395        memset(pkt, 0, REQUEST_ENTRY_SIZE);
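            /*
             * The __iomem casts suggest the ISPFx00 request ring is
             * mapped as device memory, hence the register accessors
             * for the header fields below.
             */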
2396        if (IS_QLAFX00(ha)) {
2397                wrt_reg_byte((u8 __force __iomem *)&pkt->entry_count, req_cnt);
2398                wrt_reg_dword((__le32 __force __iomem *)&pkt->handle, handle);
2399        } else {
2400                pkt->entry_count = req_cnt;
2401                pkt->handle = handle;
2402        }
2403
2404        return pkt;
2405
2406queuing_error:
2407        qpair->tgt_counters.num_alloc_iocb_failed++;
2408        return pkt;
2409}
2410
2411void *
2412qla2x00_alloc_iocbs_ready(struct qla_qpair *qpair, srb_t *sp)
2413{
2414        scsi_qla_host_t *vha = qpair->vha;
2415
2416        if (qla2x00_reset_active(vha))
2417                return NULL;
2418
2419        return __qla2x00_alloc_iocbs(qpair, sp);
2420}
2421
2422void *
2423qla2x00_alloc_iocbs(struct scsi_qla_host *vha, srb_t *sp)
2424{
2425        return __qla2x00_alloc_iocbs(vha->hw->base_qpair, sp);
2426}
2427
2428static void
2429qla24xx_prli_iocb(srb_t *sp, struct logio_entry_24xx *logio)
2430{
2431        struct srb_iocb *lio = &sp->u.iocb_cmd;
2432
2433        logio->entry_type = LOGINOUT_PORT_IOCB_TYPE;
2434        logio->control_flags = cpu_to_le16(LCF_COMMAND_PRLI);
2435        if (lio->u.logio.flags & SRB_LOGIN_NVME_PRLI) {
2436                logio->control_flags |= cpu_to_le16(LCF_NVME_PRLI);
2437                if (sp->vha->flags.nvme_first_burst)
2438                        logio->io_parameter[0] =
2439                                cpu_to_le32(NVME_PRLI_SP_FIRST_BURST);
2440                if (sp->vha->flags.nvme2_enabled) {
2441                        /* Set service parameter BIT_7 for NVME CONF support */
2442                        logio->io_parameter[0] |=
2443                                cpu_to_le32(NVME_PRLI_SP_CONF);
2444                        /* Set service parameter BIT_8 for SLER support */
2445                        logio->io_parameter[0] |=
2446                                cpu_to_le32(NVME_PRLI_SP_SLER);
2447                        /* Set service parameter BIT_9 for PI control support */
2448                        logio->io_parameter[0] |=
2449                                cpu_to_le32(NVME_PRLI_SP_PI_CTRL);
2450                }
2451        }
2452
2453        logio->nport_handle = cpu_to_le16(sp->fcport->loop_id);
2454        logio->port_id[0] = sp->fcport->d_id.b.al_pa;
2455        logio->port_id[1] = sp->fcport->d_id.b.area;
2456        logio->port_id[2] = sp->fcport->d_id.b.domain;
2457        logio->vp_index = sp->vha->vp_idx;
2458}
2459
2460static void
2461qla24xx_login_iocb(srb_t *sp, struct logio_entry_24xx *logio)
2462{
2463        struct srb_iocb *lio = &sp->u.iocb_cmd;
2464
2465        logio->entry_type = LOGINOUT_PORT_IOCB_TYPE;
2466        logio->control_flags = cpu_to_le16(LCF_COMMAND_PLOGI);
2467
2468        if (lio->u.logio.flags & SRB_LOGIN_PRLI_ONLY) {
2469                logio->control_flags = cpu_to_le16(LCF_COMMAND_PRLI);
2470        } else {
2471                logio->control_flags = cpu_to_le16(LCF_COMMAND_PLOGI);
2472                if (lio->u.logio.flags & SRB_LOGIN_COND_PLOGI)
2473                        logio->control_flags |= cpu_to_le16(LCF_COND_PLOGI);
2474                if (lio->u.logio.flags & SRB_LOGIN_SKIP_PRLI)
2475                        logio->control_flags |= cpu_to_le16(LCF_SKIP_PRLI);
2476                if (lio->u.logio.flags & SRB_LOGIN_FCSP) {
2477                        logio->control_flags |=
2478                            cpu_to_le16(LCF_COMMON_FEAT | LCF_SKIP_PRLI);
2479                        logio->io_parameter[0] =
2480                            cpu_to_le32(LIO_COMM_FEAT_FCSP | LIO_COMM_FEAT_CIO);
2481                }
2482        }
2483        logio->nport_handle = cpu_to_le16(sp->fcport->loop_id);
2484        logio->port_id[0] = sp->fcport->d_id.b.al_pa;
2485        logio->port_id[1] = sp->fcport->d_id.b.area;
2486        logio->port_id[2] = sp->fcport->d_id.b.domain;
2487        logio->vp_index = sp->vha->vp_idx;
2488}
2489
2490static void
2491qla2x00_login_iocb(srb_t *sp, struct mbx_entry *mbx)
2492{
2493        struct qla_hw_data *ha = sp->vha->hw;
2494        struct srb_iocb *lio = &sp->u.iocb_cmd;
2495        uint16_t opts;
2496
2497        mbx->entry_type = MBX_IOCB_TYPE;
2498        SET_TARGET_ID(ha, mbx->loop_id, sp->fcport->loop_id);
2499        mbx->mb0 = cpu_to_le16(MBC_LOGIN_FABRIC_PORT);
2500        opts = lio->u.logio.flags & SRB_LOGIN_COND_PLOGI ? BIT_0 : 0;
2501        opts |= lio->u.logio.flags & SRB_LOGIN_SKIP_PRLI ? BIT_1 : 0;
2502        if (HAS_EXTENDED_IDS(ha)) {
2503                mbx->mb1 = cpu_to_le16(sp->fcport->loop_id);
2504                mbx->mb10 = cpu_to_le16(opts);
2505        } else {
2506                mbx->mb1 = cpu_to_le16((sp->fcport->loop_id << 8) | opts);
2507        }
2508        mbx->mb2 = cpu_to_le16(sp->fcport->d_id.b.domain);
2509        mbx->mb3 = cpu_to_le16(sp->fcport->d_id.b.area << 8 |
2510            sp->fcport->d_id.b.al_pa);
2511        mbx->mb9 = cpu_to_le16(sp->vha->vp_idx);
2512}
2513
2514static void
2515qla24xx_logout_iocb(srb_t *sp, struct logio_entry_24xx *logio)
2516{
2517        u16 control_flags = LCF_COMMAND_LOGO;
2518        logio->entry_type = LOGINOUT_PORT_IOCB_TYPE;
2519
2520        if (sp->fcport->explicit_logout) {
2521                control_flags |= LCF_EXPL_LOGO|LCF_FREE_NPORT;
2522        } else {
2523                control_flags |= LCF_IMPL_LOGO;
2524
2525                if (!sp->fcport->keep_nport_handle)
2526                        control_flags |= LCF_FREE_NPORT;
2527        }
2528
2529        logio->control_flags = cpu_to_le16(control_flags);
2530        logio->nport_handle = cpu_to_le16(sp->fcport->loop_id);
2531        logio->port_id[0] = sp->fcport->d_id.b.al_pa;
2532        logio->port_id[1] = sp->fcport->d_id.b.area;
2533        logio->port_id[2] = sp->fcport->d_id.b.domain;
2534        logio->vp_index = sp->vha->vp_idx;
2535}
2536
2537static void
2538qla2x00_logout_iocb(srb_t *sp, struct mbx_entry *mbx)
2539{
2540        struct qla_hw_data *ha = sp->vha->hw;
2541
2542        mbx->entry_type = MBX_IOCB_TYPE;
2543        SET_TARGET_ID(ha, mbx->loop_id, sp->fcport->loop_id);
2544        mbx->mb0 = cpu_to_le16(MBC_LOGOUT_FABRIC_PORT);
2545        mbx->mb1 = HAS_EXTENDED_IDS(ha) ?
2546            cpu_to_le16(sp->fcport->loop_id) :
2547            cpu_to_le16(sp->fcport->loop_id << 8);
2548        mbx->mb2 = cpu_to_le16(sp->fcport->d_id.b.domain);
2549        mbx->mb3 = cpu_to_le16(sp->fcport->d_id.b.area << 8 |
2550            sp->fcport->d_id.b.al_pa);
2551        mbx->mb9 = cpu_to_le16(sp->vha->vp_idx);
2552        /* Implicit: mbx->mb10 = 0. */
2553}
2554
2555static void
2556qla24xx_adisc_iocb(srb_t *sp, struct logio_entry_24xx *logio)
2557{
2558        logio->entry_type = LOGINOUT_PORT_IOCB_TYPE;
2559        logio->control_flags = cpu_to_le16(LCF_COMMAND_ADISC);
2560        logio->nport_handle = cpu_to_le16(sp->fcport->loop_id);
2561        logio->vp_index = sp->vha->vp_idx;
2562}
2563
2564static void
2565qla2x00_adisc_iocb(srb_t *sp, struct mbx_entry *mbx)
2566{
2567        struct qla_hw_data *ha = sp->vha->hw;
2568
2569        mbx->entry_type = MBX_IOCB_TYPE;
2570        SET_TARGET_ID(ha, mbx->loop_id, sp->fcport->loop_id);
2571        mbx->mb0 = cpu_to_le16(MBC_GET_PORT_DATABASE);
2572        if (HAS_EXTENDED_IDS(ha)) {
2573                mbx->mb1 = cpu_to_le16(sp->fcport->loop_id);
2574                mbx->mb10 = cpu_to_le16(BIT_0);
2575        } else {
2576                mbx->mb1 = cpu_to_le16((sp->fcport->loop_id << 8) | BIT_0);
2577        }
2578        mbx->mb2 = cpu_to_le16(MSW(ha->async_pd_dma));
2579        mbx->mb3 = cpu_to_le16(LSW(ha->async_pd_dma));
2580        mbx->mb6 = cpu_to_le16(MSW(MSD(ha->async_pd_dma)));
2581        mbx->mb7 = cpu_to_le16(LSW(MSD(ha->async_pd_dma)));
2582        mbx->mb9 = cpu_to_le16(sp->vha->vp_idx);
2583}
2584
2585static void
2586qla24xx_tm_iocb(srb_t *sp, struct tsk_mgmt_entry *tsk)
2587{
2588        uint32_t flags;
2589        uint64_t lun;
2590        struct fc_port *fcport = sp->fcport;
2591        scsi_qla_host_t *vha = fcport->vha;
2592        struct qla_hw_data *ha = vha->hw;
2593        struct srb_iocb *iocb = &sp->u.iocb_cmd;
2594        struct req_que *req = vha->req;
2595
2596        flags = iocb->u.tmf.flags;
2597        lun = iocb->u.tmf.lun;
2598
2599        tsk->entry_type = TSK_MGMT_IOCB_TYPE;
2600        tsk->entry_count = 1;
2601        tsk->handle = make_handle(req->id, tsk->handle);
2602        tsk->nport_handle = cpu_to_le16(fcport->loop_id);
2603        tsk->timeout = cpu_to_le16(ha->r_a_tov / 10 * 2);
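            /*
             * Assuming r_a_tov is kept in 100 ms units here, /10 yields
             * seconds and doubling programs a 2 * R_A_TOV wait.
             */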
2604        tsk->control_flags = cpu_to_le32(flags);
2605        tsk->port_id[0] = fcport->d_id.b.al_pa;
2606        tsk->port_id[1] = fcport->d_id.b.area;
2607        tsk->port_id[2] = fcport->d_id.b.domain;
2608        tsk->vp_index = fcport->vha->vp_idx;
2609
2610        if (flags == TCF_LUN_RESET) {
2611                int_to_scsilun(lun, &tsk->lun);
2612                host_to_fcp_swap((uint8_t *)&tsk->lun,
2613                        sizeof(tsk->lun));
2614        }
2615}
2616
2617void qla2x00_init_timer(srb_t *sp, unsigned long tmo)
2618{
2619        timer_setup(&sp->u.iocb_cmd.timer, qla2x00_sp_timeout, 0);
2620        sp->u.iocb_cmd.timer.expires = jiffies + tmo * HZ;
2621        sp->free = qla2x00_sp_free;
2622        if (IS_QLAFX00(sp->vha->hw) && sp->type == SRB_FXIOCB_DCMD)
2623                init_completion(&sp->u.iocb_cmd.u.fxiocb.fxiocb_comp);
2624        sp->start_timer = 1;
2625}
2626
2627static void qla2x00_els_dcmd_sp_free(srb_t *sp)
2628{
2629        struct srb_iocb *elsio = &sp->u.iocb_cmd;
2630
2631        kfree(sp->fcport);
2632
2633        if (elsio->u.els_logo.els_logo_pyld)
2634                dma_free_coherent(&sp->vha->hw->pdev->dev, DMA_POOL_SIZE,
2635                    elsio->u.els_logo.els_logo_pyld,
2636                    elsio->u.els_logo.els_logo_pyld_dma);
2637
2638        del_timer(&elsio->timer);
2639        qla2x00_rel_sp(sp);
2640}
2641
2642static void
2643qla2x00_els_dcmd_iocb_timeout(void *data)
2644{
2645        srb_t *sp = data;
2646        fc_port_t *fcport = sp->fcport;
2647        struct scsi_qla_host *vha = sp->vha;
2648        struct srb_iocb *lio = &sp->u.iocb_cmd;
2649        unsigned long flags = 0;
2650        int res, h;
2651
2652        ql_dbg(ql_dbg_io, vha, 0x3069,
2653            "%s Timeout, hdl=%x, portid=%02x%02x%02x\n",
2654            sp->name, sp->handle, fcport->d_id.b.domain, fcport->d_id.b.area,
2655            fcport->d_id.b.al_pa);
2656
2657        /* Abort the exchange */
2658        res = qla24xx_async_abort_cmd(sp, false);
2659        if (res) {
2660                ql_dbg(ql_dbg_io, vha, 0x3070,
2661                    "mbx abort_command failed.\n");
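                    /*
                     * The abort failed, so no completion will come back
                     * for this exchange: drop the handle reference and
                     * wake the waiter ourselves.
                     */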
2662                spin_lock_irqsave(sp->qpair->qp_lock_ptr, flags);
2663                for (h = 1; h < sp->qpair->req->num_outstanding_cmds; h++) {
2664                        if (sp->qpair->req->outstanding_cmds[h] == sp) {
2665                                sp->qpair->req->outstanding_cmds[h] = NULL;
2666                                break;
2667                        }
2668                }
2669                spin_unlock_irqrestore(sp->qpair->qp_lock_ptr, flags);
2670                complete(&lio->u.els_logo.comp);
2671        } else {
2672                ql_dbg(ql_dbg_io, vha, 0x3071,
2673                    "mbx abort_command success.\n");
2674        }
2675}
2676
2677static void qla2x00_els_dcmd_sp_done(srb_t *sp, int res)
2678{
2679        fc_port_t *fcport = sp->fcport;
2680        struct srb_iocb *lio = &sp->u.iocb_cmd;
2681        struct scsi_qla_host *vha = sp->vha;
2682
2683        ql_dbg(ql_dbg_io, vha, 0x3072,
2684            "%s hdl=%x, portid=%02x%02x%02x done\n",
2685            sp->name, sp->handle, fcport->d_id.b.domain,
2686            fcport->d_id.b.area, fcport->d_id.b.al_pa);
2687
2688        complete(&lio->u.els_logo.comp);
2689}
2690
2691int
2692qla24xx_els_dcmd_iocb(scsi_qla_host_t *vha, int els_opcode,
2693    port_id_t remote_did)
2694{
2695        srb_t *sp;
2696        fc_port_t *fcport = NULL;
2697        struct srb_iocb *elsio = NULL;
2698        struct qla_hw_data *ha = vha->hw;
2699        struct els_logo_payload logo_pyld;
2700        int rval = QLA_SUCCESS;
2701
2702        fcport = qla2x00_alloc_fcport(vha, GFP_KERNEL);
2703        if (!fcport) {
2704                ql_log(ql_log_info, vha, 0x70e5, "fcport allocation failed\n");
2705                return -ENOMEM;
2706        }
2707
2708        /* Alloc SRB structure */
2709        sp = qla2x00_get_sp(vha, fcport, GFP_KERNEL);
2710        if (!sp) {
2711                kfree(fcport);
2712                ql_log(ql_log_info, vha, 0x70e6,
2713                    "SRB allocation failed\n");
2714                return -ENOMEM;
2715        }
2716
2717        elsio = &sp->u.iocb_cmd;
2718        fcport->loop_id = 0xFFFF;
2719        fcport->d_id.b.domain = remote_did.b.domain;
2720        fcport->d_id.b.area = remote_did.b.area;
2721        fcport->d_id.b.al_pa = remote_did.b.al_pa;
2722
2723        ql_dbg(ql_dbg_io, vha, 0x3073, "portid=%02x%02x%02x done\n",
2724            fcport->d_id.b.domain, fcport->d_id.b.area, fcport->d_id.b.al_pa);
2725
2726        sp->type = SRB_ELS_DCMD;
2727        sp->name = "ELS_DCMD";
2728        sp->fcport = fcport;
2729        elsio->timeout = qla2x00_els_dcmd_iocb_timeout;
2730        qla2x00_init_timer(sp, ELS_DCMD_TIMEOUT);
2731        init_completion(&sp->u.iocb_cmd.u.els_logo.comp);
2732        sp->done = qla2x00_els_dcmd_sp_done;
2733        sp->free = qla2x00_els_dcmd_sp_free;
2734
2735        elsio->u.els_logo.els_logo_pyld = dma_alloc_coherent(&ha->pdev->dev,
2736                            DMA_POOL_SIZE, &elsio->u.els_logo.els_logo_pyld_dma,
2737                            GFP_KERNEL);
2738
2739        if (!elsio->u.els_logo.els_logo_pyld) {
2740                sp->free(sp);
2741                return QLA_FUNCTION_FAILED;
2742        }
2743
2744        memset(&logo_pyld, 0, sizeof(struct els_logo_payload));
2745
2746        elsio->u.els_logo.els_cmd = els_opcode;
2747        logo_pyld.opcode = els_opcode;
2748        logo_pyld.s_id[0] = vha->d_id.b.al_pa;
2749        logo_pyld.s_id[1] = vha->d_id.b.area;
2750        logo_pyld.s_id[2] = vha->d_id.b.domain;
2751        host_to_fcp_swap(logo_pyld.s_id, sizeof(uint32_t));
2752        memcpy(&logo_pyld.wwpn, vha->port_name, WWN_SIZE);
2753
2754        memcpy(elsio->u.els_logo.els_logo_pyld, &logo_pyld,
2755            sizeof(struct els_logo_payload));
2756        ql_dbg(ql_dbg_disc + ql_dbg_buffer, vha, 0x3075, "LOGO buffer:");
2757        ql_dump_buffer(ql_dbg_disc + ql_dbg_buffer, vha, 0x010a,
2758                       elsio->u.els_logo.els_logo_pyld,
2759                       sizeof(*elsio->u.els_logo.els_logo_pyld));
2760
2761        rval = qla2x00_start_sp(sp);
2762        if (rval != QLA_SUCCESS) {
2763                sp->free(sp);
2764                return QLA_FUNCTION_FAILED;
2765        }
2766
2767        ql_dbg(ql_dbg_io, vha, 0x3074,
2768            "%s LOGO sent, hdl=%x, loopid=%x, portid=%02x%02x%02x.\n",
2769            sp->name, sp->handle, fcport->loop_id, fcport->d_id.b.domain,
2770            fcport->d_id.b.area, fcport->d_id.b.al_pa);
2771
2772        wait_for_completion(&elsio->u.els_logo.comp);
2773
2774        sp->free(sp);
2775        return rval;
2776}
2777
2778static void
2779qla24xx_els_logo_iocb(srb_t *sp, struct els_entry_24xx *els_iocb)
2780{
2781        scsi_qla_host_t *vha = sp->vha;
2782        struct srb_iocb *elsio = &sp->u.iocb_cmd;
2783
2784        els_iocb->entry_type = ELS_IOCB_TYPE;
2785        els_iocb->entry_count = 1;
2786        els_iocb->sys_define = 0;
2787        els_iocb->entry_status = 0;
2788        els_iocb->handle = sp->handle;
2789        els_iocb->nport_handle = cpu_to_le16(sp->fcport->loop_id);
2790        els_iocb->tx_dsd_count = cpu_to_le16(1);
2791        els_iocb->vp_index = vha->vp_idx;
2792        els_iocb->sof_type = EST_SOFI3;
2793        els_iocb->rx_dsd_count = 0;
2794        els_iocb->opcode = elsio->u.els_logo.els_cmd;
2795
2796        els_iocb->d_id[0] = sp->fcport->d_id.b.al_pa;
2797        els_iocb->d_id[1] = sp->fcport->d_id.b.area;
2798        els_iocb->d_id[2] = sp->fcport->d_id.b.domain;
2799        /* For the SID the byte order differs from that of the DID */
2800        els_iocb->s_id[1] = vha->d_id.b.al_pa;
2801        els_iocb->s_id[2] = vha->d_id.b.area;
2802        els_iocb->s_id[0] = vha->d_id.b.domain;
2803
2804        if (elsio->u.els_logo.els_cmd == ELS_DCMD_PLOGI) {
2805                if (vha->hw->flags.edif_enabled)
2806                        els_iocb->control_flags = cpu_to_le16(ECF_SEC_LOGIN);
2807                else
2808                        els_iocb->control_flags = 0;
2809                els_iocb->tx_byte_count = els_iocb->tx_len =
2810                        cpu_to_le32(sizeof(struct els_plogi_payload));
2811                put_unaligned_le64(elsio->u.els_plogi.els_plogi_pyld_dma,
2812                                   &els_iocb->tx_address);
2813                els_iocb->rx_dsd_count = cpu_to_le16(1);
2814                els_iocb->rx_byte_count = els_iocb->rx_len =
2815                        cpu_to_le32(sizeof(struct els_plogi_payload));
2816                put_unaligned_le64(elsio->u.els_plogi.els_resp_pyld_dma,
2817                                   &els_iocb->rx_address);
2818
2819                ql_dbg(ql_dbg_io + ql_dbg_buffer, vha, 0x3073,
2820                    "PLOGI ELS IOCB:\n");
2821                ql_dump_buffer(ql_log_info, vha, 0x0109,
2822                    (uint8_t *)els_iocb,
2823                    sizeof(*els_iocb));
2824        } else {
2825                els_iocb->tx_byte_count =
2826                        cpu_to_le32(sizeof(struct els_logo_payload));
2827                put_unaligned_le64(elsio->u.els_logo.els_logo_pyld_dma,
2828                                   &els_iocb->tx_address);
2829                els_iocb->tx_len = cpu_to_le32(sizeof(struct els_logo_payload));
2830
2831                els_iocb->rx_byte_count = 0;
2832                els_iocb->rx_address = 0;
2833                els_iocb->rx_len = 0;
2834                ql_dbg(ql_dbg_io + ql_dbg_buffer, vha, 0x3076,
2835                       "LOGO ELS IOCB:");
2836                ql_dump_buffer(ql_log_info, vha, 0x010b,
2837                               els_iocb,
2838                               sizeof(*els_iocb));
2839        }
2840
2841        sp->vha->qla_stats.control_requests++;
2842}
2843
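    /**
     * qla2x00_els_dcmd2_iocb_timeout() - Handle a timed-out driver ELS PLOGI.
     * @data: SRB of the timed-out command
     *
     * Aborts the exchange; if the abort itself fails, the command is removed
     * from the outstanding array and completed with QLA_FUNCTION_TIMEOUT.
     */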
2844static void
2845qla2x00_els_dcmd2_iocb_timeout(void *data)
2846{
2847        srb_t *sp = data;
2848        fc_port_t *fcport = sp->fcport;
2849        struct scsi_qla_host *vha = sp->vha;
2850        unsigned long flags = 0;
2851        int res, h;
2852
2853        ql_dbg(ql_dbg_io + ql_dbg_disc, vha, 0x3069,
2854            "%s hdl=%x ELS Timeout, %8phC portid=%06x\n",
2855            sp->name, sp->handle, fcport->port_name, fcport->d_id.b24);
2856
2857        /* Abort the exchange */
2858        res = qla24xx_async_abort_cmd(sp, false);
2859        ql_dbg(ql_dbg_io, vha, 0x3070,
2860            "mbx abort_command %s\n",
2861            (res == QLA_SUCCESS) ? "successful" : "failed");
2862        if (res) {
2863                spin_lock_irqsave(sp->qpair->qp_lock_ptr, flags);
2864                for (h = 1; h < sp->qpair->req->num_outstanding_cmds; h++) {
2865                        if (sp->qpair->req->outstanding_cmds[h] == sp) {
2866                                sp->qpair->req->outstanding_cmds[h] = NULL;
2867                                break;
2868                        }
2869                }
2870                spin_unlock_irqrestore(sp->qpair->qp_lock_ptr, flags);
2871                sp->done(sp, QLA_FUNCTION_TIMEOUT);
2872        }
2873}
2874
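    /**
     * qla2x00_els_dcmd2_free() - Free the DMA buffers of a driver ELS PLOGI.
     * @vha: HA context
     * @els_plogi: PLOGI payload and response buffers to free
     */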
2875void qla2x00_els_dcmd2_free(scsi_qla_host_t *vha, struct els_plogi *els_plogi)
2876{
2877        if (els_plogi->els_plogi_pyld)
2878                dma_free_coherent(&vha->hw->pdev->dev,
2879                                  els_plogi->tx_size,
2880                                  els_plogi->els_plogi_pyld,
2881                                  els_plogi->els_plogi_pyld_dma);
2882
2883        if (els_plogi->els_resp_pyld)
2884                dma_free_coherent(&vha->hw->pdev->dev,
2885                                  els_plogi->rx_size,
2886                                  els_plogi->els_resp_pyld,
2887                                  els_plogi->els_resp_pyld_dma);
2888}
2889
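    /**
     * qla2x00_els_dcmd2_sp_done() - Completion callback for a driver ELS PLOGI.
     * @sp: completed SRB
     * @res: completion status
     *
     * Handles the login-conflict and exchange-starvation cases reported by
     * the firmware, then either wakes up a waiter or schedules the DMA
     * buffers for release via a QLA_EVT_UNMAP work item.
     */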
2890static void qla2x00_els_dcmd2_sp_done(srb_t *sp, int res)
2891{
2892        fc_port_t *fcport = sp->fcport;
2893        struct srb_iocb *lio = &sp->u.iocb_cmd;
2894        struct scsi_qla_host *vha = sp->vha;
2895        struct event_arg ea;
2896        struct qla_work_evt *e;
2897        struct fc_port *conflict_fcport;
2898        port_id_t cid;  /* conflict Nport id */
2899        const __le32 *fw_status = sp->u.iocb_cmd.u.els_plogi.fw_status;
2900        u16 lid;
2901
2902        ql_dbg(ql_dbg_disc, vha, 0x3072,
2903            "%s ELS done rc %d hdl=%x, portid=%06x %8phC\n",
2904            sp->name, res, sp->handle, fcport->d_id.b24, fcport->port_name);
2905
2906        fcport->flags &= ~(FCF_ASYNC_SENT|FCF_ASYNC_ACTIVE);
2907        del_timer(&sp->u.iocb_cmd.timer);
2908
2909        if (sp->flags & SRB_WAKEUP_ON_COMP) {
2910                complete(&lio->u.els_plogi.comp);
2911        } else {
2912                switch (le32_to_cpu(fw_status[0])) {
2913                case CS_DATA_UNDERRUN:
2914                case CS_COMPLETE:
2915                        memset(&ea, 0, sizeof(ea));
2916                        ea.fcport = fcport;
2917                        ea.rc = res;
2918                        qla_handle_els_plogi_done(vha, &ea);
2919                        break;
2920
2921                case CS_IOCB_ERROR:
2922                        switch (le32_to_cpu(fw_status[1])) {
2923                        case LSC_SCODE_PORTID_USED:
2924                                lid = le32_to_cpu(fw_status[2]) & 0xffff;
2925                                qlt_find_sess_invalidate_other(vha,
2926                                    wwn_to_u64(fcport->port_name),
2927                                    fcport->d_id, lid, &conflict_fcport);
2928                                if (conflict_fcport) {
2929                                        /*
2930                                         * Another fcport shares the same
2931                                         * loop_id & nport id; conflict
2932                                         * fcport needs to finish cleanup
2933                                         * before this fcport can proceed
2934                                         * to login.
2935                                         */
2936                                        conflict_fcport->conflict = fcport;
2937                                        fcport->login_pause = 1;
2938                                        ql_dbg(ql_dbg_disc, vha, 0x20ed,
2939                                            "%s %d %8phC pid %06x inuse with lid %#x post gidpn\n",
2940                                            __func__, __LINE__,
2941                                            fcport->port_name,
2942                                            fcport->d_id.b24, lid);
2943                                } else {
2944                                        ql_dbg(ql_dbg_disc, vha, 0x20ed,
2945                                            "%s %d %8phC pid %06x inuse with lid %#x sched del\n",
2946                                            __func__, __LINE__,
2947                                            fcport->port_name,
2948                                            fcport->d_id.b24, lid);
2949                                        qla2x00_clear_loop_id(fcport);
2950                                        set_bit(lid, vha->hw->loop_id_map);
2951                                        fcport->loop_id = lid;
2952                                        fcport->keep_nport_handle = 0;
2953                                        qlt_schedule_sess_for_deletion(fcport);
2954                                }
2955                                break;
2956
2957                        case LSC_SCODE_NPORT_USED:
2958                                cid.b.domain = (le32_to_cpu(fw_status[2]) >> 16)
2959                                        & 0xff;
2960                                cid.b.area   = (le32_to_cpu(fw_status[2]) >>  8)
2961                                        & 0xff;
2962                                cid.b.al_pa  = le32_to_cpu(fw_status[2]) & 0xff;
2963                                cid.b.rsvd_1 = 0;
2964
2965                                ql_dbg(ql_dbg_disc, vha, 0x20ec,
2966                                    "%s %d %8phC lid %#x in use with pid %06x post gnl\n",
2967                                    __func__, __LINE__, fcport->port_name,
2968                                    fcport->loop_id, cid.b24);
2969                                set_bit(fcport->loop_id,
2970                                    vha->hw->loop_id_map);
2971                                fcport->loop_id = FC_NO_LOOP_ID;
2972                                qla24xx_post_gnl_work(vha, fcport);
2973                                break;
2974
2975                        case LSC_SCODE_NOXCB:
2976                                vha->hw->exch_starvation++;
2977                                if (vha->hw->exch_starvation > 5) {
2978                                        ql_log(ql_log_warn, vha, 0xd046,
2979                                            "Exchange starvation. Resetting RISC\n");
2980                                        vha->hw->exch_starvation = 0;
2981                                        set_bit(ISP_ABORT_NEEDED,
2982                                            &vha->dpc_flags);
2983                                        qla2xxx_wake_dpc(vha);
2984                                }
2985                                fallthrough;
2986                        default:
2987                                ql_dbg(ql_dbg_disc, vha, 0x20eb,
2988                                    "%s %8phC cmd error fw_status 0x%x 0x%x 0x%x\n",
2989                                    __func__, sp->fcport->port_name,
2990                                    le32_to_cpu(fw_status[0]),
                                        le32_to_cpu(fw_status[1]),
                                        le32_to_cpu(fw_status[2]));
2991
2992                                fcport->flags &= ~FCF_ASYNC_SENT;
2993                                qla2x00_set_fcport_disc_state(fcport,
2994                                    DSC_LOGIN_FAILED);
2995                                set_bit(RELOGIN_NEEDED, &vha->dpc_flags);
2996                                break;
2997                        }
2998                        break;
2999
3000                default:
3001                        ql_dbg(ql_dbg_disc, vha, 0x20eb,
3002                            "%s %8phC cmd error 2 fw_status 0x%x 0x%x 0x%x\n",
3003                            __func__, sp->fcport->port_name,
3004                            le32_to_cpu(fw_status[0]), le32_to_cpu(fw_status[1]),
                            le32_to_cpu(fw_status[2]));
3005
3006                        sp->fcport->flags &= ~FCF_ASYNC_SENT;
3007                        qla2x00_set_fcport_disc_state(fcport, DSC_LOGIN_FAILED);
3008                        set_bit(RELOGIN_NEEDED, &vha->dpc_flags);
3009                        break;
3010                }
3011
3012                e = qla2x00_alloc_work(vha, QLA_EVT_UNMAP);
3013                if (!e) {
3014                        struct srb_iocb *elsio = &sp->u.iocb_cmd;
3015
3016                        qla2x00_els_dcmd2_free(vha, &elsio->u.els_plogi);
3017                        sp->free(sp);
3018                        return;
3019                }
3020                e->u.iosb.sp = sp;
3021                qla2x00_post_work(vha, e);
3022        }
3023}
3024
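    /**
     * qla24xx_els_dcmd2_iocb() - Issue a driver-initiated ELS PLOGI.
     * @vha: HA context
     * @els_opcode: ELS opcode (ELS_DCMD_PLOGI)
     * @fcport: remote port to log in to
     * @wait: if true, block until the ELS completes
     *
     * Returns QLA_SUCCESS on success, -ENOMEM if no SRB could be allocated,
     * or QLA_FUNCTION_FAILED on other failures.
     */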
3025int
3026qla24xx_els_dcmd2_iocb(scsi_qla_host_t *vha, int els_opcode,
3027    fc_port_t *fcport, bool wait)
3028{
3029        srb_t *sp;
3030        struct srb_iocb *elsio = NULL;
3031        struct qla_hw_data *ha = vha->hw;
3032        int rval = QLA_SUCCESS;
3033        void    *ptr, *resp_ptr;
3034
3035        /* Alloc SRB structure */
3036        sp = qla2x00_get_sp(vha, fcport, GFP_KERNEL);
3037        if (!sp) {
3038                ql_log(ql_log_info, vha, 0x70e6,
3039                 "SRB allocation failed\n");
3040                fcport->flags &= ~FCF_ASYNC_ACTIVE;
3041                return -ENOMEM;
3042        }
3043
3044        fcport->flags |= FCF_ASYNC_SENT;
3045        qla2x00_set_fcport_disc_state(fcport, DSC_LOGIN_PEND);
3046        elsio = &sp->u.iocb_cmd;
3047        ql_dbg(ql_dbg_io, vha, 0x3073,
3048               "%s Enter: PLOGI portid=%06x\n", __func__, fcport->d_id.b24);
3049
3050        sp->type = SRB_ELS_DCMD;
3051        sp->name = "ELS_DCMD";
3052        sp->fcport = fcport;
3053
3054        elsio->timeout = qla2x00_els_dcmd2_iocb_timeout;
3055        if (wait)
3056                sp->flags = SRB_WAKEUP_ON_COMP;
3057
3058        qla2x00_init_timer(sp, ELS_DCMD_TIMEOUT + 2);
3059
3060        sp->done = qla2x00_els_dcmd2_sp_done;
3061        elsio->u.els_plogi.tx_size = elsio->u.els_plogi.rx_size = DMA_POOL_SIZE;
3062
3063        ptr = elsio->u.els_plogi.els_plogi_pyld =
3064            dma_alloc_coherent(&ha->pdev->dev, elsio->u.els_plogi.tx_size,
3065                &elsio->u.els_plogi.els_plogi_pyld_dma, GFP_KERNEL);
3066
3067        if (!elsio->u.els_plogi.els_plogi_pyld) {
3068                rval = QLA_FUNCTION_FAILED;
3069                goto out;
3070        }
3071
3072        resp_ptr = elsio->u.els_plogi.els_resp_pyld =
3073            dma_alloc_coherent(&ha->pdev->dev, elsio->u.els_plogi.rx_size,
3074                &elsio->u.els_plogi.els_resp_pyld_dma, GFP_KERNEL);
3075
3076        if (!elsio->u.els_plogi.els_resp_pyld) {
3077                rval = QLA_FUNCTION_FAILED;
3078                goto out;
3079        }
3080
3081        ql_dbg(ql_dbg_io, vha, 0x3073, "PLOGI %p %p\n", ptr, resp_ptr);
3082
3083        memset(ptr, 0, sizeof(struct els_plogi_payload));
3084        memset(resp_ptr, 0, sizeof(struct els_plogi_payload));
3085        memcpy(elsio->u.els_plogi.els_plogi_pyld->data,
3086            &ha->plogi_els_payld.fl_csp, LOGIN_TEMPLATE_SIZE);
3087
3088        elsio->u.els_plogi.els_cmd = els_opcode;
3089        elsio->u.els_plogi.els_plogi_pyld->opcode = els_opcode;
3090
3091        if (els_opcode == ELS_DCMD_PLOGI && vha->hw->flags.edif_enabled &&
3092            vha->e_dbell.db_flags & EDB_ACTIVE) {
3093                struct fc_els_flogi *p = ptr;
3094
3095                p->fl_csp.sp_features |= cpu_to_be16(FC_SP_FT_SEC);
3096        }
3097
3098        ql_dbg(ql_dbg_disc + ql_dbg_buffer, vha, 0x3073, "PLOGI buffer:\n");
3099        ql_dump_buffer(ql_dbg_disc + ql_dbg_buffer, vha, 0x0109,
3100            (uint8_t *)elsio->u.els_plogi.els_plogi_pyld,
3101            sizeof(*elsio->u.els_plogi.els_plogi_pyld));
3102
3103        init_completion(&elsio->u.els_plogi.comp);
3104        rval = qla2x00_start_sp(sp);
3105        if (rval != QLA_SUCCESS) {
3106                rval = QLA_FUNCTION_FAILED;
3107        } else {
3108                ql_dbg(ql_dbg_disc, vha, 0x3074,
3109                    "%s PLOGI sent, hdl=%x, loopid=%x, to port_id %06x from port_id %06x\n",
3110                    sp->name, sp->handle, fcport->loop_id,
3111                    fcport->d_id.b24, vha->d_id.b24);
3112        }
3113
3114        if (wait) {
3115                wait_for_completion(&elsio->u.els_plogi.comp);
3116
3117                if (elsio->u.els_plogi.comp_status != CS_COMPLETE)
3118                        rval = QLA_FUNCTION_FAILED;
3119        } else {
3120                goto done;
3121        }
3122
3123out:
3124        fcport->flags &= ~(FCF_ASYNC_SENT | FCF_ASYNC_ACTIVE);
3125        qla2x00_els_dcmd2_free(vha, &elsio->u.els_plogi);
3126        sp->free(sp);
3127done:
3128        return rval;
3129}
3130
3131/**
     * qla_els_pt_iocb() - Build an ELS pass-through IOCB.
     * @vha: HA context
     * @els_iocb: IOCB to populate
     * @a: ELS pass-through arguments
     *
     * It is assumed the qpair lock is held when this function is called.
     */
3132void qla_els_pt_iocb(struct scsi_qla_host *vha,
3133        struct els_entry_24xx *els_iocb,
3134        struct qla_els_pt_arg *a)
3135{
3136        els_iocb->entry_type = ELS_IOCB_TYPE;
3137        els_iocb->entry_count = 1;
3138        els_iocb->sys_define = 0;
3139        els_iocb->entry_status = 0;
3140        els_iocb->handle = QLA_SKIP_HANDLE;
3141        els_iocb->nport_handle = a->nport_handle;
3142        els_iocb->rx_xchg_address = a->rx_xchg_address;
3143        els_iocb->tx_dsd_count = cpu_to_le16(1);
3144        els_iocb->vp_index = a->vp_idx;
3145        els_iocb->sof_type = EST_SOFI3;
3146        els_iocb->rx_dsd_count = cpu_to_le16(0);
3147        els_iocb->opcode = a->els_opcode;
3148
3149        els_iocb->d_id[0] = a->did.b.al_pa;
3150        els_iocb->d_id[1] = a->did.b.area;
3151        els_iocb->d_id[2] = a->did.b.domain;
3152        /* For SID the byte order is different from that of DID */
3153        els_iocb->s_id[1] = vha->d_id.b.al_pa;
3154        els_iocb->s_id[2] = vha->d_id.b.area;
3155        els_iocb->s_id[0] = vha->d_id.b.domain;
3156
3157        els_iocb->control_flags = cpu_to_le16(a->control_flags);
3158
3159        els_iocb->tx_byte_count = cpu_to_le32(a->tx_byte_count);
3160        els_iocb->tx_len = cpu_to_le32(a->tx_len);
3161        put_unaligned_le64(a->tx_addr, &els_iocb->tx_address);
3162
3163        els_iocb->rx_byte_count = cpu_to_le32(a->rx_byte_count);
3164        els_iocb->rx_len = cpu_to_le32(a->rx_len);
3165        put_unaligned_le64(a->rx_addr, &els_iocb->rx_address);
3166}
3167
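    /**
     * qla24xx_els_iocb() - Build an ELS pass-through IOCB from a bsg job.
     * @sp: SRB carrying the bsg job
     * @els_iocb: IOCB to populate
     */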
3168static void
3169qla24xx_els_iocb(srb_t *sp, struct els_entry_24xx *els_iocb)
3170{
3171        struct bsg_job *bsg_job = sp->u.bsg_job;
3172        struct fc_bsg_request *bsg_request = bsg_job->request;
3173
3174        els_iocb->entry_type = ELS_IOCB_TYPE;
3175        els_iocb->entry_count = 1;
3176        els_iocb->sys_define = 0;
3177        els_iocb->entry_status = 0;
3178        els_iocb->handle = sp->handle;
3179        els_iocb->nport_handle = cpu_to_le16(sp->fcport->loop_id);
3180        els_iocb->tx_dsd_count = cpu_to_le16(bsg_job->request_payload.sg_cnt);
3181        els_iocb->vp_index = sp->vha->vp_idx;
3182        els_iocb->sof_type = EST_SOFI3;
3183        els_iocb->rx_dsd_count = cpu_to_le16(bsg_job->reply_payload.sg_cnt);
3184
3185        els_iocb->opcode =
3186            sp->type == SRB_ELS_CMD_RPT ?
3187            bsg_request->rqst_data.r_els.els_code :
3188            bsg_request->rqst_data.h_els.command_code;
3189        els_iocb->d_id[0] = sp->fcport->d_id.b.al_pa;
3190        els_iocb->d_id[1] = sp->fcport->d_id.b.area;
3191        els_iocb->d_id[2] = sp->fcport->d_id.b.domain;
3192        els_iocb->control_flags = 0;
3193        els_iocb->rx_byte_count =
3194            cpu_to_le32(bsg_job->reply_payload.payload_len);
3195        els_iocb->tx_byte_count =
3196            cpu_to_le32(bsg_job->request_payload.payload_len);
3197
3198        put_unaligned_le64(sg_dma_address(bsg_job->request_payload.sg_list),
3199                           &els_iocb->tx_address);
3200        els_iocb->tx_len = cpu_to_le32(sg_dma_len
3201            (bsg_job->request_payload.sg_list));
3202
3203        put_unaligned_le64(sg_dma_address(bsg_job->reply_payload.sg_list),
3204                           &els_iocb->rx_address);
3205        els_iocb->rx_len = cpu_to_le32(sg_dma_len
3206            (bsg_job->reply_payload.sg_list));
3207
3208        sp->vha->qla_stats.control_requests++;
3209}
3210
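    /**
     * qla2x00_ct_iocb() - Build a CT pass-through IOCB (non-FWI2 ISPs).
     * @sp: SRB carrying the bsg job
     * @ct_iocb: MS IOCB to populate
     */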
3211static void
3212qla2x00_ct_iocb(srb_t *sp, ms_iocb_entry_t *ct_iocb)
3213{
3214        uint16_t        avail_dsds;
3215        struct dsd64    *cur_dsd;
3216        struct scatterlist *sg;
3217        int index;
3218        uint16_t tot_dsds;
3219        scsi_qla_host_t *vha = sp->vha;
3220        struct qla_hw_data *ha = vha->hw;
3221        struct bsg_job *bsg_job = sp->u.bsg_job;
3222        int entry_count = 1;
3223
3224        memset(ct_iocb, 0, sizeof(ms_iocb_entry_t));
3225        ct_iocb->entry_type = CT_IOCB_TYPE;
3226        ct_iocb->entry_status = 0;
3227        ct_iocb->handle1 = sp->handle;
3228        SET_TARGET_ID(ha, ct_iocb->loop_id, sp->fcport->loop_id);
3229        ct_iocb->status = cpu_to_le16(0);
3230        ct_iocb->control_flags = cpu_to_le16(0);
3231        ct_iocb->timeout = 0;
3232        ct_iocb->cmd_dsd_count =
3233            cpu_to_le16(bsg_job->request_payload.sg_cnt);
3234        ct_iocb->total_dsd_count =
3235            cpu_to_le16(bsg_job->request_payload.sg_cnt + 1);
3236        ct_iocb->req_bytecount =
3237            cpu_to_le32(bsg_job->request_payload.payload_len);
3238        ct_iocb->rsp_bytecount =
3239            cpu_to_le32(bsg_job->reply_payload.payload_len);
3240
3241        put_unaligned_le64(sg_dma_address(bsg_job->request_payload.sg_list),
3242                           &ct_iocb->req_dsd.address);
3243        ct_iocb->req_dsd.length = ct_iocb->req_bytecount;
3244
3245        put_unaligned_le64(sg_dma_address(bsg_job->reply_payload.sg_list),
3246                           &ct_iocb->rsp_dsd.address);
3247        ct_iocb->rsp_dsd.length = ct_iocb->rsp_bytecount;
3248
3249        avail_dsds = 1;
3250        cur_dsd = &ct_iocb->rsp_dsd;
3251        index = 0;
3252        tot_dsds = bsg_job->reply_payload.sg_cnt;
3253
3254        for_each_sg(bsg_job->reply_payload.sg_list, sg, tot_dsds, index) {
3255                cont_a64_entry_t *cont_pkt;
3256
3257                /* Allocate additional continuation packets? */
3258                if (avail_dsds == 0) {
3259                        /*
3260                         * Five DSDs are available in the Cont.
3261                         * Type 1 IOCB.
3262                         */
3263                        cont_pkt = qla2x00_prep_cont_type1_iocb(vha,
3264                            vha->hw->req_q_map[0]);
3265                        cur_dsd = cont_pkt->dsd;
3266                        avail_dsds = 5;
3267                        entry_count++;
3268                }
3269
3270                append_dsd64(&cur_dsd, sg);
3271                avail_dsds--;
3272        }
3273        ct_iocb->entry_count = entry_count;
3274
3275        sp->vha->qla_stats.control_requests++;
3276}
3277
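    /**
     * qla24xx_ct_iocb() - Build a CT pass-through IOCB (FWI2-capable ISPs).
     * @sp: SRB carrying the bsg job
     * @ct_iocb: CT IOCB to populate
     */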
3278static void
3279qla24xx_ct_iocb(srb_t *sp, struct ct_entry_24xx *ct_iocb)
3280{
3281        uint16_t        avail_dsds;
3282        struct dsd64    *cur_dsd;
3283        struct scatterlist *sg;
3284        int index;
3285        uint16_t cmd_dsds, rsp_dsds;
3286        scsi_qla_host_t *vha = sp->vha;
3287        struct qla_hw_data *ha = vha->hw;
3288        struct bsg_job *bsg_job = sp->u.bsg_job;
3289        int entry_count = 1;
3290        cont_a64_entry_t *cont_pkt = NULL;
3291
3292        ct_iocb->entry_type = CT_IOCB_TYPE;
3293        ct_iocb->entry_status = 0;
3294        ct_iocb->sys_define = 0;
3295        ct_iocb->handle = sp->handle;
3296
3297        ct_iocb->nport_handle = cpu_to_le16(sp->fcport->loop_id);
3298        ct_iocb->vp_index = sp->vha->vp_idx;
3299        ct_iocb->comp_status = cpu_to_le16(0);
3300
3301        cmd_dsds = bsg_job->request_payload.sg_cnt;
3302        rsp_dsds = bsg_job->reply_payload.sg_cnt;
3303
3304        ct_iocb->cmd_dsd_count = cpu_to_le16(cmd_dsds);
3305        ct_iocb->timeout = 0;
3306        ct_iocb->rsp_dsd_count = cpu_to_le16(rsp_dsds);
3307        ct_iocb->cmd_byte_count =
3308            cpu_to_le32(bsg_job->request_payload.payload_len);
3309
3310        avail_dsds = 2;
3311        cur_dsd = ct_iocb->dsd;
3312        index = 0;
3313
3314        for_each_sg(bsg_job->request_payload.sg_list, sg, cmd_dsds, index) {
3315                /* Allocate additional continuation packets? */
3316                if (avail_dsds == 0) {
3317                        /*
3318                         * Five DSDs are available in the Cont.
3319                         * Type 1 IOCB.
3320                         */
3321                        cont_pkt = qla2x00_prep_cont_type1_iocb(
3322                            vha, ha->req_q_map[0]);
3323                        cur_dsd = cont_pkt->dsd;
3324                        avail_dsds = 5;
3325                        entry_count++;
3326                }
3327
3328                append_dsd64(&cur_dsd, sg);
3329                avail_dsds--;
3330        }
3331
3332        index = 0;
3333
3334        for_each_sg(bsg_job->reply_payload.sg_list, sg, rsp_dsds, index) {
3335                /* Allocate additional continuation packets? */
3336                if (avail_dsds == 0) {
3337                        /*
3338                         * Five DSDs are available in the Cont.
3339                         * Type 1 IOCB.
3340                         */
3341                        cont_pkt = qla2x00_prep_cont_type1_iocb(vha,
3342                            ha->req_q_map[0]);
3343                        cur_dsd = cont_pkt->dsd;
3344                        avail_dsds = 5;
3345                        entry_count++;
3346                }
3347
3348                append_dsd64(&cur_dsd, sg);
3349                avail_dsds--;
3350        }
3351        ct_iocb->entry_count = entry_count;
3352}
3353
3354/**
3355 * qla82xx_start_scsi() - Send a SCSI command to the ISP
3356 * @sp: command to send to the ISP
3357 *
3358 * Returns non-zero if a failure occurred, else zero.
3359 */
3360int
3361qla82xx_start_scsi(srb_t *sp)
3362{
3363        int             nseg;
3364        unsigned long   flags;
3365        struct scsi_cmnd *cmd;
3366        uint32_t        *clr_ptr;
3367        uint32_t        handle;
3368        uint16_t        cnt;
3369        uint16_t        req_cnt;
3370        uint16_t        tot_dsds;
3371        struct device_reg_82xx __iomem *reg;
3372        uint32_t dbval;
3373        __be32 *fcp_dl;
3374        uint8_t additional_cdb_len;
3375        struct ct6_dsd *ctx;
3376        struct scsi_qla_host *vha = sp->vha;
3377        struct qla_hw_data *ha = vha->hw;
3378        struct req_que *req = NULL;
3379        struct rsp_que *rsp = NULL;
3380
3381        /* Setup device pointers. */
3382        reg = &ha->iobase->isp82;
3383        cmd = GET_CMD_SP(sp);
3384        req = vha->req;
3385        rsp = ha->rsp_q_map[0];
3386
3387        /* So we know we haven't dma_map'ed anything yet */
3388        tot_dsds = 0;
3389
3390        dbval = 0x04 | (ha->portnum << 5);
3391
3392        /* Send marker if required */
3393        if (vha->marker_needed != 0) {
3394                if (qla2x00_marker(vha, ha->base_qpair,
3395                        0, 0, MK_SYNC_ALL) != QLA_SUCCESS) {
3396                        ql_log(ql_log_warn, vha, 0x300c,
3397                            "qla2x00_marker failed for cmd=%p.\n", cmd);
3398                        return QLA_FUNCTION_FAILED;
3399                }
3400                vha->marker_needed = 0;
3401        }
3402
3403        /* Acquire ring specific lock */
3404        spin_lock_irqsave(&ha->hardware_lock, flags);
3405
3406        handle = qla2xxx_get_next_handle(req);
3407        if (handle == 0)
3408                goto queuing_error;
3409
3410        /* Map the sg table so we have an accurate count of sg entries needed */
3411        if (scsi_sg_count(cmd)) {
3412                nseg = dma_map_sg(&ha->pdev->dev, scsi_sglist(cmd),
3413                    scsi_sg_count(cmd), cmd->sc_data_direction);
3414                if (unlikely(!nseg))
3415                        goto queuing_error;
3416        } else {
3417                nseg = 0;
        }
3418
3419        tot_dsds = nseg;
3420
3421        if (tot_dsds > ql2xshiftctondsd) {
3422                struct cmd_type_6 *cmd_pkt;
3423                uint16_t more_dsd_lists = 0;
3424                struct dsd_dma *dsd_ptr;
3425                uint16_t i;
3426
3427                more_dsd_lists = qla24xx_calc_dsd_lists(tot_dsds);
3428                if ((more_dsd_lists + ha->gbl_dsd_inuse) >= NUM_DSD_CHAIN) {
3429                        ql_dbg(ql_dbg_io, vha, 0x300d,
3430                            "Num of DSD list %d is than %d for cmd=%p.\n",
3431                            more_dsd_lists + ha->gbl_dsd_inuse, NUM_DSD_CHAIN,
3432                            cmd);
3433                        goto queuing_error;
3434                }
3435
3436                if (more_dsd_lists <= ha->gbl_dsd_avail)
3437                        goto sufficient_dsds;
3438                else
3439                        more_dsd_lists -= ha->gbl_dsd_avail;
3440
3441                for (i = 0; i < more_dsd_lists; i++) {
3442                        dsd_ptr = kzalloc(sizeof(struct dsd_dma), GFP_ATOMIC);
3443                        if (!dsd_ptr) {
3444                                ql_log(ql_log_fatal, vha, 0x300e,
3445                                    "Failed to allocate memory for dsd_dma "
3446                                    "for cmd=%p.\n", cmd);
3447                                goto queuing_error;
3448                        }
3449
3450                        dsd_ptr->dsd_addr = dma_pool_alloc(ha->dl_dma_pool,
3451                                GFP_ATOMIC, &dsd_ptr->dsd_list_dma);
3452                        if (!dsd_ptr->dsd_addr) {
3453                                kfree(dsd_ptr);
3454                                ql_log(ql_log_fatal, vha, 0x300f,
3455                                    "Failed to allocate memory for dsd_addr "
3456                                    "for cmd=%p.\n", cmd);
3457                                goto queuing_error;
3458                        }
3459                        list_add_tail(&dsd_ptr->list, &ha->gbl_dsd_list);
3460                        ha->gbl_dsd_avail++;
3461                }
3462
3463sufficient_dsds:
3464                req_cnt = 1;
3465
3466                if (req->cnt < (req_cnt + 2)) {
3467                        cnt = (uint16_t)rd_reg_dword_relaxed(
3468                                &reg->req_q_out[0]);
3469                        if (req->ring_index < cnt)
3470                                req->cnt = cnt - req->ring_index;
3471                        else
3472                                req->cnt = req->length -
3473                                        (req->ring_index - cnt);
3474                        if (req->cnt < (req_cnt + 2))
3475                                goto queuing_error;
3476                }
3477
3478                ctx = sp->u.scmd.ct6_ctx =
3479                    mempool_alloc(ha->ctx_mempool, GFP_ATOMIC);
3480                if (!ctx) {
3481                        ql_log(ql_log_fatal, vha, 0x3010,
3482                            "Failed to allocate ctx for cmd=%p.\n", cmd);
3483                        goto queuing_error;
3484                }
3485
3486                memset(ctx, 0, sizeof(struct ct6_dsd));
3487                ctx->fcp_cmnd = dma_pool_zalloc(ha->fcp_cmnd_dma_pool,
3488                        GFP_ATOMIC, &ctx->fcp_cmnd_dma);
3489                if (!ctx->fcp_cmnd) {
3490                        ql_log(ql_log_fatal, vha, 0x3011,
3491                            "Failed to allocate fcp_cmnd for cmd=%p.\n", cmd);
3492                        goto queuing_error;
3493                }
3494
3495                /* Initialize the DSD list and dma handle */
3496                INIT_LIST_HEAD(&ctx->dsd_list);
3497                ctx->dsd_use_cnt = 0;
3498
3499                if (cmd->cmd_len > 16) {
3500                        additional_cdb_len = cmd->cmd_len - 16;
3501                        if ((cmd->cmd_len % 4) != 0) {
3502                                /* SCSI commands longer than 16 bytes must
3503                                 * have a length that is a multiple of 4.
3504                                 */
3505                                ql_log(ql_log_warn, vha, 0x3012,
3506                                    "scsi cmd len %d not multiple of 4 for cmd=%p.\n",
3507                                    cmd->cmd_len, cmd);
3508                                goto queuing_error_fcp_cmnd;
3509                        }
3510                        ctx->fcp_cmnd_len = 12 + cmd->cmd_len + 4;
3511                } else {
3512                        additional_cdb_len = 0;
3513                        ctx->fcp_cmnd_len = 12 + 16 + 4;
3514                }
3515
3516                cmd_pkt = (struct cmd_type_6 *)req->ring_ptr;
3517                cmd_pkt->handle = make_handle(req->id, handle);
3518
3519                /* Zero out remaining portion of packet. */
3520                /*    tagged queuing modifier -- default is TSK_SIMPLE (0). */
3521                clr_ptr = (uint32_t *)cmd_pkt + 2;
3522                memset(clr_ptr, 0, REQUEST_ENTRY_SIZE - 8);
3523                cmd_pkt->dseg_count = cpu_to_le16(tot_dsds);
3524
3525                /* Set NPORT-ID and LUN number*/
3526                cmd_pkt->nport_handle = cpu_to_le16(sp->fcport->loop_id);
3527                cmd_pkt->port_id[0] = sp->fcport->d_id.b.al_pa;
3528                cmd_pkt->port_id[1] = sp->fcport->d_id.b.area;
3529                cmd_pkt->port_id[2] = sp->fcport->d_id.b.domain;
3530                cmd_pkt->vp_index = sp->vha->vp_idx;
3531
3532                /* Build IOCB segments */
3533                if (qla24xx_build_scsi_type_6_iocbs(sp, cmd_pkt, tot_dsds))
3534                        goto queuing_error_fcp_cmnd;
3535
3536                int_to_scsilun(cmd->device->lun, &cmd_pkt->lun);
3537                host_to_fcp_swap((uint8_t *)&cmd_pkt->lun, sizeof(cmd_pkt->lun));
3538
3539                /* build FCP_CMND IU */
3540                int_to_scsilun(cmd->device->lun, &ctx->fcp_cmnd->lun);
3541                ctx->fcp_cmnd->additional_cdb_len = additional_cdb_len;
3542
3543                if (cmd->sc_data_direction == DMA_TO_DEVICE)
3544                        ctx->fcp_cmnd->additional_cdb_len |= 1;
3545                else if (cmd->sc_data_direction == DMA_FROM_DEVICE)
3546                        ctx->fcp_cmnd->additional_cdb_len |= 2;
3547
3548                /* Populate the FCP_PRIO. */
3549                if (ha->flags.fcp_prio_enabled)
3550                        ctx->fcp_cmnd->task_attribute |=
3551                            sp->fcport->fcp_prio << 3;
3552
3553                memcpy(ctx->fcp_cmnd->cdb, cmd->cmnd, cmd->cmd_len);
3554
3555                fcp_dl = (__be32 *)(ctx->fcp_cmnd->cdb + 16 +
3556                    additional_cdb_len);
3557                *fcp_dl = htonl((uint32_t)scsi_bufflen(cmd));
3558
3559                cmd_pkt->fcp_cmnd_dseg_len = cpu_to_le16(ctx->fcp_cmnd_len);
3560                put_unaligned_le64(ctx->fcp_cmnd_dma,
3561                                   &cmd_pkt->fcp_cmnd_dseg_address);
3562
3563                sp->flags |= SRB_FCP_CMND_DMA_VALID;
3564                cmd_pkt->byte_count = cpu_to_le32((uint32_t)scsi_bufflen(cmd));
3565                /* Set total data segment count. */
3566                cmd_pkt->entry_count = (uint8_t)req_cnt;
3567                /* Specify response queue number where
3568                 * completion should happen
3569                 */
3570                cmd_pkt->entry_status = (uint8_t) rsp->id;
3571        } else {
3572                struct cmd_type_7 *cmd_pkt;
3573
3574                req_cnt = qla24xx_calc_iocbs(vha, tot_dsds);
3575                if (req->cnt < (req_cnt + 2)) {
3576                        cnt = (uint16_t)rd_reg_dword_relaxed(
3577                            &reg->req_q_out[0]);
3578                        if (req->ring_index < cnt)
3579                                req->cnt = cnt - req->ring_index;
3580                        else
3581                                req->cnt = req->length -
3582                                        (req->ring_index - cnt);
3583                }
3584                if (req->cnt < (req_cnt + 2))
3585                        goto queuing_error;
3586
3587                cmd_pkt = (struct cmd_type_7 *)req->ring_ptr;
3588                cmd_pkt->handle = make_handle(req->id, handle);
3589
3590                /* Zero out remaining portion of packet. */
3591                /* tagged queuing modifier -- default is TSK_SIMPLE (0).*/
3592                clr_ptr = (uint32_t *)cmd_pkt + 2;
3593                memset(clr_ptr, 0, REQUEST_ENTRY_SIZE - 8);
3594                cmd_pkt->dseg_count = cpu_to_le16(tot_dsds);
3595
3596                /* Set NPORT-ID and LUN number*/
3597                cmd_pkt->nport_handle = cpu_to_le16(sp->fcport->loop_id);
3598                cmd_pkt->port_id[0] = sp->fcport->d_id.b.al_pa;
3599                cmd_pkt->port_id[1] = sp->fcport->d_id.b.area;
3600                cmd_pkt->port_id[2] = sp->fcport->d_id.b.domain;
3601                cmd_pkt->vp_index = sp->vha->vp_idx;
3602
3603                int_to_scsilun(cmd->device->lun, &cmd_pkt->lun);
3604                host_to_fcp_swap((uint8_t *)&cmd_pkt->lun,
3605                    sizeof(cmd_pkt->lun));
3606
3607                /* Populate the FCP_PRIO. */
3608                if (ha->flags.fcp_prio_enabled)
3609                        cmd_pkt->task |= sp->fcport->fcp_prio << 3;
3610
3611                /* Load SCSI command packet. */
3612                memcpy(cmd_pkt->fcp_cdb, cmd->cmnd, cmd->cmd_len);
3613                host_to_fcp_swap(cmd_pkt->fcp_cdb, sizeof(cmd_pkt->fcp_cdb));
3614
3615                cmd_pkt->byte_count = cpu_to_le32((uint32_t)scsi_bufflen(cmd));
3616
3617                /* Build IOCB segments */
3618                qla24xx_build_scsi_iocbs(sp, cmd_pkt, tot_dsds, req);
3619
3620                /* Set total data segment count. */
3621                cmd_pkt->entry_count = (uint8_t)req_cnt;
3622                /* Specify response queue number where
3623                 * completion should happen.
3624                 */
3625                cmd_pkt->entry_status = (uint8_t) rsp->id;
3626
3627        }
3628        /* Build command packet. */
3629        req->current_outstanding_cmd = handle;
3630        req->outstanding_cmds[handle] = sp;
3631        sp->handle = handle;
3632        cmd->host_scribble = (unsigned char *)(unsigned long)handle;
3633        req->cnt -= req_cnt;
3634        wmb();
3635
3636        /* Adjust ring index. */
3637        req->ring_index++;
3638        if (req->ring_index == req->length) {
3639                req->ring_index = 0;
3640                req->ring_ptr = req->ring;
3641        } else
3642                req->ring_ptr++;
3643
3644        sp->flags |= SRB_DMA_VALID;
3645
3646        /* Set chip new ring index. */
3647        /* write, read and verify logic */
3648        dbval = dbval | (req->id << 8) | (req->ring_index << 16);
3649        if (ql2xdbwr)
3650                qla82xx_wr_32(ha, (uintptr_t __force)ha->nxdb_wr_ptr, dbval);
3651        else {
3652                wrt_reg_dword(ha->nxdb_wr_ptr, dbval);
3653                wmb();
3654                while (rd_reg_dword(ha->nxdb_rd_ptr) != dbval) {
3655                        wrt_reg_dword(ha->nxdb_wr_ptr, dbval);
3656                        wmb();
3657                }
3658        }
3659
3660        /* Manage unprocessed RIO/ZIO commands in response queue. */
3661        if (vha->flags.process_response_queue &&
3662            rsp->ring_ptr->signature != RESPONSE_PROCESSED)
3663                qla24xx_process_response_queue(vha, rsp);
3664
3665        spin_unlock_irqrestore(&ha->hardware_lock, flags);
3666        return QLA_SUCCESS;
3667
3668queuing_error_fcp_cmnd:
3669        dma_pool_free(ha->fcp_cmnd_dma_pool, ctx->fcp_cmnd, ctx->fcp_cmnd_dma);
3670queuing_error:
3671        if (tot_dsds)
3672                scsi_dma_unmap(cmd);
3673
3674        if (sp->u.scmd.crc_ctx) {
3675                mempool_free(sp->u.scmd.crc_ctx, ha->ctx_mempool);
3676                sp->u.scmd.crc_ctx = NULL;
3677        }
3678        spin_unlock_irqrestore(&ha->hardware_lock, flags);
3679
3680        return QLA_FUNCTION_FAILED;
3681}
3682
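    /**
     * qla24xx_abort_iocb() - Build an abort IOCB.
     * @sp: SRB for the abort request
     * @abt_iocb: IOCB to populate from the command being aborted
     */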
3683static void
3684qla24xx_abort_iocb(srb_t *sp, struct abort_entry_24xx *abt_iocb)
3685{
3686        struct srb_iocb *aio = &sp->u.iocb_cmd;
3687        scsi_qla_host_t *vha = sp->vha;
3688        struct req_que *req = sp->qpair->req;
3689        srb_t *orig_sp = sp->cmd_sp;
3690
3691        memset(abt_iocb, 0, sizeof(struct abort_entry_24xx));
3692        abt_iocb->entry_type = ABORT_IOCB_TYPE;
3693        abt_iocb->entry_count = 1;
3694        abt_iocb->handle = make_handle(req->id, sp->handle);
3695        if (sp->fcport) {
3696                abt_iocb->nport_handle = cpu_to_le16(sp->fcport->loop_id);
3697                abt_iocb->port_id[0] = sp->fcport->d_id.b.al_pa;
3698                abt_iocb->port_id[1] = sp->fcport->d_id.b.area;
3699                abt_iocb->port_id[2] = sp->fcport->d_id.b.domain;
3700        }
3701        abt_iocb->handle_to_abort =
3702                make_handle(le16_to_cpu(aio->u.abt.req_que_no),
3703                            aio->u.abt.cmd_hndl);
3704        abt_iocb->vp_index = vha->vp_idx;
3705        abt_iocb->req_que_no = aio->u.abt.req_que_no;
3706
3707        /* need to pass original sp */
3708        if (orig_sp)
3709                qla_nvme_abort_set_option(abt_iocb, orig_sp);
3710
3711        /* Send the command to the firmware */
3712        wmb();
3713}
3714
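    /**
     * qla2x00_mb_iocb() - Build a mailbox-command IOCB.
     * @sp: SRB carrying the outbound mailbox registers
     * @mbx: MBX IOCB to populate
     */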
3715static void
3716qla2x00_mb_iocb(srb_t *sp, struct mbx_24xx_entry *mbx)
3717{
3718        int i, sz;
3719
3720        mbx->entry_type = MBX_IOCB_TYPE;
3721        mbx->handle = sp->handle;
3722        sz = min(ARRAY_SIZE(mbx->mb), ARRAY_SIZE(sp->u.iocb_cmd.u.mbx.out_mb));
3723
3724        for (i = 0; i < sz; i++)
3725                mbx->mb[i] = sp->u.iocb_cmd.u.mbx.out_mb[i];
3726}
3727
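    /**
     * qla2x00_ctpthru_cmd_iocb() - Build a CT pass-through command IOCB.
     * @sp: SRB for the CT request
     * @ct_pkt: CT IOCB to populate
     */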
3728static void
3729qla2x00_ctpthru_cmd_iocb(srb_t *sp, struct ct_entry_24xx *ct_pkt)
3730{
3731        sp->u.iocb_cmd.u.ctarg.iocb = ct_pkt;
3732        qla24xx_prep_ms_iocb(sp->vha, &sp->u.iocb_cmd.u.ctarg);
3733        ct_pkt->handle = sp->handle;
3734}
3735
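    /**
     * qla2x00_send_notify_ack_iocb() - Build a Notify Acknowledge IOCB.
     * @sp: SRB for the NACK
     * @nack: IOCB to populate from the immediate notify being acknowledged
     */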
3736static void qla2x00_send_notify_ack_iocb(srb_t *sp,
3737        struct nack_to_isp *nack)
3738{
3739        struct imm_ntfy_from_isp *ntfy = sp->u.iocb_cmd.u.nack.ntfy;
3740
3741        nack->entry_type = NOTIFY_ACK_TYPE;
3742        nack->entry_count = 1;
3743        nack->ox_id = ntfy->ox_id;
3744
3745        nack->u.isp24.handle = sp->handle;
3746        nack->u.isp24.nport_handle = ntfy->u.isp24.nport_handle;
3747        if (le16_to_cpu(ntfy->u.isp24.status) == IMM_NTFY_ELS) {
3748                nack->u.isp24.flags = ntfy->u.isp24.flags &
3749                        cpu_to_le16(NOTIFY24XX_FLAGS_PUREX_IOCB);
3750        }
3751        nack->u.isp24.srr_rx_id = ntfy->u.isp24.srr_rx_id;
3752        nack->u.isp24.status = ntfy->u.isp24.status;
3753        nack->u.isp24.status_subcode = ntfy->u.isp24.status_subcode;
3754        nack->u.isp24.fw_handle = ntfy->u.isp24.fw_handle;
3755        nack->u.isp24.exchange_address = ntfy->u.isp24.exchange_address;
3756        nack->u.isp24.srr_rel_offs = ntfy->u.isp24.srr_rel_offs;
3757        nack->u.isp24.srr_ui = ntfy->u.isp24.srr_ui;
3758        nack->u.isp24.srr_flags = 0;
3759        nack->u.isp24.srr_reject_code = 0;
3760        nack->u.isp24.srr_reject_code_expl = 0;
3761        nack->u.isp24.vp_index = ntfy->u.isp24.vp_index;
3762
3763        if (ntfy->u.isp24.status_subcode == ELS_PLOGI &&
3764            (le16_to_cpu(ntfy->u.isp24.flags) & NOTIFY24XX_FLAGS_FCSP) &&
3765            sp->vha->hw->flags.edif_enabled) {
3766                ql_dbg(ql_dbg_disc, sp->vha, 0x3074,
3767                    "%s PLOGI NACK sent with FC SECURITY bit, hdl=%x, loopid=%x, to pid %06x\n",
3768                    sp->name, sp->handle, sp->fcport->loop_id,
3769                    sp->fcport->d_id.b24);
3770                nack->u.isp24.flags |= cpu_to_le16(NOTIFY_ACK_FLAGS_FCSP);
3771        }
3772}
3773
3774/**
3775 * qla_nvme_ls() - Build an NVMe LS (link service) pass-through IOCB.
3776 * @sp: SRB for the LS request
     * @cmd_pkt: PT_LS4_REQUEST packet to populate
     */
3777static void
3778qla_nvme_ls(srb_t *sp, struct pt_ls4_request *cmd_pkt)
3779{
3780        struct srb_iocb *nvme;
3781
3782        nvme = &sp->u.iocb_cmd;
3783        cmd_pkt->entry_type = PT_LS4_REQUEST;
3784        cmd_pkt->entry_count = 1;
3785        cmd_pkt->control_flags = cpu_to_le16(CF_LS4_ORIGINATOR << CF_LS4_SHIFT);
3786
3787        cmd_pkt->timeout = cpu_to_le16(nvme->u.nvme.timeout_sec);
3788        cmd_pkt->nport_handle = cpu_to_le16(sp->fcport->loop_id);
3789        cmd_pkt->vp_index = sp->fcport->vha->vp_idx;
3790
3791        cmd_pkt->tx_dseg_count = cpu_to_le16(1);
3792        cmd_pkt->tx_byte_count = cpu_to_le32(nvme->u.nvme.cmd_len);
3793        cmd_pkt->dsd[0].length = cpu_to_le32(nvme->u.nvme.cmd_len);
3794        put_unaligned_le64(nvme->u.nvme.cmd_dma, &cmd_pkt->dsd[0].address);
3795
3796        cmd_pkt->rx_dseg_count = cpu_to_le16(1);
3797        cmd_pkt->rx_byte_count = cpu_to_le32(nvme->u.nvme.rsp_len);
3798        cmd_pkt->dsd[1].length = cpu_to_le32(nvme->u.nvme.rsp_len);
3799        put_unaligned_le64(nvme->u.nvme.rsp_dma, &cmd_pkt->dsd[1].address);
3800}
3801
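    /**
     * qla25xx_ctrlvp_iocb() - Build a VP control IOCB.
     * @sp: SRB for the VP control request
     * @vce: IOCB to populate
     */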
3802static void
3803qla25xx_ctrlvp_iocb(srb_t *sp, struct vp_ctrl_entry_24xx *vce)
3804{
3805        int map, pos;
3806
3807        vce->entry_type = VP_CTRL_IOCB_TYPE;
3808        vce->handle = sp->handle;
3809        vce->entry_count = 1;
3810        vce->command = cpu_to_le16(sp->u.iocb_cmd.u.ctrlvp.cmd);
3811        vce->vp_count = cpu_to_le16(1);
3812
3813        /*
3814         * The index map in firmware starts at 1, so decrement the index;
3815         * this is OK because index 0 is never used. For example,
         * vp_index 9 maps to byte 1, bit 0.
3816         */
3817        map = (sp->u.iocb_cmd.u.ctrlvp.vp_index - 1) / 8;
3818        pos = (sp->u.iocb_cmd.u.ctrlvp.vp_index - 1) & 7;
3819        vce->vp_idx_map[map] |= 1 << pos;
3820}
3821
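    /**
     * qla24xx_prlo_iocb() - Build an implicit PRLO IOCB.
     * @sp: SRB for the PRLO request
     * @logio: login/logout IOCB to populate
     */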
3822static void
3823qla24xx_prlo_iocb(srb_t *sp, struct logio_entry_24xx *logio)
3824{
3825        logio->entry_type = LOGINOUT_PORT_IOCB_TYPE;
3826        logio->control_flags =
3827            cpu_to_le16(LCF_COMMAND_PRLO|LCF_IMPL_PRLO);
3828
3829        logio->nport_handle = cpu_to_le16(sp->fcport->loop_id);
3830        logio->port_id[0] = sp->fcport->d_id.b.al_pa;
3831        logio->port_id[1] = sp->fcport->d_id.b.area;
3832        logio->port_id[2] = sp->fcport->d_id.b.domain;
3833        logio->vp_index = sp->fcport->vha->vp_idx;
3834}
3835
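    /**
     * qla2x00_start_sp() - Allocate an IOCB for @sp, build it, and start it.
     * @sp: command to send to the ISP
     *
     * Returns QLA_SUCCESS if the command was queued, -EIO if the adapter is
     * in an EEH error state, or -EAGAIN if no IOCB space is available.
     */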
3836int
3837qla2x00_start_sp(srb_t *sp)
3838{
3839        int rval = QLA_SUCCESS;
3840        scsi_qla_host_t *vha = sp->vha;
3841        struct qla_hw_data *ha = vha->hw;
3842        struct qla_qpair *qp = sp->qpair;
3843        void *pkt;
3844        unsigned long flags;
3845
3846        if (vha->hw->flags.eeh_busy)
3847                return -EIO;
3848
3849        spin_lock_irqsave(qp->qp_lock_ptr, flags);
3850        pkt = __qla2x00_alloc_iocbs(sp->qpair, sp);
3851        if (!pkt) {
3852                rval = -EAGAIN;
3853                ql_log(ql_log_warn, vha, 0x700c,
3854                    "qla2x00_alloc_iocbs failed.\n");
3855                goto done;
3856        }
3857
3858        switch (sp->type) {
3859        case SRB_LOGIN_CMD:
3860                IS_FWI2_CAPABLE(ha) ?
3861                    qla24xx_login_iocb(sp, pkt) :
3862                    qla2x00_login_iocb(sp, pkt);
3863                break;
3864        case SRB_PRLI_CMD:
3865                qla24xx_prli_iocb(sp, pkt);
3866                break;
3867        case SRB_LOGOUT_CMD:
3868                IS_FWI2_CAPABLE(ha) ?
3869                    qla24xx_logout_iocb(sp, pkt) :
3870                    qla2x00_logout_iocb(sp, pkt);
3871                break;
3872        case SRB_ELS_CMD_RPT:
3873        case SRB_ELS_CMD_HST:
3874                qla24xx_els_iocb(sp, pkt);
3875                break;
3876        case SRB_ELS_CMD_HST_NOLOGIN:
3877                qla_els_pt_iocb(sp->vha, pkt, &sp->u.bsg_cmd.u.els_arg);
3878                ((struct els_entry_24xx *)pkt)->handle = sp->handle;
3879                break;
3880        case SRB_CT_CMD:
3881                IS_FWI2_CAPABLE(ha) ?
3882                    qla24xx_ct_iocb(sp, pkt) :
3883                    qla2x00_ct_iocb(sp, pkt);
3884                break;
3885        case SRB_ADISC_CMD:
3886                IS_FWI2_CAPABLE(ha) ?
3887                    qla24xx_adisc_iocb(sp, pkt) :
3888                    qla2x00_adisc_iocb(sp, pkt);
3889                break;
3890        case SRB_TM_CMD:
3891                IS_QLAFX00(ha) ?
3892                    qlafx00_tm_iocb(sp, pkt) :
3893                    qla24xx_tm_iocb(sp, pkt);
3894                break;
3895        case SRB_FXIOCB_DCMD:
3896        case SRB_FXIOCB_BCMD:
3897                qlafx00_fxdisc_iocb(sp, pkt);
3898                break;
3899        case SRB_NVME_LS:
3900                qla_nvme_ls(sp, pkt);
3901                break;
3902        case SRB_ABT_CMD:
3903                IS_QLAFX00(ha) ?
3904                        qlafx00_abort_iocb(sp, pkt) :
3905                        qla24xx_abort_iocb(sp, pkt);
3906                break;
3907        case SRB_ELS_DCMD:
3908                qla24xx_els_logo_iocb(sp, pkt);
3909                break;
3910        case SRB_CT_PTHRU_CMD:
3911                qla2x00_ctpthru_cmd_iocb(sp, pkt);
3912                break;
3913        case SRB_MB_IOCB:
3914                qla2x00_mb_iocb(sp, pkt);
3915                break;
3916        case SRB_NACK_PLOGI:
3917        case SRB_NACK_PRLI:
3918        case SRB_NACK_LOGO:
3919                qla2x00_send_notify_ack_iocb(sp, pkt);
3920                break;
3921        case SRB_CTRL_VP:
3922                qla25xx_ctrlvp_iocb(sp, pkt);
3923                break;
3924        case SRB_PRLO_CMD:
3925                qla24xx_prlo_iocb(sp, pkt);
3926                break;
3927        case SRB_SA_UPDATE:
3928                qla24xx_sa_update_iocb(sp, pkt);
3929                break;
3930        case SRB_SA_REPLACE:
3931                qla24xx_sa_replace_iocb(sp, pkt);
3932                break;
3933        default:
3934                break;
3935        }
3936
3937        if (sp->start_timer)
3938                add_timer(&sp->u.iocb_cmd.timer);
3939
3940        wmb();
3941        qla2x00_start_iocbs(vha, qp->req);
3942done:
3943        spin_unlock_irqrestore(qp->qp_lock_ptr, flags);
3944        return rval;
3945}
3946
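    /**
     * qla25xx_build_bidir_iocb() - Build a bidirectional command IOCB.
     * @sp: SRB carrying the bsg job
     * @vha: HA context
     * @cmd_pkt: bidirectional IOCB to populate
     * @tot_dsds: total number of data segment descriptors
     */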
3947static void
3948qla25xx_build_bidir_iocb(srb_t *sp, struct scsi_qla_host *vha,
3949                                struct cmd_bidir *cmd_pkt, uint32_t tot_dsds)
3950{
3951        uint16_t avail_dsds;
3952        struct dsd64 *cur_dsd;
3953        uint32_t req_data_len = 0;
3954        uint32_t rsp_data_len = 0;
3955        struct scatterlist *sg;
3956        int index;
3957        int entry_count = 1;
3958        struct bsg_job *bsg_job = sp->u.bsg_job;
3959
3960        /* Update entry type to indicate a bidir command */
3961        put_unaligned_le32(COMMAND_BIDIRECTIONAL, &cmd_pkt->entry_type);
3962
3963        /* Set the transfer direction; for this command both flags are set.
3964         * Also set the BD_WRAP_BACK flag; the firmware takes care of
3965         * assigning DID=SID for outgoing packets.
3966         */
3967        cmd_pkt->wr_dseg_count = cpu_to_le16(bsg_job->request_payload.sg_cnt);
3968        cmd_pkt->rd_dseg_count = cpu_to_le16(bsg_job->reply_payload.sg_cnt);
3969        cmd_pkt->control_flags = cpu_to_le16(BD_WRITE_DATA | BD_READ_DATA |
3970                                                        BD_WRAP_BACK);
3971
3972        req_data_len = rsp_data_len = bsg_job->request_payload.payload_len;
3973        cmd_pkt->wr_byte_count = cpu_to_le32(req_data_len);
3974        cmd_pkt->rd_byte_count = cpu_to_le32(rsp_data_len);
3975        cmd_pkt->timeout = cpu_to_le16(qla2x00_get_async_timeout(vha) + 2);
3976
3977        vha->bidi_stats.transfer_bytes += req_data_len;
3978        vha->bidi_stats.io_count++;
3979
3980        vha->qla_stats.output_bytes += req_data_len;
3981        vha->qla_stats.output_requests++;
3982
3983        /* Only one DSD is available in the bidirectional IOCB; the
3984         * remaining DSDs are bundled in continuation IOCBs.
3985         */
3986        avail_dsds = 1;
3987        cur_dsd = &cmd_pkt->fcp_dsd;
3988
3989        index = 0;
3990
3991        for_each_sg(bsg_job->request_payload.sg_list, sg,
3992                                bsg_job->request_payload.sg_cnt, index) {
3993                cont_a64_entry_t *cont_pkt;
3994
3995                /* Allocate additional continuation packets */
3996                if (avail_dsds == 0) {
3997                        /* A Continuation Type 1 IOCB can accommodate
3998                         * 5 DSDs.
3999                         */
4000                        cont_pkt = qla2x00_prep_cont_type1_iocb(vha, vha->req);
4001                        cur_dsd = cont_pkt->dsd;
4002                        avail_dsds = 5;
4003                        entry_count++;
4004                }
4005                append_dsd64(&cur_dsd, sg);
4006                avail_dsds--;
4007        }
4008        /* For a read request the DSDs always go to a continuation IOCB
4009         * and follow the write DSDs. If there is room in the current IOCB
4010         * they are added to it; otherwise a new continuation IOCB is
4011         * allocated.
4012         */
4013        for_each_sg(bsg_job->reply_payload.sg_list, sg,
4014                                bsg_job->reply_payload.sg_cnt, index) {
4015                cont_a64_entry_t *cont_pkt;
4016
4017                /* Allocate additional continuation packets */
4018                if (avail_dsds == 0) {
4019                        /* A Continuation Type 1 IOCB can accommodate
4020                         * 5 DSDs.
4021                         */
4022                        cont_pkt = qla2x00_prep_cont_type1_iocb(vha, vha->req);
4023                        cur_dsd = cont_pkt->dsd;
4024                        avail_dsds = 5;
4025                        entry_count++;
4026                }
4027                append_dsd64(&cur_dsd, sg);
4028                avail_dsds--;
4029        }
4030        /* This value must equal the number of IOCBs required for this cmd */
4031        cmd_pkt->entry_count = entry_count;
4032}
4033
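    /**
     * qla2x00_start_bidir() - Send a bidirectional command to the ISP.
     * @sp: command to send to the ISP
     * @vha: HA context
     * @tot_dsds: total number of data segment descriptors
     *
     * Returns EXT_STATUS_OK on success or another EXT_STATUS_* code on
     * failure.
     */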
4034int
4035qla2x00_start_bidir(srb_t *sp, struct scsi_qla_host *vha, uint32_t tot_dsds)
4036{
4038        struct qla_hw_data *ha = vha->hw;
4039        unsigned long flags;
4040        uint32_t handle;
4041        uint16_t req_cnt;
4042        uint16_t cnt;
4043        uint32_t *clr_ptr;
4044        struct cmd_bidir *cmd_pkt = NULL;
4045        struct rsp_que *rsp;
4046        struct req_que *req;
4047        int rval = EXT_STATUS_OK;
4050
4051        rsp = ha->rsp_q_map[0];
4052        req = vha->req;
4053
4054        /* Send marker if required */
4055        if (vha->marker_needed != 0) {
4056                if (qla2x00_marker(vha, ha->base_qpair,
4057                        0, 0, MK_SYNC_ALL) != QLA_SUCCESS)
4058                        return EXT_STATUS_MAILBOX;
4059                vha->marker_needed = 0;
4060        }
4061
4062        /* Acquire ring specific lock */
4063        spin_lock_irqsave(&ha->hardware_lock, flags);
4064
4065        handle = qla2xxx_get_next_handle(req);
4066        if (handle == 0) {
4067                rval = EXT_STATUS_BUSY;
4068                goto queuing_error;
4069        }
4070
4071        /* Calculate number of IOCB required */
4072        req_cnt = qla24xx_calc_iocbs(vha, tot_dsds);
4073
4074        /* Check for room on request queue. */
4075        if (req->cnt < req_cnt + 2) {
4076                if (IS_SHADOW_REG_CAPABLE(ha)) {
4077                        cnt = *req->out_ptr;
4078                } else {
4079                        cnt = rd_reg_dword_relaxed(req->req_q_out);
4080                        if (qla2x00_check_reg16_for_disconnect(vha, cnt))
4081                                goto queuing_error;
4082                }
4083
4084                if  (req->ring_index < cnt)
4085                        req->cnt = cnt - req->ring_index;
4086                else
4087                        req->cnt = req->length -
4088                                (req->ring_index - cnt);
4089        }
4090        if (req->cnt < req_cnt + 2) {
4091                rval = EXT_STATUS_BUSY;
4092                goto queuing_error;
4093        }
4094
4095        cmd_pkt = (struct cmd_bidir *)req->ring_ptr;
4096        cmd_pkt->handle = make_handle(req->id, handle);
4097
4098        /* Zero out remaining portion of packet. */
4099        /* tagged queuing modifier -- default is TSK_SIMPLE (0).*/
4100        clr_ptr = (uint32_t *)cmd_pkt + 2;
4101        memset(clr_ptr, 0, REQUEST_ENTRY_SIZE - 8);
4102
4103        /* Set NPORT-ID  (of vha)*/
4104        cmd_pkt->nport_handle = cpu_to_le16(vha->self_login_loop_id);
4105        cmd_pkt->port_id[0] = vha->d_id.b.al_pa;
4106        cmd_pkt->port_id[1] = vha->d_id.b.area;
4107        cmd_pkt->port_id[2] = vha->d_id.b.domain;
4108
4109        qla25xx_build_bidir_iocb(sp, vha, cmd_pkt, tot_dsds);
4110        cmd_pkt->entry_status = (uint8_t) rsp->id;
4111        /* Build command packet. */
4112        req->current_outstanding_cmd = handle;
4113        req->outstanding_cmds[handle] = sp;
4114        sp->handle = handle;
4115        req->cnt -= req_cnt;
4116
4117        /* Send the command to the firmware */
4118        wmb();
4119        qla2x00_start_iocbs(vha, req);
4120queuing_error:
4121        spin_unlock_irqrestore(&ha->hardware_lock, flags);
4122
4123        return rval;
4124}
4125