linux/drivers/scsi/qla2xxx/qla_iocb.c
/*
 * QLogic Fibre Channel HBA Driver
 * Copyright (c) 2003-2014 QLogic Corporation
 *
 * See LICENSE.qla2xxx for copyright and licensing details.
 */
#include "qla_def.h"
#include "qla_target.h"

#include <linux/blkdev.h>
#include <linux/delay.h>

#include <scsi/scsi_tcq.h>

/**
 * qla2x00_get_cmd_direction() - Determine control_flag data direction.
 * @sp: SCSI command
 *
 * Returns the proper CF_* direction based on CDB.
 */
static inline uint16_t
qla2x00_get_cmd_direction(srb_t *sp)
{
        uint16_t cflags;
        struct scsi_cmnd *cmd = GET_CMD_SP(sp);
        struct scsi_qla_host *vha = sp->vha;

        cflags = 0;

        /* Set transfer direction */
        if (cmd->sc_data_direction == DMA_TO_DEVICE) {
                cflags = CF_WRITE;
                vha->qla_stats.output_bytes += scsi_bufflen(cmd);
                vha->qla_stats.output_requests++;
        } else if (cmd->sc_data_direction == DMA_FROM_DEVICE) {
                cflags = CF_READ;
                vha->qla_stats.input_bytes += scsi_bufflen(cmd);
                vha->qla_stats.input_requests++;
        }
        return (cflags);
}

/**
 * qla2x00_calc_iocbs_32() - Determine number of Command Type 2 and
 * Continuation Type 0 IOCBs to allocate.
 *
 * @dsds: number of data segment descriptors needed
 *
 * Returns the number of IOCB entries needed to store @dsds.
 */
uint16_t
qla2x00_calc_iocbs_32(uint16_t dsds)
{
        uint16_t iocbs;

        iocbs = 1;
        if (dsds > 3) {
                iocbs += (dsds - 3) / 7;
                if ((dsds - 3) % 7)
                        iocbs++;
        }
        return (iocbs);
}
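
/*
 * Example: a 10-element scatter list needs two request entries here, since
 * the Command Type 2 IOCB itself carries three DSDs and each Continuation
 * Type 0 IOCB carries seven more:
 *
 *   iocbs = 1 + (10 - 3) / 7 = 2      ((10 - 3) % 7 == 0, no extra entry)
 */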

/**
 * qla2x00_calc_iocbs_64() - Determine number of Command Type 3 and
 * Continuation Type 1 IOCBs to allocate.
 *
 * @dsds: number of data segment descriptors needed
 *
 * Returns the number of IOCB entries needed to store @dsds.
 */
uint16_t
qla2x00_calc_iocbs_64(uint16_t dsds)
{
        uint16_t iocbs;

        iocbs = 1;
        if (dsds > 2) {
                iocbs += (dsds - 2) / 5;
                if ((dsds - 2) % 5)
                        iocbs++;
        }
        return (iocbs);
}
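
/*
 * Equivalent closed form (a sketch; DIV_ROUND_UP() comes from
 * <linux/kernel.h>): for dsds > 2 the arithmetic above reduces to
 *
 *   iocbs = 1 + DIV_ROUND_UP(dsds - 2, 5);
 *
 * with two DSDs in the Command Type 3 IOCB and five in each Continuation
 * Type 1 IOCB.
 */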

/**
 * qla2x00_prep_cont_type0_iocb() - Initialize a Continuation Type 0 IOCB.
 * @vha: HA context
 *
 * Returns a pointer to the Continuation Type 0 IOCB packet.
 */
static inline cont_entry_t *
qla2x00_prep_cont_type0_iocb(struct scsi_qla_host *vha)
{
        cont_entry_t *cont_pkt;
        struct req_que *req = vha->req;
        /* Adjust ring index. */
        req->ring_index++;
        if (req->ring_index == req->length) {
                req->ring_index = 0;
                req->ring_ptr = req->ring;
        } else {
                req->ring_ptr++;
        }

        cont_pkt = (cont_entry_t *)req->ring_ptr;

        /* Load packet defaults. */
        put_unaligned_le32(CONTINUE_TYPE, &cont_pkt->entry_type);

        return (cont_pkt);
}

/**
 * qla2x00_prep_cont_type1_iocb() - Initialize a Continuation Type 1 IOCB.
 * @vha: HA context
 * @req: request queue
 *
 * Returns a pointer to the continuation type 1 IOCB packet.
 */
static inline cont_a64_entry_t *
qla2x00_prep_cont_type1_iocb(scsi_qla_host_t *vha, struct req_que *req)
{
        cont_a64_entry_t *cont_pkt;

        /* Adjust ring index. */
        req->ring_index++;
        if (req->ring_index == req->length) {
                req->ring_index = 0;
                req->ring_ptr = req->ring;
        } else {
                req->ring_ptr++;
        }

        cont_pkt = (cont_a64_entry_t *)req->ring_ptr;

        /* Load packet defaults. */
        put_unaligned_le32(IS_QLAFX00(vha->hw) ? CONTINUE_A64_TYPE_FX00 :
                           CONTINUE_A64_TYPE, &cont_pkt->entry_type);

        return (cont_pkt);
}
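
/*
 * Both helpers advance the request ring the same way. For example, with
 * req->length == 2048, ring_index steps 2046 -> 2047 -> 0, and on the wrap
 * ring_ptr snaps back to req->ring instead of being incremented.
 */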

inline int
qla24xx_configure_prot_mode(srb_t *sp, uint16_t *fw_prot_opts)
{
        struct scsi_cmnd *cmd = GET_CMD_SP(sp);
        uint8_t guard = scsi_host_get_guard(cmd->device->host);

        /* We always use DIF bundling for best performance */
        *fw_prot_opts = 0;

        /* Translate SCSI opcode to a protection opcode */
        switch (scsi_get_prot_op(cmd)) {
        case SCSI_PROT_READ_STRIP:
                *fw_prot_opts |= PO_MODE_DIF_REMOVE;
                break;
        case SCSI_PROT_WRITE_INSERT:
                *fw_prot_opts |= PO_MODE_DIF_INSERT;
                break;
        case SCSI_PROT_READ_INSERT:
                *fw_prot_opts |= PO_MODE_DIF_INSERT;
                break;
        case SCSI_PROT_WRITE_STRIP:
                *fw_prot_opts |= PO_MODE_DIF_REMOVE;
                break;
        case SCSI_PROT_READ_PASS:
        case SCSI_PROT_WRITE_PASS:
                if (guard & SHOST_DIX_GUARD_IP)
                        *fw_prot_opts |= PO_MODE_DIF_TCP_CKSUM;
                else
                        *fw_prot_opts |= PO_MODE_DIF_PASS;
                break;
        default:        /* Normal Request */
                *fw_prot_opts |= PO_MODE_DIF_PASS;
                break;
        }

        return scsi_prot_sg_count(cmd);
}
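
/*
 * Caller sketch (hypothetical locals): the return value is the number of
 * protection-data scatter entries, and the options word is later passed to
 * the firmware via the CRC-2 IOCB path:
 *
 *   uint16_t fw_prot_opts = 0;
 *   int nseg_prot = qla24xx_configure_prot_mode(sp, &fw_prot_opts);
 */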

/**
 * qla2x00_build_scsi_iocbs_32() - Build IOCB command utilizing 32bit
 * capable IOCB types.
 *
 * @sp: SRB command to process
 * @cmd_pkt: Command type 2 IOCB
 * @tot_dsds: Total number of segments to transfer
 */
void qla2x00_build_scsi_iocbs_32(srb_t *sp, cmd_entry_t *cmd_pkt,
    uint16_t tot_dsds)
{
        uint16_t        avail_dsds;
        struct dsd32    *cur_dsd;
        scsi_qla_host_t *vha;
        struct scsi_cmnd *cmd;
        struct scatterlist *sg;
        int i;

        cmd = GET_CMD_SP(sp);

        /* Update entry type to indicate Command Type 2 IOCB */
        put_unaligned_le32(COMMAND_TYPE, &cmd_pkt->entry_type);

        /* No data transfer */
        if (!scsi_bufflen(cmd) || cmd->sc_data_direction == DMA_NONE) {
                cmd_pkt->byte_count = cpu_to_le32(0);
                return;
        }

        vha = sp->vha;
        cmd_pkt->control_flags |= cpu_to_le16(qla2x00_get_cmd_direction(sp));

        /* Three DSDs are available in the Command Type 2 IOCB */
        avail_dsds = ARRAY_SIZE(cmd_pkt->dsd32);
        cur_dsd = cmd_pkt->dsd32;

        /* Load data segments */
        scsi_for_each_sg(cmd, sg, tot_dsds, i) {
                cont_entry_t *cont_pkt;

                /* Allocate additional continuation packets? */
                if (avail_dsds == 0) {
                        /*
                         * Seven DSDs are available in the Continuation
                         * Type 0 IOCB.
                         */
                        cont_pkt = qla2x00_prep_cont_type0_iocb(vha);
                        cur_dsd = cont_pkt->dsd;
                        avail_dsds = ARRAY_SIZE(cont_pkt->dsd);
                }

                append_dsd32(&cur_dsd, sg);
                avail_dsds--;
        }
}

/**
 * qla2x00_build_scsi_iocbs_64() - Build IOCB command utilizing 64bit
 * capable IOCB types.
 *
 * @sp: SRB command to process
 * @cmd_pkt: Command type 3 IOCB
 * @tot_dsds: Total number of segments to transfer
 */
void qla2x00_build_scsi_iocbs_64(srb_t *sp, cmd_entry_t *cmd_pkt,
    uint16_t tot_dsds)
{
        uint16_t        avail_dsds;
        struct dsd64    *cur_dsd;
        scsi_qla_host_t *vha;
        struct scsi_cmnd *cmd;
        struct scatterlist *sg;
        int i;

        cmd = GET_CMD_SP(sp);

        /* Update entry type to indicate Command Type 3 IOCB */
        put_unaligned_le32(COMMAND_A64_TYPE, &cmd_pkt->entry_type);

        /* No data transfer */
        if (!scsi_bufflen(cmd) || cmd->sc_data_direction == DMA_NONE) {
                cmd_pkt->byte_count = cpu_to_le32(0);
                return;
        }

        vha = sp->vha;
        cmd_pkt->control_flags |= cpu_to_le16(qla2x00_get_cmd_direction(sp));

        /* Two DSDs are available in the Command Type 3 IOCB */
        avail_dsds = ARRAY_SIZE(cmd_pkt->dsd64);
        cur_dsd = cmd_pkt->dsd64;

        /* Load data segments */
        scsi_for_each_sg(cmd, sg, tot_dsds, i) {
                cont_a64_entry_t *cont_pkt;

                /* Allocate additional continuation packets? */
                if (avail_dsds == 0) {
                        /*
                         * Five DSDs are available in the Continuation
                         * Type 1 IOCB.
                         */
                        cont_pkt = qla2x00_prep_cont_type1_iocb(vha, vha->req);
                        cur_dsd = cont_pkt->dsd;
                        avail_dsds = ARRAY_SIZE(cont_pkt->dsd);
                }

                append_dsd64(&cur_dsd, sg);
                avail_dsds--;
        }
}

/*
 * Find the first handle that is not in use, starting from
 * req->current_outstanding_cmd + 1. The caller must hold the lock that is
 * associated with @req.
 */
uint32_t qla2xxx_get_next_handle(struct req_que *req)
{
        uint32_t index, handle = req->current_outstanding_cmd;

        for (index = 1; index < req->num_outstanding_cmds; index++) {
                handle++;
                if (handle == req->num_outstanding_cmds)
                        handle = 1;
                if (!req->outstanding_cmds[handle])
                        return handle;
        }

        return 0;
}
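
/*
 * Example: with req->num_outstanding_cmds == 4 and
 * req->current_outstanding_cmd == 3, the loop probes handles 1, 2 and 3 in
 * that order and returns the first whose outstanding_cmds[] slot is NULL.
 * Handle 0 is never handed out, so a return of 0 always means "no free
 * handle".
 */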

/**
 * qla2x00_start_scsi() - Send a SCSI command to the ISP
 * @sp: command to send to the ISP
 *
 * Returns non-zero if a failure occurred, else zero.
 */
int
qla2x00_start_scsi(srb_t *sp)
{
        int             nseg;
        unsigned long   flags;
        scsi_qla_host_t *vha;
        struct scsi_cmnd *cmd;
        uint32_t        *clr_ptr;
        uint32_t        handle;
        cmd_entry_t     *cmd_pkt;
        uint16_t        cnt;
        uint16_t        req_cnt;
        uint16_t        tot_dsds;
        struct device_reg_2xxx __iomem *reg;
        struct qla_hw_data *ha;
        struct req_que *req;
        struct rsp_que *rsp;

        /* Setup device pointers. */
        vha = sp->vha;
        ha = vha->hw;
        reg = &ha->iobase->isp;
        cmd = GET_CMD_SP(sp);
        req = ha->req_q_map[0];
        rsp = ha->rsp_q_map[0];
        /* So we know we haven't pci_map'ed anything yet */
        tot_dsds = 0;

        /* Send marker if required */
        if (vha->marker_needed != 0) {
                if (qla2x00_marker(vha, ha->base_qpair, 0, 0, MK_SYNC_ALL) !=
                    QLA_SUCCESS) {
                        return (QLA_FUNCTION_FAILED);
                }
                vha->marker_needed = 0;
        }

        /* Acquire ring specific lock */
        spin_lock_irqsave(&ha->hardware_lock, flags);

        handle = qla2xxx_get_next_handle(req);
        if (handle == 0)
                goto queuing_error;

        /* Map the sg table so we have an accurate count of sg entries needed */
        if (scsi_sg_count(cmd)) {
                nseg = dma_map_sg(&ha->pdev->dev, scsi_sglist(cmd),
                    scsi_sg_count(cmd), cmd->sc_data_direction);
                if (unlikely(!nseg))
                        goto queuing_error;
        } else
                nseg = 0;

        tot_dsds = nseg;

        /* Calculate the number of request entries needed. */
        req_cnt = ha->isp_ops->calc_req_entries(tot_dsds);
        if (req->cnt < (req_cnt + 2)) {
                cnt = RD_REG_WORD_RELAXED(ISP_REQ_Q_OUT(ha, reg));
                if (req->ring_index < cnt)
                        req->cnt = cnt - req->ring_index;
                else
                        req->cnt = req->length -
                            (req->ring_index - cnt);
                /* If still no head room then bail out */
                if (req->cnt < (req_cnt + 2))
                        goto queuing_error;
        }

        /* Build command packet */
        req->current_outstanding_cmd = handle;
        req->outstanding_cmds[handle] = sp;
        sp->handle = handle;
        cmd->host_scribble = (unsigned char *)(unsigned long)handle;
        req->cnt -= req_cnt;

        cmd_pkt = (cmd_entry_t *)req->ring_ptr;
        cmd_pkt->handle = handle;
        /* Zero out remaining portion of packet. */
        clr_ptr = (uint32_t *)cmd_pkt + 2;
        memset(clr_ptr, 0, REQUEST_ENTRY_SIZE - 8);
        cmd_pkt->dseg_count = cpu_to_le16(tot_dsds);

        /* Set target ID and LUN number */
        SET_TARGET_ID(ha, cmd_pkt->target, sp->fcport->loop_id);
        cmd_pkt->lun = cpu_to_le16(cmd->device->lun);
        cmd_pkt->control_flags = cpu_to_le16(CF_SIMPLE_TAG);

        /* Load SCSI command packet. */
        memcpy(cmd_pkt->scsi_cdb, cmd->cmnd, cmd->cmd_len);
        cmd_pkt->byte_count = cpu_to_le32((uint32_t)scsi_bufflen(cmd));

        /* Build IOCB segments */
        ha->isp_ops->build_iocbs(sp, cmd_pkt, tot_dsds);

        /* Set total data segment count. */
        cmd_pkt->entry_count = (uint8_t)req_cnt;
        wmb();

        /* Adjust ring index. */
        req->ring_index++;
        if (req->ring_index == req->length) {
                req->ring_index = 0;
                req->ring_ptr = req->ring;
        } else
                req->ring_ptr++;

        sp->flags |= SRB_DMA_VALID;

        /* Set chip new ring index. */
        WRT_REG_WORD(ISP_REQ_Q_IN(ha, reg), req->ring_index);
        RD_REG_WORD_RELAXED(ISP_REQ_Q_IN(ha, reg));     /* PCI Posting. */

        /* Manage unprocessed RIO/ZIO commands in response queue. */
        if (vha->flags.process_response_queue &&
            rsp->ring_ptr->signature != RESPONSE_PROCESSED)
                qla2x00_process_response_queue(rsp);

        spin_unlock_irqrestore(&ha->hardware_lock, flags);
        return (QLA_SUCCESS);

queuing_error:
        if (tot_dsds)
                scsi_dma_unmap(cmd);

        spin_unlock_irqrestore(&ha->hardware_lock, flags);

        return (QLA_FUNCTION_FAILED);
}
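
/*
 * Head-room example for the check in qla2x00_start_scsi() above: with
 * req->length == 2048, req->ring_index == 2000 and a chip out pointer
 * cnt == 100, the free space is req->length - (ring_index - cnt) == 148
 * entries; the command is queued only if that still leaves req_cnt + 2
 * entries available.
 */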

/**
 * qla2x00_start_iocbs() - Execute the IOCB command
 * @vha: HA context
 * @req: request queue
 */
void
qla2x00_start_iocbs(struct scsi_qla_host *vha, struct req_que *req)
{
        struct qla_hw_data *ha = vha->hw;
        device_reg_t *reg = ISP_QUE_REG(ha, req->id);

        if (IS_P3P_TYPE(ha)) {
                qla82xx_start_iocbs(vha);
        } else {
                /* Adjust ring index. */
                req->ring_index++;
                if (req->ring_index == req->length) {
                        req->ring_index = 0;
                        req->ring_ptr = req->ring;
                } else
                        req->ring_ptr++;

                /* Set chip new ring index. */
                if (ha->mqenable || IS_QLA27XX(ha) || IS_QLA28XX(ha)) {
                        WRT_REG_DWORD(req->req_q_in, req->ring_index);
                } else if (IS_QLA83XX(ha)) {
                        WRT_REG_DWORD(req->req_q_in, req->ring_index);
                        RD_REG_DWORD_RELAXED(&ha->iobase->isp24.hccr);
                } else if (IS_QLAFX00(ha)) {
                        WRT_REG_DWORD(&reg->ispfx00.req_q_in, req->ring_index);
                        RD_REG_DWORD_RELAXED(&reg->ispfx00.req_q_in);
                        QLAFX00_SET_HST_INTR(ha, ha->rqstq_intr_code);
                } else if (IS_FWI2_CAPABLE(ha)) {
                        WRT_REG_DWORD(&reg->isp24.req_q_in, req->ring_index);
                        RD_REG_DWORD_RELAXED(&reg->isp24.req_q_in);
                } else {
                        WRT_REG_WORD(ISP_REQ_Q_IN(ha, &reg->isp),
                                req->ring_index);
                        RD_REG_WORD_RELAXED(ISP_REQ_Q_IN(ha, &reg->isp));
                }
        }
}

/**
 * __qla2x00_marker() - Send a marker IOCB to the firmware.
 * @vha: HA context
 * @qpair: queue pair pointer
 * @loop_id: loop ID
 * @lun: LUN
 * @type: marker modifier
 *
 * Can be called from both normal and interrupt context.
 *
 * Returns non-zero if a failure occurred, else zero.
 */
static int
__qla2x00_marker(struct scsi_qla_host *vha, struct qla_qpair *qpair,
    uint16_t loop_id, uint64_t lun, uint8_t type)
{
        mrk_entry_t *mrk;
        struct mrk_entry_24xx *mrk24 = NULL;
        struct req_que *req = qpair->req;
        struct qla_hw_data *ha = vha->hw;
        scsi_qla_host_t *base_vha = pci_get_drvdata(ha->pdev);

        mrk = (mrk_entry_t *)__qla2x00_alloc_iocbs(qpair, NULL);
        if (mrk == NULL) {
                ql_log(ql_log_warn, base_vha, 0x3026,
                    "Failed to allocate Marker IOCB.\n");

                return (QLA_FUNCTION_FAILED);
        }

        mrk->entry_type = MARKER_TYPE;
        mrk->modifier = type;
        if (type != MK_SYNC_ALL) {
                if (IS_FWI2_CAPABLE(ha)) {
                        mrk24 = (struct mrk_entry_24xx *) mrk;
                        mrk24->nport_handle = cpu_to_le16(loop_id);
                        int_to_scsilun(lun, (struct scsi_lun *)&mrk24->lun);
                        host_to_fcp_swap(mrk24->lun, sizeof(mrk24->lun));
                        mrk24->vp_index = vha->vp_idx;
                        mrk24->handle = MAKE_HANDLE(req->id, mrk24->handle);
                } else {
                        SET_TARGET_ID(ha, mrk->target, loop_id);
                        mrk->lun = cpu_to_le16((uint16_t)lun);
                }
        }
        wmb();

        qla2x00_start_iocbs(vha, req);

        return (QLA_SUCCESS);
}

int
qla2x00_marker(struct scsi_qla_host *vha, struct qla_qpair *qpair,
    uint16_t loop_id, uint64_t lun, uint8_t type)
{
        int ret;
        unsigned long flags = 0;

        spin_lock_irqsave(qpair->qp_lock_ptr, flags);
        ret = __qla2x00_marker(vha, qpair, loop_id, lun, type);
        spin_unlock_irqrestore(qpair->qp_lock_ptr, flags);

        return (ret);
}

/*
 * qla2x00_issue_marker
 *
 * Issue a marker IOCB.
 * The caller may hold the hardware lock, as indicated by @ha_locked.
 * The lock might be released and then reacquired.
 */
int qla2x00_issue_marker(scsi_qla_host_t *vha, int ha_locked)
{
        if (ha_locked) {
                if (__qla2x00_marker(vha, vha->hw->base_qpair, 0, 0,
                                        MK_SYNC_ALL) != QLA_SUCCESS)
                        return QLA_FUNCTION_FAILED;
        } else {
                if (qla2x00_marker(vha, vha->hw->base_qpair, 0, 0,
                                        MK_SYNC_ALL) != QLA_SUCCESS)
                        return QLA_FUNCTION_FAILED;
        }
        vha->marker_needed = 0;

        return QLA_SUCCESS;
}

static inline int
qla24xx_build_scsi_type_6_iocbs(srb_t *sp, struct cmd_type_6 *cmd_pkt,
        uint16_t tot_dsds)
{
        struct dsd64 *cur_dsd = NULL, *next_dsd;
        scsi_qla_host_t *vha;
        struct qla_hw_data *ha;
        struct scsi_cmnd *cmd;
        struct scatterlist *cur_seg;
        uint8_t avail_dsds;
        uint8_t first_iocb = 1;
        uint32_t dsd_list_len;
        struct dsd_dma *dsd_ptr;
        struct ct6_dsd *ctx;

        cmd = GET_CMD_SP(sp);

        /* Update entry type to indicate Command Type 6 IOCB */
        put_unaligned_le32(COMMAND_TYPE_6, &cmd_pkt->entry_type);

        /* No data transfer */
        if (!scsi_bufflen(cmd) || cmd->sc_data_direction == DMA_NONE) {
                cmd_pkt->byte_count = cpu_to_le32(0);
                return 0;
        }

        vha = sp->vha;
        ha = vha->hw;

        /* Set transfer direction */
        if (cmd->sc_data_direction == DMA_TO_DEVICE) {
                cmd_pkt->control_flags = cpu_to_le16(CF_WRITE_DATA);
                vha->qla_stats.output_bytes += scsi_bufflen(cmd);
                vha->qla_stats.output_requests++;
        } else if (cmd->sc_data_direction == DMA_FROM_DEVICE) {
                cmd_pkt->control_flags = cpu_to_le16(CF_READ_DATA);
                vha->qla_stats.input_bytes += scsi_bufflen(cmd);
                vha->qla_stats.input_requests++;
        }

        cur_seg = scsi_sglist(cmd);
        ctx = sp->u.scmd.ct6_ctx;

        while (tot_dsds) {
                avail_dsds = (tot_dsds > QLA_DSDS_PER_IOCB) ?
                    QLA_DSDS_PER_IOCB : tot_dsds;
                tot_dsds -= avail_dsds;
                dsd_list_len = (avail_dsds + 1) * QLA_DSD_SIZE;

                dsd_ptr = list_first_entry(&ha->gbl_dsd_list,
                    struct dsd_dma, list);
                next_dsd = dsd_ptr->dsd_addr;
                list_del(&dsd_ptr->list);
                ha->gbl_dsd_avail--;
                list_add_tail(&dsd_ptr->list, &ctx->dsd_list);
                ctx->dsd_use_cnt++;
                ha->gbl_dsd_inuse++;

                if (first_iocb) {
                        first_iocb = 0;
                        put_unaligned_le64(dsd_ptr->dsd_list_dma,
                                           &cmd_pkt->fcp_dsd.address);
                        cmd_pkt->fcp_dsd.length = cpu_to_le32(dsd_list_len);
                } else {
                        put_unaligned_le64(dsd_ptr->dsd_list_dma,
                                           &cur_dsd->address);
                        cur_dsd->length = cpu_to_le32(dsd_list_len);
                        cur_dsd++;
                }
                cur_dsd = next_dsd;
                while (avail_dsds) {
                        append_dsd64(&cur_dsd, cur_seg);
                        cur_seg = sg_next(cur_seg);
                        avail_dsds--;
                }
        }

        /* Null termination */
        cur_dsd->address = 0;
        cur_dsd->length = 0;
        cur_dsd++;
        cmd_pkt->control_flags |= CF_DATA_SEG_DESCR_ENABLE;
        return 0;
}

/*
 * qla24xx_calc_dsd_lists() - Determine number of DSD lists required
 * for Command Type 6.
 *
 * @dsds: number of data segment descriptors needed
 *
 * Returns the number of DSD lists needed to store @dsds.
 */
static inline uint16_t
qla24xx_calc_dsd_lists(uint16_t dsds)
{
        uint16_t dsd_lists = 0;

        dsd_lists = (dsds/QLA_DSDS_PER_IOCB);
        if (dsds % QLA_DSDS_PER_IOCB)
                dsd_lists++;
        return dsd_lists;
}
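
/*
 * Example: assuming QLA_DSDS_PER_IOCB is 37 (see qla_def.h), dsds == 100
 * needs 100 / 37 == 2 full lists plus one more for the remaining 26
 * descriptors, i.e. three DSD lists.
 */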

/**
 * qla24xx_build_scsi_iocbs() - Build IOCB command utilizing Command Type 7
 * IOCB types.
 *
 * @sp: SRB command to process
 * @cmd_pkt: Command type 7 IOCB
 * @tot_dsds: Total number of segments to transfer
 * @req: pointer to request queue
 */
inline void
qla24xx_build_scsi_iocbs(srb_t *sp, struct cmd_type_7 *cmd_pkt,
        uint16_t tot_dsds, struct req_que *req)
{
        uint16_t        avail_dsds;
        struct dsd64    *cur_dsd;
        scsi_qla_host_t *vha;
        struct scsi_cmnd *cmd;
        struct scatterlist *sg;
        int i;

        cmd = GET_CMD_SP(sp);

        /* Update entry type to indicate Command Type 7 IOCB */
        put_unaligned_le32(COMMAND_TYPE_7, &cmd_pkt->entry_type);

        /* No data transfer */
        if (!scsi_bufflen(cmd) || cmd->sc_data_direction == DMA_NONE) {
                cmd_pkt->byte_count = cpu_to_le32(0);
                return;
        }

        vha = sp->vha;

        /* Set transfer direction */
        if (cmd->sc_data_direction == DMA_TO_DEVICE) {
                cmd_pkt->task_mgmt_flags = cpu_to_le16(TMF_WRITE_DATA);
                vha->qla_stats.output_bytes += scsi_bufflen(cmd);
                vha->qla_stats.output_requests++;
        } else if (cmd->sc_data_direction == DMA_FROM_DEVICE) {
                cmd_pkt->task_mgmt_flags = cpu_to_le16(TMF_READ_DATA);
                vha->qla_stats.input_bytes += scsi_bufflen(cmd);
                vha->qla_stats.input_requests++;
        }

        /* One DSD is available in the Command Type 7 IOCB */
        avail_dsds = 1;
        cur_dsd = &cmd_pkt->dsd;

        /* Load data segments */
        scsi_for_each_sg(cmd, sg, tot_dsds, i) {
                cont_a64_entry_t *cont_pkt;

                /* Allocate additional continuation packets? */
                if (avail_dsds == 0) {
                        /*
                         * Five DSDs are available in the Continuation
                         * Type 1 IOCB.
                         */
                        cont_pkt = qla2x00_prep_cont_type1_iocb(vha, req);
                        cur_dsd = cont_pkt->dsd;
                        avail_dsds = ARRAY_SIZE(cont_pkt->dsd);
                }

                append_dsd64(&cur_dsd, sg);
                avail_dsds--;
        }
}

struct fw_dif_context {
        uint32_t ref_tag;
        uint16_t app_tag;
        uint8_t ref_tag_mask[4];        /* Validation/Replacement Mask */
        uint8_t app_tag_mask[2];        /* Validation/Replacement Mask */
};

/*
 * qla24xx_set_t10dif_tags() - Extract Ref and App tags from the SCSI command.
 */
static inline void
qla24xx_set_t10dif_tags(srb_t *sp, struct fw_dif_context *pkt,
    unsigned int protcnt)
{
        struct scsi_cmnd *cmd = GET_CMD_SP(sp);

        switch (scsi_get_prot_type(cmd)) {
        case SCSI_PROT_DIF_TYPE0:
                /*
                 * No check for ql2xenablehba_err_chk, as it would be an
                 * I/O error if hba tag generation is not done.
                 */
                pkt->ref_tag = cpu_to_le32((uint32_t)
                    (0xffffffff & scsi_get_lba(cmd)));

                if (!qla2x00_hba_err_chk_enabled(sp))
                        break;

                pkt->ref_tag_mask[0] = 0xff;
                pkt->ref_tag_mask[1] = 0xff;
                pkt->ref_tag_mask[2] = 0xff;
                pkt->ref_tag_mask[3] = 0xff;
                break;

        /*
         * For Type 2 protection: a 16 bit GUARD tag, plus a 32 bit REF
         * tag that has to match the LBA in the CDB + N
         */
        case SCSI_PROT_DIF_TYPE2:
                pkt->app_tag = cpu_to_le16(0);
                pkt->app_tag_mask[0] = 0x0;
                pkt->app_tag_mask[1] = 0x0;

                pkt->ref_tag = cpu_to_le32((uint32_t)
                    (0xffffffff & scsi_get_lba(cmd)));

                if (!qla2x00_hba_err_chk_enabled(sp))
                        break;

                /* enable ALL bytes of the ref tag */
                pkt->ref_tag_mask[0] = 0xff;
                pkt->ref_tag_mask[1] = 0xff;
                pkt->ref_tag_mask[2] = 0xff;
                pkt->ref_tag_mask[3] = 0xff;
                break;

        /* For Type 3 protection: 16 bit GUARD only */
        case SCSI_PROT_DIF_TYPE3:
                pkt->ref_tag_mask[0] = pkt->ref_tag_mask[1] =
                        pkt->ref_tag_mask[2] = pkt->ref_tag_mask[3] =
                                                                0x00;
                break;

        /*
         * For Type 1 protection: 16 bit GUARD tag, 32 bit REF tag, and
         * 16 bit app tag.
         */
        case SCSI_PROT_DIF_TYPE1:
                pkt->ref_tag = cpu_to_le32((uint32_t)
                    (0xffffffff & scsi_get_lba(cmd)));
                pkt->app_tag = cpu_to_le16(0);
                pkt->app_tag_mask[0] = 0x0;
                pkt->app_tag_mask[1] = 0x0;

                if (!qla2x00_hba_err_chk_enabled(sp))
                        break;

                /* enable ALL bytes of the ref tag */
                pkt->ref_tag_mask[0] = 0xff;
                pkt->ref_tag_mask[1] = 0xff;
                pkt->ref_tag_mask[2] = 0xff;
                pkt->ref_tag_mask[3] = 0xff;
                break;
        }
}
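
/*
 * For reference: each T10 DIF tuple is 8 bytes -- a 16 bit GUARD (CRC or
 * IP checksum), a 16 bit application tag and a 32 bit reference tag --
 * which is why the masks above cover two app-tag bytes and four ref-tag
 * bytes, and why protection data is walked in 8-byte strides below.
 */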

int
qla24xx_get_one_block_sg(uint32_t blk_sz, struct qla2_sgx *sgx,
        uint32_t *partial)
{
        struct scatterlist *sg;
        uint32_t cumulative_partial, sg_len;
        dma_addr_t sg_dma_addr;

        if (sgx->num_bytes == sgx->tot_bytes)
                return 0;

        sg = sgx->cur_sg;
        cumulative_partial = sgx->tot_partial;

        sg_dma_addr = sg_dma_address(sg);
        sg_len = sg_dma_len(sg);

        sgx->dma_addr = sg_dma_addr + sgx->bytes_consumed;

        if ((cumulative_partial + (sg_len - sgx->bytes_consumed)) >= blk_sz) {
                sgx->dma_len = (blk_sz - cumulative_partial);
                sgx->tot_partial = 0;
                sgx->num_bytes += blk_sz;
                *partial = 0;
        } else {
                sgx->dma_len = sg_len - sgx->bytes_consumed;
                sgx->tot_partial += sgx->dma_len;
                *partial = 1;
        }

        sgx->bytes_consumed += sgx->dma_len;

        if (sg_len == sgx->bytes_consumed) {
                sg = sg_next(sg);
                sgx->num_sg++;
                sgx->cur_sg = sg;
                sgx->bytes_consumed = 0;
        }

        return 1;
}
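
/*
 * Example: with blk_sz == 512 and a 1280-byte scatter element, the first
 * two calls return full 512-byte intervals (*partial == 0); the third
 * returns the trailing 256 bytes with *partial == 1, and the block is
 * completed from the next scatter element on a subsequent call.
 */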

int
qla24xx_walk_and_build_sglist_no_difb(struct qla_hw_data *ha, srb_t *sp,
        struct dsd64 *dsd, uint16_t tot_dsds, struct qla_tc_param *tc)
{
        void *next_dsd;
        uint8_t avail_dsds = 0;
        uint32_t dsd_list_len;
        struct dsd_dma *dsd_ptr;
        struct scatterlist *sg_prot;
        struct dsd64 *cur_dsd = dsd;
        uint16_t        used_dsds = tot_dsds;
        uint32_t        prot_int; /* protection interval */
        uint32_t        partial;
        struct qla2_sgx sgx;
        dma_addr_t      sle_dma;
        uint32_t        sle_dma_len, tot_prot_dma_len = 0;
        struct scsi_cmnd *cmd;

        memset(&sgx, 0, sizeof(struct qla2_sgx));
        if (sp) {
                cmd = GET_CMD_SP(sp);
                prot_int = cmd->device->sector_size;

                sgx.tot_bytes = scsi_bufflen(cmd);
                sgx.cur_sg = scsi_sglist(cmd);
                sgx.sp = sp;

                sg_prot = scsi_prot_sglist(cmd);
        } else if (tc) {
                prot_int      = tc->blk_sz;
                sgx.tot_bytes = tc->bufflen;
                sgx.cur_sg    = tc->sg;
                sg_prot       = tc->prot_sg;
        } else {
                BUG();
                return 1;
        }

        while (qla24xx_get_one_block_sg(prot_int, &sgx, &partial)) {

                sle_dma = sgx.dma_addr;
                sle_dma_len = sgx.dma_len;
alloc_and_fill:
                /* Allocate additional continuation packets? */
                if (avail_dsds == 0) {
                        avail_dsds = (used_dsds > QLA_DSDS_PER_IOCB) ?
                                        QLA_DSDS_PER_IOCB : used_dsds;
                        dsd_list_len = (avail_dsds + 1) * 12;
                        used_dsds -= avail_dsds;

                        /* allocate tracking DS */
                        dsd_ptr = kzalloc(sizeof(struct dsd_dma), GFP_ATOMIC);
                        if (!dsd_ptr)
                                return 1;

                        /* allocate new list */
                        dsd_ptr->dsd_addr = next_dsd =
                            dma_pool_alloc(ha->dl_dma_pool, GFP_ATOMIC,
                                &dsd_ptr->dsd_list_dma);

                        if (!next_dsd) {
                                /*
                                 * Need to cleanup only this dsd_ptr, rest
                                 * will be done by sp_free_dma()
                                 */
                                kfree(dsd_ptr);
                                return 1;
                        }

                        if (sp) {
                                list_add_tail(&dsd_ptr->list,
                                              &sp->u.scmd.crc_ctx->dsd_list);

                                sp->flags |= SRB_CRC_CTX_DSD_VALID;
                        } else {
                                list_add_tail(&dsd_ptr->list,
                                    &(tc->ctx->dsd_list));
                                *tc->ctx_dsd_alloced = 1;
                        }

                        /* add new list to cmd iocb or last list */
                        put_unaligned_le64(dsd_ptr->dsd_list_dma,
                                           &cur_dsd->address);
                        cur_dsd->length = cpu_to_le32(dsd_list_len);
                        cur_dsd = next_dsd;
                }
                put_unaligned_le64(sle_dma, &cur_dsd->address);
                cur_dsd->length = cpu_to_le32(sle_dma_len);
                cur_dsd++;
                avail_dsds--;

                if (partial == 0) {
                        /* Got a full protection interval */
                        sle_dma = sg_dma_address(sg_prot) + tot_prot_dma_len;
                        sle_dma_len = 8;

                        tot_prot_dma_len += sle_dma_len;
                        if (tot_prot_dma_len == sg_dma_len(sg_prot)) {
                                tot_prot_dma_len = 0;
                                sg_prot = sg_next(sg_prot);
                        }

                        partial = 1; /* So as to not re-enter this block */
                        goto alloc_and_fill;
                }
        }
        /* Null termination */
        cur_dsd->address = 0;
        cur_dsd->length = 0;
        cur_dsd++;
        return 0;
}
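
/*
 * Sizing note for the lists built above: each DSD entry is 12 bytes (a
 * 64-bit address plus a 32-bit length), and the "+ 1" in
 * (avail_dsds + 1) * 12 reserves the trailing entry of each list for the
 * chain element that points at the next list (or the null terminator on
 * the last one).
 */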

int
qla24xx_walk_and_build_sglist(struct qla_hw_data *ha, srb_t *sp,
        struct dsd64 *dsd, uint16_t tot_dsds, struct qla_tc_param *tc)
{
        void *next_dsd;
        uint8_t avail_dsds = 0;
        uint32_t dsd_list_len;
        struct dsd_dma *dsd_ptr;
        struct scatterlist *sg, *sgl;
        struct dsd64 *cur_dsd = dsd;
        int     i;
        uint16_t        used_dsds = tot_dsds;
        struct scsi_cmnd *cmd;

        if (sp) {
                cmd = GET_CMD_SP(sp);
                sgl = scsi_sglist(cmd);
        } else if (tc) {
                sgl = tc->sg;
        } else {
                BUG();
                return 1;
        }

        for_each_sg(sgl, sg, tot_dsds, i) {
                /* Allocate additional continuation packets? */
                if (avail_dsds == 0) {
                        avail_dsds = (used_dsds > QLA_DSDS_PER_IOCB) ?
                                        QLA_DSDS_PER_IOCB : used_dsds;
                        dsd_list_len = (avail_dsds + 1) * 12;
                        used_dsds -= avail_dsds;

                        /* allocate tracking DS */
                        dsd_ptr = kzalloc(sizeof(struct dsd_dma), GFP_ATOMIC);
                        if (!dsd_ptr)
                                return 1;

                        /* allocate new list */
                        dsd_ptr->dsd_addr = next_dsd =
                            dma_pool_alloc(ha->dl_dma_pool, GFP_ATOMIC,
                                &dsd_ptr->dsd_list_dma);

                        if (!next_dsd) {
                                /*
                                 * Need to cleanup only this dsd_ptr, rest
                                 * will be done by sp_free_dma()
                                 */
                                kfree(dsd_ptr);
                                return 1;
                        }

                        if (sp) {
                                list_add_tail(&dsd_ptr->list,
                                              &sp->u.scmd.crc_ctx->dsd_list);

                                sp->flags |= SRB_CRC_CTX_DSD_VALID;
                        } else {
                                list_add_tail(&dsd_ptr->list,
                                    &(tc->ctx->dsd_list));
                                *tc->ctx_dsd_alloced = 1;
                        }

                        /* add new list to cmd iocb or last list */
                        put_unaligned_le64(dsd_ptr->dsd_list_dma,
                                           &cur_dsd->address);
                        cur_dsd->length = cpu_to_le32(dsd_list_len);
                        cur_dsd = next_dsd;
                }
                append_dsd64(&cur_dsd, sg);
                avail_dsds--;
        }
        /* Null termination */
        cur_dsd->address = 0;
        cur_dsd->length = 0;
        cur_dsd++;
        return 0;
}

int
qla24xx_walk_and_build_prot_sglist(struct qla_hw_data *ha, srb_t *sp,
        struct dsd64 *cur_dsd, uint16_t tot_dsds, struct qla_tgt_cmd *tc)
{
        struct dsd_dma *dsd_ptr = NULL, *dif_dsd, *nxt_dsd;
        struct scatterlist *sg, *sgl;
        struct crc_context *difctx = NULL;
        struct scsi_qla_host *vha;
        uint dsd_list_len;
        uint avail_dsds = 0;
        uint used_dsds = tot_dsds;
        bool dif_local_dma_alloc = false;
        bool direction_to_device = false;
        int i;

        if (sp) {
                struct scsi_cmnd *cmd = GET_CMD_SP(sp);

                sgl = scsi_prot_sglist(cmd);
                vha = sp->vha;
                difctx = sp->u.scmd.crc_ctx;
                direction_to_device = cmd->sc_data_direction == DMA_TO_DEVICE;
                ql_dbg(ql_dbg_tgt + ql_dbg_verbose, vha, 0xe021,
                    "%s: scsi_cmnd: %p, crc_ctx: %p, sp: %p\n",
                    __func__, cmd, difctx, sp);
        } else if (tc) {
                vha = tc->vha;
                sgl = tc->prot_sg;
                difctx = tc->ctx;
                direction_to_device = tc->dma_data_direction == DMA_TO_DEVICE;
        } else {
                BUG();
                return 1;
        }

        ql_dbg(ql_dbg_tgt + ql_dbg_verbose, vha, 0xe021,
            "%s: enter (write=%u)\n", __func__, direction_to_device);

        /* if the initiator is doing a write, or the target a read */
        if (direction_to_device) {
                for_each_sg(sgl, sg, tot_dsds, i) {
                        u64 sle_phys = sg_phys(sg);

                        /* If SGE addr + len flips bits in upper 32-bits */
                        if (MSD(sle_phys + sg->length) ^ MSD(sle_phys)) {
                                ql_dbg(ql_dbg_tgt + ql_dbg_verbose, vha, 0xe022,
                                    "%s: page boundary crossing (phys=%llx len=%x)\n",
                                    __func__, sle_phys, sg->length);

                                if (difctx) {
                                        ha->dif_bundle_crossed_pages++;
                                        dif_local_dma_alloc = true;
                                } else {
                                        ql_dbg(ql_dbg_tgt + ql_dbg_verbose,
                                            vha, 0xe022,
                                            "%s: difctx pointer is NULL\n",
                                            __func__);
                                }
                                break;
                        }
                }
                ha->dif_bundle_writes++;
        } else {
                ha->dif_bundle_reads++;
        }

        if (ql2xdifbundlinginternalbuffers)
                dif_local_dma_alloc = direction_to_device;

        if (dif_local_dma_alloc) {
                u32 track_difbundl_buf = 0;
                u32 ldma_sg_len = 0;
                u8 ldma_needed = 1;

                difctx->no_dif_bundl = 0;
                difctx->dif_bundl_len = 0;

                /* Track DSD buffers */
                INIT_LIST_HEAD(&difctx->ldif_dsd_list);
                /* Track local DMA buffers */
                INIT_LIST_HEAD(&difctx->ldif_dma_hndl_list);

                for_each_sg(sgl, sg, tot_dsds, i) {
                        u32 sglen = sg_dma_len(sg);

                        ql_dbg(ql_dbg_tgt + ql_dbg_verbose, vha, 0xe023,
                            "%s: sg[%x] (phys=%llx sglen=%x) ldma_sg_len: %x dif_bundl_len: %x ldma_needed: %x\n",
                            __func__, i, (u64)sg_phys(sg), sglen, ldma_sg_len,
                            difctx->dif_bundl_len, ldma_needed);

                        while (sglen) {
                                u32 xfrlen = 0;

                                if (ldma_needed) {
                                        /*
                                         * Allocate list item to store
                                         * the DMA buffers
                                         */
                                        dsd_ptr = kzalloc(sizeof(*dsd_ptr),
                                            GFP_ATOMIC);
                                        if (!dsd_ptr) {
                                                ql_dbg(ql_dbg_tgt, vha, 0xe024,
                                                    "%s: failed alloc dsd_ptr\n",
                                                    __func__);
                                                return 1;
                                        }
                                        ha->dif_bundle_kallocs++;

                                        /* allocate dma buffer */
                                        dsd_ptr->dsd_addr = dma_pool_alloc
                                                (ha->dif_bundl_pool, GFP_ATOMIC,
                                                 &dsd_ptr->dsd_list_dma);
                                        if (!dsd_ptr->dsd_addr) {
                                                ql_dbg(ql_dbg_tgt, vha, 0xe024,
                                                    "%s: failed alloc ->dsd_addr\n",
                                                    __func__);
                                                /*
                                                 * need to cleanup only this
                                                 * dsd_ptr rest will be done
                                                 * by sp_free_dma()
                                                 */
                                                kfree(dsd_ptr);
                                                ha->dif_bundle_kallocs--;
                                                return 1;
                                        }
                                        ha->dif_bundle_dma_allocs++;
                                        ldma_needed = 0;
                                        difctx->no_dif_bundl++;
                                        list_add_tail(&dsd_ptr->list,
                                            &difctx->ldif_dma_hndl_list);
                                }

                                /* xfrlen is min of dma pool size and sglen */
                                xfrlen = (sglen >
                                   (DIF_BUNDLING_DMA_POOL_SIZE - ldma_sg_len)) ?
                                    DIF_BUNDLING_DMA_POOL_SIZE - ldma_sg_len :
                                    sglen;

                                /* replace with local allocated dma buffer */
                                sg_pcopy_to_buffer(sgl, sg_nents(sgl),
                                    dsd_ptr->dsd_addr + ldma_sg_len, xfrlen,
                                    difctx->dif_bundl_len);
                                difctx->dif_bundl_len += xfrlen;
                                sglen -= xfrlen;
                                ldma_sg_len += xfrlen;
                                if (ldma_sg_len == DIF_BUNDLING_DMA_POOL_SIZE ||
                                    sg_is_last(sg)) {
                                        ldma_needed = 1;
                                        ldma_sg_len = 0;
                                }
                        }
                }

                track_difbundl_buf = used_dsds = difctx->no_dif_bundl;
                ql_dbg(ql_dbg_tgt + ql_dbg_verbose, vha, 0xe025,
                    "dif_bundl_len=%x, no_dif_bundl=%x track_difbundl_buf: %x\n",
                    difctx->dif_bundl_len, difctx->no_dif_bundl,
                    track_difbundl_buf);

                if (sp)
                        sp->flags |= SRB_DIF_BUNDL_DMA_VALID;
                else
                        tc->prot_flags = DIF_BUNDL_DMA_VALID;

                list_for_each_entry_safe(dif_dsd, nxt_dsd,
                    &difctx->ldif_dma_hndl_list, list) {
                        u32 sglen = (difctx->dif_bundl_len >
                            DIF_BUNDLING_DMA_POOL_SIZE) ?
                            DIF_BUNDLING_DMA_POOL_SIZE : difctx->dif_bundl_len;

                        BUG_ON(track_difbundl_buf == 0);

                        /* Allocate additional continuation packets? */
                        if (avail_dsds == 0) {
                                ql_dbg(ql_dbg_tgt + ql_dbg_verbose, vha,
                                    0xe024,
                                    "%s: adding continuation iocbs\n",
                                    __func__);
                                avail_dsds = (used_dsds > QLA_DSDS_PER_IOCB) ?
                                    QLA_DSDS_PER_IOCB : used_dsds;
                                dsd_list_len = (avail_dsds + 1) * 12;
                                used_dsds -= avail_dsds;

                                /* allocate tracking DS */
                                dsd_ptr = kzalloc(sizeof(*dsd_ptr), GFP_ATOMIC);
                                if (!dsd_ptr) {
                                        ql_dbg(ql_dbg_tgt, vha, 0xe026,
                                            "%s: failed alloc dsd_ptr\n",
                                            __func__);
                                        return 1;
                                }
                                ha->dif_bundle_kallocs++;

                                difctx->no_ldif_dsd++;
                                /* allocate new list */
                                dsd_ptr->dsd_addr =
                                    dma_pool_alloc(ha->dl_dma_pool, GFP_ATOMIC,
                                        &dsd_ptr->dsd_list_dma);
                                if (!dsd_ptr->dsd_addr) {
                                        ql_dbg(ql_dbg_tgt, vha, 0xe026,
                                            "%s: failed alloc ->dsd_addr\n",
                                            __func__);
                                        /*
                                         * need to cleanup only this dsd_ptr
                                         * rest will be done by sp_free_dma()
                                         */
                                        kfree(dsd_ptr);
                                        ha->dif_bundle_kallocs--;
                                        return 1;
                                }
                                ha->dif_bundle_dma_allocs++;

                                if (sp) {
                                        list_add_tail(&dsd_ptr->list,
                                            &difctx->ldif_dsd_list);
                                        sp->flags |= SRB_CRC_CTX_DSD_VALID;
                                } else {
                                        list_add_tail(&dsd_ptr->list,
                                            &difctx->ldif_dsd_list);
                                        tc->ctx_dsd_alloced = 1;
                                }

                                /* add new list to cmd iocb or last list */
                                put_unaligned_le64(dsd_ptr->dsd_list_dma,
                                                   &cur_dsd->address);
                                cur_dsd->length = cpu_to_le32(dsd_list_len);
                                cur_dsd = dsd_ptr->dsd_addr;
                        }
                        put_unaligned_le64(dif_dsd->dsd_list_dma,
                                           &cur_dsd->address);
                        cur_dsd->length = cpu_to_le32(sglen);
                        cur_dsd++;
                        avail_dsds--;
                        difctx->dif_bundl_len -= sglen;
                        track_difbundl_buf--;
                }

                ql_dbg(ql_dbg_tgt + ql_dbg_verbose, vha, 0xe026,
                    "%s: no_ldif_dsd:%x, no_dif_bundl:%x\n", __func__,
                    difctx->no_ldif_dsd, difctx->no_dif_bundl);
        } else {
                for_each_sg(sgl, sg, tot_dsds, i) {
                        /* Allocate additional continuation packets? */
                        if (avail_dsds == 0) {
                                avail_dsds = (used_dsds > QLA_DSDS_PER_IOCB) ?
                                    QLA_DSDS_PER_IOCB : used_dsds;
                                dsd_list_len = (avail_dsds + 1) * 12;
                                used_dsds -= avail_dsds;

                                /* allocate tracking DS */
                                dsd_ptr = kzalloc(sizeof(*dsd_ptr), GFP_ATOMIC);
                                if (!dsd_ptr) {
                                        ql_dbg(ql_dbg_tgt + ql_dbg_verbose,
                                            vha, 0xe027,
                                            "%s: failed alloc dsd_dma...\n",
                                            __func__);
                                        return 1;
                                }

                                /* allocate new list */
                                dsd_ptr->dsd_addr =
                                    dma_pool_alloc(ha->dl_dma_pool, GFP_ATOMIC,
                                        &dsd_ptr->dsd_list_dma);
                                if (!dsd_ptr->dsd_addr) {
                                        /* need to cleanup only this dsd_ptr */
                                        /* rest will be done by sp_free_dma() */
                                        kfree(dsd_ptr);
                                        return 1;
                                }

                                if (sp) {
                                        list_add_tail(&dsd_ptr->list,
                                            &difctx->dsd_list);
                                        sp->flags |= SRB_CRC_CTX_DSD_VALID;
                                } else {
                                        list_add_tail(&dsd_ptr->list,
                                            &difctx->dsd_list);
                                        tc->ctx_dsd_alloced = 1;
                                }

                                /* add new list to cmd iocb or last list */
                                put_unaligned_le64(dsd_ptr->dsd_list_dma,
                                                   &cur_dsd->address);
                                cur_dsd->length = cpu_to_le32(dsd_list_len);
                                cur_dsd = dsd_ptr->dsd_addr;
                        }
                        append_dsd64(&cur_dsd, sg);
                        avail_dsds--;
                }
        }
        /* Null termination */
        cur_dsd->address = 0;
        cur_dsd->length = 0;
        cur_dsd++;
        return 0;
}
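
/*
 * Bundling example for the local-DMA path above, assuming
 * DIF_BUNDLING_DMA_POOL_SIZE == 1024: protection scatter elements of
 * 600 + 600 bytes fill the first pool buffer with 600 + 424 bytes and
 * spill the remaining 176 into a second buffer, ending with
 * no_dif_bundl == 2 and dif_bundl_len == 1200.
 */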

/**
 * qla24xx_build_scsi_crc_2_iocbs() - Build IOCB command utilizing Command
 * Type 6 IOCB types.
 *
 * @sp: SRB command to process
 * @cmd_pkt: Command type CRC_2 IOCB
 * @tot_dsds: Total number of segments to transfer
 * @tot_prot_dsds: Total number of segments with protection information
 * @fw_prot_opts: Protection options to be passed to firmware
 */
static inline int
qla24xx_build_scsi_crc_2_iocbs(srb_t *sp, struct cmd_type_crc_2 *cmd_pkt,
    uint16_t tot_dsds, uint16_t tot_prot_dsds, uint16_t fw_prot_opts)
{
        struct dsd64            *cur_dsd;
        uint32_t                *fcp_dl;
        scsi_qla_host_t         *vha;
        struct scsi_cmnd        *cmd;
        uint32_t                total_bytes = 0;
        uint32_t                data_bytes;
        uint32_t                dif_bytes;
        uint8_t                 bundling = 1;
        uint16_t                blk_size;
        struct crc_context      *crc_ctx_pkt = NULL;
        struct qla_hw_data      *ha;
        uint8_t                 additional_fcpcdb_len;
        uint16_t                fcp_cmnd_len;
        struct fcp_cmnd         *fcp_cmnd;
        dma_addr_t              crc_ctx_dma;

        cmd = GET_CMD_SP(sp);

        /* Update entry type to indicate Command Type CRC_2 IOCB */
        put_unaligned_le32(COMMAND_TYPE_CRC_2, &cmd_pkt->entry_type);

        vha = sp->vha;
        ha = vha->hw;

        /* No data transfer */
        data_bytes = scsi_bufflen(cmd);
        if (!data_bytes || cmd->sc_data_direction == DMA_NONE) {
                cmd_pkt->byte_count = cpu_to_le32(0);
                return QLA_SUCCESS;
        }

        cmd_pkt->vp_index = sp->vha->vp_idx;

        /* Set transfer direction */
        if (cmd->sc_data_direction == DMA_TO_DEVICE) {
                cmd_pkt->control_flags =
                    cpu_to_le16(CF_WRITE_DATA);
        } else if (cmd->sc_data_direction == DMA_FROM_DEVICE) {
                cmd_pkt->control_flags =
                    cpu_to_le16(CF_READ_DATA);
        }

        if ((scsi_get_prot_op(cmd) == SCSI_PROT_READ_INSERT) ||
            (scsi_get_prot_op(cmd) == SCSI_PROT_WRITE_STRIP) ||
            (scsi_get_prot_op(cmd) == SCSI_PROT_READ_STRIP) ||
            (scsi_get_prot_op(cmd) == SCSI_PROT_WRITE_INSERT))
                bundling = 0;

        /* Allocate CRC context from global pool */
        crc_ctx_pkt = sp->u.scmd.crc_ctx =
            dma_pool_zalloc(ha->dl_dma_pool, GFP_ATOMIC, &crc_ctx_dma);

        if (!crc_ctx_pkt)
                goto crc_queuing_error;

        crc_ctx_pkt->crc_ctx_dma = crc_ctx_dma;

        sp->flags |= SRB_CRC_CTX_DMA_VALID;

        /* Set handle */
        crc_ctx_pkt->handle = cmd_pkt->handle;

        INIT_LIST_HEAD(&crc_ctx_pkt->dsd_list);

        qla24xx_set_t10dif_tags(sp, (struct fw_dif_context *)
            &crc_ctx_pkt->ref_tag, tot_prot_dsds);

        put_unaligned_le64(crc_ctx_dma, &cmd_pkt->crc_context_address);
        cmd_pkt->crc_context_len = CRC_CONTEXT_LEN_FW;

        /* Determine SCSI command length -- align to 4 byte boundary */
        if (cmd->cmd_len > 16) {
                additional_fcpcdb_len = cmd->cmd_len - 16;
                if ((cmd->cmd_len % 4) != 0) {
                        /* SCSI cmd > 16 bytes must be multiple of 4 */
                        goto crc_queuing_error;
                }
                fcp_cmnd_len = 12 + cmd->cmd_len + 4;
        } else {
                additional_fcpcdb_len = 0;
                fcp_cmnd_len = 12 + 16 + 4;
        }

        fcp_cmnd = &crc_ctx_pkt->fcp_cmnd;

        fcp_cmnd->additional_cdb_len = additional_fcpcdb_len;
        if (cmd->sc_data_direction == DMA_TO_DEVICE)
                fcp_cmnd->additional_cdb_len |= 1;
        else if (cmd->sc_data_direction == DMA_FROM_DEVICE)
                fcp_cmnd->additional_cdb_len |= 2;

        int_to_scsilun(cmd->device->lun, &fcp_cmnd->lun);
        memcpy(fcp_cmnd->cdb, cmd->cmnd, cmd->cmd_len);
        cmd_pkt->fcp_cmnd_dseg_len = cpu_to_le16(fcp_cmnd_len);
        put_unaligned_le64(crc_ctx_dma + CRC_CONTEXT_FCPCMND_OFF,
                           &cmd_pkt->fcp_cmnd_dseg_address);
        fcp_cmnd->task_management = 0;
        fcp_cmnd->task_attribute = TSK_SIMPLE;

        cmd_pkt->fcp_rsp_dseg_len = 0; /* Let response come in status iocb */
1491
1492        /* Compute dif len and adjust data len to include protection */
1493        dif_bytes = 0;
1494        blk_size = cmd->device->sector_size;
1495        dif_bytes = (data_bytes / blk_size) * 8;
1496
1497        switch (scsi_get_prot_op(GET_CMD_SP(sp))) {
1498        case SCSI_PROT_READ_INSERT:
1499        case SCSI_PROT_WRITE_STRIP:
1500                total_bytes = data_bytes;
1501                data_bytes += dif_bytes;
1502                break;
1503
1504        case SCSI_PROT_READ_STRIP:
1505        case SCSI_PROT_WRITE_INSERT:
1506        case SCSI_PROT_READ_PASS:
1507        case SCSI_PROT_WRITE_PASS:
1508                total_bytes = data_bytes + dif_bytes;
1509                break;
1510        default:
1511                BUG();
1512        }
1513
1514        if (!qla2x00_hba_err_chk_enabled(sp))
1515                fw_prot_opts |= 0x10; /* Disable Guard tag checking */
1516        /* HBA error checking enabled */
1517        else if (IS_PI_UNINIT_CAPABLE(ha)) {
1518                if ((scsi_get_prot_type(GET_CMD_SP(sp)) == SCSI_PROT_DIF_TYPE1)
1519                    || (scsi_get_prot_type(GET_CMD_SP(sp)) ==
1520                        SCSI_PROT_DIF_TYPE2))
1521                        fw_prot_opts |= BIT_10;
1522                else if (scsi_get_prot_type(GET_CMD_SP(sp)) ==
1523                    SCSI_PROT_DIF_TYPE3)
1524                        fw_prot_opts |= BIT_11;
1525        }
1526
1527        if (!bundling) {
1528                cur_dsd = &crc_ctx_pkt->u.nobundling.data_dsd[0];
1529        } else {
1530                /*
1531                 * Configure bundling if we need to fetch interleaving
1532                 * protection PCI accesses
1533                 */
1534                fw_prot_opts |= PO_ENABLE_DIF_BUNDLING;
1535                crc_ctx_pkt->u.bundling.dif_byte_count = cpu_to_le32(dif_bytes);
1536                crc_ctx_pkt->u.bundling.dseg_count = cpu_to_le16(tot_dsds -
1537                                                        tot_prot_dsds);
1538                cur_dsd = &crc_ctx_pkt->u.bundling.data_dsd[0];
1539        }
1540
1541        /* Finish the common fields of CRC pkt */
1542        crc_ctx_pkt->blk_size = cpu_to_le16(blk_size);
1543        crc_ctx_pkt->prot_opts = cpu_to_le16(fw_prot_opts);
1544        crc_ctx_pkt->byte_count = cpu_to_le32(data_bytes);
1545        crc_ctx_pkt->guard_seed = cpu_to_le16(0);
1546        /* Fibre channel byte count */
1547        cmd_pkt->byte_count = cpu_to_le32(total_bytes);
1548        fcp_dl = (uint32_t *)(crc_ctx_pkt->fcp_cmnd.cdb + 16 +
1549            additional_fcpcdb_len);
1550        *fcp_dl = htonl(total_bytes);
1551
1552        if (!data_bytes || cmd->sc_data_direction == DMA_NONE) {
1553                cmd_pkt->byte_count = cpu_to_le32(0);
1554                return QLA_SUCCESS;
1555        }
1556
1557        /* Walk the data segments. */
1558        cmd_pkt->control_flags |= cpu_to_le16(CF_DATA_SEG_DESCR_ENABLE);
1559
1560        if (!bundling && tot_prot_dsds) {
1561                if (qla24xx_walk_and_build_sglist_no_difb(ha, sp,
1562                        cur_dsd, tot_dsds, NULL))
1563                        goto crc_queuing_error;
1564        } else if (qla24xx_walk_and_build_sglist(ha, sp, cur_dsd,
1565                        (tot_dsds - tot_prot_dsds), NULL))
1566                goto crc_queuing_error;
1567
1568        if (bundling && tot_prot_dsds) {
1569                /* Walk the DIF segments. */
1570                cmd_pkt->control_flags |= cpu_to_le16(CF_DIF_SEG_DESCR_ENABLE);
1571                cur_dsd = &crc_ctx_pkt->u.bundling.dif_dsd;
1572                if (qla24xx_walk_and_build_prot_sglist(ha, sp, cur_dsd,
1573                                tot_prot_dsds, NULL))
1574                        goto crc_queuing_error;
1575        }
1576        return QLA_SUCCESS;
1577
1578crc_queuing_error:
1579        /* Cleanup will be performed by the caller */
1580
1581        return QLA_FUNCTION_FAILED;
1582}
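
/*
 * Worked example (editorial): the DIF length accounting above in
 * miniature.  Each logical block carries an 8-byte protection tuple,
 * so a 64 KB transfer of 512-byte blocks adds 128 * 8 = 1024 DIF
 * bytes; when the tuples travel on the wire (the *_PASS, READ_STRIP
 * and WRITE_INSERT cases) the Fibre Channel byte count becomes
 * data_bytes + dif_bytes.
 */
static inline uint32_t example_fc_total_bytes(uint32_t data_bytes,
                                              uint16_t blk_size,
                                              bool dif_on_wire)
{
        uint32_t dif_bytes = (data_bytes / blk_size) * 8;

        return dif_on_wire ? data_bytes + dif_bytes : data_bytes;
}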
1583
1584/**
1585 * qla24xx_start_scsi() - Send a SCSI command to the ISP
1586 * @sp: command to send to the ISP
1587 *
1588 * Returns non-zero if a failure occurred, else zero.
1589 */
1590int
1591qla24xx_start_scsi(srb_t *sp)
1592{
1593        int             nseg;
1594        unsigned long   flags;
1595        uint32_t        *clr_ptr;
1596        uint32_t        handle;
1597        struct cmd_type_7 *cmd_pkt;
1598        uint16_t        cnt;
1599        uint16_t        req_cnt;
1600        uint16_t        tot_dsds;
1601        struct req_que *req = NULL;
1602        struct scsi_cmnd *cmd = GET_CMD_SP(sp);
1603        struct scsi_qla_host *vha = sp->vha;
1604        struct qla_hw_data *ha = vha->hw;
1605
1606        /* Setup device pointers. */
1607        req = vha->req;
1608
1609        /* So we know we haven't pci_map'ed anything yet */
1610        tot_dsds = 0;
1611
1612        /* Send marker if required */
1613        if (vha->marker_needed != 0) {
1614                if (qla2x00_marker(vha, ha->base_qpair, 0, 0, MK_SYNC_ALL) !=
1615                    QLA_SUCCESS)
1616                        return QLA_FUNCTION_FAILED;
1617                vha->marker_needed = 0;
1618        }
1619
1620        /* Acquire ring specific lock */
1621        spin_lock_irqsave(&ha->hardware_lock, flags);
1622
1623        handle = qla2xxx_get_next_handle(req);
1624        if (handle == 0)
1625                goto queuing_error;
1626
1627        /* Map the sg table so we have an accurate count of sg entries needed */
1628        if (scsi_sg_count(cmd)) {
1629                nseg = dma_map_sg(&ha->pdev->dev, scsi_sglist(cmd),
1630                    scsi_sg_count(cmd), cmd->sc_data_direction);
1631                if (unlikely(!nseg))
1632                        goto queuing_error;
1633        } else
1634                nseg = 0;
1635
1636        tot_dsds = nseg;
1637        req_cnt = qla24xx_calc_iocbs(vha, tot_dsds);
1638        if (req->cnt < (req_cnt + 2)) {
1639                cnt = IS_SHADOW_REG_CAPABLE(ha) ? *req->out_ptr :
1640                    RD_REG_DWORD_RELAXED(req->req_q_out);
1641                if (req->ring_index < cnt)
1642                        req->cnt = cnt - req->ring_index;
1643                else
1644                        req->cnt = req->length -
1645                                (req->ring_index - cnt);
1646                if (req->cnt < (req_cnt + 2))
1647                        goto queuing_error;
1648        }
1649
1650        /* Build command packet. */
1651        req->current_outstanding_cmd = handle;
1652        req->outstanding_cmds[handle] = sp;
1653        sp->handle = handle;
1654        cmd->host_scribble = (unsigned char *)(unsigned long)handle;
1655        req->cnt -= req_cnt;
1656
1657        cmd_pkt = (struct cmd_type_7 *)req->ring_ptr;
1658        cmd_pkt->handle = MAKE_HANDLE(req->id, handle);
1659
1660        /* Zero out remaining portion of packet. */
1661        /*    tagged queuing modifier -- default is TSK_SIMPLE (0). */
1662        clr_ptr = (uint32_t *)cmd_pkt + 2;
1663        memset(clr_ptr, 0, REQUEST_ENTRY_SIZE - 8);
1664        cmd_pkt->dseg_count = cpu_to_le16(tot_dsds);
1665
1666        /* Set NPORT-ID and LUN number. */
1667        cmd_pkt->nport_handle = cpu_to_le16(sp->fcport->loop_id);
1668        cmd_pkt->port_id[0] = sp->fcport->d_id.b.al_pa;
1669        cmd_pkt->port_id[1] = sp->fcport->d_id.b.area;
1670        cmd_pkt->port_id[2] = sp->fcport->d_id.b.domain;
1671        cmd_pkt->vp_index = sp->vha->vp_idx;
1672
1673        int_to_scsilun(cmd->device->lun, &cmd_pkt->lun);
1674        host_to_fcp_swap((uint8_t *)&cmd_pkt->lun, sizeof(cmd_pkt->lun));
1675
1676        cmd_pkt->task = TSK_SIMPLE;
1677
1678        /* Load SCSI command packet. */
1679        memcpy(cmd_pkt->fcp_cdb, cmd->cmnd, cmd->cmd_len);
1680        host_to_fcp_swap(cmd_pkt->fcp_cdb, sizeof(cmd_pkt->fcp_cdb));
1681
1682        cmd_pkt->byte_count = cpu_to_le32((uint32_t)scsi_bufflen(cmd));
1683
1684        /* Build IOCB segments */
1685        qla24xx_build_scsi_iocbs(sp, cmd_pkt, tot_dsds, req);
1686
1687        /* Set total data segment count. */
1688        cmd_pkt->entry_count = (uint8_t)req_cnt;
1689        wmb();
1690        /* Adjust ring index. */
1691        req->ring_index++;
1692        if (req->ring_index == req->length) {
1693                req->ring_index = 0;
1694                req->ring_ptr = req->ring;
1695        } else
1696                req->ring_ptr++;
1697
1698        sp->flags |= SRB_DMA_VALID;
1699
1700        /* Set chip new ring index. */
1701        WRT_REG_DWORD(req->req_q_in, req->ring_index);
1702
1703        spin_unlock_irqrestore(&ha->hardware_lock, flags);
1704        return QLA_SUCCESS;
1705
1706queuing_error:
1707        if (tot_dsds)
1708                scsi_dma_unmap(cmd);
1709
1710        spin_unlock_irqrestore(&ha->hardware_lock, flags);
1711
1712        return QLA_FUNCTION_FAILED;
1713}
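
/*
 * Sketch (editorial): the request-ring space check used above.  With
 * a ring of `length` slots, producer index `in` (ring_index) and
 * consumer index `out` read back from hardware or its shadow, the
 * free-slot count is the circular distance from producer to consumer:
 */
static inline uint16_t example_ring_space(uint16_t length, uint16_t in,
                                          uint16_t out)
{
        return (in < out) ? out - in : length - (in - out);
}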
1714
1715/**
1716 * qla24xx_dif_start_scsi() - Send a SCSI command to the ISP
1717 * @sp: command to send to the ISP
1718 *
1719 * Returns non-zero if a failure occurred, else zero.
1720 */
1721int
1722qla24xx_dif_start_scsi(srb_t *sp)
1723{
1724        int                     nseg;
1725        unsigned long           flags;
1726        uint32_t                *clr_ptr;
1727        uint32_t                handle;
1728        uint16_t                cnt;
1729        uint16_t                req_cnt = 0;
1730        uint16_t                tot_dsds;
1731        uint16_t                tot_prot_dsds;
1732        uint16_t                fw_prot_opts = 0;
1733        struct req_que          *req = NULL;
1734        struct rsp_que          *rsp = NULL;
1735        struct scsi_cmnd        *cmd = GET_CMD_SP(sp);
1736        struct scsi_qla_host    *vha = sp->vha;
1737        struct qla_hw_data      *ha = vha->hw;
1738        struct cmd_type_crc_2   *cmd_pkt;
1739        uint32_t                status = 0;
1740
1741#define QDSS_GOT_Q_SPACE        BIT_0
1742
1743        /* Only process protection commands or CDBs longer than 16 bytes here */
1744        if (scsi_get_prot_op(cmd) == SCSI_PROT_NORMAL) {
1745                if (cmd->cmd_len <= 16)
1746                        return qla24xx_start_scsi(sp);
1747        }
1748
1749        /* Setup device pointers. */
1750        req = vha->req;
1751        rsp = req->rsp;
1752
1753        /* So we know we haven't pci_map'ed anything yet */
1754        tot_dsds = 0;
1755
1756        /* Send marker if required */
1757        if (vha->marker_needed != 0) {
1758                if (qla2x00_marker(vha, ha->base_qpair, 0, 0, MK_SYNC_ALL) !=
1759                    QLA_SUCCESS)
1760                        return QLA_FUNCTION_FAILED;
1761                vha->marker_needed = 0;
1762        }
1763
1764        /* Acquire ring specific lock */
1765        spin_lock_irqsave(&ha->hardware_lock, flags);
1766
1767        handle = qla2xxx_get_next_handle(req);
1768        if (handle == 0)
1769                goto queuing_error;
1770
1771        /* Compute number of required data segments */
1772        /* Map the sg table so we have an accurate count of sg entries needed */
1773        if (scsi_sg_count(cmd)) {
1774                nseg = dma_map_sg(&ha->pdev->dev, scsi_sglist(cmd),
1775                    scsi_sg_count(cmd), cmd->sc_data_direction);
1776                if (unlikely(!nseg))
1777                        goto queuing_error;
1778                else
1779                        sp->flags |= SRB_DMA_VALID;
1780
1781                if ((scsi_get_prot_op(cmd) == SCSI_PROT_READ_INSERT) ||
1782                    (scsi_get_prot_op(cmd) == SCSI_PROT_WRITE_STRIP)) {
1783                        struct qla2_sgx sgx;
1784                        uint32_t        partial;
1785
1786                        memset(&sgx, 0, sizeof(struct qla2_sgx));
1787                        sgx.tot_bytes = scsi_bufflen(cmd);
1788                        sgx.cur_sg = scsi_sglist(cmd);
1789                        sgx.sp = sp;
1790
1791                        nseg = 0;
1792                        while (qla24xx_get_one_block_sg(
1793                            cmd->device->sector_size, &sgx, &partial))
1794                                nseg++;
1795                }
1796        } else
1797                nseg = 0;
1798
1799        /* number of required data segments */
1800        tot_dsds = nseg;
1801
1802        /* Compute number of required protection segments */
1803        if (qla24xx_configure_prot_mode(sp, &fw_prot_opts)) {
1804                nseg = dma_map_sg(&ha->pdev->dev, scsi_prot_sglist(cmd),
1805                    scsi_prot_sg_count(cmd), cmd->sc_data_direction);
1806                if (unlikely(!nseg))
1807                        goto queuing_error;
1808                else
1809                        sp->flags |= SRB_CRC_PROT_DMA_VALID;
1810
1811                if ((scsi_get_prot_op(cmd) == SCSI_PROT_READ_INSERT) ||
1812                    (scsi_get_prot_op(cmd) == SCSI_PROT_WRITE_STRIP)) {
1813                        nseg = scsi_bufflen(cmd) / cmd->device->sector_size;
1814                }
1815        } else {
1816                nseg = 0;
1817        }
1818
1819        req_cnt = 1;
1820        /* Total Data and protection sg segment(s) */
1821        tot_prot_dsds = nseg;
1822        tot_dsds += nseg;
1823        if (req->cnt < (req_cnt + 2)) {
1824                cnt = IS_SHADOW_REG_CAPABLE(ha) ? *req->out_ptr :
1825                    RD_REG_DWORD_RELAXED(req->req_q_out);
1826                if (req->ring_index < cnt)
1827                        req->cnt = cnt - req->ring_index;
1828                else
1829                        req->cnt = req->length -
1830                                (req->ring_index - cnt);
1831                if (req->cnt < (req_cnt + 2))
1832                        goto queuing_error;
1833        }
1834
1835        status |= QDSS_GOT_Q_SPACE;
1836
1837        /* Build header part of command packet (excluding the OPCODE). */
1838        req->current_outstanding_cmd = handle;
1839        req->outstanding_cmds[handle] = sp;
1840        sp->handle = handle;
1841        cmd->host_scribble = (unsigned char *)(unsigned long)handle;
1842        req->cnt -= req_cnt;
1843
1844        /* Fill-in common area */
1845        cmd_pkt = (struct cmd_type_crc_2 *)req->ring_ptr;
1846        cmd_pkt->handle = MAKE_HANDLE(req->id, handle);
1847
1848        clr_ptr = (uint32_t *)cmd_pkt + 2;
1849        memset(clr_ptr, 0, REQUEST_ENTRY_SIZE - 8);
1850
1851        /* Set NPORT-ID and LUN number. */
1852        cmd_pkt->nport_handle = cpu_to_le16(sp->fcport->loop_id);
1853        cmd_pkt->port_id[0] = sp->fcport->d_id.b.al_pa;
1854        cmd_pkt->port_id[1] = sp->fcport->d_id.b.area;
1855        cmd_pkt->port_id[2] = sp->fcport->d_id.b.domain;
1856
1857        int_to_scsilun(cmd->device->lun, &cmd_pkt->lun);
1858        host_to_fcp_swap((uint8_t *)&cmd_pkt->lun, sizeof(cmd_pkt->lun));
1859
1860        /* Total Data and protection segment(s) */
1861        cmd_pkt->dseg_count = cpu_to_le16(tot_dsds);
1862
1863        /* Build IOCB segments and adjust for data protection segments */
1864        if (qla24xx_build_scsi_crc_2_iocbs(sp, (struct cmd_type_crc_2 *)
1865            req->ring_ptr, tot_dsds, tot_prot_dsds, fw_prot_opts) !=
1866                QLA_SUCCESS)
1867                goto queuing_error;
1868
1869        cmd_pkt->entry_count = (uint8_t)req_cnt;
1870        /* Specify response queue number where completion should happen */
1871        cmd_pkt->entry_status = (uint8_t) rsp->id;
1872        cmd_pkt->timeout = cpu_to_le16(0);
1873        wmb();
1874
1875        /* Adjust ring index. */
1876        req->ring_index++;
1877        if (req->ring_index == req->length) {
1878                req->ring_index = 0;
1879                req->ring_ptr = req->ring;
1880        } else
1881                req->ring_ptr++;
1882
1883        /* Set chip new ring index. */
1884        WRT_REG_DWORD(req->req_q_in, req->ring_index);
1885
1886        spin_unlock_irqrestore(&ha->hardware_lock, flags);
1887
1888        return QLA_SUCCESS;
1889
1890queuing_error:
1891        if (status & QDSS_GOT_Q_SPACE) {
1892                req->outstanding_cmds[handle] = NULL;
1893                req->cnt += req_cnt;
1894        }
1895        /* Cleanup will be performed by the caller (queuecommand) */
1896
1897        spin_unlock_irqrestore(&ha->hardware_lock, flags);
1898        return QLA_FUNCTION_FAILED;
1899}
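
/*
 * Editorial sketch: why the DIF path above re-counts segments.  For
 * the operations where the HBA inserts or strips protection data
 * (READ_INSERT/WRITE_STRIP), the payload is split on logical-block
 * boundaries, so the protection recount simply becomes one segment
 * per block:
 */
static inline uint32_t example_dif_block_dsds(uint32_t buf_len,
                                              uint16_t sector_size)
{
        return buf_len / sector_size;   /* one DSD per logical block */
}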
1900
1901/**
1902 * qla2xxx_start_scsi_mq() - Send a SCSI command to the ISP
1903 * @sp: command to send to the ISP
1904 *
1905 * Returns non-zero if a failure occurred, else zero.
1906 */
1907static int
1908qla2xxx_start_scsi_mq(srb_t *sp)
1909{
1910        int             nseg;
1911        unsigned long   flags;
1912        uint32_t        *clr_ptr;
1913        uint32_t        handle;
1914        struct cmd_type_7 *cmd_pkt;
1915        uint16_t        cnt;
1916        uint16_t        req_cnt;
1917        uint16_t        tot_dsds;
1918        struct req_que *req = NULL;
1919        struct scsi_cmnd *cmd = GET_CMD_SP(sp);
1920        struct scsi_qla_host *vha = sp->fcport->vha;
1921        struct qla_hw_data *ha = vha->hw;
1922        struct qla_qpair *qpair = sp->qpair;
1923
1924        /* Acquire qpair specific lock */
1925        spin_lock_irqsave(&qpair->qp_lock, flags);
1926
1927        /* Setup qpair pointers */
1928        req = qpair->req;
1929
1930        /* So we know we haven't pci_map'ed anything yet */
1931        tot_dsds = 0;
1932
1933        /* Send marker if required */
1934        if (vha->marker_needed != 0) {
1935                if (__qla2x00_marker(vha, qpair, 0, 0, MK_SYNC_ALL) !=
1936                    QLA_SUCCESS) {
1937                        spin_unlock_irqrestore(&qpair->qp_lock, flags);
1938                        return QLA_FUNCTION_FAILED;
1939                }
1940                vha->marker_needed = 0;
1941        }
1942
1943        handle = qla2xxx_get_next_handle(req);
1944        if (handle == 0)
1945                goto queuing_error;
1946
1947        /* Map the sg table so we have an accurate count of sg entries needed */
1948        if (scsi_sg_count(cmd)) {
1949                nseg = dma_map_sg(&ha->pdev->dev, scsi_sglist(cmd),
1950                    scsi_sg_count(cmd), cmd->sc_data_direction);
1951                if (unlikely(!nseg))
1952                        goto queuing_error;
1953        } else
1954                nseg = 0;
1955
1956        tot_dsds = nseg;
1957        req_cnt = qla24xx_calc_iocbs(vha, tot_dsds);
1958        if (req->cnt < (req_cnt + 2)) {
1959                cnt = IS_SHADOW_REG_CAPABLE(ha) ? *req->out_ptr :
1960                    RD_REG_DWORD_RELAXED(req->req_q_out);
1961                if (req->ring_index < cnt)
1962                        req->cnt = cnt - req->ring_index;
1963                else
1964                        req->cnt = req->length -
1965                                (req->ring_index - cnt);
1966                if (req->cnt < (req_cnt + 2))
1967                        goto queuing_error;
1968        }
1969
1970        /* Build command packet. */
1971        req->current_outstanding_cmd = handle;
1972        req->outstanding_cmds[handle] = sp;
1973        sp->handle = handle;
1974        cmd->host_scribble = (unsigned char *)(unsigned long)handle;
1975        req->cnt -= req_cnt;
1976
1977        cmd_pkt = (struct cmd_type_7 *)req->ring_ptr;
1978        cmd_pkt->handle = MAKE_HANDLE(req->id, handle);
1979
1980        /* Zero out remaining portion of packet. */
1981        /*    tagged queuing modifier -- default is TSK_SIMPLE (0). */
1982        clr_ptr = (uint32_t *)cmd_pkt + 2;
1983        memset(clr_ptr, 0, REQUEST_ENTRY_SIZE - 8);
1984        cmd_pkt->dseg_count = cpu_to_le16(tot_dsds);
1985
1986        /* Set NPORT-ID and LUN number. */
1987        cmd_pkt->nport_handle = cpu_to_le16(sp->fcport->loop_id);
1988        cmd_pkt->port_id[0] = sp->fcport->d_id.b.al_pa;
1989        cmd_pkt->port_id[1] = sp->fcport->d_id.b.area;
1990        cmd_pkt->port_id[2] = sp->fcport->d_id.b.domain;
1991        cmd_pkt->vp_index = sp->fcport->vha->vp_idx;
1992
1993        int_to_scsilun(cmd->device->lun, &cmd_pkt->lun);
1994        host_to_fcp_swap((uint8_t *)&cmd_pkt->lun, sizeof(cmd_pkt->lun));
1995
1996        cmd_pkt->task = TSK_SIMPLE;
1997
1998        /* Load SCSI command packet. */
1999        memcpy(cmd_pkt->fcp_cdb, cmd->cmnd, cmd->cmd_len);
2000        host_to_fcp_swap(cmd_pkt->fcp_cdb, sizeof(cmd_pkt->fcp_cdb));
2001
2002        cmd_pkt->byte_count = cpu_to_le32((uint32_t)scsi_bufflen(cmd));
2003
2004        /* Build IOCB segments */
2005        qla24xx_build_scsi_iocbs(sp, cmd_pkt, tot_dsds, req);
2006
2007        /* Set total data segment count. */
2008        cmd_pkt->entry_count = (uint8_t)req_cnt;
2009        wmb();
2010        /* Adjust ring index. */
2011        req->ring_index++;
2012        if (req->ring_index == req->length) {
2013                req->ring_index = 0;
2014                req->ring_ptr = req->ring;
2015        } else
2016                req->ring_ptr++;
2017
2018        sp->flags |= SRB_DMA_VALID;
2019
2020        /* Set chip new ring index. */
2021        WRT_REG_DWORD(req->req_q_in, req->ring_index);
2022
2023        spin_unlock_irqrestore(&qpair->qp_lock, flags);
2024        return QLA_SUCCESS;
2025
2026queuing_error:
2027        if (tot_dsds)
2028                scsi_dma_unmap(cmd);
2029
2030        spin_unlock_irqrestore(&qpair->qp_lock, flags);
2031
2032        return QLA_FUNCTION_FAILED;
2033}
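
/*
 * Editorial sketch: the handle bookkeeping above stashes the
 * outstanding-command index in host_scribble at submission time.  A
 * completion path could recover it with the mirror-image cast
 * (hypothetical helper, not the driver's actual completion code):
 */
static inline uint32_t example_handle_from_cmd(struct scsi_cmnd *cmd)
{
        return (uint32_t)(unsigned long)cmd->host_scribble;
}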
2034
2035
2036/**
2037 * qla2xxx_dif_start_scsi_mq() - Send a SCSI command to the ISP
2038 * @sp: command to send to the ISP
2039 *
2040 * Returns non-zero if a failure occurred, else zero.
2041 */
2042int
2043qla2xxx_dif_start_scsi_mq(srb_t *sp)
2044{
2045        int                     nseg;
2046        unsigned long           flags;
2047        uint32_t                *clr_ptr;
2048        uint32_t                handle;
2049        uint16_t                cnt;
2050        uint16_t                req_cnt = 0;
2051        uint16_t                tot_dsds;
2052        uint16_t                tot_prot_dsds;
2053        uint16_t                fw_prot_opts = 0;
2054        struct req_que          *req = NULL;
2055        struct rsp_que          *rsp = NULL;
2056        struct scsi_cmnd        *cmd = GET_CMD_SP(sp);
2057        struct scsi_qla_host    *vha = sp->fcport->vha;
2058        struct qla_hw_data      *ha = vha->hw;
2059        struct cmd_type_crc_2   *cmd_pkt;
2060        uint32_t                status = 0;
2061        struct qla_qpair        *qpair = sp->qpair;
2062
2063#define QDSS_GOT_Q_SPACE        BIT_0
2064
2065        /* Check for host side state */
2066        if (!qpair->online) {
2067                cmd->result = DID_NO_CONNECT << 16;
2068                return QLA_INTERFACE_ERROR;
2069        }
2070
2071        if (!qpair->difdix_supported &&
2072                scsi_get_prot_op(cmd) != SCSI_PROT_NORMAL) {
2073                cmd->result = DID_NO_CONNECT << 16;
2074                return QLA_INTERFACE_ERROR;
2075        }
2076
2077        /* Only process protection commands or CDBs longer than 16 bytes here */
2078        if (scsi_get_prot_op(cmd) == SCSI_PROT_NORMAL) {
2079                if (cmd->cmd_len <= 16)
2080                        return qla2xxx_start_scsi_mq(sp);
2081        }
2082
2083        spin_lock_irqsave(&qpair->qp_lock, flags);
2084
2085        /* Setup qpair pointers */
2086        rsp = qpair->rsp;
2087        req = qpair->req;
2088
2089        /* So we know we haven't pci_map'ed anything yet */
2090        tot_dsds = 0;
2091
2092        /* Send marker if required */
2093        if (vha->marker_needed != 0) {
2094                if (__qla2x00_marker(vha, qpair, 0, 0, MK_SYNC_ALL) !=
2095                    QLA_SUCCESS) {
2096                        spin_unlock_irqrestore(&qpair->qp_lock, flags);
2097                        return QLA_FUNCTION_FAILED;
2098                }
2099                vha->marker_needed = 0;
2100        }
2101
2102        handle = qla2xxx_get_next_handle(req);
2103        if (handle == 0)
2104                goto queuing_error;
2105
2106        /* Compute number of required data segments */
2107        /* Map the sg table so we have an accurate count of sg entries needed */
2108        if (scsi_sg_count(cmd)) {
2109                nseg = dma_map_sg(&ha->pdev->dev, scsi_sglist(cmd),
2110                    scsi_sg_count(cmd), cmd->sc_data_direction);
2111                if (unlikely(!nseg))
2112                        goto queuing_error;
2113                else
2114                        sp->flags |= SRB_DMA_VALID;
2115
2116                if ((scsi_get_prot_op(cmd) == SCSI_PROT_READ_INSERT) ||
2117                    (scsi_get_prot_op(cmd) == SCSI_PROT_WRITE_STRIP)) {
2118                        struct qla2_sgx sgx;
2119                        uint32_t        partial;
2120
2121                        memset(&sgx, 0, sizeof(struct qla2_sgx));
2122                        sgx.tot_bytes = scsi_bufflen(cmd);
2123                        sgx.cur_sg = scsi_sglist(cmd);
2124                        sgx.sp = sp;
2125
2126                        nseg = 0;
2127                        while (qla24xx_get_one_block_sg(
2128                            cmd->device->sector_size, &sgx, &partial))
2129                                nseg++;
2130                }
2131        } else
2132                nseg = 0;
2133
2134        /* number of required data segments */
2135        tot_dsds = nseg;
2136
2137        /* Compute number of required protection segments */
2138        if (qla24xx_configure_prot_mode(sp, &fw_prot_opts)) {
2139                nseg = dma_map_sg(&ha->pdev->dev, scsi_prot_sglist(cmd),
2140                    scsi_prot_sg_count(cmd), cmd->sc_data_direction);
2141                if (unlikely(!nseg))
2142                        goto queuing_error;
2143                else
2144                        sp->flags |= SRB_CRC_PROT_DMA_VALID;
2145
2146                if ((scsi_get_prot_op(cmd) == SCSI_PROT_READ_INSERT) ||
2147                    (scsi_get_prot_op(cmd) == SCSI_PROT_WRITE_STRIP)) {
2148                        nseg = scsi_bufflen(cmd) / cmd->device->sector_size;
2149                }
2150        } else {
2151                nseg = 0;
2152        }
2153
2154        req_cnt = 1;
2155        /* Total Data and protection sg segment(s) */
2156        tot_prot_dsds = nseg;
2157        tot_dsds += nseg;
2158        if (req->cnt < (req_cnt + 2)) {
2159                cnt = IS_SHADOW_REG_CAPABLE(ha) ? *req->out_ptr :
2160                    RD_REG_DWORD_RELAXED(req->req_q_out);
2161                if (req->ring_index < cnt)
2162                        req->cnt = cnt - req->ring_index;
2163                else
2164                        req->cnt = req->length -
2165                                (req->ring_index - cnt);
2166                if (req->cnt < (req_cnt + 2))
2167                        goto queuing_error;
2168        }
2169
2170        status |= QDSS_GOT_Q_SPACE;
2171
2172        /* Build header part of command packet (excluding the OPCODE). */
2173        req->current_outstanding_cmd = handle;
2174        req->outstanding_cmds[handle] = sp;
2175        sp->handle = handle;
2176        cmd->host_scribble = (unsigned char *)(unsigned long)handle;
2177        req->cnt -= req_cnt;
2178
2179        /* Fill-in common area */
2180        cmd_pkt = (struct cmd_type_crc_2 *)req->ring_ptr;
2181        cmd_pkt->handle = MAKE_HANDLE(req->id, handle);
2182
2183        clr_ptr = (uint32_t *)cmd_pkt + 2;
2184        memset(clr_ptr, 0, REQUEST_ENTRY_SIZE - 8);
2185
2186        /* Set NPORT-ID and LUN number. */
2187        cmd_pkt->nport_handle = cpu_to_le16(sp->fcport->loop_id);
2188        cmd_pkt->port_id[0] = sp->fcport->d_id.b.al_pa;
2189        cmd_pkt->port_id[1] = sp->fcport->d_id.b.area;
2190        cmd_pkt->port_id[2] = sp->fcport->d_id.b.domain;
2191
2192        int_to_scsilun(cmd->device->lun, &cmd_pkt->lun);
2193        host_to_fcp_swap((uint8_t *)&cmd_pkt->lun, sizeof(cmd_pkt->lun));
2194
2195        /* Total Data and protection segment(s) */
2196        cmd_pkt->dseg_count = cpu_to_le16(tot_dsds);
2197
2198        /* Build IOCB segments and adjust for data protection segments */
2199        if (qla24xx_build_scsi_crc_2_iocbs(sp, (struct cmd_type_crc_2 *)
2200            req->ring_ptr, tot_dsds, tot_prot_dsds, fw_prot_opts) !=
2201                QLA_SUCCESS)
2202                goto queuing_error;
2203
2204        cmd_pkt->entry_count = (uint8_t)req_cnt;
2205        cmd_pkt->timeout = cpu_to_le16(0);
2206        wmb();
2207
2208        /* Adjust ring index. */
2209        req->ring_index++;
2210        if (req->ring_index == req->length) {
2211                req->ring_index = 0;
2212                req->ring_ptr = req->ring;
2213        } else
2214                req->ring_ptr++;
2215
2216        /* Set chip new ring index. */
2217        WRT_REG_DWORD(req->req_q_in, req->ring_index);
2218
2219        /* Manage unprocessed RIO/ZIO commands in response queue. */
2220        if (vha->flags.process_response_queue &&
2221            rsp->ring_ptr->signature != RESPONSE_PROCESSED)
2222                qla24xx_process_response_queue(vha, rsp);
2223
2224        spin_unlock_irqrestore(&qpair->qp_lock, flags);
2225
2226        return QLA_SUCCESS;
2227
2228queuing_error:
2229        if (status & QDSS_GOT_Q_SPACE) {
2230                req->outstanding_cmds[handle] = NULL;
2231                req->cnt += req_cnt;
2232        }
2233        /* Cleanup will be performed by the caller (queuecommand) */
2234
2235        spin_unlock_irqrestore(&qpair->qp_lock, flags);
2236        return QLA_FUNCTION_FAILED;
2237}
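
/*
 * Editorial note: `DID_NO_CONNECT << 16` above follows the SCSI
 * midlayer convention that the host byte of cmd->result occupies
 * bits 16-23.  A hypothetical helper makes the encoding explicit:
 */
static inline int example_host_byte_result(int host_code)
{
        return host_code << 16;         /* host byte of cmd->result */
}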
2238
2239/* Generic Control-SRB manipulation functions. */
2240
2241/* hardware_lock assumed to be held. */
2242
2243void *
2244__qla2x00_alloc_iocbs(struct qla_qpair *qpair, srb_t *sp)
2245{
2246        scsi_qla_host_t *vha = qpair->vha;
2247        struct qla_hw_data *ha = vha->hw;
2248        struct req_que *req = qpair->req;
2249        device_reg_t *reg = ISP_QUE_REG(ha, req->id);
2250        uint32_t handle;
2251        request_t *pkt;
2252        uint16_t cnt, req_cnt;
2253
2254        pkt = NULL;
2255        req_cnt = 1;
2256        handle = 0;
2257
2258        if (sp && (sp->type != SRB_SCSI_CMD)) {
2259                /* Adjust entry-counts as needed. */
2260                req_cnt = sp->iocbs;
2261        }
2262
2263        /* Check for room on request queue. */
2264        if (req->cnt < req_cnt + 2) {
2265                if (qpair->use_shadow_reg)
2266                        cnt = *req->out_ptr;
2267                else if (ha->mqenable || IS_QLA83XX(ha) || IS_QLA27XX(ha) ||
2268                    IS_QLA28XX(ha))
2269                        cnt = RD_REG_DWORD(&reg->isp25mq.req_q_out);
2270                else if (IS_P3P_TYPE(ha))
2271                        cnt = RD_REG_DWORD(&reg->isp82.req_q_out);
2272                else if (IS_FWI2_CAPABLE(ha))
2273                        cnt = RD_REG_DWORD(&reg->isp24.req_q_out);
2274                else if (IS_QLAFX00(ha))
2275                        cnt = RD_REG_DWORD(&reg->ispfx00.req_q_out);
2276                else
2277                        cnt = qla2x00_debounce_register(
2278                            ISP_REQ_Q_OUT(ha, &reg->isp));
2279
2280                if (req->ring_index < cnt)
2281                        req->cnt = cnt - req->ring_index;
2282                else
2283                        req->cnt = req->length -
2284                            (req->ring_index - cnt);
2285        }
2286        if (req->cnt < req_cnt + 2)
2287                goto queuing_error;
2288
2289        if (sp) {
2290                handle = qla2xxx_get_next_handle(req);
2291                if (handle == 0) {
2292                        ql_log(ql_log_warn, vha, 0x700b,
2293                            "No room on outstanding cmd array.\n");
2294                        goto queuing_error;
2295                }
2296
2297                /* Prep command array. */
2298                req->current_outstanding_cmd = handle;
2299                req->outstanding_cmds[handle] = sp;
2300                sp->handle = handle;
2301        }
2302
2303        /* Prep packet */
2304        req->cnt -= req_cnt;
2305        pkt = req->ring_ptr;
2306        memset(pkt, 0, REQUEST_ENTRY_SIZE);
2307        if (IS_QLAFX00(ha)) {
2308                WRT_REG_BYTE((void __iomem *)&pkt->entry_count, req_cnt);
2309                WRT_REG_WORD((void __iomem *)&pkt->handle, handle);
2310        } else {
2311                pkt->entry_count = req_cnt;
2312                pkt->handle = handle;
2313        }
2314
2315        return pkt;
2316
2317queuing_error:
2318        qpair->tgt_counters.num_alloc_iocb_failed++;
2319        return pkt;
2320}
2321
2322void *
2323qla2x00_alloc_iocbs_ready(struct qla_qpair *qpair, srb_t *sp)
2324{
2325        scsi_qla_host_t *vha = qpair->vha;
2326
2327        if (qla2x00_reset_active(vha))
2328                return NULL;
2329
2330        return __qla2x00_alloc_iocbs(qpair, sp);
2331}
2332
2333void *
2334qla2x00_alloc_iocbs(struct scsi_qla_host *vha, srb_t *sp)
2335{
2336        return __qla2x00_alloc_iocbs(vha->hw->base_qpair, sp);
2337}
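
/*
 * Usage sketch (editorial): control-SRB producers allocate a request
 * entry under the appropriate lock and bail out on NULL; on failure
 * the allocator has already bumped num_alloc_iocb_failed:
 *
 *      pkt = qla2x00_alloc_iocbs(vha, sp);
 *      if (!pkt)
 *              goto queuing_error;
 */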
2338
2339static void
2340qla24xx_prli_iocb(srb_t *sp, struct logio_entry_24xx *logio)
2341{
2342        struct srb_iocb *lio = &sp->u.iocb_cmd;
2343
2344        logio->entry_type = LOGINOUT_PORT_IOCB_TYPE;
2345        logio->control_flags = cpu_to_le16(LCF_COMMAND_PRLI);
2346        if (lio->u.logio.flags & SRB_LOGIN_NVME_PRLI) {
2347                logio->control_flags |= LCF_NVME_PRLI;
2348                if (sp->vha->flags.nvme_first_burst)
2349                        logio->io_parameter[0] = NVME_PRLI_SP_FIRST_BURST;
2350        }
2351
2352        logio->nport_handle = cpu_to_le16(sp->fcport->loop_id);
2353        logio->port_id[0] = sp->fcport->d_id.b.al_pa;
2354        logio->port_id[1] = sp->fcport->d_id.b.area;
2355        logio->port_id[2] = sp->fcport->d_id.b.domain;
2356        logio->vp_index = sp->vha->vp_idx;
2357}
2358
2359static void
2360qla24xx_login_iocb(srb_t *sp, struct logio_entry_24xx *logio)
2361{
2362        struct srb_iocb *lio = &sp->u.iocb_cmd;
2363
2364        logio->entry_type = LOGINOUT_PORT_IOCB_TYPE;
2365        if (lio->u.logio.flags & SRB_LOGIN_PRLI_ONLY) {
2366                logio->control_flags = cpu_to_le16(LCF_COMMAND_PRLI);
2367        } else {
2368                logio->control_flags = cpu_to_le16(LCF_COMMAND_PLOGI);
2369                if (lio->u.logio.flags & SRB_LOGIN_COND_PLOGI)
2370                        logio->control_flags |= cpu_to_le16(LCF_COND_PLOGI);
2371                if (lio->u.logio.flags & SRB_LOGIN_SKIP_PRLI)
2372                        logio->control_flags |= cpu_to_le16(LCF_SKIP_PRLI);
2373        }
2374        logio->nport_handle = cpu_to_le16(sp->fcport->loop_id);
2375        logio->port_id[0] = sp->fcport->d_id.b.al_pa;
2376        logio->port_id[1] = sp->fcport->d_id.b.area;
2377        logio->port_id[2] = sp->fcport->d_id.b.domain;
2378        logio->vp_index = sp->vha->vp_idx;
2379}
2380
2381static void
2382qla2x00_login_iocb(srb_t *sp, struct mbx_entry *mbx)
2383{
2384        struct qla_hw_data *ha = sp->vha->hw;
2385        struct srb_iocb *lio = &sp->u.iocb_cmd;
2386        uint16_t opts;
2387
2388        mbx->entry_type = MBX_IOCB_TYPE;
2389        SET_TARGET_ID(ha, mbx->loop_id, sp->fcport->loop_id);
2390        mbx->mb0 = cpu_to_le16(MBC_LOGIN_FABRIC_PORT);
2391        opts = lio->u.logio.flags & SRB_LOGIN_COND_PLOGI ? BIT_0 : 0;
2392        opts |= lio->u.logio.flags & SRB_LOGIN_SKIP_PRLI ? BIT_1 : 0;
2393        if (HAS_EXTENDED_IDS(ha)) {
2394                mbx->mb1 = cpu_to_le16(sp->fcport->loop_id);
2395                mbx->mb10 = cpu_to_le16(opts);
2396        } else {
2397                mbx->mb1 = cpu_to_le16((sp->fcport->loop_id << 8) | opts);
2398        }
2399        mbx->mb2 = cpu_to_le16(sp->fcport->d_id.b.domain);
2400        mbx->mb3 = cpu_to_le16(sp->fcport->d_id.b.area << 8 |
2401            sp->fcport->d_id.b.al_pa);
2402        mbx->mb9 = cpu_to_le16(sp->vha->vp_idx);
2403}
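
/*
 * Editorial sketch: the mailbox-1 encoding above.  ISPs without
 * extended IDs pack the loop ID into the high byte alongside the
 * option bits, while extended-ID parts carry the options separately
 * in mailbox 10 (hypothetical helper):
 */
static inline uint16_t example_mb1_loopid(uint16_t loop_id, uint16_t opts,
                                          bool extended_ids)
{
        return extended_ids ? loop_id : (loop_id << 8) | opts;
}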
2404
2405static void
2406qla24xx_logout_iocb(srb_t *sp, struct logio_entry_24xx *logio)
2407{
2408        logio->entry_type = LOGINOUT_PORT_IOCB_TYPE;
2409        logio->control_flags =
2410            cpu_to_le16(LCF_COMMAND_LOGO|LCF_IMPL_LOGO);
2411        if (!sp->fcport->keep_nport_handle)
2412                logio->control_flags |= cpu_to_le16(LCF_FREE_NPORT);
2413        logio->nport_handle = cpu_to_le16(sp->fcport->loop_id);
2414        logio->port_id[0] = sp->fcport->d_id.b.al_pa;
2415        logio->port_id[1] = sp->fcport->d_id.b.area;
2416        logio->port_id[2] = sp->fcport->d_id.b.domain;
2417        logio->vp_index = sp->vha->vp_idx;
2418}
2419
2420static void
2421qla2x00_logout_iocb(srb_t *sp, struct mbx_entry *mbx)
2422{
2423        struct qla_hw_data *ha = sp->vha->hw;
2424
2425        mbx->entry_type = MBX_IOCB_TYPE;
2426        SET_TARGET_ID(ha, mbx->loop_id, sp->fcport->loop_id);
2427        mbx->mb0 = cpu_to_le16(MBC_LOGOUT_FABRIC_PORT);
2428        mbx->mb1 = HAS_EXTENDED_IDS(ha) ?
2429            cpu_to_le16(sp->fcport->loop_id) :
2430            cpu_to_le16(sp->fcport->loop_id << 8);
2431        mbx->mb2 = cpu_to_le16(sp->fcport->d_id.b.domain);
2432        mbx->mb3 = cpu_to_le16(sp->fcport->d_id.b.area << 8 |
2433            sp->fcport->d_id.b.al_pa);
2434        mbx->mb9 = cpu_to_le16(sp->vha->vp_idx);
2435        /* Implicit: mbx->mb10 = 0. */
2436}
2437
2438static void
2439qla24xx_adisc_iocb(srb_t *sp, struct logio_entry_24xx *logio)
2440{
2441        logio->entry_type = LOGINOUT_PORT_IOCB_TYPE;
2442        logio->control_flags = cpu_to_le16(LCF_COMMAND_ADISC);
2443        logio->nport_handle = cpu_to_le16(sp->fcport->loop_id);
2444        logio->vp_index = sp->vha->vp_idx;
2445}
2446
2447static void
2448qla2x00_adisc_iocb(srb_t *sp, struct mbx_entry *mbx)
2449{
2450        struct qla_hw_data *ha = sp->vha->hw;
2451
2452        mbx->entry_type = MBX_IOCB_TYPE;
2453        SET_TARGET_ID(ha, mbx->loop_id, sp->fcport->loop_id);
2454        mbx->mb0 = cpu_to_le16(MBC_GET_PORT_DATABASE);
2455        if (HAS_EXTENDED_IDS(ha)) {
2456                mbx->mb1 = cpu_to_le16(sp->fcport->loop_id);
2457                mbx->mb10 = cpu_to_le16(BIT_0);
2458        } else {
2459                mbx->mb1 = cpu_to_le16((sp->fcport->loop_id << 8) | BIT_0);
2460        }
2461        mbx->mb2 = cpu_to_le16(MSW(ha->async_pd_dma));
2462        mbx->mb3 = cpu_to_le16(LSW(ha->async_pd_dma));
2463        mbx->mb6 = cpu_to_le16(MSW(MSD(ha->async_pd_dma)));
2464        mbx->mb7 = cpu_to_le16(LSW(MSD(ha->async_pd_dma)));
2465        mbx->mb9 = cpu_to_le16(sp->vha->vp_idx);
2466}
2467
2468static void
2469qla24xx_tm_iocb(srb_t *sp, struct tsk_mgmt_entry *tsk)
2470{
2471        uint32_t flags;
2472        uint64_t lun;
2473        struct fc_port *fcport = sp->fcport;
2474        scsi_qla_host_t *vha = fcport->vha;
2475        struct qla_hw_data *ha = vha->hw;
2476        struct srb_iocb *iocb = &sp->u.iocb_cmd;
2477        struct req_que *req = vha->req;
2478
2479        flags = iocb->u.tmf.flags;
2480        lun = iocb->u.tmf.lun;
2481
2482        tsk->entry_type = TSK_MGMT_IOCB_TYPE;
2483        tsk->entry_count = 1;
2484        tsk->handle = MAKE_HANDLE(req->id, tsk->handle);
2485        tsk->nport_handle = cpu_to_le16(fcport->loop_id);
2486        tsk->timeout = cpu_to_le16(ha->r_a_tov / 10 * 2);
2487        tsk->control_flags = cpu_to_le32(flags);
2488        tsk->port_id[0] = fcport->d_id.b.al_pa;
2489        tsk->port_id[1] = fcport->d_id.b.area;
2490        tsk->port_id[2] = fcport->d_id.b.domain;
2491        tsk->vp_index = fcport->vha->vp_idx;
2492
2493        if (flags == TCF_LUN_RESET) {
2494                int_to_scsilun(lun, &tsk->lun);
2495                host_to_fcp_swap((uint8_t *)&tsk->lun,
2496                        sizeof(tsk->lun));
2497        }
2498}
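
/*
 * Editorial sketch: the task-management timeout above.  Assuming
 * r_a_tov is kept in 100 ms units (so dividing by 10 yields seconds),
 * the firmware is given two resource-allocation timeout periods:
 */
static inline uint16_t example_tmf_timeout(uint16_t r_a_tov)
{
        return r_a_tov / 10 * 2;        /* 2 * R_A_TOV, in seconds */
}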
2499
2500void qla2x00_init_timer(srb_t *sp, unsigned long tmo)
2501{
2502        timer_setup(&sp->u.iocb_cmd.timer, qla2x00_sp_timeout, 0);
2503        sp->u.iocb_cmd.timer.expires = jiffies + tmo * HZ;
2504        sp->free = qla2x00_sp_free;
2505        if (IS_QLAFX00(sp->vha->hw) && sp->type == SRB_FXIOCB_DCMD)
2506                init_completion(&sp->u.iocb_cmd.u.fxiocb.fxiocb_comp);
2507        sp->start_timer = 1;
2508}
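
/*
 * Editorial sketch: the expiry arithmetic above; a timeout given in
 * seconds becomes an absolute deadline in jiffies:
 */
static inline unsigned long example_deadline(unsigned long tmo_secs)
{
        return jiffies + tmo_secs * HZ; /* HZ jiffies per second */
}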
2509
2510static void qla2x00_els_dcmd_sp_free(srb_t *sp)
2511{
2512        struct srb_iocb *elsio = &sp->u.iocb_cmd;
2513
2514        kfree(sp->fcport);
2515
2516        if (elsio->u.els_logo.els_logo_pyld)
2517                dma_free_coherent(&sp->vha->hw->pdev->dev, DMA_POOL_SIZE,
2518                    elsio->u.els_logo.els_logo_pyld,
2519                    elsio->u.els_logo.els_logo_pyld_dma);
2520
2521        del_timer(&elsio->timer);
2522        qla2x00_rel_sp(sp);
2523}
2524
2525static void
2526qla2x00_els_dcmd_iocb_timeout(void *data)
2527{
2528        srb_t *sp = data;
2529        fc_port_t *fcport = sp->fcport;
2530        struct scsi_qla_host *vha = sp->vha;
2531        struct srb_iocb *lio = &sp->u.iocb_cmd;
2532
2533        ql_dbg(ql_dbg_io, vha, 0x3069,
2534            "%s Timeout, hdl=%x, portid=%02x%02x%02x\n",
2535            sp->name, sp->handle, fcport->d_id.b.domain, fcport->d_id.b.area,
2536            fcport->d_id.b.al_pa);
2537
2538        complete(&lio->u.els_logo.comp);
2539}
2540
2541static void qla2x00_els_dcmd_sp_done(srb_t *sp, int res)
2542{
2543        fc_port_t *fcport = sp->fcport;
2544        struct srb_iocb *lio = &sp->u.iocb_cmd;
2545        struct scsi_qla_host *vha = sp->vha;
2546
2547        ql_dbg(ql_dbg_io, vha, 0x3072,
2548            "%s hdl=%x, portid=%02x%02x%02x done\n",
2549            sp->name, sp->handle, fcport->d_id.b.domain,
2550            fcport->d_id.b.area, fcport->d_id.b.al_pa);
2551
2552        complete(&lio->u.els_logo.comp);
2553}
2554
2555int
2556qla24xx_els_dcmd_iocb(scsi_qla_host_t *vha, int els_opcode,
2557    port_id_t remote_did)
2558{
2559        srb_t *sp;
2560        fc_port_t *fcport = NULL;
2561        struct srb_iocb *elsio = NULL;
2562        struct qla_hw_data *ha = vha->hw;
2563        struct els_logo_payload logo_pyld;
2564        int rval = QLA_SUCCESS;
2565
2566        fcport = qla2x00_alloc_fcport(vha, GFP_KERNEL);
2567        if (!fcport) {
2568                ql_log(ql_log_info, vha, 0x70e5, "fcport allocation failed\n");
2569                return -ENOMEM;
2570        }
2571
2572        /* Alloc SRB structure */
2573        sp = qla2x00_get_sp(vha, fcport, GFP_KERNEL);
2574        if (!sp) {
2575                kfree(fcport);
2576                ql_log(ql_log_info, vha, 0x70e6,
2577                    "SRB allocation failed\n");
2578                return -ENOMEM;
2579        }
2580
2581        elsio = &sp->u.iocb_cmd;
2582        fcport->loop_id = 0xFFFF;
2583        fcport->d_id.b.domain = remote_did.b.domain;
2584        fcport->d_id.b.area = remote_did.b.area;
2585        fcport->d_id.b.al_pa = remote_did.b.al_pa;
2586
2587        ql_dbg(ql_dbg_io, vha, 0x3073, "portid=%02x%02x%02x done\n",
2588            fcport->d_id.b.domain, fcport->d_id.b.area, fcport->d_id.b.al_pa);
2589
2590        sp->type = SRB_ELS_DCMD;
2591        sp->name = "ELS_DCMD";
2592        sp->fcport = fcport;
2593        elsio->timeout = qla2x00_els_dcmd_iocb_timeout;
2594        qla2x00_init_timer(sp, ELS_DCMD_TIMEOUT);
2595        init_completion(&sp->u.iocb_cmd.u.els_logo.comp);
2596        sp->done = qla2x00_els_dcmd_sp_done;
2597        sp->free = qla2x00_els_dcmd_sp_free;
2598
2599        elsio->u.els_logo.els_logo_pyld = dma_alloc_coherent(&ha->pdev->dev,
2600                            DMA_POOL_SIZE, &elsio->u.els_logo.els_logo_pyld_dma,
2601                            GFP_KERNEL);
2602
2603        if (!elsio->u.els_logo.els_logo_pyld) {
2604                sp->free(sp);
2605                return QLA_FUNCTION_FAILED;
2606        }
2607
2608        memset(&logo_pyld, 0, sizeof(struct els_logo_payload));
2609
2610        elsio->u.els_logo.els_cmd = els_opcode;
2611        logo_pyld.opcode = els_opcode;
2612        logo_pyld.s_id[0] = vha->d_id.b.al_pa;
2613        logo_pyld.s_id[1] = vha->d_id.b.area;
2614        logo_pyld.s_id[2] = vha->d_id.b.domain;
2615        host_to_fcp_swap(logo_pyld.s_id, sizeof(uint32_t));
2616        memcpy(&logo_pyld.wwpn, vha->port_name, WWN_SIZE);
2617
2618        memcpy(elsio->u.els_logo.els_logo_pyld, &logo_pyld,
2619            sizeof(struct els_logo_payload));
2620
2621        rval = qla2x00_start_sp(sp);
2622        if (rval != QLA_SUCCESS) {
2623                sp->free(sp);
2624                return QLA_FUNCTION_FAILED;
2625        }
2626
2627        ql_dbg(ql_dbg_io, vha, 0x3074,
2628            "%s LOGO sent, hdl=%x, loopid=%x, portid=%02x%02x%02x.\n",
2629            sp->name, sp->handle, fcport->loop_id, fcport->d_id.b.domain,
2630            fcport->d_id.b.area, fcport->d_id.b.al_pa);
2631
2632        wait_for_completion(&elsio->u.els_logo.comp);
2633
2634        sp->free(sp);
2635        return rval;
2636}
2637
2638static void
2639qla24xx_els_logo_iocb(srb_t *sp, struct els_entry_24xx *els_iocb)
2640{
2641        scsi_qla_host_t *vha = sp->vha;
2642        struct srb_iocb *elsio = &sp->u.iocb_cmd;
2643
2644        els_iocb->entry_type = ELS_IOCB_TYPE;
2645        els_iocb->entry_count = 1;
2646        els_iocb->sys_define = 0;
2647        els_iocb->entry_status = 0;
2648        els_iocb->handle = sp->handle;
2649        els_iocb->nport_handle = cpu_to_le16(sp->fcport->loop_id);
2650        els_iocb->tx_dsd_count = 1;
2651        els_iocb->vp_index = vha->vp_idx;
2652        els_iocb->sof_type = EST_SOFI3;
2653        els_iocb->rx_dsd_count = 0;
2654        els_iocb->opcode = elsio->u.els_logo.els_cmd;
2655
2656        els_iocb->port_id[0] = sp->fcport->d_id.b.al_pa;
2657        els_iocb->port_id[1] = sp->fcport->d_id.b.area;
2658        els_iocb->port_id[2] = sp->fcport->d_id.b.domain;
2659        /* For the S_ID the byte order differs from that of the D_ID */
2660        els_iocb->s_id[1] = vha->d_id.b.al_pa;
2661        els_iocb->s_id[2] = vha->d_id.b.area;
2662        els_iocb->s_id[0] = vha->d_id.b.domain;
2663
2664        if (elsio->u.els_logo.els_cmd == ELS_DCMD_PLOGI) {
2665                els_iocb->control_flags = 0;
2666                els_iocb->tx_byte_count = els_iocb->tx_len =
2667                        cpu_to_le32(sizeof(struct els_plogi_payload));
2668                put_unaligned_le64(elsio->u.els_plogi.els_plogi_pyld_dma,
2669                                   &els_iocb->tx_address);
2670                els_iocb->rx_dsd_count = 1;
2671                els_iocb->rx_byte_count = els_iocb->rx_len =
2672                        cpu_to_le32(sizeof(struct els_plogi_payload));
2673                put_unaligned_le64(elsio->u.els_plogi.els_resp_pyld_dma,
2674                                   &els_iocb->rx_address);
2675
2676                ql_dbg(ql_dbg_io + ql_dbg_buffer, vha, 0x3073,
2677                    "PLOGI ELS IOCB:\n");
2678                ql_dump_buffer(ql_log_info, vha, 0x0109,
2679                    (uint8_t *)els_iocb, 0x70);
2680        } else {
2681                els_iocb->control_flags = 1 << 13;
2682                els_iocb->tx_byte_count =
2683                        cpu_to_le32(sizeof(struct els_logo_payload));
2684                put_unaligned_le64(elsio->u.els_logo.els_logo_pyld_dma,
2685                                   &els_iocb->tx_address);
2686                els_iocb->tx_len = cpu_to_le32(sizeof(struct els_logo_payload));
2687
2688                els_iocb->rx_byte_count = 0;
2689                els_iocb->rx_address = 0;
2690                els_iocb->rx_len = 0;
2691        }
2692
2693        sp->vha->qla_stats.control_requests++;
2694}
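
/*
 * Editorial sketch: the two port-ID layouts used above.  The D_ID
 * bytes are stored AL_PA/area/domain, while the S_ID field starts
 * with the domain (hypothetical helpers mirroring the assignments in
 * qla24xx_els_logo_iocb()):
 */
static inline void example_fill_did(uint8_t *p, port_id_t id)
{
        p[0] = id.b.al_pa;
        p[1] = id.b.area;
        p[2] = id.b.domain;
}

static inline void example_fill_sid(uint8_t *s, port_id_t id)
{
        s[0] = id.b.domain;
        s[1] = id.b.al_pa;
        s[2] = id.b.area;
}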
2695
2696static void
2697qla2x00_els_dcmd2_iocb_timeout(void *data)
2698{
2699        srb_t *sp = data;
2700        fc_port_t *fcport = sp->fcport;
2701        struct scsi_qla_host *vha = sp->vha;
2702        struct qla_hw_data *ha = vha->hw;
2703        unsigned long flags = 0;
2704        int res;
2705
2706        ql_dbg(ql_dbg_io + ql_dbg_disc, vha, 0x3069,
2707            "%s hdl=%x ELS Timeout, %8phC portid=%06x\n",
2708            sp->name, sp->handle, fcport->port_name, fcport->d_id.b24);
2709
2710        /* Abort the exchange */
2711        spin_lock_irqsave(&ha->hardware_lock, flags);
2712        res = ha->isp_ops->abort_command(sp);
2713        ql_dbg(ql_dbg_io, vha, 0x3070,
2714            "mbx abort_command %s\n",
2715            (res == QLA_SUCCESS) ? "successful" : "failed");
2716        spin_unlock_irqrestore(&ha->hardware_lock, flags);
2717
2718        sp->done(sp, QLA_FUNCTION_TIMEOUT);
2719}
2720
2721void qla2x00_els_dcmd2_free(scsi_qla_host_t *vha, struct els_plogi *els_plogi)
2722{
2723        if (els_plogi->els_plogi_pyld)
2724                dma_free_coherent(&vha->hw->pdev->dev,
2725                                  els_plogi->tx_size,
2726                                  els_plogi->els_plogi_pyld,
2727                                  els_plogi->els_plogi_pyld_dma);
2728
2729        if (els_plogi->els_resp_pyld)
2730                dma_free_coherent(&vha->hw->pdev->dev,
2731                                  els_plogi->rx_size,
2732                                  els_plogi->els_resp_pyld,
2733                                  els_plogi->els_resp_pyld_dma);
2734}
2735
2736static void qla2x00_els_dcmd2_sp_done(srb_t *sp, int res)
2737{
2738        fc_port_t *fcport = sp->fcport;
2739        struct srb_iocb *lio = &sp->u.iocb_cmd;
2740        struct scsi_qla_host *vha = sp->vha;
2741        struct event_arg ea;
2742        struct qla_work_evt *e;
2743
2744        ql_dbg(ql_dbg_disc, vha, 0x3072,
2745            "%s ELS done rc %d hdl=%x, portid=%06x %8phC\n",
2746            sp->name, res, sp->handle, fcport->d_id.b24, fcport->port_name);
2747
2748        fcport->flags &= ~(FCF_ASYNC_SENT|FCF_ASYNC_ACTIVE);
2749        del_timer(&sp->u.iocb_cmd.timer);
2750
2751        if (sp->flags & SRB_WAKEUP_ON_COMP)
2752                complete(&lio->u.els_plogi.comp);
2753        else {
2754                if (res) {
2755                        set_bit(RELOGIN_NEEDED, &vha->dpc_flags);
2756                } else {
2757                        memset(&ea, 0, sizeof(ea));
2758                        ea.fcport = fcport;
2759                        ea.data[0] = MBS_COMMAND_COMPLETE;
2760                        ea.sp = sp;
2761                        qla24xx_handle_plogi_done_event(vha, &ea);
2762                }
2763
2764                e = qla2x00_alloc_work(vha, QLA_EVT_UNMAP);
2765                if (!e) {
2766                        struct srb_iocb *elsio = &sp->u.iocb_cmd;
2767
2768                        qla2x00_els_dcmd2_free(vha, &elsio->u.els_plogi);
2769                        sp->free(sp);
2770                        return;
2771                }
2772                e->u.iosb.sp = sp;
2773                qla2x00_post_work(vha, e);
2774        }
2775}
2776
2777int
2778qla24xx_els_dcmd2_iocb(scsi_qla_host_t *vha, int els_opcode,
2779    fc_port_t *fcport, bool wait)
2780{
2781        srb_t *sp;
2782        struct srb_iocb *elsio = NULL;
2783        struct qla_hw_data *ha = vha->hw;
2784        int rval = QLA_SUCCESS;
2785        void    *ptr, *resp_ptr;
2786
2787        /* Alloc SRB structure */
2788        sp = qla2x00_get_sp(vha, fcport, GFP_KERNEL);
2789        if (!sp) {
2790                ql_log(ql_log_info, vha, 0x70e6,
2791                    "SRB allocation failed\n");
2792                return -ENOMEM;
2793        }
2794
2795        elsio = &sp->u.iocb_cmd;
2796        ql_dbg(ql_dbg_io, vha, 0x3073,
2797            "Enter: PLOGI portid=%06x\n", fcport->d_id.b24);
2798
2799        fcport->flags |= FCF_ASYNC_SENT;
2800        sp->type = SRB_ELS_DCMD;
2801        sp->name = "ELS_DCMD";
2802        sp->fcport = fcport;
2803
2804        elsio->timeout = qla2x00_els_dcmd2_iocb_timeout;
2805        init_completion(&elsio->u.els_plogi.comp);
2806        if (wait)
2807                sp->flags = SRB_WAKEUP_ON_COMP;
2808
2809        qla2x00_init_timer(sp, ELS_DCMD_TIMEOUT + 2);
2810
2811        sp->done = qla2x00_els_dcmd2_sp_done;
2812        elsio->u.els_plogi.tx_size = elsio->u.els_plogi.rx_size = DMA_POOL_SIZE;
2813
2814        ptr = elsio->u.els_plogi.els_plogi_pyld =
2815            dma_alloc_coherent(&ha->pdev->dev, DMA_POOL_SIZE,
2816                &elsio->u.els_plogi.els_plogi_pyld_dma, GFP_KERNEL);
2817
2818        if (!elsio->u.els_plogi.els_plogi_pyld) {
2819                rval = QLA_FUNCTION_FAILED;
2820                goto out;
2821        }
2822
2823        resp_ptr = elsio->u.els_plogi.els_resp_pyld =
2824            dma_alloc_coherent(&ha->pdev->dev, DMA_POOL_SIZE,
2825                &elsio->u.els_plogi.els_resp_pyld_dma, GFP_KERNEL);
2826
2827        if (!elsio->u.els_plogi.els_resp_pyld) {
2828                rval = QLA_FUNCTION_FAILED;
2829                goto out;
2830        }
2831
2832        ql_dbg(ql_dbg_io, vha, 0x3073, "PLOGI %p %p\n", ptr, resp_ptr);
2833
2834        memset(ptr, 0, sizeof(struct els_plogi_payload));
2835        memset(resp_ptr, 0, sizeof(struct els_plogi_payload));
2836        memcpy(elsio->u.els_plogi.els_plogi_pyld->data,
2837            &ha->plogi_els_payld.data,
2838            sizeof(elsio->u.els_plogi.els_plogi_pyld->data));
2839
2840        elsio->u.els_plogi.els_cmd = els_opcode;
2841        elsio->u.els_plogi.els_plogi_pyld->opcode = els_opcode;
2842
2843        ql_dbg(ql_dbg_disc + ql_dbg_buffer, vha, 0x3073, "PLOGI buffer:\n");
2844        ql_dump_buffer(ql_dbg_disc + ql_dbg_buffer, vha, 0x0109,
2845            (uint8_t *)elsio->u.els_plogi.els_plogi_pyld, 0x70);
2846
2847        rval = qla2x00_start_sp(sp);
2848        if (rval != QLA_SUCCESS) {
2849                rval = QLA_FUNCTION_FAILED;
2850        } else {
2851                ql_dbg(ql_dbg_disc, vha, 0x3074,
2852                    "%s PLOGI sent, hdl=%x, loopid=%x, to port_id %06x from port_id %06x\n",
2853                    sp->name, sp->handle, fcport->loop_id,
2854                    fcport->d_id.b24, vha->d_id.b24);
2855        }
2856
2857        if (wait) {
2858                wait_for_completion(&elsio->u.els_plogi.comp);
2859
2860                if (elsio->u.els_plogi.comp_status != CS_COMPLETE)
2861                        rval = QLA_FUNCTION_FAILED;
2862        } else {
2863                goto done;
2864        }
2865
2866out:
2867        fcport->flags &= ~(FCF_ASYNC_SENT);
2868        qla2x00_els_dcmd2_free(vha, &elsio->u.els_plogi);
2869        sp->free(sp);
2870done:
2871        return rval;
2872}
2873
2874static void
2875qla24xx_els_iocb(srb_t *sp, struct els_entry_24xx *els_iocb)
2876{
2877        struct bsg_job *bsg_job = sp->u.bsg_job;
2878        struct fc_bsg_request *bsg_request = bsg_job->request;
2879
2880        els_iocb->entry_type = ELS_IOCB_TYPE;
2881        els_iocb->entry_count = 1;
2882        els_iocb->sys_define = 0;
2883        els_iocb->entry_status = 0;
2884        els_iocb->handle = sp->handle;
2885        els_iocb->nport_handle = cpu_to_le16(sp->fcport->loop_id);
2886        els_iocb->tx_dsd_count = cpu_to_le16(bsg_job->request_payload.sg_cnt);
2887        els_iocb->vp_index = sp->vha->vp_idx;
2888        els_iocb->sof_type = EST_SOFI3;
2889        els_iocb->rx_dsd_count = cpu_to_le16(bsg_job->reply_payload.sg_cnt);
2890
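            /*
             * The ELS opcode comes from the bsg request: rport-directed
             * jobs (SRB_ELS_CMD_RPT) carry it in r_els.els_code, host
             * jobs in h_els.command_code.
             */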
2891        els_iocb->opcode =
2892            sp->type == SRB_ELS_CMD_RPT ?
2893            bsg_request->rqst_data.r_els.els_code :
2894            bsg_request->rqst_data.h_els.command_code;
2895        els_iocb->port_id[0] = sp->fcport->d_id.b.al_pa;
2896        els_iocb->port_id[1] = sp->fcport->d_id.b.area;
2897        els_iocb->port_id[2] = sp->fcport->d_id.b.domain;
2898        els_iocb->control_flags = 0;
2899        els_iocb->rx_byte_count =
2900            cpu_to_le32(bsg_job->reply_payload.payload_len);
2901        els_iocb->tx_byte_count =
2902            cpu_to_le32(bsg_job->request_payload.payload_len);
2903
2904        put_unaligned_le64(sg_dma_address(bsg_job->request_payload.sg_list),
2905                           &els_iocb->tx_address);
2906        els_iocb->tx_len =
2907            cpu_to_le32(sg_dma_len(bsg_job->request_payload.sg_list));
2908
2909        put_unaligned_le64(sg_dma_address(bsg_job->reply_payload.sg_list),
2910                           &els_iocb->rx_address);
2911        els_iocb->rx_len =
2912            cpu_to_le32(sg_dma_len(bsg_job->reply_payload.sg_list));
2913
2914        sp->vha->qla_stats.control_requests++;
2915}
2916
2917static void
2918qla2x00_ct_iocb(srb_t *sp, ms_iocb_entry_t *ct_iocb)
2919{
2920        uint16_t        avail_dsds;
2921        struct dsd64    *cur_dsd;
2922        struct scatterlist *sg;
2923        int index;
2924        uint16_t tot_dsds;
2925        scsi_qla_host_t *vha = sp->vha;
2926        struct qla_hw_data *ha = vha->hw;
2927        struct bsg_job *bsg_job = sp->u.bsg_job;
2928        int entry_count = 1;
2929
2930        memset(ct_iocb, 0, sizeof(ms_iocb_entry_t));
2931        ct_iocb->entry_type = CT_IOCB_TYPE;
2932        ct_iocb->entry_status = 0;
2933        ct_iocb->handle1 = sp->handle;
2934        SET_TARGET_ID(ha, ct_iocb->loop_id, sp->fcport->loop_id);
2935        ct_iocb->status = cpu_to_le16(0);
2936        ct_iocb->control_flags = cpu_to_le16(0);
2937        ct_iocb->timeout = 0;
2938        ct_iocb->cmd_dsd_count =
2939            cpu_to_le16(bsg_job->request_payload.sg_cnt);
2940        ct_iocb->total_dsd_count =
2941            cpu_to_le16(bsg_job->request_payload.sg_cnt + 1);
2942        ct_iocb->req_bytecount =
2943            cpu_to_le32(bsg_job->request_payload.payload_len);
2944        ct_iocb->rsp_bytecount =
2945            cpu_to_le32(bsg_job->reply_payload.payload_len);
2946
2947        put_unaligned_le64(sg_dma_address(bsg_job->request_payload.sg_list),
2948                           &ct_iocb->req_dsd.address);
2949        ct_iocb->req_dsd.length = ct_iocb->req_bytecount;
2950
2951        put_unaligned_le64(sg_dma_address(bsg_job->reply_payload.sg_list),
2952                           &ct_iocb->rsp_dsd.address);
2953        ct_iocb->rsp_dsd.length = ct_iocb->rsp_bytecount;
2954
2955        avail_dsds = 1;
2956        cur_dsd = &ct_iocb->rsp_dsd;
2957        index = 0;
2958        tot_dsds = bsg_job->reply_payload.sg_cnt;
2959
2960        for_each_sg(bsg_job->reply_payload.sg_list, sg, tot_dsds, index) {
2961                cont_a64_entry_t *cont_pkt;
2962
2963                /* Allocate additional continuation packets? */
2964                if (avail_dsds == 0) {
2965                        /*
2966                         * Five DSDs are available in the Cont.
2967                         * Type 1 IOCB.
2968                         */
2969                        cont_pkt = qla2x00_prep_cont_type1_iocb(vha,
2970                            vha->hw->req_q_map[0]);
2971                        cur_dsd = cont_pkt->dsd;
2972                        avail_dsds = 5;
2973                        entry_count++;
2974                }
2975
2976                append_dsd64(&cur_dsd, sg);
2977                avail_dsds--;
2978        }
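
            /*
             * Illustrative DSD accounting (sg counts hypothetical): the
             * base IOCB carries one reply DSD and each Continuation Type 1
             * IOCB adds five more, so 12 reply sg entries would yield
             * entry_count = 1 + 3 = 4.
             */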
2979        ct_iocb->entry_count = entry_count;
2980
2981        sp->vha->qla_stats.control_requests++;
2982}
2983
2984static void
2985qla24xx_ct_iocb(srb_t *sp, struct ct_entry_24xx *ct_iocb)
2986{
2987        uint16_t        avail_dsds;
2988        struct dsd64    *cur_dsd;
2989        struct scatterlist *sg;
2990        int index;
2991        uint16_t cmd_dsds, rsp_dsds;
2992        scsi_qla_host_t *vha = sp->vha;
2993        struct qla_hw_data *ha = vha->hw;
2994        struct bsg_job *bsg_job = sp->u.bsg_job;
2995        int entry_count = 1;
2996        cont_a64_entry_t *cont_pkt = NULL;
2997
2998        ct_iocb->entry_type = CT_IOCB_TYPE;
2999        ct_iocb->entry_status = 0;
3000        ct_iocb->sys_define = 0;
3001        ct_iocb->handle = sp->handle;
3002
3003        ct_iocb->nport_handle = cpu_to_le16(sp->fcport->loop_id);
3004        ct_iocb->vp_index = sp->vha->vp_idx;
3005        ct_iocb->comp_status = cpu_to_le16(0);
3006
3007        cmd_dsds = bsg_job->request_payload.sg_cnt;
3008        rsp_dsds = bsg_job->reply_payload.sg_cnt;
3009
3010        ct_iocb->cmd_dsd_count = cpu_to_le16(cmd_dsds);
3011        ct_iocb->timeout = 0;
3012        ct_iocb->rsp_dsd_count = cpu_to_le16(rsp_dsds);
3013        ct_iocb->cmd_byte_count =
3014            cpu_to_le32(bsg_job->request_payload.payload_len);
3015
3016        avail_dsds = 2;
3017        cur_dsd = ct_iocb->dsd;
3018        index = 0;
3019
3020        for_each_sg(bsg_job->request_payload.sg_list, sg, cmd_dsds, index) {
3021                /* Allocate additional continuation packets? */
3022                if (avail_dsds == 0) {
3023                        /*
3024                         * Five DSDs are available in the Cont.
3025                         * Type 1 IOCB.
3026                         */
3027                        cont_pkt = qla2x00_prep_cont_type1_iocb(
3028                            vha, ha->req_q_map[0]);
3029                        cur_dsd = cont_pkt->dsd;
3030                        avail_dsds = 5;
3031                        entry_count++;
3032                }
3033
3034                append_dsd64(&cur_dsd, sg);
3035                avail_dsds--;
3036        }
3037
3038        index = 0;
3039
3040        for_each_sg(bsg_job->reply_payload.sg_list, sg, rsp_dsds, index) {
3041                /* Allocate additional continuation packets? */
3042                if (avail_dsds == 0) {
3043                        /*
3044                         * Five DSDs are available in the Cont.
3045                         * Type 1 IOCB.
3046                         */
3047                        cont_pkt = qla2x00_prep_cont_type1_iocb(vha,
3048                            ha->req_q_map[0]);
3049                        cur_dsd = cont_pkt->dsd;
3050                        avail_dsds = 5;
3051                        entry_count++;
3052                }
3053
3054                append_dsd64(&cur_dsd, sg);
3055                avail_dsds--;
3056        }
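
            /*
             * Illustrative accounting (sg counts hypothetical): the base
             * IOCB provides two DSDs shared by the command and response
             * lists, so cmd_dsds = 1 and rsp_dsds = 4 consume both base
             * DSDs and spill three response DSDs into one continuation
             * entry, giving entry_count = 2.
             */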
3057        ct_iocb->entry_count = entry_count;
3058}
3059
3060/**
3061 * qla82xx_start_scsi() - Send a SCSI command to the ISP
3062 * @sp: command to send to the ISP
3063 *
3064 * Returns non-zero if a failure occurred, else zero.
3065 */
3066int
3067qla82xx_start_scsi(srb_t *sp)
3068{
3069        int             nseg;
3070        unsigned long   flags;
3071        struct scsi_cmnd *cmd;
3072        uint32_t        *clr_ptr;
3073        uint32_t        handle;
3074        uint16_t        cnt;
3075        uint16_t        req_cnt;
3076        uint16_t        tot_dsds;
3077        struct device_reg_82xx __iomem *reg;
3078        uint32_t dbval;
3079        uint32_t *fcp_dl;
3080        uint8_t additional_cdb_len;
3081        struct ct6_dsd *ctx;
3082        struct scsi_qla_host *vha = sp->vha;
3083        struct qla_hw_data *ha = vha->hw;
3084        struct req_que *req = NULL;
3085        struct rsp_que *rsp = NULL;
3086
3087        /* Setup device pointers. */
3088        reg = &ha->iobase->isp82;
3089        cmd = GET_CMD_SP(sp);
3090        req = vha->req;
3091        rsp = ha->rsp_q_map[0];
3092
3093        /* So we know we haven't pci_map'ed anything yet */
3094        tot_dsds = 0;
3095
3096        dbval = 0x04 | (ha->portnum << 5);
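            /*
             * Sketch of the doorbell-value layout, inferred from the
             * arithmetic here and the OR-ins before the doorbell write
             * below (example values hypothetical):
             *
             *   dbval = 0x04 | (portnum << 5) | (req->id << 8) |
             *           (req->ring_index << 16)
             *
             * e.g. portnum = 2, req->id = 1, ring_index = 0x2a gives
             * 0x04 | 0x40 | 0x100 | 0x2a0000 = 0x2a0144.
             */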
3097
3098        /* Send marker if required */
3099        if (vha->marker_needed != 0) {
3100                if (qla2x00_marker(vha, ha->base_qpair,
3101                        0, 0, MK_SYNC_ALL) != QLA_SUCCESS) {
3102                        ql_log(ql_log_warn, vha, 0x300c,
3103                            "qla2x00_marker failed for cmd=%p.\n", cmd);
3104                        return QLA_FUNCTION_FAILED;
3105                }
3106                vha->marker_needed = 0;
3107        }
3108
3109        /* Acquire ring specific lock */
3110        spin_lock_irqsave(&ha->hardware_lock, flags);
3111
3112        handle = qla2xxx_get_next_handle(req);
3113        if (handle == 0)
3114                goto queuing_error;
3115
3116        /* Map the sg table so we have an accurate count of sg entries needed */
3117        if (scsi_sg_count(cmd)) {
3118                nseg = dma_map_sg(&ha->pdev->dev, scsi_sglist(cmd),
3119                    scsi_sg_count(cmd), cmd->sc_data_direction);
3120                if (unlikely(!nseg))
3121                        goto queuing_error;
3122        } else
3123                nseg = 0;
3124
3125        tot_dsds = nseg;
3126
3127        if (tot_dsds > ql2xshiftctondsd) {
3128                struct cmd_type_6 *cmd_pkt;
3129                uint16_t more_dsd_lists = 0;
3130                struct dsd_dma *dsd_ptr;
3131                uint16_t i;
3132
3133                more_dsd_lists = qla24xx_calc_dsd_lists(tot_dsds);
3134                if ((more_dsd_lists + ha->gbl_dsd_inuse) >= NUM_DSD_CHAIN) {
3135                        ql_dbg(ql_dbg_io, vha, 0x300d,
3136            "Num of DSD list %d is more than %d for cmd=%p.\n",
3137                            more_dsd_lists + ha->gbl_dsd_inuse, NUM_DSD_CHAIN,
3138                            cmd);
3139                        goto queuing_error;
3140                }
3141
3142                if (more_dsd_lists <= ha->gbl_dsd_avail)
3143                        goto sufficient_dsds;
3144                else
3145                        more_dsd_lists -= ha->gbl_dsd_avail;
3146
3147                for (i = 0; i < more_dsd_lists; i++) {
3148                        dsd_ptr = kzalloc(sizeof(struct dsd_dma), GFP_ATOMIC);
3149                        if (!dsd_ptr) {
3150                                ql_log(ql_log_fatal, vha, 0x300e,
3151                                    "Failed to allocate memory for dsd_dma "
3152                                    "for cmd=%p.\n", cmd);
3153                                goto queuing_error;
3154                        }
3155
3156                        dsd_ptr->dsd_addr = dma_pool_alloc(ha->dl_dma_pool,
3157                                GFP_ATOMIC, &dsd_ptr->dsd_list_dma);
3158                        if (!dsd_ptr->dsd_addr) {
3159                                kfree(dsd_ptr);
3160                                ql_log(ql_log_fatal, vha, 0x300f,
3161                                    "Failed to allocate memory for dsd_addr "
3162                                    "for cmd=%p.\n", cmd);
3163                                goto queuing_error;
3164                        }
3165                        list_add_tail(&dsd_ptr->list, &ha->gbl_dsd_list);
3166                        ha->gbl_dsd_avail++;
3167                }
3168
3169sufficient_dsds:
3170                req_cnt = 1;
3171
3172                if (req->cnt < (req_cnt + 2)) {
3173                        cnt = (uint16_t)RD_REG_DWORD_RELAXED(
3174                                &reg->req_q_out[0]);
3175                        if (req->ring_index < cnt)
3176                                req->cnt = cnt - req->ring_index;
3177                        else
3178                                req->cnt = req->length -
3179                                        (req->ring_index - cnt);
3180                        if (req->cnt < (req_cnt + 2))
3181                                goto queuing_error;
3182                }
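
                    /*
                     * Ring-space sketch (indices hypothetical): with
                     * req->length = 2048, ring_index = 100 and chip out
                     * pointer cnt = 90, free slots = 2048 - (100 - 90) =
                     * 2038; with cnt = 110 they would be 110 - 100 = 10.
                     */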
3183
3184                ctx = sp->u.scmd.ct6_ctx =
3185                    mempool_alloc(ha->ctx_mempool, GFP_ATOMIC);
3186                if (!ctx) {
3187                        ql_log(ql_log_fatal, vha, 0x3010,
3188                            "Failed to allocate ctx for cmd=%p.\n", cmd);
3189                        goto queuing_error;
3190                }
3191
3192                memset(ctx, 0, sizeof(struct ct6_dsd));
3193                ctx->fcp_cmnd = dma_pool_zalloc(ha->fcp_cmnd_dma_pool,
3194                        GFP_ATOMIC, &ctx->fcp_cmnd_dma);
3195                if (!ctx->fcp_cmnd) {
3196                        ql_log(ql_log_fatal, vha, 0x3011,
3197                            "Failed to allocate fcp_cmnd for cmd=%p.\n", cmd);
3198                        goto queuing_error;
3199                }
3200
3201                /* Initialize the DSD list and dma handle */
3202                INIT_LIST_HEAD(&ctx->dsd_list);
3203                ctx->dsd_use_cnt = 0;
3204
3205                if (cmd->cmd_len > 16) {
3206                        additional_cdb_len = cmd->cmd_len - 16;
3207                        if ((cmd->cmd_len % 4) != 0) {
3208                                /* SCSI commands bigger than 16 bytes
3209                                 * must be a multiple of 4 bytes.
3210                                 */
3211                                ql_log(ql_log_warn, vha, 0x3012,
3212                                    "scsi cmd len %d not multiple of 4 "
3213                                    "for cmd=%p.\n", cmd->cmd_len, cmd);
3214                                goto queuing_error_fcp_cmnd;
3215                        }
3216                        ctx->fcp_cmnd_len = 12 + cmd->cmd_len + 4;
3217                } else {
3218                        additional_cdb_len = 0;
3219                        ctx->fcp_cmnd_len = 12 + 16 + 4;
3220                }
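
                    /*
                     * Worked sizing (CDB lengths hypothetical): a 16-byte
                     * CDB gives additional_cdb_len = 0 and fcp_cmnd_len =
                     * 12 + 16 + 4 = 32; a 32-byte CDB gives 16 and
                     * 12 + 32 + 4 = 48. The 12 bytes are the fixed FCP_CMND
                     * fields before the CDB; the trailing 4 hold FCP_DL,
                     * filled in below.
                     */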
3221
3222                cmd_pkt = (struct cmd_type_6 *)req->ring_ptr;
3223                cmd_pkt->handle = MAKE_HANDLE(req->id, handle);
3224
3225                /* Zero out remaining portion of packet. */
3226                /* tagged queuing modifier -- default is TSK_SIMPLE (0). */
3227                clr_ptr = (uint32_t *)cmd_pkt + 2;
3228                memset(clr_ptr, 0, REQUEST_ENTRY_SIZE - 8);
3229                cmd_pkt->dseg_count = cpu_to_le16(tot_dsds);
3230
3231                /* Set NPORT-ID and LUN number*/
3232                cmd_pkt->nport_handle = cpu_to_le16(sp->fcport->loop_id);
3233                cmd_pkt->port_id[0] = sp->fcport->d_id.b.al_pa;
3234                cmd_pkt->port_id[1] = sp->fcport->d_id.b.area;
3235                cmd_pkt->port_id[2] = sp->fcport->d_id.b.domain;
3236                cmd_pkt->vp_index = sp->vha->vp_idx;
3237
3238                /* Build IOCB segments */
3239                if (qla24xx_build_scsi_type_6_iocbs(sp, cmd_pkt, tot_dsds))
3240                        goto queuing_error_fcp_cmnd;
3241
3242                int_to_scsilun(cmd->device->lun, &cmd_pkt->lun);
3243                host_to_fcp_swap((uint8_t *)&cmd_pkt->lun, sizeof(cmd_pkt->lun));
3244
3245                /* build FCP_CMND IU */
3246                int_to_scsilun(cmd->device->lun, &ctx->fcp_cmnd->lun);
3247                ctx->fcp_cmnd->additional_cdb_len = additional_cdb_len;
3248
3249                if (cmd->sc_data_direction == DMA_TO_DEVICE)
3250                        ctx->fcp_cmnd->additional_cdb_len |= 1;
3251                else if (cmd->sc_data_direction == DMA_FROM_DEVICE)
3252                        ctx->fcp_cmnd->additional_cdb_len |= 2;
3253
3254                /* Populate the FCP_PRIO. */
3255                if (ha->flags.fcp_prio_enabled)
3256                        ctx->fcp_cmnd->task_attribute |=
3257                            sp->fcport->fcp_prio << 3;
3258
3259                memcpy(ctx->fcp_cmnd->cdb, cmd->cmnd, cmd->cmd_len);
3260
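                    /* FCP_DL (total data length) is the last field of the
                     * FCP_CMND IU and is big-endian per FC-FCP, hence the
                     * htonl() below rather than cpu_to_le32(). */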
3261                fcp_dl = (uint32_t *)(ctx->fcp_cmnd->cdb + 16 +
3262                    additional_cdb_len);
3263                *fcp_dl = htonl((uint32_t)scsi_bufflen(cmd));
3264
3265                cmd_pkt->fcp_cmnd_dseg_len = cpu_to_le16(ctx->fcp_cmnd_len);
3266                put_unaligned_le64(ctx->fcp_cmnd_dma,
3267                                   &cmd_pkt->fcp_cmnd_dseg_address);
3268
3269                sp->flags |= SRB_FCP_CMND_DMA_VALID;
3270                cmd_pkt->byte_count = cpu_to_le32((uint32_t)scsi_bufflen(cmd));
3271                /* Set total data segment count. */
3272                cmd_pkt->entry_count = (uint8_t)req_cnt;
3273                /* Specify response queue number where
3274                 * completion should happen.
3275                 */
3276                cmd_pkt->entry_status = (uint8_t) rsp->id;
3277        } else {
3278                struct cmd_type_7 *cmd_pkt;
3279
3280                req_cnt = qla24xx_calc_iocbs(vha, tot_dsds);
3281                if (req->cnt < (req_cnt + 2)) {
3282                        cnt = (uint16_t)RD_REG_DWORD_RELAXED(
3283                            &reg->req_q_out[0]);
3284                        if (req->ring_index < cnt)
3285                                req->cnt = cnt - req->ring_index;
3286                        else
3287                                req->cnt = req->length -
3288                                        (req->ring_index - cnt);
3289                }
3290                if (req->cnt < (req_cnt + 2))
3291                        goto queuing_error;
3292
3293                cmd_pkt = (struct cmd_type_7 *)req->ring_ptr;
3294                cmd_pkt->handle = MAKE_HANDLE(req->id, handle);
3295
3296                /* Zero out remaining portion of packet. */
3297                /* tagged queuing modifier -- default is TSK_SIMPLE (0). */
3298                clr_ptr = (uint32_t *)cmd_pkt + 2;
3299                memset(clr_ptr, 0, REQUEST_ENTRY_SIZE - 8);
3300                cmd_pkt->dseg_count = cpu_to_le16(tot_dsds);
3301
3302                /* Set NPORT-ID and LUN number*/
3303                cmd_pkt->nport_handle = cpu_to_le16(sp->fcport->loop_id);
3304                cmd_pkt->port_id[0] = sp->fcport->d_id.b.al_pa;
3305                cmd_pkt->port_id[1] = sp->fcport->d_id.b.area;
3306                cmd_pkt->port_id[2] = sp->fcport->d_id.b.domain;
3307                cmd_pkt->vp_index = sp->vha->vp_idx;
3308
3309                int_to_scsilun(cmd->device->lun, &cmd_pkt->lun);
3310                host_to_fcp_swap((uint8_t *)&cmd_pkt->lun,
3311                    sizeof(cmd_pkt->lun));
3312
3313                /* Populate the FCP_PRIO. */
3314                if (ha->flags.fcp_prio_enabled)
3315                        cmd_pkt->task |= sp->fcport->fcp_prio << 3;
3316
3317                /* Load SCSI command packet. */
3318                memcpy(cmd_pkt->fcp_cdb, cmd->cmnd, cmd->cmd_len);
3319                host_to_fcp_swap(cmd_pkt->fcp_cdb, sizeof(cmd_pkt->fcp_cdb));
3320
3321                cmd_pkt->byte_count = cpu_to_le32((uint32_t)scsi_bufflen(cmd));
3322
3323                /* Build IOCB segments */
3324                qla24xx_build_scsi_iocbs(sp, cmd_pkt, tot_dsds, req);
3325
3326                /* Set total data segment count. */
3327                cmd_pkt->entry_count = (uint8_t)req_cnt;
3328                /* Specify response queue number where
3329                 * completion should happen.
3330                 */
3331                cmd_pkt->entry_status = (uint8_t) rsp->id;
3332
3333        }
3334        /* Build command packet. */
3335        req->current_outstanding_cmd = handle;
3336        req->outstanding_cmds[handle] = sp;
3337        sp->handle = handle;
3338        cmd->host_scribble = (unsigned char *)(unsigned long)handle;
3339        req->cnt -= req_cnt;
3340        wmb();
3341
3342        /* Adjust ring index. */
3343        req->ring_index++;
3344        if (req->ring_index == req->length) {
3345                req->ring_index = 0;
3346                req->ring_ptr = req->ring;
3347        } else
3348                req->ring_ptr++;
3349
3350        sp->flags |= SRB_DMA_VALID;
3351
3352        /* Set chip new ring index. */
3353        /* Write, poll the read-back pointer, and rewrite until it matches. */
3354        dbval = dbval | (req->id << 8) | (req->ring_index << 16);
3355        if (ql2xdbwr)
3356                qla82xx_wr_32(ha, (uintptr_t __force)ha->nxdb_wr_ptr, dbval);
3357        else {
3358                WRT_REG_DWORD(ha->nxdb_wr_ptr, dbval);
3359                wmb();
3360                while (RD_REG_DWORD(ha->nxdb_rd_ptr) != dbval) {
3361                        WRT_REG_DWORD(ha->nxdb_wr_ptr, dbval);
3362                        wmb();
3363                }
3364        }
3365
3366        /* Manage unprocessed RIO/ZIO commands in response queue. */
3367        if (vha->flags.process_response_queue &&
3368            rsp->ring_ptr->signature != RESPONSE_PROCESSED)
3369                qla24xx_process_response_queue(vha, rsp);
3370
3371        spin_unlock_irqrestore(&ha->hardware_lock, flags);
3372        return QLA_SUCCESS;
3373
3374queuing_error_fcp_cmnd:
3375        dma_pool_free(ha->fcp_cmnd_dma_pool, ctx->fcp_cmnd, ctx->fcp_cmnd_dma);
3376queuing_error:
3377        if (tot_dsds)
3378                scsi_dma_unmap(cmd);
3379
3380        if (sp->u.scmd.crc_ctx) {
3381                mempool_free(sp->u.scmd.crc_ctx, ha->ctx_mempool);
3382                sp->u.scmd.crc_ctx = NULL;
3383        }
3384        spin_unlock_irqrestore(&ha->hardware_lock, flags);
3385
3386        return QLA_FUNCTION_FAILED;
3387}
3388
3389static void
3390qla24xx_abort_iocb(srb_t *sp, struct abort_entry_24xx *abt_iocb)
3391{
3392        struct srb_iocb *aio = &sp->u.iocb_cmd;
3393        scsi_qla_host_t *vha = sp->vha;
3394        struct req_que *req = sp->qpair->req;
3395
3396        memset(abt_iocb, 0, sizeof(struct abort_entry_24xx));
3397        abt_iocb->entry_type = ABORT_IOCB_TYPE;
3398        abt_iocb->entry_count = 1;
3399        abt_iocb->handle = cpu_to_le32(MAKE_HANDLE(req->id, sp->handle));
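            /*
             * MAKE_HANDLE() packs the request-queue id into the upper
             * 16 bits and the per-queue handle into the lower 16.
             */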
3400        if (sp->fcport) {
3401                abt_iocb->nport_handle = cpu_to_le16(sp->fcport->loop_id);
3402                abt_iocb->port_id[0] = sp->fcport->d_id.b.al_pa;
3403                abt_iocb->port_id[1] = sp->fcport->d_id.b.area;
3404                abt_iocb->port_id[2] = sp->fcport->d_id.b.domain;
3405        }
3406        abt_iocb->handle_to_abort =
3407            cpu_to_le32(MAKE_HANDLE(aio->u.abt.req_que_no,
3408                                    aio->u.abt.cmd_hndl));
3409        abt_iocb->vp_index = vha->vp_idx;
3410        abt_iocb->req_que_no = cpu_to_le16(aio->u.abt.req_que_no);
3411        /* Order IOCB memory writes before the command is queued to firmware. */
3412        wmb();
3413}
3414
3415static void
3416qla2x00_mb_iocb(srb_t *sp, struct mbx_24xx_entry *mbx)
3417{
3418        int i, sz;
3419
3420        mbx->entry_type = MBX_IOCB_TYPE;
3421        mbx->handle = sp->handle;
3422        sz = min(ARRAY_SIZE(mbx->mb), ARRAY_SIZE(sp->u.iocb_cmd.u.mbx.out_mb));
3423
3424        for (i = 0; i < sz; i++)
3425                mbx->mb[i] = cpu_to_le16(sp->u.iocb_cmd.u.mbx.out_mb[i]);
3426}
3427
3428static void
3429qla2x00_ctpthru_cmd_iocb(srb_t *sp, struct ct_entry_24xx *ct_pkt)
3430{
3431        sp->u.iocb_cmd.u.ctarg.iocb = ct_pkt;
3432        qla24xx_prep_ms_iocb(sp->vha, &sp->u.iocb_cmd.u.ctarg);
3433        ct_pkt->handle = sp->handle;
3434}
3435
3436static void qla2x00_send_notify_ack_iocb(srb_t *sp,
3437        struct nack_to_isp *nack)
3438{
3439        struct imm_ntfy_from_isp *ntfy = sp->u.iocb_cmd.u.nack.ntfy;
3440
3441        nack->entry_type = NOTIFY_ACK_TYPE;
3442        nack->entry_count = 1;
3443        nack->ox_id = ntfy->ox_id;
3444
3445        nack->u.isp24.handle = sp->handle;
3446        nack->u.isp24.nport_handle = ntfy->u.isp24.nport_handle;
3447        if (le16_to_cpu(ntfy->u.isp24.status) == IMM_NTFY_ELS) {
3448                nack->u.isp24.flags = ntfy->u.isp24.flags &
3449                        cpu_to_le32(NOTIFY24XX_FLAGS_PUREX_IOCB);
3450        }
3451        nack->u.isp24.srr_rx_id = ntfy->u.isp24.srr_rx_id;
3452        nack->u.isp24.status = ntfy->u.isp24.status;
3453        nack->u.isp24.status_subcode = ntfy->u.isp24.status_subcode;
3454        nack->u.isp24.fw_handle = ntfy->u.isp24.fw_handle;
3455        nack->u.isp24.exchange_address = ntfy->u.isp24.exchange_address;
3456        nack->u.isp24.srr_rel_offs = ntfy->u.isp24.srr_rel_offs;
3457        nack->u.isp24.srr_ui = ntfy->u.isp24.srr_ui;
3458        nack->u.isp24.srr_flags = 0;
3459        nack->u.isp24.srr_reject_code = 0;
3460        nack->u.isp24.srr_reject_code_expl = 0;
3461        nack->u.isp24.vp_index = ntfy->u.isp24.vp_index;
3462}
3463
3464/*
3465 * Build NVME LS request
3466 */
3467static int
3468qla_nvme_ls(srb_t *sp, struct pt_ls4_request *cmd_pkt)
3469{
3470        struct srb_iocb *nvme;
3471        int     rval = QLA_SUCCESS;
3472
3473        nvme = &sp->u.iocb_cmd;
3474        cmd_pkt->entry_type = PT_LS4_REQUEST;
3475        cmd_pkt->entry_count = 1;
3476        cmd_pkt->control_flags = CF_LS4_ORIGINATOR << CF_LS4_SHIFT;
3477
3478        cmd_pkt->timeout = cpu_to_le16(nvme->u.nvme.timeout_sec);
3479        cmd_pkt->nport_handle = cpu_to_le16(sp->fcport->loop_id);
3480        cmd_pkt->vp_index = sp->fcport->vha->vp_idx;
3481
3482        cmd_pkt->tx_dseg_count = 1;
3483        cmd_pkt->tx_byte_count = nvme->u.nvme.cmd_len;
3484        cmd_pkt->dsd[0].length = nvme->u.nvme.cmd_len;
3485        put_unaligned_le64(nvme->u.nvme.cmd_dma, &cmd_pkt->dsd[0].address);
3486
3487        cmd_pkt->rx_dseg_count = 1;
3488        cmd_pkt->rx_byte_count = nvme->u.nvme.rsp_len;
3489        cmd_pkt->dsd[1].length  = nvme->u.nvme.rsp_len;
3490        put_unaligned_le64(nvme->u.nvme.rsp_dma, &cmd_pkt->dsd[1].address);
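
            /*
             * Layout sketch: dsd[0] describes the NVMe-LS command buffer
             * and dsd[1] the response buffer, one DSD each, so no
             * continuation IOCBs are needed for this request.
             */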
3491
3492        return rval;
3493}
3494
3495static void
3496qla25xx_ctrlvp_iocb(srb_t *sp, struct vp_ctrl_entry_24xx *vce)
3497{
3498        int map, pos;
3499
3500        vce->entry_type = VP_CTRL_IOCB_TYPE;
3501        vce->handle = sp->handle;
3502        vce->entry_count = 1;
3503        vce->command = cpu_to_le16(sp->u.iocb_cmd.u.ctrlvp.cmd);
3504        vce->vp_count = cpu_to_le16(1);
3505
3506        /*
3507         * The index map in the firmware starts at 1, so decrement the
3508         * index; this is OK because index 0 is never used.
3509         */
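            /*
             * Worked example (vp_index hypothetical): vp_index = 9 gives
             * map = (9 - 1) / 8 = 1 and pos = (9 - 1) & 7 = 0, so
             * vp_idx_map[1] |= 0x01.
             */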
3510        map = (sp->u.iocb_cmd.u.ctrlvp.vp_index - 1) / 8;
3511        pos = (sp->u.iocb_cmd.u.ctrlvp.vp_index - 1) & 7;
3512        vce->vp_idx_map[map] |= 1 << pos;
3513}
3514
3515static void
3516qla24xx_prlo_iocb(srb_t *sp, struct logio_entry_24xx *logio)
3517{
3518        logio->entry_type = LOGINOUT_PORT_IOCB_TYPE;
3519        logio->control_flags =
3520            cpu_to_le16(LCF_COMMAND_PRLO|LCF_IMPL_PRLO);
3521
3522        logio->nport_handle = cpu_to_le16(sp->fcport->loop_id);
3523        logio->port_id[0] = sp->fcport->d_id.b.al_pa;
3524        logio->port_id[1] = sp->fcport->d_id.b.area;
3525        logio->port_id[2] = sp->fcport->d_id.b.domain;
3526        logio->vp_index = sp->fcport->vha->vp_idx;
3527}
3528
3529int
3530qla2x00_start_sp(srb_t *sp)
3531{
3532        int rval = QLA_SUCCESS;
3533        scsi_qla_host_t *vha = sp->vha;
3534        struct qla_hw_data *ha = vha->hw;
3535        struct qla_qpair *qp = sp->qpair;
3536        void *pkt;
3537        unsigned long flags;
3538
3539        spin_lock_irqsave(qp->qp_lock_ptr, flags);
3540        pkt = __qla2x00_alloc_iocbs(sp->qpair, sp);
3541        if (!pkt) {
3542                rval = EAGAIN;
3543                ql_log(ql_log_warn, vha, 0x700c,
3544                    "qla2x00_alloc_iocbs failed.\n");
3545                goto done;
3546        }
3547
3548        switch (sp->type) {
3549        case SRB_LOGIN_CMD:
3550                IS_FWI2_CAPABLE(ha) ?
3551                    qla24xx_login_iocb(sp, pkt) :
3552                    qla2x00_login_iocb(sp, pkt);
3553                break;
3554        case SRB_PRLI_CMD:
3555                qla24xx_prli_iocb(sp, pkt);
3556                break;
3557        case SRB_LOGOUT_CMD:
3558                IS_FWI2_CAPABLE(ha) ?
3559                    qla24xx_logout_iocb(sp, pkt) :
3560                    qla2x00_logout_iocb(sp, pkt);
3561                break;
3562        case SRB_ELS_CMD_RPT:
3563        case SRB_ELS_CMD_HST:
3564                qla24xx_els_iocb(sp, pkt);
3565                break;
3566        case SRB_CT_CMD:
3567                IS_FWI2_CAPABLE(ha) ?
3568                    qla24xx_ct_iocb(sp, pkt) :
3569                    qla2x00_ct_iocb(sp, pkt);
3570                break;
3571        case SRB_ADISC_CMD:
3572                IS_FWI2_CAPABLE(ha) ?
3573                    qla24xx_adisc_iocb(sp, pkt) :
3574                    qla2x00_adisc_iocb(sp, pkt);
3575                break;
3576        case SRB_TM_CMD:
3577                IS_QLAFX00(ha) ?
3578                    qlafx00_tm_iocb(sp, pkt) :
3579                    qla24xx_tm_iocb(sp, pkt);
3580                break;
3581        case SRB_FXIOCB_DCMD:
3582        case SRB_FXIOCB_BCMD:
3583                qlafx00_fxdisc_iocb(sp, pkt);
3584                break;
3585        case SRB_NVME_LS:
3586                qla_nvme_ls(sp, pkt);
3587                break;
3588        case SRB_ABT_CMD:
3589                IS_QLAFX00(ha) ?
3590                        qlafx00_abort_iocb(sp, pkt) :
3591                        qla24xx_abort_iocb(sp, pkt);
3592                break;
3593        case SRB_ELS_DCMD:
3594                qla24xx_els_logo_iocb(sp, pkt);
3595                break;
3596        case SRB_CT_PTHRU_CMD:
3597                qla2x00_ctpthru_cmd_iocb(sp, pkt);
3598                break;
3599        case SRB_MB_IOCB:
3600                qla2x00_mb_iocb(sp, pkt);
3601                break;
3602        case SRB_NACK_PLOGI:
3603        case SRB_NACK_PRLI:
3604        case SRB_NACK_LOGO:
3605                qla2x00_send_notify_ack_iocb(sp, pkt);
3606                break;
3607        case SRB_CTRL_VP:
3608                qla25xx_ctrlvp_iocb(sp, pkt);
3609                break;
3610        case SRB_PRLO_CMD:
3611                qla24xx_prlo_iocb(sp, pkt);
3612                break;
3613        default:
3614                break;
3615        }
3616
3617        if (sp->start_timer)
3618                add_timer(&sp->u.iocb_cmd.timer);
3619
3620        wmb();
3621        qla2x00_start_iocbs(vha, qp->req);
3622done:
3623        spin_unlock_irqrestore(qp->qp_lock_ptr, flags);
3624        return rval;
3625}
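
    /*
     * Typical caller pattern for qla2x00_start_sp(), as a sketch (the SRB
     * type and the completion-callback name below are illustrative):
     *
     *     sp = qla2x00_get_sp(vha, fcport, GFP_KERNEL);
     *     sp->type = SRB_PRLO_CMD;
     *     sp->name = "prlo";
     *     sp->done = my_prlo_done;    (hypothetical callback)
     *     if (qla2x00_start_sp(sp) != QLA_SUCCESS)
     *             sp->free(sp);
     *
     * qla24xx_els_dcmd2_iocb() above follows this shape.
     */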
3626
3627static void
3628qla25xx_build_bidir_iocb(srb_t *sp, struct scsi_qla_host *vha,
3629                                struct cmd_bidir *cmd_pkt, uint32_t tot_dsds)
3630{
3631        uint16_t avail_dsds;
3632        struct dsd64 *cur_dsd;
3633        uint32_t req_data_len = 0;
3634        uint32_t rsp_data_len = 0;
3635        struct scatterlist *sg;
3636        int index;
3637        int entry_count = 1;
3638        struct bsg_job *bsg_job = sp->u.bsg_job;
3639
3640        /* Update entry type to indicate bidir command */
3641        put_unaligned_le32(COMMAND_BIDIRECTIONAL, &cmd_pkt->entry_type);
3642
3643        /* Set the transfer direction; for a bidirectional command both
3644         * flags are set. Also set the BD_WRAP_BACK flag; the firmware
3645         * takes care of assigning DID=SID for outgoing pkts.
3646         */
3647        cmd_pkt->wr_dseg_count = cpu_to_le16(bsg_job->request_payload.sg_cnt);
3648        cmd_pkt->rd_dseg_count = cpu_to_le16(bsg_job->reply_payload.sg_cnt);
3649        cmd_pkt->control_flags = cpu_to_le16(BD_WRITE_DATA | BD_READ_DATA |
3650                                                        BD_WRAP_BACK);
3651
3652        req_data_len = rsp_data_len = bsg_job->request_payload.payload_len;
3653        cmd_pkt->wr_byte_count = cpu_to_le32(req_data_len);
3654        cmd_pkt->rd_byte_count = cpu_to_le32(rsp_data_len);
3655        cmd_pkt->timeout = cpu_to_le16(qla2x00_get_async_timeout(vha) + 2);
3656
3657        vha->bidi_stats.transfer_bytes += req_data_len;
3658        vha->bidi_stats.io_count++;
3659
3660        vha->qla_stats.output_bytes += req_data_len;
3661        vha->qla_stats.output_requests++;
3662
3663        /* Only one DSD is available in the bidirectional IOCB; the
3664         * remaining DSDs are bundled in continuation IOCBs.
3665         */
3666        avail_dsds = 1;
3667        cur_dsd = &cmd_pkt->fcp_dsd;
3668
3669        index = 0;
3670
3671        for_each_sg(bsg_job->request_payload.sg_list, sg,
3672                                bsg_job->request_payload.sg_cnt, index) {
3673                cont_a64_entry_t *cont_pkt;
3674
3675                /* Allocate additional continuation packets */
3676                if (avail_dsds == 0) {
3677                        /* Continuation type 1 IOCB can accommodate
3678                         * 5 DSDs.
3679                         */
3680                        cont_pkt = qla2x00_prep_cont_type1_iocb(vha, vha->req);
3681                        cur_dsd = cont_pkt->dsd;
3682                        avail_dsds = 5;
3683                        entry_count++;
3684                }
3685                append_dsd64(&cur_dsd, sg);
3686                avail_dsds--;
3687        }
3688        /* For a read request the DSDs always go to a continuation IOCB
3689         * and follow the write DSDs. If there is room on the current
3690         * IOCB they are added to it; otherwise a new continuation IOCB
3691         * is allocated.
3692         */
3693        for_each_sg(bsg_job->reply_payload.sg_list, sg,
3694                                bsg_job->reply_payload.sg_cnt, index) {
3695                cont_a64_entry_t *cont_pkt;
3696
3697                /* Allocate additional continuation packets */
3698                if (avail_dsds == 0) {
3699                        /* Continuation type 1 IOCB can accommodate
3700                         * 5 DSDs.
3701                         */
3702                        cont_pkt = qla2x00_prep_cont_type1_iocb(vha, vha->req);
3703                        cur_dsd = cont_pkt->dsd;
3704                        avail_dsds = 5;
3705                        entry_count++;
3706                }
3707                append_dsd64(&cur_dsd, sg);
3708                avail_dsds--;
3709        }
3710        /* This value should be the same as the number of IOCBs required for this cmd */
3711        cmd_pkt->entry_count = entry_count;
3712}
3713
3714int
3715qla2x00_start_bidir(srb_t *sp, struct scsi_qla_host *vha, uint32_t tot_dsds)
3716{
3717
3718        struct qla_hw_data *ha = vha->hw;
3719        unsigned long flags;
3720        uint32_t handle;
3721        uint16_t req_cnt;
3722        uint16_t cnt;
3723        uint32_t *clr_ptr;
3724        struct cmd_bidir *cmd_pkt = NULL;
3725        struct rsp_que *rsp;
3726        struct req_que *req;
3727        int rval = EXT_STATUS_OK;
3728
3729        rval = QLA_SUCCESS;
3730
3731        rsp = ha->rsp_q_map[0];
3732        req = vha->req;
3733
3734        /* Send marker if required */
3735        if (vha->marker_needed != 0) {
3736                if (qla2x00_marker(vha, ha->base_qpair,
3737                        0, 0, MK_SYNC_ALL) != QLA_SUCCESS)
3738                        return EXT_STATUS_MAILBOX;
3739                vha->marker_needed = 0;
3740        }
3741
3742        /* Acquire ring specific lock */
3743        spin_lock_irqsave(&ha->hardware_lock, flags);
3744
3745        handle = qla2xxx_get_next_handle(req);
3746        if (handle == 0) {
3747                rval = EXT_STATUS_BUSY;
3748                goto queuing_error;
3749        }
3750
3751        /* Calculate number of IOCB required */
3752        req_cnt = qla24xx_calc_iocbs(vha, tot_dsds);
3753
3754        /* Check for room on request queue. */
3755        if (req->cnt < req_cnt + 2) {
3756                cnt = IS_SHADOW_REG_CAPABLE(ha) ? *req->out_ptr :
3757                    RD_REG_DWORD_RELAXED(req->req_q_out);
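                    /* On shadow-register capable ISPs the firmware posts its
                     * queue out-pointer into host memory (*req->out_ptr), so
                     * it can be read without an MMIO access. */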
3758                if  (req->ring_index < cnt)
3759                        req->cnt = cnt - req->ring_index;
3760                else
3761                        req->cnt = req->length -
3762                                (req->ring_index - cnt);
3763        }
3764        if (req->cnt < req_cnt + 2) {
3765                rval = EXT_STATUS_BUSY;
3766                goto queuing_error;
3767        }
3768
3769        cmd_pkt = (struct cmd_bidir *)req->ring_ptr;
3770        cmd_pkt->handle = MAKE_HANDLE(req->id, handle);
3771
3772        /* Zero out remaining portion of packet. */
3773        /* tagged queuing modifier -- default is TSK_SIMPLE (0). */
3774        clr_ptr = (uint32_t *)cmd_pkt + 2;
3775        memset(clr_ptr, 0, REQUEST_ENTRY_SIZE - 8);
3776
3777        /* Set NPORT-ID  (of vha)*/
3778        cmd_pkt->nport_handle = cpu_to_le16(vha->self_login_loop_id);
3779        cmd_pkt->port_id[0] = vha->d_id.b.al_pa;
3780        cmd_pkt->port_id[1] = vha->d_id.b.area;
3781        cmd_pkt->port_id[2] = vha->d_id.b.domain;
3782
3783        qla25xx_build_bidir_iocb(sp, vha, cmd_pkt, tot_dsds);
3784        cmd_pkt->entry_status = (uint8_t) rsp->id;
3785        /* Build command packet. */
3786        req->current_outstanding_cmd = handle;
3787        req->outstanding_cmds[handle] = sp;
3788        sp->handle = handle;
3789        req->cnt -= req_cnt;
3790
3791        /* Send the command to the firmware */
3792        wmb();
3793        qla2x00_start_iocbs(vha, req);
3794queuing_error:
3795        spin_unlock_irqrestore(&ha->hardware_lock, flags);
3796        return rval;
3797}
3798