linux/drivers/scsi/qla2xxx/qla_bsg.c
   1/*
   2 * QLogic Fibre Channel HBA Driver
   3 * Copyright (c) 2003-2012 QLogic Corporation
   4 *
   5 * See LICENSE.qla2xxx for copyright and licensing details.
   6 */
   7#include "qla_def.h"
   8
   9#include <linux/kthread.h>
  10#include <linux/vmalloc.h>
  11#include <linux/delay.h>
  12
  13/* BSG support for ELS/CT pass through */
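/*
 * Completion callback shared by the BSG pass-through SRBs: record the
 * result in the bsg reply, signal the FC transport via job_done() and
 * release the SRB.
 */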
  14void
  15qla2x00_bsg_job_done(void *data, void *ptr, int res)
  16{
  17        srb_t *sp = (srb_t *)ptr;
  18        struct scsi_qla_host *vha = (scsi_qla_host_t *)data;
  19        struct fc_bsg_job *bsg_job = sp->u.bsg_job;
  20
  21        bsg_job->reply->result = res;
  22        bsg_job->job_done(bsg_job);
  23        sp->free(vha, sp);
  24}
  25
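/*
 * Release callback for BSG pass-through SRBs: unmap the request and
 * reply scatter-gather lists (for ISPFX00 commands only the directions
 * flagged in the request), free the dummy fcport used by host-based
 * CT/ELS/FX commands and return the SRB to the pool.
 */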
  26void
  27qla2x00_bsg_sp_free(void *data, void *ptr)
  28{
  29        srb_t *sp = (srb_t *)ptr;
  30        struct scsi_qla_host *vha = sp->fcport->vha;
  31        struct fc_bsg_job *bsg_job = sp->u.bsg_job;
  32        struct qla_hw_data *ha = vha->hw;
  33        struct qla_mt_iocb_rqst_fx00 *piocb_rqst;
  34
  35        if (sp->type == SRB_FXIOCB_BCMD) {
  36                piocb_rqst = (struct qla_mt_iocb_rqst_fx00 *)
  37                    &bsg_job->request->rqst_data.h_vendor.vendor_cmd[1];
  38
  39                if (piocb_rqst->flags & SRB_FXDISC_REQ_DMA_VALID)
  40                        dma_unmap_sg(&ha->pdev->dev,
  41                            bsg_job->request_payload.sg_list,
  42                            bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE);
  43
  44                if (piocb_rqst->flags & SRB_FXDISC_RESP_DMA_VALID)
  45                        dma_unmap_sg(&ha->pdev->dev,
  46                            bsg_job->reply_payload.sg_list,
  47                            bsg_job->reply_payload.sg_cnt, DMA_FROM_DEVICE);
  48        } else {
  49                dma_unmap_sg(&ha->pdev->dev, bsg_job->request_payload.sg_list,
  50                    bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE);
  51
  52                dma_unmap_sg(&ha->pdev->dev, bsg_job->reply_payload.sg_list,
  53                    bsg_job->reply_payload.sg_cnt, DMA_FROM_DEVICE);
  54        }
  55
  56        if (sp->type == SRB_CT_CMD ||
  57            sp->type == SRB_FXIOCB_BCMD ||
  58            sp->type == SRB_ELS_CMD_HST)
  59                kfree(sp->fcport);
  60        qla2x00_rel_sp(vha, sp);
  61}
  62
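/*
 * Validate FCP priority configuration data: reject a blank flash
 * region (0xFFFFFFFF) or a header other than "HQOS"; when @flag is
 * set, additionally require at least one entry tagged valid.  Returns
 * 1 if the data is usable, 0 otherwise.
 */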
  63int
  64qla24xx_fcp_prio_cfg_valid(scsi_qla_host_t *vha,
  65        struct qla_fcp_prio_cfg *pri_cfg, uint8_t flag)
  66{
  67        int i, ret, num_valid;
  68        uint8_t *bcode;
  69        struct qla_fcp_prio_entry *pri_entry;
  70        uint32_t *bcode_val_ptr, bcode_val;
  71
  72        ret = 1;
  73        num_valid = 0;
  74        bcode = (uint8_t *)pri_cfg;
  75        bcode_val_ptr = (uint32_t *)pri_cfg;
  76        bcode_val = (uint32_t)(*bcode_val_ptr);
  77
  78        if (bcode_val == 0xFFFFFFFF) {
  79                /* No FCP Priority config data in flash */
  80                ql_dbg(ql_dbg_user, vha, 0x7051,
  81                    "No FCP Priority config data.\n");
  82                return 0;
  83        }
  84
  85        if (bcode[0] != 'H' || bcode[1] != 'Q' || bcode[2] != 'O' ||
  86                        bcode[3] != 'S') {
  87                /* Invalid FCP priority data header*/
  88                ql_dbg(ql_dbg_user, vha, 0x7052,
  89                    "Invalid FCP Priority data header. bcode=0x%x.\n",
  90                    bcode_val);
  91                return 0;
  92        }
  93        if (flag != 1)
  94                return ret;
  95
  96        pri_entry = &pri_cfg->entry[0];
  97        for (i = 0; i < pri_cfg->num_entries; i++) {
  98                if (pri_entry->flags & FCP_PRIO_ENTRY_TAG_VALID)
  99                        num_valid++;
 100                pri_entry++;
 101        }
 102
 103        if (num_valid == 0) {
 104                /* No valid FCP priority data entries */
 105                ql_dbg(ql_dbg_user, vha, 0x7053,
 106                    "No valid FCP Priority data entries.\n");
 107                ret = 0;
 108        } else {
 109                /* FCP priority data is valid */
 110                ql_dbg(ql_dbg_user, vha, 0x7054,
 111                    "Valid FCP priority data. num entries = %d.\n",
 112                    num_valid);
 113        }
 114
 115        return ret;
 116}
 117
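/*
 * Vendor-specific BSG command for FCP priority configuration: enable
 * or disable the feature, or get/set the configuration data cached in
 * ha->fcp_prio_cfg.
 */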
 118static int
 119qla24xx_proc_fcp_prio_cfg_cmd(struct fc_bsg_job *bsg_job)
 120{
 121        struct Scsi_Host *host = bsg_job->shost;
 122        scsi_qla_host_t *vha = shost_priv(host);
 123        struct qla_hw_data *ha = vha->hw;
 124        int ret = 0;
 125        uint32_t len;
 126        uint32_t oper;
 127
 128        if (!(IS_QLA24XX_TYPE(ha) || IS_QLA25XX(ha) || IS_QLA82XX(ha))) {
 129                ret = -EINVAL;
 130                goto exit_fcp_prio_cfg;
 131        }
 132
 133        /* Get the sub command */
 134        oper = bsg_job->request->rqst_data.h_vendor.vendor_cmd[1];
 135
 136        /* Only set config is allowed if config memory is not allocated */
 137        if (!ha->fcp_prio_cfg && (oper != QLFC_FCP_PRIO_SET_CONFIG)) {
 138                ret = -EINVAL;
 139                goto exit_fcp_prio_cfg;
 140        }
 141        switch (oper) {
 142        case QLFC_FCP_PRIO_DISABLE:
 143                if (ha->flags.fcp_prio_enabled) {
 144                        ha->flags.fcp_prio_enabled = 0;
 145                        ha->fcp_prio_cfg->attributes &=
 146                                ~FCP_PRIO_ATTR_ENABLE;
 147                        qla24xx_update_all_fcp_prio(vha);
 148                        bsg_job->reply->result = DID_OK;
 149                } else {
 150                        ret = -EINVAL;
 151                        bsg_job->reply->result = (DID_ERROR << 16);
 152                        goto exit_fcp_prio_cfg;
 153                }
 154                break;
 155
 156        case QLFC_FCP_PRIO_ENABLE:
 157                if (!ha->flags.fcp_prio_enabled) {
 158                        if (ha->fcp_prio_cfg) {
 159                                ha->flags.fcp_prio_enabled = 1;
 160                                ha->fcp_prio_cfg->attributes |=
 161                                    FCP_PRIO_ATTR_ENABLE;
 162                                qla24xx_update_all_fcp_prio(vha);
 163                                bsg_job->reply->result = DID_OK;
 164                        } else {
 165                                ret = -EINVAL;
 166                                bsg_job->reply->result = (DID_ERROR << 16);
 167                                goto exit_fcp_prio_cfg;
 168                        }
 169                }
 170                break;
 171
 172        case QLFC_FCP_PRIO_GET_CONFIG:
 173                len = bsg_job->reply_payload.payload_len;
 174                if (!len || len > FCP_PRIO_CFG_SIZE) {
 175                        ret = -EINVAL;
 176                        bsg_job->reply->result = (DID_ERROR << 16);
 177                        goto exit_fcp_prio_cfg;
 178                }
 179
 180                bsg_job->reply->result = DID_OK;
 181                bsg_job->reply->reply_payload_rcv_len =
 182                        sg_copy_from_buffer(
 183                        bsg_job->reply_payload.sg_list,
 184                        bsg_job->reply_payload.sg_cnt, ha->fcp_prio_cfg,
 185                        len);
 186
 187                break;
 188
 189        case QLFC_FCP_PRIO_SET_CONFIG:
 190                len = bsg_job->request_payload.payload_len;
 191                if (!len || len > FCP_PRIO_CFG_SIZE) {
 192                        bsg_job->reply->result = (DID_ERROR << 16);
 193                        ret = -EINVAL;
 194                        goto exit_fcp_prio_cfg;
 195                }
 196
 197                if (!ha->fcp_prio_cfg) {
 198                        ha->fcp_prio_cfg = vmalloc(FCP_PRIO_CFG_SIZE);
 199                        if (!ha->fcp_prio_cfg) {
 200                                ql_log(ql_log_warn, vha, 0x7050,
 201                                    "Unable to allocate memory for fcp prio "
 202                                    "config data (%x).\n", FCP_PRIO_CFG_SIZE);
 203                                bsg_job->reply->result = (DID_ERROR << 16);
 204                                ret = -ENOMEM;
 205                                goto exit_fcp_prio_cfg;
 206                        }
 207                }
 208
 209                memset(ha->fcp_prio_cfg, 0, FCP_PRIO_CFG_SIZE);
 210                sg_copy_to_buffer(bsg_job->request_payload.sg_list,
 211                bsg_job->request_payload.sg_cnt, ha->fcp_prio_cfg,
 212                        FCP_PRIO_CFG_SIZE);
 213
 214                /* validate fcp priority data */
 215
 216                if (!qla24xx_fcp_prio_cfg_valid(vha,
 217                    (struct qla_fcp_prio_cfg *) ha->fcp_prio_cfg, 1)) {
 218                        bsg_job->reply->result = (DID_ERROR << 16);
 219                        ret = -EINVAL;
  220                        /* If the buffer is invalid,
  221                         * fcp_prio_cfg is of no use
  222                         */
 223                        vfree(ha->fcp_prio_cfg);
 224                        ha->fcp_prio_cfg = NULL;
 225                        goto exit_fcp_prio_cfg;
 226                }
 227
 228                ha->flags.fcp_prio_enabled = 0;
 229                if (ha->fcp_prio_cfg->attributes & FCP_PRIO_ATTR_ENABLE)
 230                        ha->flags.fcp_prio_enabled = 1;
 231                qla24xx_update_all_fcp_prio(vha);
 232                bsg_job->reply->result = DID_OK;
 233                break;
 234        default:
 235                ret = -EINVAL;
 236                break;
 237        }
 238exit_fcp_prio_cfg:
 239        if (!ret)
 240                bsg_job->job_done(bsg_job);
 241        return ret;
 242}
 243
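/*
 * ELS pass-through: handle both rport-directed (FC_BSG_RPT_ELS) and
 * host-based no-login (FC_BSG_HST_ELS_NOLOGIN) requests.  Map the
 * request/reply payloads, allocate an SRB (plus a dummy fcport for the
 * host-based case) and issue it via qla2x00_start_sp().
 */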
 244static int
 245qla2x00_process_els(struct fc_bsg_job *bsg_job)
 246{
 247        struct fc_rport *rport;
 248        fc_port_t *fcport = NULL;
 249        struct Scsi_Host *host;
 250        scsi_qla_host_t *vha;
 251        struct qla_hw_data *ha;
 252        srb_t *sp;
 253        const char *type;
 254        int req_sg_cnt, rsp_sg_cnt;
 255        int rval =  (DRIVER_ERROR << 16);
 256        uint16_t nextlid = 0;
 257
 258        if (bsg_job->request->msgcode == FC_BSG_RPT_ELS) {
 259                rport = bsg_job->rport;
 260                fcport = *(fc_port_t **) rport->dd_data;
 261                host = rport_to_shost(rport);
 262                vha = shost_priv(host);
 263                ha = vha->hw;
 264                type = "FC_BSG_RPT_ELS";
 265        } else {
 266                host = bsg_job->shost;
 267                vha = shost_priv(host);
 268                ha = vha->hw;
 269                type = "FC_BSG_HST_ELS_NOLOGIN";
 270        }
 271
 272        /* pass through is supported only for ISP 4Gb or higher */
 273        if (!IS_FWI2_CAPABLE(ha)) {
 274                ql_dbg(ql_dbg_user, vha, 0x7001,
 275                    "ELS passthru not supported for ISP23xx based adapters.\n");
 276                rval = -EPERM;
 277                goto done;
 278        }
 279
 280        /*  Multiple SG's are not supported for ELS requests */
 281        if (bsg_job->request_payload.sg_cnt > 1 ||
 282                bsg_job->reply_payload.sg_cnt > 1) {
 283                ql_dbg(ql_dbg_user, vha, 0x7002,
  284                    "Multiple SG's are not supported for ELS requests, "
 285                    "request_sg_cnt=%x reply_sg_cnt=%x.\n",
 286                    bsg_job->request_payload.sg_cnt,
 287                    bsg_job->reply_payload.sg_cnt);
 288                rval = -EPERM;
 289                goto done;
 290        }
 291
 292        /* ELS request for rport */
 293        if (bsg_job->request->msgcode == FC_BSG_RPT_ELS) {
 294                /* make sure the rport is logged in,
  295                 * if not, perform fabric login
 296                 */
 297                if (qla2x00_fabric_login(vha, fcport, &nextlid)) {
 298                        ql_dbg(ql_dbg_user, vha, 0x7003,
 299                            "Failed to login port %06X for ELS passthru.\n",
 300                            fcport->d_id.b24);
 301                        rval = -EIO;
 302                        goto done;
 303                }
 304        } else {
  305                /* Allocate a dummy fcport structure, since the functions
  306                 * preparing the IOCB and mailbox command retrieve port
  307                 * specific information from the fcport structure. For
  308                 * host-based ELS commands no fcport structure is allocated.
  309                 */
 310                fcport = qla2x00_alloc_fcport(vha, GFP_KERNEL);
 311                if (!fcport) {
 312                        rval = -ENOMEM;
 313                        goto done;
 314                }
 315
 316                /* Initialize all required  fields of fcport */
 317                fcport->vha = vha;
 318                fcport->d_id.b.al_pa =
 319                        bsg_job->request->rqst_data.h_els.port_id[0];
 320                fcport->d_id.b.area =
 321                        bsg_job->request->rqst_data.h_els.port_id[1];
 322                fcport->d_id.b.domain =
 323                        bsg_job->request->rqst_data.h_els.port_id[2];
 324                fcport->loop_id =
 325                        (fcport->d_id.b.al_pa == 0xFD) ?
 326                        NPH_FABRIC_CONTROLLER : NPH_F_PORT;
 327        }
 328
 329        if (!vha->flags.online) {
 330                ql_log(ql_log_warn, vha, 0x7005, "Host not online.\n");
 331                rval = -EIO;
 332                goto done;
 333        }
 334
 335        req_sg_cnt =
 336                dma_map_sg(&ha->pdev->dev, bsg_job->request_payload.sg_list,
 337                bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE);
 338        if (!req_sg_cnt) {
 339                rval = -ENOMEM;
 340                goto done_free_fcport;
 341        }
 342
 343        rsp_sg_cnt = dma_map_sg(&ha->pdev->dev, bsg_job->reply_payload.sg_list,
 344                bsg_job->reply_payload.sg_cnt, DMA_FROM_DEVICE);
 345        if (!rsp_sg_cnt) {
 346                rval = -ENOMEM;
 347                goto done_free_fcport;
 348        }
 349
 350        if ((req_sg_cnt !=  bsg_job->request_payload.sg_cnt) ||
 351                (rsp_sg_cnt != bsg_job->reply_payload.sg_cnt)) {
 352                ql_log(ql_log_warn, vha, 0x7008,
 353                    "dma mapping resulted in different sg counts, "
 354                    "request_sg_cnt: %x dma_request_sg_cnt:%x reply_sg_cnt:%x "
 355                    "dma_reply_sg_cnt:%x.\n", bsg_job->request_payload.sg_cnt,
 356                    req_sg_cnt, bsg_job->reply_payload.sg_cnt, rsp_sg_cnt);
 357                rval = -EAGAIN;
 358                goto done_unmap_sg;
 359        }
 360
 361        /* Alloc SRB structure */
 362        sp = qla2x00_get_sp(vha, fcport, GFP_KERNEL);
 363        if (!sp) {
 364                rval = -ENOMEM;
 365                goto done_unmap_sg;
 366        }
 367
 368        sp->type =
 369                (bsg_job->request->msgcode == FC_BSG_RPT_ELS ?
 370                SRB_ELS_CMD_RPT : SRB_ELS_CMD_HST);
 371        sp->name =
 372                (bsg_job->request->msgcode == FC_BSG_RPT_ELS ?
 373                "bsg_els_rpt" : "bsg_els_hst");
 374        sp->u.bsg_job = bsg_job;
 375        sp->free = qla2x00_bsg_sp_free;
 376        sp->done = qla2x00_bsg_job_done;
 377
 378        ql_dbg(ql_dbg_user, vha, 0x700a,
 379            "bsg rqst type: %s els type: %x - loop-id=%x "
 380            "portid=%-2x%02x%02x.\n", type,
 381            bsg_job->request->rqst_data.h_els.command_code, fcport->loop_id,
 382            fcport->d_id.b.domain, fcport->d_id.b.area, fcport->d_id.b.al_pa);
 383
 384        rval = qla2x00_start_sp(sp);
 385        if (rval != QLA_SUCCESS) {
 386                ql_log(ql_log_warn, vha, 0x700e,
 387                    "qla2x00_start_sp failed = %d\n", rval);
 388                qla2x00_rel_sp(vha, sp);
 389                rval = -EIO;
 390                goto done_unmap_sg;
 391        }
 392        return rval;
 393
 394done_unmap_sg:
 395        dma_unmap_sg(&ha->pdev->dev, bsg_job->request_payload.sg_list,
 396                bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE);
 397        dma_unmap_sg(&ha->pdev->dev, bsg_job->reply_payload.sg_list,
 398                bsg_job->reply_payload.sg_cnt, DMA_FROM_DEVICE);
 399        goto done_free_fcport;
 400
 401done_free_fcport:
 402        if (bsg_job->request->msgcode == FC_BSG_HST_ELS_NOLOGIN)
 403                kfree(fcport);
 404done:
 405        return rval;
 406}
 407
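/*
 * A CT pass-through command IOCB carries two data segment descriptors;
 * each continuation IOCB carries five more.  Work out how many IOCBs a
 * request with @dsds data segments will consume.
 */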
 408inline uint16_t
 409qla24xx_calc_ct_iocbs(uint16_t dsds)
 410{
 411        uint16_t iocbs;
 412
 413        iocbs = 1;
 414        if (dsds > 2) {
 415                iocbs += (dsds - 2) / 5;
 416                if ((dsds - 2) % 5)
 417                        iocbs++;
 418        }
 419        return iocbs;
 420}
 421
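/*
 * CT pass-through: map the payloads, derive the destination loop id
 * from the CT preamble (0xFC = name server, 0xFA = management server),
 * allocate a dummy fcport and an SRB, and issue the command via
 * qla2x00_start_sp().
 */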
 422static int
 423qla2x00_process_ct(struct fc_bsg_job *bsg_job)
 424{
 425        srb_t *sp;
 426        struct Scsi_Host *host = bsg_job->shost;
 427        scsi_qla_host_t *vha = shost_priv(host);
 428        struct qla_hw_data *ha = vha->hw;
 429        int rval = (DRIVER_ERROR << 16);
 430        int req_sg_cnt, rsp_sg_cnt;
 431        uint16_t loop_id;
 432        struct fc_port *fcport;
 433        char  *type = "FC_BSG_HST_CT";
 434
 435        req_sg_cnt =
 436                dma_map_sg(&ha->pdev->dev, bsg_job->request_payload.sg_list,
 437                        bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE);
 438        if (!req_sg_cnt) {
 439                ql_log(ql_log_warn, vha, 0x700f,
  440                    "dma_map_sg returned %d for request\n", req_sg_cnt);
 441                rval = -ENOMEM;
 442                goto done;
 443        }
 444
 445        rsp_sg_cnt = dma_map_sg(&ha->pdev->dev, bsg_job->reply_payload.sg_list,
 446                bsg_job->reply_payload.sg_cnt, DMA_FROM_DEVICE);
 447        if (!rsp_sg_cnt) {
 448                ql_log(ql_log_warn, vha, 0x7010,
  449                    "dma_map_sg returned %d for reply\n", rsp_sg_cnt);
 450                rval = -ENOMEM;
 451                goto done;
 452        }
 453
 454        if ((req_sg_cnt !=  bsg_job->request_payload.sg_cnt) ||
 455            (rsp_sg_cnt != bsg_job->reply_payload.sg_cnt)) {
 456                ql_log(ql_log_warn, vha, 0x7011,
 457                    "request_sg_cnt: %x dma_request_sg_cnt: %x reply_sg_cnt:%x "
 458                    "dma_reply_sg_cnt: %x\n", bsg_job->request_payload.sg_cnt,
 459                    req_sg_cnt, bsg_job->reply_payload.sg_cnt, rsp_sg_cnt);
 460                rval = -EAGAIN;
 461                goto done_unmap_sg;
 462        }
 463
 464        if (!vha->flags.online) {
 465                ql_log(ql_log_warn, vha, 0x7012,
 466                    "Host is not online.\n");
 467                rval = -EIO;
 468                goto done_unmap_sg;
 469        }
 470
 471        loop_id =
 472                (bsg_job->request->rqst_data.h_ct.preamble_word1 & 0xFF000000)
 473                        >> 24;
 474        switch (loop_id) {
 475        case 0xFC:
 476                loop_id = cpu_to_le16(NPH_SNS);
 477                break;
 478        case 0xFA:
 479                loop_id = vha->mgmt_svr_loop_id;
 480                break;
 481        default:
 482                ql_dbg(ql_dbg_user, vha, 0x7013,
 483                    "Unknown loop id: %x.\n", loop_id);
 484                rval = -EINVAL;
 485                goto done_unmap_sg;
 486        }
 487
  488        /* Allocate a dummy fcport structure, since the functions preparing
  489         * the IOCB and mailbox command retrieve port specific information
  490         * from the fcport structure. For host-based CT commands no fcport
  491         * structure is allocated.
  492         */
 493        fcport = qla2x00_alloc_fcport(vha, GFP_KERNEL);
 494        if (!fcport) {
 495                ql_log(ql_log_warn, vha, 0x7014,
 496                    "Failed to allocate fcport.\n");
 497                rval = -ENOMEM;
 498                goto done_unmap_sg;
 499        }
 500
 501        /* Initialize all required  fields of fcport */
 502        fcport->vha = vha;
 503        fcport->d_id.b.al_pa = bsg_job->request->rqst_data.h_ct.port_id[0];
 504        fcport->d_id.b.area = bsg_job->request->rqst_data.h_ct.port_id[1];
 505        fcport->d_id.b.domain = bsg_job->request->rqst_data.h_ct.port_id[2];
 506        fcport->loop_id = loop_id;
 507
 508        /* Alloc SRB structure */
 509        sp = qla2x00_get_sp(vha, fcport, GFP_KERNEL);
 510        if (!sp) {
 511                ql_log(ql_log_warn, vha, 0x7015,
 512                    "qla2x00_get_sp failed.\n");
 513                rval = -ENOMEM;
 514                goto done_free_fcport;
 515        }
 516
 517        sp->type = SRB_CT_CMD;
 518        sp->name = "bsg_ct";
 519        sp->iocbs = qla24xx_calc_ct_iocbs(req_sg_cnt + rsp_sg_cnt);
 520        sp->u.bsg_job = bsg_job;
 521        sp->free = qla2x00_bsg_sp_free;
 522        sp->done = qla2x00_bsg_job_done;
 523
 524        ql_dbg(ql_dbg_user, vha, 0x7016,
  525            "bsg rqst type: %s ct type: %x - "
 526            "loop-id=%x portid=%02x%02x%02x.\n", type,
 527            (bsg_job->request->rqst_data.h_ct.preamble_word2 >> 16),
 528            fcport->loop_id, fcport->d_id.b.domain, fcport->d_id.b.area,
 529            fcport->d_id.b.al_pa);
 530
 531        rval = qla2x00_start_sp(sp);
 532        if (rval != QLA_SUCCESS) {
 533                ql_log(ql_log_warn, vha, 0x7017,
 534                    "qla2x00_start_sp failed=%d.\n", rval);
 535                qla2x00_rel_sp(vha, sp);
 536                rval = -EIO;
 537                goto done_free_fcport;
 538        }
 539        return rval;
 540
 541done_free_fcport:
 542        kfree(fcport);
 543done_unmap_sg:
 544        dma_unmap_sg(&ha->pdev->dev, bsg_job->request_payload.sg_list,
 545                bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE);
 546        dma_unmap_sg(&ha->pdev->dev, bsg_job->reply_payload.sg_list,
 547                bsg_job->reply_payload.sg_cnt, DMA_FROM_DEVICE);
 548done:
 549        return rval;
 550}
 551
 552/* Disable loopback mode */
 553static inline int
 554qla81xx_reset_loopback_mode(scsi_qla_host_t *vha, uint16_t *config,
 555                            int wait, int wait2)
 556{
 557        int ret = 0;
 558        int rval = 0;
 559        uint16_t new_config[4];
 560        struct qla_hw_data *ha = vha->hw;
 561
 562        if (!IS_QLA81XX(ha) && !IS_QLA8031(ha))
 563                goto done_reset_internal;
 564
  565        memset(new_config, 0, sizeof(new_config));
 566        if ((config[0] & INTERNAL_LOOPBACK_MASK) >> 1 ==
 567            ENABLE_INTERNAL_LOOPBACK ||
 568            (config[0] & INTERNAL_LOOPBACK_MASK) >> 1 ==
 569            ENABLE_EXTERNAL_LOOPBACK) {
 570                new_config[0] = config[0] & ~INTERNAL_LOOPBACK_MASK;
 571                ql_dbg(ql_dbg_user, vha, 0x70bf, "new_config[0]=%02x\n",
 572                    (new_config[0] & INTERNAL_LOOPBACK_MASK));
  573                memcpy(&new_config[1], &config[1], sizeof(uint16_t) * 3);
 574
 575                ha->notify_dcbx_comp = wait;
 576                ha->notify_lb_portup_comp = wait2;
 577
 578                ret = qla81xx_set_port_config(vha, new_config);
 579                if (ret != QLA_SUCCESS) {
 580                        ql_log(ql_log_warn, vha, 0x7025,
 581                            "Set port config failed.\n");
 582                        ha->notify_dcbx_comp = 0;
 583                        ha->notify_lb_portup_comp = 0;
 584                        rval = -EINVAL;
 585                        goto done_reset_internal;
 586                }
 587
 588                /* Wait for DCBX complete event */
 589                if (wait && !wait_for_completion_timeout(&ha->dcbx_comp,
 590                        (DCBX_COMP_TIMEOUT * HZ))) {
 591                        ql_dbg(ql_dbg_user, vha, 0x7026,
 592                            "DCBX completion not received.\n");
 593                        ha->notify_dcbx_comp = 0;
 594                        ha->notify_lb_portup_comp = 0;
 595                        rval = -EINVAL;
 596                        goto done_reset_internal;
 597                } else
 598                        ql_dbg(ql_dbg_user, vha, 0x7027,
 599                            "DCBX completion received.\n");
 600
 601                if (wait2 &&
 602                    !wait_for_completion_timeout(&ha->lb_portup_comp,
 603                    (LB_PORTUP_COMP_TIMEOUT * HZ))) {
 604                        ql_dbg(ql_dbg_user, vha, 0x70c5,
 605                            "Port up completion not received.\n");
 606                        ha->notify_lb_portup_comp = 0;
 607                        rval = -EINVAL;
 608                        goto done_reset_internal;
 609                } else
 610                        ql_dbg(ql_dbg_user, vha, 0x70c6,
 611                            "Port up completion received.\n");
 612
 613                ha->notify_dcbx_comp = 0;
 614                ha->notify_lb_portup_comp = 0;
 615        }
 616done_reset_internal:
 617        return rval;
 618}
 619
 620/*
 621 * Set the port configuration to enable the internal or external loopback
 622 * depending on the loopback mode.
 623 */
 624static inline int
 625qla81xx_set_loopback_mode(scsi_qla_host_t *vha, uint16_t *config,
 626        uint16_t *new_config, uint16_t mode)
 627{
 628        int ret = 0;
 629        int rval = 0;
 630        struct qla_hw_data *ha = vha->hw;
 631
 632        if (!IS_QLA81XX(ha) && !IS_QLA8031(ha))
 633                goto done_set_internal;
 634
 635        if (mode == INTERNAL_LOOPBACK)
 636                new_config[0] = config[0] | (ENABLE_INTERNAL_LOOPBACK << 1);
 637        else if (mode == EXTERNAL_LOOPBACK)
 638                new_config[0] = config[0] | (ENABLE_EXTERNAL_LOOPBACK << 1);
 639        ql_dbg(ql_dbg_user, vha, 0x70be,
 640             "new_config[0]=%02x\n", (new_config[0] & INTERNAL_LOOPBACK_MASK));
 641
 642        memcpy(&new_config[1], &config[1], sizeof(uint16_t) * 3);
 643
 644        ha->notify_dcbx_comp = 1;
 645        ret = qla81xx_set_port_config(vha, new_config);
 646        if (ret != QLA_SUCCESS) {
 647                ql_log(ql_log_warn, vha, 0x7021,
 648                    "set port config failed.\n");
 649                ha->notify_dcbx_comp = 0;
 650                rval = -EINVAL;
 651                goto done_set_internal;
 652        }
 653
 654        /* Wait for DCBX complete event */
 655        if (!wait_for_completion_timeout(&ha->dcbx_comp,
 656            (DCBX_COMP_TIMEOUT * HZ))) {
 657                ql_dbg(ql_dbg_user, vha, 0x7022,
 658                    "DCBX completion not received.\n");
 659                ret = qla81xx_reset_loopback_mode(vha, new_config, 0, 0);
 660                /*
 661                 * If the reset of the loopback mode doesn't work take a FCoE
 662                 * dump and reset the chip.
 663                 */
 664                if (ret) {
 665                        ha->isp_ops->fw_dump(vha, 0);
 666                        set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
 667                }
 668                rval = -EINVAL;
 669        } else {
 670                if (ha->flags.idc_compl_status) {
 671                        ql_dbg(ql_dbg_user, vha, 0x70c3,
 672                            "Bad status in IDC Completion AEN\n");
 673                        rval = -EINVAL;
 674                        ha->flags.idc_compl_status = 0;
 675                } else
 676                        ql_dbg(ql_dbg_user, vha, 0x7023,
 677                            "DCBX completion received.\n");
 678        }
 679
 680        ha->notify_dcbx_comp = 0;
 681
 682done_set_internal:
 683        return rval;
 684}
 685
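/*
 * Vendor loopback/echo diagnostic: copy the request payload into a DMA
 * buffer and run either an ECHO test or a loopback test.  On
 * ISP81xx/ISP8031 the port is first switched into internal/external
 * loopback and the original port configuration is restored afterwards.
 * The received data is copied back to the reply payload and the
 * mailbox status is appended after the fc_bsg_reply.
 */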
 686static int
 687qla2x00_process_loopback(struct fc_bsg_job *bsg_job)
 688{
 689        struct Scsi_Host *host = bsg_job->shost;
 690        scsi_qla_host_t *vha = shost_priv(host);
 691        struct qla_hw_data *ha = vha->hw;
 692        int rval;
 693        uint8_t command_sent;
 694        char *type;
 695        struct msg_echo_lb elreq;
 696        uint16_t response[MAILBOX_REGISTER_COUNT];
 697        uint16_t config[4], new_config[4];
 698        uint8_t *fw_sts_ptr;
 699        uint8_t *req_data = NULL;
 700        dma_addr_t req_data_dma;
 701        uint32_t req_data_len;
 702        uint8_t *rsp_data = NULL;
 703        dma_addr_t rsp_data_dma;
 704        uint32_t rsp_data_len;
 705
 706        if (!vha->flags.online) {
 707                ql_log(ql_log_warn, vha, 0x7019, "Host is not online.\n");
 708                return -EIO;
 709        }
 710
 711        elreq.req_sg_cnt = dma_map_sg(&ha->pdev->dev,
 712                bsg_job->request_payload.sg_list, bsg_job->request_payload.sg_cnt,
 713                DMA_TO_DEVICE);
 714
 715        if (!elreq.req_sg_cnt) {
 716                ql_log(ql_log_warn, vha, 0x701a,
 717                    "dma_map_sg returned %d for request.\n", elreq.req_sg_cnt);
 718                return -ENOMEM;
 719        }
 720
 721        elreq.rsp_sg_cnt = dma_map_sg(&ha->pdev->dev,
 722                bsg_job->reply_payload.sg_list, bsg_job->reply_payload.sg_cnt,
 723                DMA_FROM_DEVICE);
 724
 725        if (!elreq.rsp_sg_cnt) {
 726                ql_log(ql_log_warn, vha, 0x701b,
 727                    "dma_map_sg returned %d for reply.\n", elreq.rsp_sg_cnt);
 728                rval = -ENOMEM;
 729                goto done_unmap_req_sg;
 730        }
 731
 732        if ((elreq.req_sg_cnt !=  bsg_job->request_payload.sg_cnt) ||
 733                (elreq.rsp_sg_cnt != bsg_job->reply_payload.sg_cnt)) {
 734                ql_log(ql_log_warn, vha, 0x701c,
 735                    "dma mapping resulted in different sg counts, "
 736                    "request_sg_cnt: %x dma_request_sg_cnt: %x "
 737                    "reply_sg_cnt: %x dma_reply_sg_cnt: %x.\n",
 738                    bsg_job->request_payload.sg_cnt, elreq.req_sg_cnt,
 739                    bsg_job->reply_payload.sg_cnt, elreq.rsp_sg_cnt);
 740                rval = -EAGAIN;
 741                goto done_unmap_sg;
 742        }
 743        req_data_len = rsp_data_len = bsg_job->request_payload.payload_len;
 744        req_data = dma_alloc_coherent(&ha->pdev->dev, req_data_len,
 745                &req_data_dma, GFP_KERNEL);
 746        if (!req_data) {
 747                ql_log(ql_log_warn, vha, 0x701d,
 748                    "dma alloc failed for req_data.\n");
 749                rval = -ENOMEM;
 750                goto done_unmap_sg;
 751        }
 752
 753        rsp_data = dma_alloc_coherent(&ha->pdev->dev, rsp_data_len,
 754                &rsp_data_dma, GFP_KERNEL);
 755        if (!rsp_data) {
 756                ql_log(ql_log_warn, vha, 0x7004,
 757                    "dma alloc failed for rsp_data.\n");
 758                rval = -ENOMEM;
 759                goto done_free_dma_req;
 760        }
 761
 762        /* Copy the request buffer in req_data now */
 763        sg_copy_to_buffer(bsg_job->request_payload.sg_list,
 764                bsg_job->request_payload.sg_cnt, req_data, req_data_len);
 765
 766        elreq.send_dma = req_data_dma;
 767        elreq.rcv_dma = rsp_data_dma;
 768        elreq.transfer_size = req_data_len;
 769
 770        elreq.options = bsg_job->request->rqst_data.h_vendor.vendor_cmd[1];
 771        elreq.iteration_count =
 772            bsg_job->request->rqst_data.h_vendor.vendor_cmd[2];
 773
 774        if (atomic_read(&vha->loop_state) == LOOP_READY &&
 775            (ha->current_topology == ISP_CFG_F ||
 776            ((IS_QLA81XX(ha) || IS_QLA8031(ha)) &&
 777            le32_to_cpu(*(uint32_t *)req_data) == ELS_OPCODE_BYTE
 778            && req_data_len == MAX_ELS_FRAME_PAYLOAD)) &&
 779                elreq.options == EXTERNAL_LOOPBACK) {
 780                type = "FC_BSG_HST_VENDOR_ECHO_DIAG";
 781                ql_dbg(ql_dbg_user, vha, 0x701e,
 782                    "BSG request type: %s.\n", type);
 783                command_sent = INT_DEF_LB_ECHO_CMD;
 784                rval = qla2x00_echo_test(vha, &elreq, response);
 785        } else {
 786                if (IS_QLA81XX(ha) || IS_QLA8031(ha)) {
 787                        memset(config, 0, sizeof(config));
 788                        memset(new_config, 0, sizeof(new_config));
 789
 790                        if (qla81xx_get_port_config(vha, config)) {
 791                                ql_log(ql_log_warn, vha, 0x701f,
 792                                    "Get port config failed.\n");
 793                                rval = -EPERM;
 794                                goto done_free_dma_rsp;
 795                        }
 796
 797                        if ((config[0] & INTERNAL_LOOPBACK_MASK) != 0) {
 798                                ql_dbg(ql_dbg_user, vha, 0x70c4,
 799                                    "Loopback operation already in "
 800                                    "progress.\n");
 801                                rval = -EAGAIN;
 802                                goto done_free_dma_rsp;
 803                        }
 804
 805                        ql_dbg(ql_dbg_user, vha, 0x70c0,
 806                            "elreq.options=%04x\n", elreq.options);
 807
 808                        if (elreq.options == EXTERNAL_LOOPBACK)
 809                                if (IS_QLA8031(ha))
 810                                        rval = qla81xx_set_loopback_mode(vha,
 811                                            config, new_config, elreq.options);
 812                                else
 813                                        rval = qla81xx_reset_loopback_mode(vha,
 814                                            config, 1, 0);
 815                        else
 816                                rval = qla81xx_set_loopback_mode(vha, config,
 817                                    new_config, elreq.options);
 818
 819                        if (rval) {
 820                                rval = -EPERM;
 821                                goto done_free_dma_rsp;
 822                        }
 823
 824                        type = "FC_BSG_HST_VENDOR_LOOPBACK";
 825                        ql_dbg(ql_dbg_user, vha, 0x7028,
 826                            "BSG request type: %s.\n", type);
 827
 828                        command_sent = INT_DEF_LB_LOOPBACK_CMD;
 829                        rval = qla2x00_loopback_test(vha, &elreq, response);
 830
 831                        if (response[0] == MBS_COMMAND_ERROR &&
 832                                        response[1] == MBS_LB_RESET) {
 833                                ql_log(ql_log_warn, vha, 0x7029,
 834                                    "MBX command error, Aborting ISP.\n");
 835                                set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
 836                                qla2xxx_wake_dpc(vha);
 837                                qla2x00_wait_for_chip_reset(vha);
 838                                /* Also reset the MPI */
 839                                if (IS_QLA81XX(ha)) {
 840                                        if (qla81xx_restart_mpi_firmware(vha) !=
 841                                            QLA_SUCCESS) {
 842                                                ql_log(ql_log_warn, vha, 0x702a,
 843                                                    "MPI reset failed.\n");
 844                                        }
 845                                }
 846
 847                                rval = -EIO;
 848                                goto done_free_dma_rsp;
 849                        }
 850
 851                        if (new_config[0]) {
 852                                int ret;
 853
  854                                /* Revert to the original port config
 855                                 * Also clear internal loopback
 856                                 */
 857                                ret = qla81xx_reset_loopback_mode(vha,
 858                                    new_config, 0, 1);
 859                                if (ret) {
 860                                        /*
 861                                         * If the reset of the loopback mode
 862                                         * doesn't work take FCoE dump and then
 863                                         * reset the chip.
 864                                         */
 865                                        ha->isp_ops->fw_dump(vha, 0);
 866                                        set_bit(ISP_ABORT_NEEDED,
 867                                            &vha->dpc_flags);
 868                                }
 869
 870                        }
 871
 872                } else {
 873                        type = "FC_BSG_HST_VENDOR_LOOPBACK";
 874                        ql_dbg(ql_dbg_user, vha, 0x702b,
 875                            "BSG request type: %s.\n", type);
 876                        command_sent = INT_DEF_LB_LOOPBACK_CMD;
 877                        rval = qla2x00_loopback_test(vha, &elreq, response);
 878                }
 879        }
 880
 881        if (rval) {
 882                ql_log(ql_log_warn, vha, 0x702c,
 883                    "Vendor request %s failed.\n", type);
 884
 885                rval = 0;
 886                bsg_job->reply->result = (DID_ERROR << 16);
 887                bsg_job->reply->reply_payload_rcv_len = 0;
 888        } else {
 889                ql_dbg(ql_dbg_user, vha, 0x702d,
 890                    "Vendor request %s completed.\n", type);
 891                bsg_job->reply->result = (DID_OK << 16);
 892                sg_copy_from_buffer(bsg_job->reply_payload.sg_list,
 893                        bsg_job->reply_payload.sg_cnt, rsp_data,
 894                        rsp_data_len);
 895        }
 896
 897        bsg_job->reply_len = sizeof(struct fc_bsg_reply) +
 898            sizeof(response) + sizeof(uint8_t);
 899        fw_sts_ptr = ((uint8_t *)bsg_job->req->sense) +
 900            sizeof(struct fc_bsg_reply);
 901        memcpy(fw_sts_ptr, response, sizeof(response));
 902        fw_sts_ptr += sizeof(response);
 903        *fw_sts_ptr = command_sent;
 904
 905done_free_dma_rsp:
 906        dma_free_coherent(&ha->pdev->dev, rsp_data_len,
 907                rsp_data, rsp_data_dma);
 908done_free_dma_req:
 909        dma_free_coherent(&ha->pdev->dev, req_data_len,
 910                req_data, req_data_dma);
 911done_unmap_sg:
 912        dma_unmap_sg(&ha->pdev->dev,
 913            bsg_job->reply_payload.sg_list,
 914            bsg_job->reply_payload.sg_cnt, DMA_FROM_DEVICE);
 915done_unmap_req_sg:
 916        dma_unmap_sg(&ha->pdev->dev,
 917            bsg_job->request_payload.sg_list,
 918            bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE);
 919        if (!rval)
 920                bsg_job->job_done(bsg_job);
 921        return rval;
 922}
 923
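/*
 * Vendor command: reset the ISP84xx companion chip, optionally into
 * diagnostic firmware (A84_ISSUE_RESET_DIAG_FW).
 */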
 924static int
 925qla84xx_reset(struct fc_bsg_job *bsg_job)
 926{
 927        struct Scsi_Host *host = bsg_job->shost;
 928        scsi_qla_host_t *vha = shost_priv(host);
 929        struct qla_hw_data *ha = vha->hw;
 930        int rval = 0;
 931        uint32_t flag;
 932
 933        if (!IS_QLA84XX(ha)) {
 934                ql_dbg(ql_dbg_user, vha, 0x702f, "Not 84xx, exiting.\n");
 935                return -EINVAL;
 936        }
 937
 938        flag = bsg_job->request->rqst_data.h_vendor.vendor_cmd[1];
 939
 940        rval = qla84xx_reset_chip(vha, flag == A84_ISSUE_RESET_DIAG_FW);
 941
 942        if (rval) {
 943                ql_log(ql_log_warn, vha, 0x7030,
 944                    "Vendor request 84xx reset failed.\n");
 945                rval = (DID_ERROR << 16);
 946
 947        } else {
 948                ql_dbg(ql_dbg_user, vha, 0x7031,
 949                    "Vendor request 84xx reset completed.\n");
 950                bsg_job->reply->result = DID_OK;
 951                bsg_job->job_done(bsg_job);
 952        }
 953
 954        return rval;
 955}
 956
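/*
 * Vendor command: update the ISP84xx firmware.  The image is copied
 * from the request payload into a DMA buffer and handed to the chip
 * with a VERIFY_CHIP IOCB (VCO_FORCE_UPDATE, plus VCO_DIAG_FW for
 * diagnostic firmware).
 */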
 957static int
 958qla84xx_updatefw(struct fc_bsg_job *bsg_job)
 959{
 960        struct Scsi_Host *host = bsg_job->shost;
 961        scsi_qla_host_t *vha = shost_priv(host);
 962        struct qla_hw_data *ha = vha->hw;
 963        struct verify_chip_entry_84xx *mn = NULL;
 964        dma_addr_t mn_dma, fw_dma;
 965        void *fw_buf = NULL;
 966        int rval = 0;
 967        uint32_t sg_cnt;
 968        uint32_t data_len;
 969        uint16_t options;
 970        uint32_t flag;
 971        uint32_t fw_ver;
 972
 973        if (!IS_QLA84XX(ha)) {
 974                ql_dbg(ql_dbg_user, vha, 0x7032,
 975                    "Not 84xx, exiting.\n");
 976                return -EINVAL;
 977        }
 978
 979        sg_cnt = dma_map_sg(&ha->pdev->dev, bsg_job->request_payload.sg_list,
 980                bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE);
 981        if (!sg_cnt) {
 982                ql_log(ql_log_warn, vha, 0x7033,
 983                    "dma_map_sg returned %d for request.\n", sg_cnt);
 984                return -ENOMEM;
 985        }
 986
 987        if (sg_cnt != bsg_job->request_payload.sg_cnt) {
 988                ql_log(ql_log_warn, vha, 0x7034,
 989                    "DMA mapping resulted in different sg counts, "
 990                    "request_sg_cnt: %x dma_request_sg_cnt: %x.\n",
 991                    bsg_job->request_payload.sg_cnt, sg_cnt);
 992                rval = -EAGAIN;
 993                goto done_unmap_sg;
 994        }
 995
 996        data_len = bsg_job->request_payload.payload_len;
 997        fw_buf = dma_alloc_coherent(&ha->pdev->dev, data_len,
 998                &fw_dma, GFP_KERNEL);
 999        if (!fw_buf) {
1000                ql_log(ql_log_warn, vha, 0x7035,
1001                    "DMA alloc failed for fw_buf.\n");
1002                rval = -ENOMEM;
1003                goto done_unmap_sg;
1004        }
1005
1006        sg_copy_to_buffer(bsg_job->request_payload.sg_list,
1007                bsg_job->request_payload.sg_cnt, fw_buf, data_len);
1008
1009        mn = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, &mn_dma);
1010        if (!mn) {
1011                ql_log(ql_log_warn, vha, 0x7036,
1012                    "DMA alloc failed for fw buffer.\n");
1013                rval = -ENOMEM;
1014                goto done_free_fw_buf;
1015        }
1016
1017        flag = bsg_job->request->rqst_data.h_vendor.vendor_cmd[1];
1018        fw_ver = le32_to_cpu(*((uint32_t *)((uint32_t *)fw_buf + 2)));
1019
1020        memset(mn, 0, sizeof(struct access_chip_84xx));
1021        mn->entry_type = VERIFY_CHIP_IOCB_TYPE;
1022        mn->entry_count = 1;
1023
1024        options = VCO_FORCE_UPDATE | VCO_END_OF_DATA;
1025        if (flag == A84_ISSUE_UPDATE_DIAGFW_CMD)
1026                options |= VCO_DIAG_FW;
1027
1028        mn->options = cpu_to_le16(options);
1029        mn->fw_ver =  cpu_to_le32(fw_ver);
1030        mn->fw_size =  cpu_to_le32(data_len);
1031        mn->fw_seq_size =  cpu_to_le32(data_len);
1032        mn->dseg_address[0] = cpu_to_le32(LSD(fw_dma));
1033        mn->dseg_address[1] = cpu_to_le32(MSD(fw_dma));
1034        mn->dseg_length = cpu_to_le32(data_len);
1035        mn->data_seg_cnt = cpu_to_le16(1);
1036
1037        rval = qla2x00_issue_iocb_timeout(vha, mn, mn_dma, 0, 120);
1038
1039        if (rval) {
1040                ql_log(ql_log_warn, vha, 0x7037,
1041                    "Vendor request 84xx updatefw failed.\n");
1042
1043                rval = (DID_ERROR << 16);
1044        } else {
1045                ql_dbg(ql_dbg_user, vha, 0x7038,
1046                    "Vendor request 84xx updatefw completed.\n");
1047
1048                bsg_job->reply_len = sizeof(struct fc_bsg_reply);
1049                bsg_job->reply->result = DID_OK;
1050        }
1051
1052        dma_pool_free(ha->s_dma_pool, mn, mn_dma);
1053
1054done_free_fw_buf:
1055        dma_free_coherent(&ha->pdev->dev, data_len, fw_buf, fw_dma);
1056
1057done_unmap_sg:
1058        dma_unmap_sg(&ha->pdev->dev, bsg_job->request_payload.sg_list,
1059                bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE);
1060
1061        if (!rval)
1062                bsg_job->job_done(bsg_job);
1063        return rval;
1064}
1065
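/*
 * Vendor command: ISP84xx management access.  Read/write chip memory,
 * request information or change configuration parameters through an
 * ACCESS_CHIP IOCB; payload data is staged in a coherent DMA buffer.
 */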
1066static int
1067qla84xx_mgmt_cmd(struct fc_bsg_job *bsg_job)
1068{
1069        struct Scsi_Host *host = bsg_job->shost;
1070        scsi_qla_host_t *vha = shost_priv(host);
1071        struct qla_hw_data *ha = vha->hw;
1072        struct access_chip_84xx *mn = NULL;
1073        dma_addr_t mn_dma, mgmt_dma;
1074        void *mgmt_b = NULL;
1075        int rval = 0;
1076        struct qla_bsg_a84_mgmt *ql84_mgmt;
1077        uint32_t sg_cnt;
1078        uint32_t data_len = 0;
1079        uint32_t dma_direction = DMA_NONE;
1080
1081        if (!IS_QLA84XX(ha)) {
1082                ql_log(ql_log_warn, vha, 0x703a,
1083                    "Not 84xx, exiting.\n");
1084                return -EINVAL;
1085        }
1086
1087        ql84_mgmt = (struct qla_bsg_a84_mgmt *)((char *)bsg_job->request +
1088                sizeof(struct fc_bsg_request));
1089        if (!ql84_mgmt) {
1090                ql_log(ql_log_warn, vha, 0x703b,
1091                    "MGMT header not provided, exiting.\n");
1092                return -EINVAL;
1093        }
1094
1095        mn = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, &mn_dma);
1096        if (!mn) {
1097                ql_log(ql_log_warn, vha, 0x703c,
1098                    "DMA alloc failed for fw buffer.\n");
1099                return -ENOMEM;
1100        }
1101
1102        memset(mn, 0, sizeof(struct access_chip_84xx));
1103        mn->entry_type = ACCESS_CHIP_IOCB_TYPE;
1104        mn->entry_count = 1;
1105
1106        switch (ql84_mgmt->mgmt.cmd) {
1107        case QLA84_MGMT_READ_MEM:
1108        case QLA84_MGMT_GET_INFO:
1109                sg_cnt = dma_map_sg(&ha->pdev->dev,
1110                        bsg_job->reply_payload.sg_list,
1111                        bsg_job->reply_payload.sg_cnt, DMA_FROM_DEVICE);
1112                if (!sg_cnt) {
1113                        ql_log(ql_log_warn, vha, 0x703d,
1114                            "dma_map_sg returned %d for reply.\n", sg_cnt);
1115                        rval = -ENOMEM;
1116                        goto exit_mgmt;
1117                }
1118
1119                dma_direction = DMA_FROM_DEVICE;
1120
1121                if (sg_cnt != bsg_job->reply_payload.sg_cnt) {
1122                        ql_log(ql_log_warn, vha, 0x703e,
1123                            "DMA mapping resulted in different sg counts, "
1124                            "reply_sg_cnt: %x dma_reply_sg_cnt: %x.\n",
1125                            bsg_job->reply_payload.sg_cnt, sg_cnt);
1126                        rval = -EAGAIN;
1127                        goto done_unmap_sg;
1128                }
1129
1130                data_len = bsg_job->reply_payload.payload_len;
1131
1132                mgmt_b = dma_alloc_coherent(&ha->pdev->dev, data_len,
1133                    &mgmt_dma, GFP_KERNEL);
1134                if (!mgmt_b) {
1135                        ql_log(ql_log_warn, vha, 0x703f,
1136                            "DMA alloc failed for mgmt_b.\n");
1137                        rval = -ENOMEM;
1138                        goto done_unmap_sg;
1139                }
1140
1141                if (ql84_mgmt->mgmt.cmd == QLA84_MGMT_READ_MEM) {
1142                        mn->options = cpu_to_le16(ACO_DUMP_MEMORY);
1143                        mn->parameter1 =
1144                                cpu_to_le32(
1145                                ql84_mgmt->mgmt.mgmtp.u.mem.start_addr);
1146
1147                } else if (ql84_mgmt->mgmt.cmd == QLA84_MGMT_GET_INFO) {
1148                        mn->options = cpu_to_le16(ACO_REQUEST_INFO);
1149                        mn->parameter1 =
1150                                cpu_to_le32(ql84_mgmt->mgmt.mgmtp.u.info.type);
1151
1152                        mn->parameter2 =
1153                                cpu_to_le32(
1154                                ql84_mgmt->mgmt.mgmtp.u.info.context);
1155                }
1156                break;
1157
1158        case QLA84_MGMT_WRITE_MEM:
1159                sg_cnt = dma_map_sg(&ha->pdev->dev,
1160                        bsg_job->request_payload.sg_list,
1161                        bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE);
1162
1163                if (!sg_cnt) {
1164                        ql_log(ql_log_warn, vha, 0x7040,
1165                            "dma_map_sg returned %d.\n", sg_cnt);
1166                        rval = -ENOMEM;
1167                        goto exit_mgmt;
1168                }
1169
1170                dma_direction = DMA_TO_DEVICE;
1171
1172                if (sg_cnt != bsg_job->request_payload.sg_cnt) {
1173                        ql_log(ql_log_warn, vha, 0x7041,
1174                            "DMA mapping resulted in different sg counts, "
1175                            "request_sg_cnt: %x dma_request_sg_cnt: %x.\n",
1176                            bsg_job->request_payload.sg_cnt, sg_cnt);
1177                        rval = -EAGAIN;
1178                        goto done_unmap_sg;
1179                }
1180
1181                data_len = bsg_job->request_payload.payload_len;
1182                mgmt_b = dma_alloc_coherent(&ha->pdev->dev, data_len,
1183                        &mgmt_dma, GFP_KERNEL);
1184                if (!mgmt_b) {
1185                        ql_log(ql_log_warn, vha, 0x7042,
1186                            "DMA alloc failed for mgmt_b.\n");
1187                        rval = -ENOMEM;
1188                        goto done_unmap_sg;
1189                }
1190
1191                sg_copy_to_buffer(bsg_job->request_payload.sg_list,
1192                        bsg_job->request_payload.sg_cnt, mgmt_b, data_len);
1193
1194                mn->options = cpu_to_le16(ACO_LOAD_MEMORY);
1195                mn->parameter1 =
1196                        cpu_to_le32(ql84_mgmt->mgmt.mgmtp.u.mem.start_addr);
1197                break;
1198
1199        case QLA84_MGMT_CHNG_CONFIG:
1200                mn->options = cpu_to_le16(ACO_CHANGE_CONFIG_PARAM);
1201                mn->parameter1 =
1202                        cpu_to_le32(ql84_mgmt->mgmt.mgmtp.u.config.id);
1203
1204                mn->parameter2 =
1205                        cpu_to_le32(ql84_mgmt->mgmt.mgmtp.u.config.param0);
1206
1207                mn->parameter3 =
1208                        cpu_to_le32(ql84_mgmt->mgmt.mgmtp.u.config.param1);
1209                break;
1210
1211        default:
1212                rval = -EIO;
1213                goto exit_mgmt;
1214        }
1215
1216        if (ql84_mgmt->mgmt.cmd != QLA84_MGMT_CHNG_CONFIG) {
1217                mn->total_byte_cnt = cpu_to_le32(ql84_mgmt->mgmt.len);
1218                mn->dseg_count = cpu_to_le16(1);
1219                mn->dseg_address[0] = cpu_to_le32(LSD(mgmt_dma));
1220                mn->dseg_address[1] = cpu_to_le32(MSD(mgmt_dma));
1221                mn->dseg_length = cpu_to_le32(ql84_mgmt->mgmt.len);
1222        }
1223
1224        rval = qla2x00_issue_iocb(vha, mn, mn_dma, 0);
1225
1226        if (rval) {
1227                ql_log(ql_log_warn, vha, 0x7043,
1228                    "Vendor request 84xx mgmt failed.\n");
1229
1230                rval = (DID_ERROR << 16);
1231
1232        } else {
1233                ql_dbg(ql_dbg_user, vha, 0x7044,
1234                    "Vendor request 84xx mgmt completed.\n");
1235
1236                bsg_job->reply_len = sizeof(struct fc_bsg_reply);
1237                bsg_job->reply->result = DID_OK;
1238
1239                if ((ql84_mgmt->mgmt.cmd == QLA84_MGMT_READ_MEM) ||
1240                        (ql84_mgmt->mgmt.cmd == QLA84_MGMT_GET_INFO)) {
1241                        bsg_job->reply->reply_payload_rcv_len =
1242                                bsg_job->reply_payload.payload_len;
1243
1244                        sg_copy_from_buffer(bsg_job->reply_payload.sg_list,
1245                                bsg_job->reply_payload.sg_cnt, mgmt_b,
1246                                data_len);
1247                }
1248        }
1249
1250done_unmap_sg:
1251        if (mgmt_b)
1252                dma_free_coherent(&ha->pdev->dev, data_len, mgmt_b, mgmt_dma);
1253
1254        if (dma_direction == DMA_TO_DEVICE)
1255                dma_unmap_sg(&ha->pdev->dev, bsg_job->request_payload.sg_list,
1256                        bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE);
1257        else if (dma_direction == DMA_FROM_DEVICE)
1258                dma_unmap_sg(&ha->pdev->dev, bsg_job->reply_payload.sg_list,
1259                        bsg_job->reply_payload.sg_cnt, DMA_FROM_DEVICE);
1260
1261exit_mgmt:
1262        dma_pool_free(ha->s_dma_pool, mn, mn_dma);
1263
1264        if (!rval)
1265                bsg_job->job_done(bsg_job);
1266        return rval;
1267}
1268
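/*
 * Vendor command: get or set the iIDMA port speed of the target port
 * whose WWPN is given in the qla_port_param header.  On a get, the
 * updated qla_port_param is returned after the fc_bsg_reply.
 */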
1269static int
1270qla24xx_iidma(struct fc_bsg_job *bsg_job)
1271{
1272        struct Scsi_Host *host = bsg_job->shost;
1273        scsi_qla_host_t *vha = shost_priv(host);
1274        int rval = 0;
1275        struct qla_port_param *port_param = NULL;
1276        fc_port_t *fcport = NULL;
1277        uint16_t mb[MAILBOX_REGISTER_COUNT];
1278        uint8_t *rsp_ptr = NULL;
1279
1280        if (!IS_IIDMA_CAPABLE(vha->hw)) {
1281                ql_log(ql_log_info, vha, 0x7046, "iiDMA not supported.\n");
1282                return -EINVAL;
1283        }
1284
1285        port_param = (struct qla_port_param *)((char *)bsg_job->request +
1286                sizeof(struct fc_bsg_request));
1287        if (!port_param) {
1288                ql_log(ql_log_warn, vha, 0x7047,
1289                    "port_param header not provided.\n");
1290                return -EINVAL;
1291        }
1292
1293        if (port_param->fc_scsi_addr.dest_type != EXT_DEF_TYPE_WWPN) {
1294                ql_log(ql_log_warn, vha, 0x7048,
1295                    "Invalid destination type.\n");
1296                return -EINVAL;
1297        }
1298
1299        list_for_each_entry(fcport, &vha->vp_fcports, list) {
1300                if (fcport->port_type != FCT_TARGET)
1301                        continue;
1302
1303                if (memcmp(port_param->fc_scsi_addr.dest_addr.wwpn,
1304                        fcport->port_name, sizeof(fcport->port_name)))
1305                        continue;
1306                break;
1307        }
1308
1309        if (!fcport) {
1310                ql_log(ql_log_warn, vha, 0x7049,
1311                    "Failed to find port.\n");
1312                return -EINVAL;
1313        }
1314
1315        if (atomic_read(&fcport->state) != FCS_ONLINE) {
1316                ql_log(ql_log_warn, vha, 0x704a,
1317                    "Port is not online.\n");
1318                return -EINVAL;
1319        }
1320
1321        if (fcport->flags & FCF_LOGIN_NEEDED) {
1322                ql_log(ql_log_warn, vha, 0x704b,
1323                    "Remote port not logged in flags = 0x%x.\n", fcport->flags);
1324                return -EINVAL;
1325        }
1326
1327        if (port_param->mode)
1328                rval = qla2x00_set_idma_speed(vha, fcport->loop_id,
1329                        port_param->speed, mb);
1330        else
1331                rval = qla2x00_get_idma_speed(vha, fcport->loop_id,
1332                        &port_param->speed, mb);
1333
1334        if (rval) {
1335                ql_log(ql_log_warn, vha, 0x704c,
1336                    "iIDMA cmd failed for %02x%02x%02x%02x%02x%02x%02x%02x -- "
1337                    "%04x %x %04x %04x.\n", fcport->port_name[0],
1338                    fcport->port_name[1], fcport->port_name[2],
1339                    fcport->port_name[3], fcport->port_name[4],
1340                    fcport->port_name[5], fcport->port_name[6],
1341                    fcport->port_name[7], rval, fcport->fp_speed, mb[0], mb[1]);
1342                rval = (DID_ERROR << 16);
1343        } else {
1344                if (!port_param->mode) {
1345                        bsg_job->reply_len = sizeof(struct fc_bsg_reply) +
1346                                sizeof(struct qla_port_param);
1347
1348                        rsp_ptr = ((uint8_t *)bsg_job->reply) +
1349                                sizeof(struct fc_bsg_reply);
1350
1351                        memcpy(rsp_ptr, port_param,
1352                                sizeof(struct qla_port_param));
1353                }
1354
1355                bsg_job->reply->result = DID_OK;
1356                bsg_job->job_done(bsg_job);
1357        }
1358
1359        return rval;
1360}
1361
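/*
 * Common setup for the QL_VND_READ_FLASH/QL_VND_UPDATE_FLASH vendor
 * commands: validate the requested option ROM region, mark the optrom
 * state busy (QLA_SREADING or QLA_SWRITING) and allocate a zeroed
 * staging buffer sized to the region, clamped to the option ROM size.
 */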
1362static int
1363qla2x00_optrom_setup(struct fc_bsg_job *bsg_job, scsi_qla_host_t *vha,
1364        uint8_t is_update)
1365{
1366        uint32_t start = 0;
1367        int valid = 0;
1368        struct qla_hw_data *ha = vha->hw;
1369
1370        if (unlikely(pci_channel_offline(ha->pdev)))
1371                return -EINVAL;
1372
1373        start = bsg_job->request->rqst_data.h_vendor.vendor_cmd[1];
1374        if (start > ha->optrom_size) {
1375                ql_log(ql_log_warn, vha, 0x7055,
1376                    "start %d > optrom_size %d.\n", start, ha->optrom_size);
1377                return -EINVAL;
1378        }
1379
1380        if (ha->optrom_state != QLA_SWAITING) {
1381                ql_log(ql_log_info, vha, 0x7056,
1382                    "optrom_state %d.\n", ha->optrom_state);
1383                return -EBUSY;
1384        }
1385
1386        ha->optrom_region_start = start;
1387        ql_dbg(ql_dbg_user, vha, 0x7057, "is_update=%d.\n", is_update);
1388        if (is_update) {
1389                if (ha->optrom_size == OPTROM_SIZE_2300 && start == 0)
1390                        valid = 1;
1391                else if (start == (ha->flt_region_boot * 4) ||
1392                    start == (ha->flt_region_fw * 4))
1393                        valid = 1;
1394                else if (IS_QLA24XX_TYPE(ha) || IS_QLA25XX(ha) ||
1395                    IS_CNA_CAPABLE(ha) || IS_QLA2031(ha))
1396                        valid = 1;
1397                if (!valid) {
1398                        ql_log(ql_log_warn, vha, 0x7058,
1399                            "Invalid start region 0x%x/0x%x.\n", start,
1400                            bsg_job->request_payload.payload_len);
1401                        return -EINVAL;
1402                }
1403
1404                ha->optrom_region_size = start +
1405                    bsg_job->request_payload.payload_len > ha->optrom_size ?
1406                    ha->optrom_size - start :
1407                    bsg_job->request_payload.payload_len;
1408                ha->optrom_state = QLA_SWRITING;
1409        } else {
1410                ha->optrom_region_size = start +
1411                    bsg_job->reply_payload.payload_len > ha->optrom_size ?
1412                    ha->optrom_size - start :
1413                    bsg_job->reply_payload.payload_len;
1414                ha->optrom_state = QLA_SREADING;
1415        }
1416
1417        ha->optrom_buffer = vmalloc(ha->optrom_region_size);
1418        if (!ha->optrom_buffer) {
1419                ql_log(ql_log_warn, vha, 0x7059,
1420                    "Unable to allocate memory for optrom operation "
1421                    "(%x).\n", ha->optrom_region_size);
1422
1423                ha->optrom_state = QLA_SWAITING;
1424                return -ENOMEM;
1425        }
1426
1427        memset(ha->optrom_buffer, 0, ha->optrom_region_size);
1428        return 0;
1429}
1430
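/*
 * QL_VND_READ_FLASH: read the requested option ROM region into the
 * staging buffer and copy it back through the bsg reply payload.
 */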
1431static int
1432qla2x00_read_optrom(struct fc_bsg_job *bsg_job)
1433{
1434        struct Scsi_Host *host = bsg_job->shost;
1435        scsi_qla_host_t *vha = shost_priv(host);
1436        struct qla_hw_data *ha = vha->hw;
1437        int rval = 0;
1438
1439        if (ha->flags.nic_core_reset_hdlr_active)
1440                return -EBUSY;
1441
1442        rval = qla2x00_optrom_setup(bsg_job, vha, 0);
1443        if (rval)
1444                return rval;
1445
1446        ha->isp_ops->read_optrom(vha, ha->optrom_buffer,
1447            ha->optrom_region_start, ha->optrom_region_size);
1448
1449        sg_copy_from_buffer(bsg_job->reply_payload.sg_list,
1450            bsg_job->reply_payload.sg_cnt, ha->optrom_buffer,
1451            ha->optrom_region_size);
1452
1453        bsg_job->reply->reply_payload_rcv_len = ha->optrom_region_size;
1454        bsg_job->reply->result = DID_OK;
1455        vfree(ha->optrom_buffer);
1456        ha->optrom_buffer = NULL;
1457        ha->optrom_state = QLA_SWAITING;
1458        bsg_job->job_done(bsg_job);
1459        return rval;
1460}
1461
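/*
 * QL_VND_UPDATE_FLASH: copy the bsg request payload into the staging
 * buffer and program it into the requested option ROM region.
 */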
1462static int
1463qla2x00_update_optrom(struct fc_bsg_job *bsg_job)
1464{
1465        struct Scsi_Host *host = bsg_job->shost;
1466        scsi_qla_host_t *vha = shost_priv(host);
1467        struct qla_hw_data *ha = vha->hw;
1468        int rval = 0;
1469
1470        rval = qla2x00_optrom_setup(bsg_job, vha, 1);
1471        if (rval)
1472                return rval;
1473
1474        /* Set the isp82xx_no_md_cap not to capture minidump */
1475        ha->flags.isp82xx_no_md_cap = 1;
1476
1477        sg_copy_to_buffer(bsg_job->request_payload.sg_list,
1478            bsg_job->request_payload.sg_cnt, ha->optrom_buffer,
1479            ha->optrom_region_size);
1480
1481        ha->isp_ops->write_optrom(vha, ha->optrom_buffer,
1482            ha->optrom_region_start, ha->optrom_region_size);
1483
1484        bsg_job->reply->result = DID_OK;
1485        vfree(ha->optrom_buffer);
1486        ha->optrom_buffer = NULL;
1487        ha->optrom_state = QLA_SWAITING;
1488        bsg_job->job_done(bsg_job);
1489        return rval;
1490}
1491
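/*
 * QL_VND_SET_FRU_VERSION: walk the image version list supplied in the
 * request payload and write each entry's field data through the SFP
 * write mailbox command.  Status is returned in vendor_rsp[0].
 */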
1492static int
1493qla2x00_update_fru_versions(struct fc_bsg_job *bsg_job)
1494{
1495        struct Scsi_Host *host = bsg_job->shost;
1496        scsi_qla_host_t *vha = shost_priv(host);
1497        struct qla_hw_data *ha = vha->hw;
1498        int rval = 0;
1499        uint8_t bsg[DMA_POOL_SIZE];
1500        struct qla_image_version_list *list = (void *)bsg;
1501        struct qla_image_version *image;
1502        uint32_t count;
1503        dma_addr_t sfp_dma;
1504        void *sfp = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, &sfp_dma);
1505        if (!sfp) {
1506                bsg_job->reply->reply_data.vendor_reply.vendor_rsp[0] =
1507                    EXT_STATUS_NO_MEMORY;
1508                goto done;
1509        }
1510
1511        sg_copy_to_buffer(bsg_job->request_payload.sg_list,
1512            bsg_job->request_payload.sg_cnt, list, sizeof(bsg));
1513
1514        image = list->version;
1515        count = list->count;
1516        while (count--) {
1517                memcpy(sfp, &image->field_info, sizeof(image->field_info));
1518                rval = qla2x00_write_sfp(vha, sfp_dma, sfp,
1519                    image->field_address.device, image->field_address.offset,
1520                    sizeof(image->field_info), image->field_address.option);
1521                if (rval) {
1522                        bsg_job->reply->reply_data.vendor_reply.vendor_rsp[0] =
1523                            EXT_STATUS_MAILBOX;
1524                        goto dealloc;
1525                }
1526                image++;
1527        }
1528
1529        bsg_job->reply->reply_data.vendor_reply.vendor_rsp[0] = 0;
1530
1531dealloc:
1532        dma_pool_free(ha->s_dma_pool, sfp, sfp_dma);
1533
1534done:
1535        bsg_job->reply_len = sizeof(struct fc_bsg_reply);
1536        bsg_job->reply->result = DID_OK << 16;
1537        bsg_job->job_done(bsg_job);
1538
1539        return 0;
1540}
1541
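/*
 * QL_VND_READ_FRU_STATUS: read the status register described by the
 * request payload via the SFP read mailbox command and return it in
 * the reply payload.
 */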
1542static int
1543qla2x00_read_fru_status(struct fc_bsg_job *bsg_job)
1544{
1545        struct Scsi_Host *host = bsg_job->shost;
1546        scsi_qla_host_t *vha = shost_priv(host);
1547        struct qla_hw_data *ha = vha->hw;
1548        int rval = 0;
1549        uint8_t bsg[DMA_POOL_SIZE];
1550        struct qla_status_reg *sr = (void *)bsg;
1551        dma_addr_t sfp_dma;
1552        uint8_t *sfp = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, &sfp_dma);
1553        if (!sfp) {
1554                bsg_job->reply->reply_data.vendor_reply.vendor_rsp[0] =
1555                    EXT_STATUS_NO_MEMORY;
1556                goto done;
1557        }
1558
1559        sg_copy_to_buffer(bsg_job->request_payload.sg_list,
1560            bsg_job->request_payload.sg_cnt, sr, sizeof(*sr));
1561
1562        rval = qla2x00_read_sfp(vha, sfp_dma, sfp,
1563            sr->field_address.device, sr->field_address.offset,
1564            sizeof(sr->status_reg), sr->field_address.option);
1565        sr->status_reg = *sfp;
1566
1567        if (rval) {
1568                bsg_job->reply->reply_data.vendor_reply.vendor_rsp[0] =
1569                    EXT_STATUS_MAILBOX;
1570                goto dealloc;
1571        }
1572
1573        sg_copy_from_buffer(bsg_job->reply_payload.sg_list,
1574            bsg_job->reply_payload.sg_cnt, sr, sizeof(*sr));
1575
1576        bsg_job->reply->reply_data.vendor_reply.vendor_rsp[0] = 0;
1577
1578dealloc:
1579        dma_pool_free(ha->s_dma_pool, sfp, sfp_dma);
1580
1581done:
1582        bsg_job->reply_len = sizeof(struct fc_bsg_reply);
1583        bsg_job->reply->reply_payload_rcv_len = sizeof(*sr);
1584        bsg_job->reply->result = DID_OK << 16;
1585        bsg_job->job_done(bsg_job);
1586
1587        return 0;
1588}
1589
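/*
 * QL_VND_WRITE_FRU_STATUS: write the status register value supplied in
 * the request payload via the SFP write mailbox command.
 */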
1590static int
1591qla2x00_write_fru_status(struct fc_bsg_job *bsg_job)
1592{
1593        struct Scsi_Host *host = bsg_job->shost;
1594        scsi_qla_host_t *vha = shost_priv(host);
1595        struct qla_hw_data *ha = vha->hw;
1596        int rval = 0;
1597        uint8_t bsg[DMA_POOL_SIZE];
1598        struct qla_status_reg *sr = (void *)bsg;
1599        dma_addr_t sfp_dma;
1600        uint8_t *sfp = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, &sfp_dma);
1601        if (!sfp) {
1602                bsg_job->reply->reply_data.vendor_reply.vendor_rsp[0] =
1603                    EXT_STATUS_NO_MEMORY;
1604                goto done;
1605        }
1606
1607        sg_copy_to_buffer(bsg_job->request_payload.sg_list,
1608            bsg_job->request_payload.sg_cnt, sr, sizeof(*sr));
1609
1610        *sfp = sr->status_reg;
1611        rval = qla2x00_write_sfp(vha, sfp_dma, sfp,
1612            sr->field_address.device, sr->field_address.offset,
1613            sizeof(sr->status_reg), sr->field_address.option);
1614
1615        if (rval) {
1616                bsg_job->reply->reply_data.vendor_reply.vendor_rsp[0] =
1617                    EXT_STATUS_MAILBOX;
1618                goto dealloc;
1619        }
1620
1621        bsg_job->reply->reply_data.vendor_reply.vendor_rsp[0] = 0;
1622
1623dealloc:
1624        dma_pool_free(ha->s_dma_pool, sfp, sfp_dma);
1625
1626done:
1627        bsg_job->reply_len = sizeof(struct fc_bsg_reply);
1628        bsg_job->reply->result = DID_OK << 16;
1629        bsg_job->job_done(bsg_job);
1630
1631        return 0;
1632}
1633
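/*
 * QL_VND_WRITE_I2C: write the caller supplied buffer to the given I2C
 * device/offset via the SFP write mailbox command.
 */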
1634static int
1635qla2x00_write_i2c(struct fc_bsg_job *bsg_job)
1636{
1637        struct Scsi_Host *host = bsg_job->shost;
1638        scsi_qla_host_t *vha = shost_priv(host);
1639        struct qla_hw_data *ha = vha->hw;
1640        int rval = 0;
1641        uint8_t bsg[DMA_POOL_SIZE];
1642        struct qla_i2c_access *i2c = (void *)bsg;
1643        dma_addr_t sfp_dma;
1644        uint8_t *sfp = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, &sfp_dma);
1645        if (!sfp) {
1646                bsg_job->reply->reply_data.vendor_reply.vendor_rsp[0] =
1647                    EXT_STATUS_NO_MEMORY;
1648                goto done;
1649        }
1650
1651        sg_copy_to_buffer(bsg_job->request_payload.sg_list,
1652            bsg_job->request_payload.sg_cnt, i2c, sizeof(*i2c));
1653
1654        memcpy(sfp, i2c->buffer, i2c->length);
1655        rval = qla2x00_write_sfp(vha, sfp_dma, sfp,
1656            i2c->device, i2c->offset, i2c->length, i2c->option);
1657
1658        if (rval) {
1659                bsg_job->reply->reply_data.vendor_reply.vendor_rsp[0] =
1660                    EXT_STATUS_MAILBOX;
1661                goto dealloc;
1662        }
1663
1664        bsg_job->reply->reply_data.vendor_reply.vendor_rsp[0] = 0;
1665
1666dealloc:
1667        dma_pool_free(ha->s_dma_pool, sfp, sfp_dma);
1668
1669done:
1670        bsg_job->reply_len = sizeof(struct fc_bsg_reply);
1671        bsg_job->reply->result = DID_OK << 16;
1672        bsg_job->job_done(bsg_job);
1673
1674        return 0;
1675}
1676
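/*
 * QL_VND_READ_I2C: read from the given I2C device/offset via the SFP
 * read mailbox command and return the data in the reply payload.
 */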
1677static int
1678qla2x00_read_i2c(struct fc_bsg_job *bsg_job)
1679{
1680        struct Scsi_Host *host = bsg_job->shost;
1681        scsi_qla_host_t *vha = shost_priv(host);
1682        struct qla_hw_data *ha = vha->hw;
1683        int rval = 0;
1684        uint8_t bsg[DMA_POOL_SIZE];
1685        struct qla_i2c_access *i2c = (void *)bsg;
1686        dma_addr_t sfp_dma;
1687        uint8_t *sfp = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, &sfp_dma);
1688        if (!sfp) {
1689                bsg_job->reply->reply_data.vendor_reply.vendor_rsp[0] =
1690                    EXT_STATUS_NO_MEMORY;
1691                goto done;
1692        }
1693
1694        sg_copy_to_buffer(bsg_job->request_payload.sg_list,
1695            bsg_job->request_payload.sg_cnt, i2c, sizeof(*i2c));
1696
1697        rval = qla2x00_read_sfp(vha, sfp_dma, sfp,
1698                i2c->device, i2c->offset, i2c->length, i2c->option);
1699
1700        if (rval) {
1701                bsg_job->reply->reply_data.vendor_reply.vendor_rsp[0] =
1702                    EXT_STATUS_MAILBOX;
1703                goto dealloc;
1704        }
1705
1706        memcpy(i2c->buffer, sfp, i2c->length);
1707        sg_copy_from_buffer(bsg_job->reply_payload.sg_list,
1708            bsg_job->reply_payload.sg_cnt, i2c, sizeof(*i2c));
1709
1710        bsg_job->reply->reply_data.vendor_reply.vendor_rsp[0] = 0;
1711
1712dealloc:
1713        dma_pool_free(ha->s_dma_pool, sfp, sfp_dma);
1714
1715done:
1716        bsg_job->reply_len = sizeof(struct fc_bsg_reply);
1717        bsg_job->reply->reply_payload_rcv_len = sizeof(*i2c);
1718        bsg_job->reply->result = DID_OK << 16;
1719        bsg_job->job_done(bsg_job);
1720
1721        return 0;
1722}
1723
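/*
 * QL_VND_DIAG_IO_CMD: bidirectional diagnostic pass-through.  Requires
 * a BIDI capable adapter connected to a switch in P2P mode.  Performs a
 * one-time self-login, maps both payloads and issues an SRB_BIDI_CMD;
 * the completion status is returned in vendor_rsp[0].
 */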
1724static int
1725qla24xx_process_bidir_cmd(struct fc_bsg_job *bsg_job)
1726{
1727        struct Scsi_Host *host = bsg_job->shost;
1728        scsi_qla_host_t *vha = shost_priv(host);
1729        struct qla_hw_data *ha = vha->hw;
1730        uint16_t thread_id;
1731        uint32_t rval = EXT_STATUS_OK;
1732        uint16_t req_sg_cnt = 0;
1733        uint16_t rsp_sg_cnt = 0;
1734        uint16_t nextlid = 0;
1735        uint32_t tot_dsds;
1736        srb_t *sp = NULL;
1737        uint32_t req_data_len = 0;
1738        uint32_t rsp_data_len = 0;
1739
1740        /* Check the type of the adapter */
1741        if (!IS_BIDI_CAPABLE(ha)) {
1742                ql_log(ql_log_warn, vha, 0x70a0,
1743                        "Adapter does not support bidirectional commands\n");
1744                rval = EXT_STATUS_NOT_SUPPORTED;
1745                goto done;
1746        }
1747
1748        if (test_bit(ISP_ABORT_NEEDED, &vha->dpc_flags) ||
1749                test_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags) ||
1750                test_bit(ISP_ABORT_RETRY, &vha->dpc_flags)) {
1751                rval =  EXT_STATUS_BUSY;
1752                goto done;
1753        }
1754
1755        /* Check if host is online */
1756        if (!vha->flags.online) {
1757                ql_log(ql_log_warn, vha, 0x70a1,
1758                        "Host is not online\n");
1759                rval = EXT_STATUS_DEVICE_OFFLINE;
1760                goto done;
1761        }
1762
1763        /* Check if cable is plugged in or not */
1764        if (vha->device_flags & DFLG_NO_CABLE) {
1765                ql_log(ql_log_warn, vha, 0x70a2,
1766                        "Cable is unplugged...\n");
1767                rval = EXT_STATUS_INVALID_CFG;
1768                goto done;
1769        }
1770
1771        /* Check if the switch is connected or not */
1772        if (ha->current_topology != ISP_CFG_F) {
1773                ql_log(ql_log_warn, vha, 0x70a3,
1774                        "Host is not connected to the switch\n");
1775                rval = EXT_STATUS_INVALID_CFG;
1776                goto done;
1777        }
1778
1779        /* Check if operating mode is P2P */
1780        if (ha->operating_mode != P2P) {
1781                ql_log(ql_log_warn, vha, 0x70a4,
1782                    "Host operating mode is not P2P\n");
1783                rval = EXT_STATUS_INVALID_CFG;
1784                goto done;
1785        }
1786
1787        thread_id = bsg_job->request->rqst_data.h_vendor.vendor_cmd[1];
1788
1789        mutex_lock(&ha->selflogin_lock);
1790        if (vha->self_login_loop_id == 0) {
1791                /* Initialize all required fields of fcport */
1792                vha->bidir_fcport.vha = vha;
1793                vha->bidir_fcport.d_id.b.al_pa = vha->d_id.b.al_pa;
1794                vha->bidir_fcport.d_id.b.area = vha->d_id.b.area;
1795                vha->bidir_fcport.d_id.b.domain = vha->d_id.b.domain;
1796                vha->bidir_fcport.loop_id = vha->loop_id;
1797
1798                if (qla2x00_fabric_login(vha, &(vha->bidir_fcport), &nextlid)) {
1799                        ql_log(ql_log_warn, vha, 0x70a7,
1800                            "Failed to login port %06X for bidirectional IOCB\n",
1801                            vha->bidir_fcport.d_id.b24);
1802                        mutex_unlock(&ha->selflogin_lock);
1803                        rval = EXT_STATUS_MAILBOX;
1804                        goto done;
1805                }
1806                vha->self_login_loop_id = nextlid - 1;
1807
1808        }
1809        mutex_unlock(&ha->selflogin_lock);
1810
1811        /* Assign the self login loop id to fcport */
1812        vha->bidir_fcport.loop_id = vha->self_login_loop_id;
1813
1814        req_sg_cnt = dma_map_sg(&ha->pdev->dev,
1815                bsg_job->request_payload.sg_list,
1816                bsg_job->request_payload.sg_cnt,
1817                DMA_TO_DEVICE);
1818
1819        if (!req_sg_cnt) {
1820                rval = EXT_STATUS_NO_MEMORY;
1821                goto done;
1822        }
1823
1824        rsp_sg_cnt = dma_map_sg(&ha->pdev->dev,
1825                bsg_job->reply_payload.sg_list, bsg_job->reply_payload.sg_cnt,
1826                DMA_FROM_DEVICE);
1827
1828        if (!rsp_sg_cnt) {
1829                rval = EXT_STATUS_NO_MEMORY;
1830                goto done_unmap_req_sg;
1831        }
1832
1833        if ((req_sg_cnt !=  bsg_job->request_payload.sg_cnt) ||
1834                (rsp_sg_cnt != bsg_job->reply_payload.sg_cnt)) {
1835                ql_dbg(ql_dbg_user, vha, 0x70a9,
1836                    "Dma mapping resulted in different sg counts "
1837                    "[request_sg_cnt: %x dma_request_sg_cnt: %x reply_sg_cnt: "
1838                    "%x dma_reply_sg_cnt: %x]\n",
1839                    bsg_job->request_payload.sg_cnt, req_sg_cnt,
1840                    bsg_job->reply_payload.sg_cnt, rsp_sg_cnt);
1841                rval = EXT_STATUS_NO_MEMORY;
1842                goto done_unmap_sg;
1843        }
1844
1845        req_data_len = bsg_job->request_payload.payload_len;
1846        rsp_data_len = bsg_job->reply_payload.payload_len;
1847
1848        if (req_data_len != rsp_data_len) {
1849                rval = EXT_STATUS_BUSY;
1850                ql_log(ql_log_warn, vha, 0x70aa,
1851                    "req_data_len != rsp_data_len\n");
1852                goto done_unmap_sg;
1853        }
1854
1855
1856        /* Alloc SRB structure */
1857        sp = qla2x00_get_sp(vha, &(vha->bidir_fcport), GFP_KERNEL);
1858        if (!sp) {
1859                ql_dbg(ql_dbg_user, vha, 0x70ac,
1860                    "Alloc SRB structure failed\n");
1861                rval = EXT_STATUS_NO_MEMORY;
1862                goto done_unmap_sg;
1863        }
1864
1865        /* Populate srb->ctx with bidir ctx */
1866        sp->u.bsg_job = bsg_job;
1867        sp->free = qla2x00_bsg_sp_free;
1868        sp->type = SRB_BIDI_CMD;
1869        sp->done = qla2x00_bsg_job_done;
1870
1871        /* Add the read and write sg count */
1872        tot_dsds = rsp_sg_cnt + req_sg_cnt;
1873
1874        rval = qla2x00_start_bidir(sp, vha, tot_dsds);
1875        if (rval != EXT_STATUS_OK)
1876                goto done_free_srb;
1877        /* The bsg request will be completed in the interrupt handler */
1878        return rval;
1879
1880done_free_srb:
1881        mempool_free(sp, ha->srb_mempool);
1882done_unmap_sg:
1883        dma_unmap_sg(&ha->pdev->dev,
1884            bsg_job->reply_payload.sg_list,
1885            bsg_job->reply_payload.sg_cnt, DMA_FROM_DEVICE);
1886done_unmap_req_sg:
1887        dma_unmap_sg(&ha->pdev->dev,
1888            bsg_job->request_payload.sg_list,
1889            bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE);
1890done:
1891
1892        /* Return an error vendor-specific response
1893         * and complete the bsg request.
1894         */
1895        bsg_job->reply->reply_data.vendor_reply.vendor_rsp[0] = rval;
1896        bsg_job->reply_len = sizeof(struct fc_bsg_reply);
1897        bsg_job->reply->reply_payload_rcv_len = 0;
1898        bsg_job->reply->result = (DID_OK) << 16;
1899        bsg_job->job_done(bsg_job);
1900        /* Always return success; the vendor response carries the actual status */
1901        return 0;
1902}
1903
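/*
 * QL_VND_FX00_MGMT_CMD: pass a management IOCB through to an ISPFX00
 * adapter.  The IOCB request follows the vendor command header; DMA
 * mappings are set up only for the directions flagged in the request.
 */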
1904static int
1905qlafx00_mgmt_cmd(struct fc_bsg_job *bsg_job)
1906{
1907        struct Scsi_Host *host = bsg_job->shost;
1908        scsi_qla_host_t *vha = shost_priv(host);
1909        struct qla_hw_data *ha = vha->hw;
1910        int rval = (DRIVER_ERROR << 16);
1911        struct qla_mt_iocb_rqst_fx00 *piocb_rqst;
1912        srb_t *sp;
1913        int req_sg_cnt = 0, rsp_sg_cnt = 0;
1914        struct fc_port *fcport;
1915        char  *type = "FC_BSG_HST_FX_MGMT";
1916
1917        /* Copy the IOCB specific information */
1918        piocb_rqst = (struct qla_mt_iocb_rqst_fx00 *)
1919            &bsg_job->request->rqst_data.h_vendor.vendor_cmd[1];
1920
1921        /* Dump the vendor information */
1922        ql_dump_buffer(ql_dbg_user + ql_dbg_verbose, vha, 0x70cf,
1923            (uint8_t *)piocb_rqst, sizeof(struct qla_mt_iocb_rqst_fx00));
1924
1925        if (!vha->flags.online) {
1926                ql_log(ql_log_warn, vha, 0x70d0,
1927                    "Host is not online.\n");
1928                rval = -EIO;
1929                goto done;
1930        }
1931
1932        if (piocb_rqst->flags & SRB_FXDISC_REQ_DMA_VALID) {
1933                req_sg_cnt = dma_map_sg(&ha->pdev->dev,
1934                    bsg_job->request_payload.sg_list,
1935                    bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE);
1936                if (!req_sg_cnt) {
1937                        ql_log(ql_log_warn, vha, 0x70c7,
1938                            "dma_map_sg return %d for request\n", req_sg_cnt);
1939                        rval = -ENOMEM;
1940                        goto done;
1941                }
1942        }
1943
1944        if (piocb_rqst->flags & SRB_FXDISC_RESP_DMA_VALID) {
1945                rsp_sg_cnt = dma_map_sg(&ha->pdev->dev,
1946                    bsg_job->reply_payload.sg_list,
1947                    bsg_job->reply_payload.sg_cnt, DMA_FROM_DEVICE);
1948                if (!rsp_sg_cnt) {
1949                        ql_log(ql_log_warn, vha, 0x70c8,
1950                            "dma_map_sg return %d for reply\n", rsp_sg_cnt);
1951                        rval = -ENOMEM;
1952                        goto done_unmap_req_sg;
1953                }
1954        }
1955
1956        ql_dbg(ql_dbg_user, vha, 0x70c9,
1957            "request_sg_cnt: %x dma_request_sg_cnt: %x reply_sg_cnt:%x "
1958            "dma_reply_sg_cnt: %x\n", bsg_job->request_payload.sg_cnt,
1959            req_sg_cnt, bsg_job->reply_payload.sg_cnt, rsp_sg_cnt);
1960
1961        /* Allocate a dummy fcport structure, since the functions preparing
1962         * the IOCB and mailbox command retrieve port-specific information
1963         * from the fcport structure. For host-based commands there is no
1964         * fcport structure already allocated.
1965         */
1966        fcport = qla2x00_alloc_fcport(vha, GFP_KERNEL);
1967        if (!fcport) {
1968                ql_log(ql_log_warn, vha, 0x70ca,
1969                    "Failed to allocate fcport.\n");
1970                rval = -ENOMEM;
1971                goto done_unmap_rsp_sg;
1972        }
1973
1974        /* Alloc SRB structure */
1975        sp = qla2x00_get_sp(vha, fcport, GFP_KERNEL);
1976        if (!sp) {
1977                ql_log(ql_log_warn, vha, 0x70cb,
1978                    "qla2x00_get_sp failed.\n");
1979                rval = -ENOMEM;
1980                goto done_free_fcport;
1981        }
1982
1983        /* Initialize all required fields of fcport */
1984        fcport->vha = vha;
1985        fcport->loop_id = piocb_rqst->dataword;
1986
1987        sp->type = SRB_FXIOCB_BCMD;
1988        sp->name = "bsg_fx_mgmt";
1989        sp->iocbs = qla24xx_calc_ct_iocbs(req_sg_cnt + rsp_sg_cnt);
1990        sp->u.bsg_job = bsg_job;
1991        sp->free = qla2x00_bsg_sp_free;
1992        sp->done = qla2x00_bsg_job_done;
1993
1994        ql_dbg(ql_dbg_user, vha, 0x70cc,
1995            "bsg rqst type: %s fx_mgmt_type: %x id=%x\n",
1996            type, piocb_rqst->func_type, fcport->loop_id);
1997
1998        rval = qla2x00_start_sp(sp);
1999        if (rval != QLA_SUCCESS) {
2000                ql_log(ql_log_warn, vha, 0x70cd,
2001                    "qla2x00_start_sp failed=%d.\n", rval);
2002                mempool_free(sp, ha->srb_mempool);
2003                rval = -EIO;
2004                goto done_free_fcport;
2005        }
2006        return rval;
2007
2008done_free_fcport:
2009        kfree(fcport);
2010
2011done_unmap_rsp_sg:
2012        if (piocb_rqst->flags & SRB_FXDISC_RESP_DMA_VALID)
2013                dma_unmap_sg(&ha->pdev->dev,
2014                    bsg_job->reply_payload.sg_list,
2015                    bsg_job->reply_payload.sg_cnt, DMA_FROM_DEVICE);
2016done_unmap_req_sg:
2017        if (piocb_rqst->flags & SRB_FXDISC_REQ_DMA_VALID)
2018                dma_unmap_sg(&ha->pdev->dev,
2019                    bsg_job->request_payload.sg_list,
2020                    bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE);
2021
2022done:
2023        return rval;
2024}
2025
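/*
 * Dispatch an FC_BSG_HST_VENDOR request.  vendor_cmd[0] carries the
 * QL_VND_* sub-command; some handlers consume further words (for
 * example, vendor_cmd[1] holds the flash offset for the read/update
 * flash commands).  Unknown sub-commands return -ENOSYS.
 */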
2026static int
2027qla2x00_process_vendor_specific(struct fc_bsg_job *bsg_job)
2028{
2029        switch (bsg_job->request->rqst_data.h_vendor.vendor_cmd[0]) {
2030        case QL_VND_LOOPBACK:
2031                return qla2x00_process_loopback(bsg_job);
2032
2033        case QL_VND_A84_RESET:
2034                return qla84xx_reset(bsg_job);
2035
2036        case QL_VND_A84_UPDATE_FW:
2037                return qla84xx_updatefw(bsg_job);
2038
2039        case QL_VND_A84_MGMT_CMD:
2040                return qla84xx_mgmt_cmd(bsg_job);
2041
2042        case QL_VND_IIDMA:
2043                return qla24xx_iidma(bsg_job);
2044
2045        case QL_VND_FCP_PRIO_CFG_CMD:
2046                return qla24xx_proc_fcp_prio_cfg_cmd(bsg_job);
2047
2048        case QL_VND_READ_FLASH:
2049                return qla2x00_read_optrom(bsg_job);
2050
2051        case QL_VND_UPDATE_FLASH:
2052                return qla2x00_update_optrom(bsg_job);
2053
2054        case QL_VND_SET_FRU_VERSION:
2055                return qla2x00_update_fru_versions(bsg_job);
2056
2057        case QL_VND_READ_FRU_STATUS:
2058                return qla2x00_read_fru_status(bsg_job);
2059
2060        case QL_VND_WRITE_FRU_STATUS:
2061                return qla2x00_write_fru_status(bsg_job);
2062
2063        case QL_VND_WRITE_I2C:
2064                return qla2x00_write_i2c(bsg_job);
2065
2066        case QL_VND_READ_I2C:
2067                return qla2x00_read_i2c(bsg_job);
2068
2069        case QL_VND_DIAG_IO_CMD:
2070                return qla24xx_process_bidir_cmd(bsg_job);
2071
2072        case QL_VND_FX00_MGMT_CMD:
2073                return qlafx00_mgmt_cmd(bsg_job);
2074        default:
2075                return -ENOSYS;
2076        }
2077}
2078
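/*
 * Entry point for all bsg requests routed to this driver.  Rejects
 * requests while an ISP reset is active or pending, then dispatches on
 * the transport message code.
 */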
2079int
2080qla24xx_bsg_request(struct fc_bsg_job *bsg_job)
2081{
2082        int ret = -EINVAL;
2083        struct fc_rport *rport;
2084        fc_port_t *fcport = NULL;
2085        struct Scsi_Host *host;
2086        scsi_qla_host_t *vha;
2087
2088        /* In case no data transferred. */
2089        bsg_job->reply->reply_payload_rcv_len = 0;
2090
2091        if (bsg_job->request->msgcode == FC_BSG_RPT_ELS) {
2092                rport = bsg_job->rport;
2093                fcport = *(fc_port_t **) rport->dd_data;
2094                host = rport_to_shost(rport);
2095                vha = shost_priv(host);
2096        } else {
2097                host = bsg_job->shost;
2098                vha = shost_priv(host);
2099        }
2100
2101        if (qla2x00_reset_active(vha)) {
2102                ql_dbg(ql_dbg_user, vha, 0x709f,
2103                    "BSG: ISP abort active/needed -- cmd=%d.\n",
2104                    bsg_job->request->msgcode);
2105                return -EBUSY;
2106        }
2107
2108        ql_dbg(ql_dbg_user, vha, 0x7000,
2109            "Entered %s msgcode=0x%x.\n", __func__, bsg_job->request->msgcode);
2110
2111        switch (bsg_job->request->msgcode) {
2112        case FC_BSG_RPT_ELS:
2113        case FC_BSG_HST_ELS_NOLOGIN:
2114                ret = qla2x00_process_els(bsg_job);
2115                break;
2116        case FC_BSG_HST_CT:
2117                ret = qla2x00_process_ct(bsg_job);
2118                break;
2119        case FC_BSG_HST_VENDOR:
2120                ret = qla2x00_process_vendor_specific(bsg_job);
2121                break;
2122        case FC_BSG_HST_ADD_RPORT:
2123        case FC_BSG_HST_DEL_RPORT:
2124        case FC_BSG_RPT_CT:
2125        default:
2126                ql_log(ql_log_warn, vha, 0x705a, "Unsupported BSG request.\n");
2127                break;
2128        }
2129        return ret;
2130}
2131
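/*
 * bsg timeout handler: locate the srb that owns the timed-out job in
 * the outstanding command arrays and ask the firmware to abort it.
 */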
2132int
2133qla24xx_bsg_timeout(struct fc_bsg_job *bsg_job)
2134{
2135        scsi_qla_host_t *vha = shost_priv(bsg_job->shost);
2136        struct qla_hw_data *ha = vha->hw;
2137        srb_t *sp;
2138        int cnt, que;
2139        unsigned long flags;
2140        struct req_que *req;
2141
2142        /* find the bsg job from the active list of commands */
2143        spin_lock_irqsave(&ha->hardware_lock, flags);
2144        for (que = 0; que < ha->max_req_queues; que++) {
2145                req = ha->req_q_map[que];
2146                if (!req)
2147                        continue;
2148
2149                for (cnt = 1; cnt < req->num_outstanding_cmds; cnt++) {
2150                        sp = req->outstanding_cmds[cnt];
2151                        if (sp) {
2152                                if (((sp->type == SRB_CT_CMD) ||
2153                                        (sp->type == SRB_ELS_CMD_HST) ||
2154                                        (sp->type == SRB_FXIOCB_BCMD))
2155                                        && (sp->u.bsg_job == bsg_job)) {
2156                                        spin_unlock_irqrestore(&ha->hardware_lock, flags);
2157                                        if (ha->isp_ops->abort_command(sp)) {
2158                                                ql_log(ql_log_warn, vha, 0x7089,
2159                                                    "mbx abort_command "
2160                                                    "failed.\n");
2161                                                bsg_job->req->errors =
2162                                                bsg_job->reply->result = -EIO;
2163                                        } else {
2164                                                ql_dbg(ql_dbg_user, vha, 0x708a,
2165                                                    "mbx abort_command "
2166                                                    "success.\n");
2167                                                bsg_job->req->errors =
2168                                                bsg_job->reply->result = 0;
2169                                        }
2170                                        spin_lock_irqsave(&ha->hardware_lock, flags);
2171                                        goto done;
2172                                }
2173                        }
2174                }
2175        }
2176        spin_unlock_irqrestore(&ha->hardware_lock, flags);
2177        ql_log(ql_log_info, vha, 0x708b, "SRB not found to abort.\n");
2178        bsg_job->req->errors = bsg_job->reply->result = -ENXIO;
2179        return 0;
2180
2181done:
2182        spin_unlock_irqrestore(&ha->hardware_lock, flags);
2183        if (bsg_job->request->msgcode == FC_BSG_HST_CT)
2184                kfree(sp->fcport);
2185        qla2x00_rel_sp(vha, sp);
2186        return 0;
2187}
2188