/* linux/drivers/scsi/qla2xxx/qla_bsg.c */
   1        /*
   2 * QLogic Fibre Channel HBA Driver
   3 * Copyright (c)  2003-2014 QLogic Corporation
   4 *
   5 * See LICENSE.qla2xxx for copyright and licensing details.
   6 */
   7#include "qla_def.h"
   8
   9#include <linux/kthread.h>
  10#include <linux/vmalloc.h>
  11#include <linux/delay.h>
  12#include <linux/bsg-lib.h>
  13
  14/* BSG support for ELS/CT pass through */
  15void
  16qla2x00_bsg_job_done(void *ptr, int res)
  17{
  18        srb_t *sp = ptr;
  19        struct bsg_job *bsg_job = sp->u.bsg_job;
  20        struct fc_bsg_reply *bsg_reply = bsg_job->reply;
  21
  22        bsg_reply->result = res;
  23        bsg_job_done(bsg_job, bsg_reply->result,
  24                       bsg_reply->reply_payload_rcv_len);
  25        sp->free(sp);
  26}
  27
  28void
  29qla2x00_bsg_sp_free(void *ptr)
  30{
  31        srb_t *sp = ptr;
  32        struct qla_hw_data *ha = sp->vha->hw;
  33        struct bsg_job *bsg_job = sp->u.bsg_job;
  34        struct fc_bsg_request *bsg_request = bsg_job->request;
  35        struct qla_mt_iocb_rqst_fx00 *piocb_rqst;
  36
  37        if (sp->type == SRB_FXIOCB_BCMD) {
  38                piocb_rqst = (struct qla_mt_iocb_rqst_fx00 *)
  39                    &bsg_request->rqst_data.h_vendor.vendor_cmd[1];
  40
  41                if (piocb_rqst->flags & SRB_FXDISC_REQ_DMA_VALID)
  42                        dma_unmap_sg(&ha->pdev->dev,
  43                            bsg_job->request_payload.sg_list,
  44                            bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE);
  45
  46                if (piocb_rqst->flags & SRB_FXDISC_RESP_DMA_VALID)
  47                        dma_unmap_sg(&ha->pdev->dev,
  48                            bsg_job->reply_payload.sg_list,
  49                            bsg_job->reply_payload.sg_cnt, DMA_FROM_DEVICE);
  50        } else {
  51                dma_unmap_sg(&ha->pdev->dev, bsg_job->request_payload.sg_list,
  52                    bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE);
  53
  54                dma_unmap_sg(&ha->pdev->dev, bsg_job->reply_payload.sg_list,
  55                    bsg_job->reply_payload.sg_cnt, DMA_FROM_DEVICE);
  56        }
  57
  58        if (sp->type == SRB_CT_CMD ||
  59            sp->type == SRB_FXIOCB_BCMD ||
  60            sp->type == SRB_ELS_CMD_HST)
  61                kfree(sp->fcport);
  62        qla2x00_rel_sp(sp);
  63}
  64
  65int
  66qla24xx_fcp_prio_cfg_valid(scsi_qla_host_t *vha,
  67        struct qla_fcp_prio_cfg *pri_cfg, uint8_t flag)
  68{
  69        int i, ret, num_valid;
  70        uint8_t *bcode;
  71        struct qla_fcp_prio_entry *pri_entry;
  72        uint32_t *bcode_val_ptr, bcode_val;
  73
  74        ret = 1;
  75        num_valid = 0;
  76        bcode = (uint8_t *)pri_cfg;
  77        bcode_val_ptr = (uint32_t *)pri_cfg;
  78        bcode_val = (uint32_t)(*bcode_val_ptr);
  79
  80        if (bcode_val == 0xFFFFFFFF) {
  81                /* No FCP Priority config data in flash */
  82                ql_dbg(ql_dbg_user, vha, 0x7051,
  83                    "No FCP Priority config data.\n");
  84                return 0;
  85        }
  86
  87        if (bcode[0] != 'H' || bcode[1] != 'Q' || bcode[2] != 'O' ||
  88                        bcode[3] != 'S') {
  89                /* Invalid FCP priority data header*/
  90                ql_dbg(ql_dbg_user, vha, 0x7052,
  91                    "Invalid FCP Priority data header. bcode=0x%x.\n",
  92                    bcode_val);
  93                return 0;
  94        }
  95        if (flag != 1)
  96                return ret;
  97
  98        pri_entry = &pri_cfg->entry[0];
  99        for (i = 0; i < pri_cfg->num_entries; i++) {
 100                if (pri_entry->flags & FCP_PRIO_ENTRY_TAG_VALID)
 101                        num_valid++;
 102                pri_entry++;
 103        }
 104
 105        if (num_valid == 0) {
 106                /* No valid FCP priority data entries */
 107                ql_dbg(ql_dbg_user, vha, 0x7053,
 108                    "No valid FCP Priority data entries.\n");
 109                ret = 0;
 110        } else {
 111                /* FCP priority data is valid */
 112                ql_dbg(ql_dbg_user, vha, 0x7054,
 113                    "Valid FCP priority data. num entries = %d.\n",
 114                    num_valid);
 115        }
 116
 117        return ret;
 118}
 119
/*
 * Handle the FCP priority configuration vendor sub-commands
 * (disable / enable / get config / set config) delivered via BSG.
 *
 * On success (return 0) the job is completed here through
 * bsg_job_done(); on failure a negative errno is returned and the
 * caller is responsible for the job.
 */
static int
qla24xx_proc_fcp_prio_cfg_cmd(struct bsg_job *bsg_job)
{
	struct Scsi_Host *host = fc_bsg_to_shost(bsg_job);
	struct fc_bsg_request *bsg_request = bsg_job->request;
	struct fc_bsg_reply *bsg_reply = bsg_job->reply;
	scsi_qla_host_t *vha = shost_priv(host);
	struct qla_hw_data *ha = vha->hw;
	int ret = 0;
	uint32_t len;
	uint32_t oper;

	/* FCP priority is only supported on 24xx-type, 25xx and P3P HBAs. */
	if (!(IS_QLA24XX_TYPE(ha) || IS_QLA25XX(ha) || IS_P3P_TYPE(ha))) {
		ret = -EINVAL;
		goto exit_fcp_prio_cfg;
	}

	/* Get the sub command */
	oper = bsg_request->rqst_data.h_vendor.vendor_cmd[1];

	/* Only set config is allowed if config memory is not allocated */
	if (!ha->fcp_prio_cfg && (oper != QLFC_FCP_PRIO_SET_CONFIG)) {
		ret = -EINVAL;
		goto exit_fcp_prio_cfg;
	}
	switch (oper) {
	case QLFC_FCP_PRIO_DISABLE:
		if (ha->flags.fcp_prio_enabled) {
			ha->flags.fcp_prio_enabled = 0;
			ha->fcp_prio_cfg->attributes &=
				~FCP_PRIO_ATTR_ENABLE;
			/* Push the new priority state to all logged-in ports. */
			qla24xx_update_all_fcp_prio(vha);
			bsg_reply->result = DID_OK;
		} else {
			/* Already disabled - nothing to do. */
			ret = -EINVAL;
			bsg_reply->result = (DID_ERROR << 16);
			goto exit_fcp_prio_cfg;
		}
		break;

	case QLFC_FCP_PRIO_ENABLE:
		if (!ha->flags.fcp_prio_enabled) {
			if (ha->fcp_prio_cfg) {
				ha->flags.fcp_prio_enabled = 1;
				ha->fcp_prio_cfg->attributes |=
				    FCP_PRIO_ATTR_ENABLE;
				qla24xx_update_all_fcp_prio(vha);
				bsg_reply->result = DID_OK;
			} else {
				/* Cannot enable without config data loaded. */
				ret = -EINVAL;
				bsg_reply->result = (DID_ERROR << 16);
				goto exit_fcp_prio_cfg;
			}
		}
		break;

	case QLFC_FCP_PRIO_GET_CONFIG:
		len = bsg_job->reply_payload.payload_len;
		if (!len || len > FCP_PRIO_CFG_SIZE) {
			ret = -EINVAL;
			bsg_reply->result = (DID_ERROR << 16);
			goto exit_fcp_prio_cfg;
		}

		/* Copy (up to len bytes of) the cached config to userspace. */
		bsg_reply->result = DID_OK;
		bsg_reply->reply_payload_rcv_len =
			sg_copy_from_buffer(
			bsg_job->reply_payload.sg_list,
			bsg_job->reply_payload.sg_cnt, ha->fcp_prio_cfg,
			len);

		break;

	case QLFC_FCP_PRIO_SET_CONFIG:
		len = bsg_job->request_payload.payload_len;
		if (!len || len > FCP_PRIO_CFG_SIZE) {
			bsg_reply->result = (DID_ERROR << 16);
			ret = -EINVAL;
			goto exit_fcp_prio_cfg;
		}

		/* Lazily allocate the config buffer on the first SET. */
		if (!ha->fcp_prio_cfg) {
			ha->fcp_prio_cfg = vmalloc(FCP_PRIO_CFG_SIZE);
			if (!ha->fcp_prio_cfg) {
				ql_log(ql_log_warn, vha, 0x7050,
				    "Unable to allocate memory for fcp prio "
				    "config data (%x).\n", FCP_PRIO_CFG_SIZE);
				bsg_reply->result = (DID_ERROR << 16);
				ret = -ENOMEM;
				goto exit_fcp_prio_cfg;
			}
		}

		memset(ha->fcp_prio_cfg, 0, FCP_PRIO_CFG_SIZE);
		sg_copy_to_buffer(bsg_job->request_payload.sg_list,
		bsg_job->request_payload.sg_cnt, ha->fcp_prio_cfg,
			FCP_PRIO_CFG_SIZE);

		/* validate fcp priority data */

		if (!qla24xx_fcp_prio_cfg_valid(vha,
		    (struct qla_fcp_prio_cfg *) ha->fcp_prio_cfg, 1)) {
			bsg_reply->result = (DID_ERROR << 16);
			ret = -EINVAL;
			/* If the buffer was invalid, the cached
			 * fcp_prio_cfg is of no use - discard it.
			 */
			vfree(ha->fcp_prio_cfg);
			ha->fcp_prio_cfg = NULL;
			goto exit_fcp_prio_cfg;
		}

		/* Mirror the enable bit from the new config into the flag. */
		ha->flags.fcp_prio_enabled = 0;
		if (ha->fcp_prio_cfg->attributes & FCP_PRIO_ATTR_ENABLE)
			ha->flags.fcp_prio_enabled = 1;
		qla24xx_update_all_fcp_prio(vha);
		bsg_reply->result = DID_OK;
		break;
	default:
		ret = -EINVAL;
		break;
	}
exit_fcp_prio_cfg:
	if (!ret)
		bsg_job_done(bsg_job, bsg_reply->result,
			       bsg_reply->reply_payload_rcv_len);
	return ret;
}
 248
 249static int
 250qla2x00_process_els(struct bsg_job *bsg_job)
 251{
 252        struct fc_bsg_request *bsg_request = bsg_job->request;
 253        struct fc_rport *rport;
 254        fc_port_t *fcport = NULL;
 255        struct Scsi_Host *host;
 256        scsi_qla_host_t *vha;
 257        struct qla_hw_data *ha;
 258        srb_t *sp;
 259        const char *type;
 260        int req_sg_cnt, rsp_sg_cnt;
 261        int rval =  (DRIVER_ERROR << 16);
 262        uint16_t nextlid = 0;
 263
 264        if (bsg_request->msgcode == FC_BSG_RPT_ELS) {
 265                rport = fc_bsg_to_rport(bsg_job);
 266                fcport = *(fc_port_t **) rport->dd_data;
 267                host = rport_to_shost(rport);
 268                vha = shost_priv(host);
 269                ha = vha->hw;
 270                type = "FC_BSG_RPT_ELS";
 271        } else {
 272                host = fc_bsg_to_shost(bsg_job);
 273                vha = shost_priv(host);
 274                ha = vha->hw;
 275                type = "FC_BSG_HST_ELS_NOLOGIN";
 276        }
 277
 278        if (!vha->flags.online) {
 279                ql_log(ql_log_warn, vha, 0x7005, "Host not online.\n");
 280                rval = -EIO;
 281                goto done;
 282        }
 283
 284        /* pass through is supported only for ISP 4Gb or higher */
 285        if (!IS_FWI2_CAPABLE(ha)) {
 286                ql_dbg(ql_dbg_user, vha, 0x7001,
 287                    "ELS passthru not supported for ISP23xx based adapters.\n");
 288                rval = -EPERM;
 289                goto done;
 290        }
 291
 292        /*  Multiple SG's are not supported for ELS requests */
 293        if (bsg_job->request_payload.sg_cnt > 1 ||
 294                bsg_job->reply_payload.sg_cnt > 1) {
 295                ql_dbg(ql_dbg_user, vha, 0x7002,
 296                    "Multiple SG's are not supported for ELS requests, "
 297                    "request_sg_cnt=%x reply_sg_cnt=%x.\n",
 298                    bsg_job->request_payload.sg_cnt,
 299                    bsg_job->reply_payload.sg_cnt);
 300                rval = -EPERM;
 301                goto done;
 302        }
 303
 304        /* ELS request for rport */
 305        if (bsg_request->msgcode == FC_BSG_RPT_ELS) {
 306                /* make sure the rport is logged in,
 307                 * if not perform fabric login
 308                 */
 309                if (qla2x00_fabric_login(vha, fcport, &nextlid)) {
 310                        ql_dbg(ql_dbg_user, vha, 0x7003,
 311                            "Failed to login port %06X for ELS passthru.\n",
 312                            fcport->d_id.b24);
 313                        rval = -EIO;
 314                        goto done;
 315                }
 316        } else {
 317                /* Allocate a dummy fcport structure, since functions
 318                 * preparing the IOCB and mailbox command retrieves port
 319                 * specific information from fcport structure. For Host based
 320                 * ELS commands there will be no fcport structure allocated
 321                 */
 322                fcport = qla2x00_alloc_fcport(vha, GFP_KERNEL);
 323                if (!fcport) {
 324                        rval = -ENOMEM;
 325                        goto done;
 326                }
 327
 328                /* Initialize all required  fields of fcport */
 329                fcport->vha = vha;
 330                fcport->d_id.b.al_pa =
 331                        bsg_request->rqst_data.h_els.port_id[0];
 332                fcport->d_id.b.area =
 333                        bsg_request->rqst_data.h_els.port_id[1];
 334                fcport->d_id.b.domain =
 335                        bsg_request->rqst_data.h_els.port_id[2];
 336                fcport->loop_id =
 337                        (fcport->d_id.b.al_pa == 0xFD) ?
 338                        NPH_FABRIC_CONTROLLER : NPH_F_PORT;
 339        }
 340
 341        req_sg_cnt =
 342                dma_map_sg(&ha->pdev->dev, bsg_job->request_payload.sg_list,
 343                bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE);
 344        if (!req_sg_cnt) {
 345                rval = -ENOMEM;
 346                goto done_free_fcport;
 347        }
 348
 349        rsp_sg_cnt = dma_map_sg(&ha->pdev->dev, bsg_job->reply_payload.sg_list,
 350                bsg_job->reply_payload.sg_cnt, DMA_FROM_DEVICE);
 351        if (!rsp_sg_cnt) {
 352                rval = -ENOMEM;
 353                goto done_free_fcport;
 354        }
 355
 356        if ((req_sg_cnt !=  bsg_job->request_payload.sg_cnt) ||
 357                (rsp_sg_cnt != bsg_job->reply_payload.sg_cnt)) {
 358                ql_log(ql_log_warn, vha, 0x7008,
 359                    "dma mapping resulted in different sg counts, "
 360                    "request_sg_cnt: %x dma_request_sg_cnt:%x reply_sg_cnt:%x "
 361                    "dma_reply_sg_cnt:%x.\n", bsg_job->request_payload.sg_cnt,
 362                    req_sg_cnt, bsg_job->reply_payload.sg_cnt, rsp_sg_cnt);
 363                rval = -EAGAIN;
 364                goto done_unmap_sg;
 365        }
 366
 367        /* Alloc SRB structure */
 368        sp = qla2x00_get_sp(vha, fcport, GFP_KERNEL);
 369        if (!sp) {
 370                rval = -ENOMEM;
 371                goto done_unmap_sg;
 372        }
 373
 374        sp->type =
 375                (bsg_request->msgcode == FC_BSG_RPT_ELS ?
 376                 SRB_ELS_CMD_RPT : SRB_ELS_CMD_HST);
 377        sp->name =
 378                (bsg_request->msgcode == FC_BSG_RPT_ELS ?
 379                 "bsg_els_rpt" : "bsg_els_hst");
 380        sp->u.bsg_job = bsg_job;
 381        sp->free = qla2x00_bsg_sp_free;
 382        sp->done = qla2x00_bsg_job_done;
 383
 384        ql_dbg(ql_dbg_user, vha, 0x700a,
 385            "bsg rqst type: %s els type: %x - loop-id=%x "
 386            "portid=%-2x%02x%02x.\n", type,
 387            bsg_request->rqst_data.h_els.command_code, fcport->loop_id,
 388            fcport->d_id.b.domain, fcport->d_id.b.area, fcport->d_id.b.al_pa);
 389
 390        rval = qla2x00_start_sp(sp);
 391        if (rval != QLA_SUCCESS) {
 392                ql_log(ql_log_warn, vha, 0x700e,
 393                    "qla2x00_start_sp failed = %d\n", rval);
 394                qla2x00_rel_sp(sp);
 395                rval = -EIO;
 396                goto done_unmap_sg;
 397        }
 398        return rval;
 399
 400done_unmap_sg:
 401        dma_unmap_sg(&ha->pdev->dev, bsg_job->request_payload.sg_list,
 402                bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE);
 403        dma_unmap_sg(&ha->pdev->dev, bsg_job->reply_payload.sg_list,
 404                bsg_job->reply_payload.sg_cnt, DMA_FROM_DEVICE);
 405        goto done_free_fcport;
 406
 407done_free_fcport:
 408        if (bsg_request->msgcode == FC_BSG_RPT_ELS)
 409                kfree(fcport);
 410done:
 411        return rval;
 412}
 413
 414static inline uint16_t
 415qla24xx_calc_ct_iocbs(uint16_t dsds)
 416{
 417        uint16_t iocbs;
 418
 419        iocbs = 1;
 420        if (dsds > 2) {
 421                iocbs += (dsds - 2) / 5;
 422                if ((dsds - 2) % 5)
 423                        iocbs++;
 424        }
 425        return iocbs;
 426}
 427
 428static int
 429qla2x00_process_ct(struct bsg_job *bsg_job)
 430{
 431        srb_t *sp;
 432        struct fc_bsg_request *bsg_request = bsg_job->request;
 433        struct Scsi_Host *host = fc_bsg_to_shost(bsg_job);
 434        scsi_qla_host_t *vha = shost_priv(host);
 435        struct qla_hw_data *ha = vha->hw;
 436        int rval = (DRIVER_ERROR << 16);
 437        int req_sg_cnt, rsp_sg_cnt;
 438        uint16_t loop_id;
 439        struct fc_port *fcport;
 440        char  *type = "FC_BSG_HST_CT";
 441
 442        req_sg_cnt =
 443                dma_map_sg(&ha->pdev->dev, bsg_job->request_payload.sg_list,
 444                        bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE);
 445        if (!req_sg_cnt) {
 446                ql_log(ql_log_warn, vha, 0x700f,
 447                    "dma_map_sg return %d for request\n", req_sg_cnt);
 448                rval = -ENOMEM;
 449                goto done;
 450        }
 451
 452        rsp_sg_cnt = dma_map_sg(&ha->pdev->dev, bsg_job->reply_payload.sg_list,
 453                bsg_job->reply_payload.sg_cnt, DMA_FROM_DEVICE);
 454        if (!rsp_sg_cnt) {
 455                ql_log(ql_log_warn, vha, 0x7010,
 456                    "dma_map_sg return %d for reply\n", rsp_sg_cnt);
 457                rval = -ENOMEM;
 458                goto done;
 459        }
 460
 461        if ((req_sg_cnt !=  bsg_job->request_payload.sg_cnt) ||
 462            (rsp_sg_cnt != bsg_job->reply_payload.sg_cnt)) {
 463                ql_log(ql_log_warn, vha, 0x7011,
 464                    "request_sg_cnt: %x dma_request_sg_cnt: %x reply_sg_cnt:%x "
 465                    "dma_reply_sg_cnt: %x\n", bsg_job->request_payload.sg_cnt,
 466                    req_sg_cnt, bsg_job->reply_payload.sg_cnt, rsp_sg_cnt);
 467                rval = -EAGAIN;
 468                goto done_unmap_sg;
 469        }
 470
 471        if (!vha->flags.online) {
 472                ql_log(ql_log_warn, vha, 0x7012,
 473                    "Host is not online.\n");
 474                rval = -EIO;
 475                goto done_unmap_sg;
 476        }
 477
 478        loop_id =
 479                (bsg_request->rqst_data.h_ct.preamble_word1 & 0xFF000000)
 480                        >> 24;
 481        switch (loop_id) {
 482        case 0xFC:
 483                loop_id = cpu_to_le16(NPH_SNS);
 484                break;
 485        case 0xFA:
 486                loop_id = vha->mgmt_svr_loop_id;
 487                break;
 488        default:
 489                ql_dbg(ql_dbg_user, vha, 0x7013,
 490                    "Unknown loop id: %x.\n", loop_id);
 491                rval = -EINVAL;
 492                goto done_unmap_sg;
 493        }
 494
 495        /* Allocate a dummy fcport structure, since functions preparing the
 496         * IOCB and mailbox command retrieves port specific information
 497         * from fcport structure. For Host based ELS commands there will be
 498         * no fcport structure allocated
 499         */
 500        fcport = qla2x00_alloc_fcport(vha, GFP_KERNEL);
 501        if (!fcport) {
 502                ql_log(ql_log_warn, vha, 0x7014,
 503                    "Failed to allocate fcport.\n");
 504                rval = -ENOMEM;
 505                goto done_unmap_sg;
 506        }
 507
 508        /* Initialize all required  fields of fcport */
 509        fcport->vha = vha;
 510        fcport->d_id.b.al_pa = bsg_request->rqst_data.h_ct.port_id[0];
 511        fcport->d_id.b.area = bsg_request->rqst_data.h_ct.port_id[1];
 512        fcport->d_id.b.domain = bsg_request->rqst_data.h_ct.port_id[2];
 513        fcport->loop_id = loop_id;
 514
 515        /* Alloc SRB structure */
 516        sp = qla2x00_get_sp(vha, fcport, GFP_KERNEL);
 517        if (!sp) {
 518                ql_log(ql_log_warn, vha, 0x7015,
 519                    "qla2x00_get_sp failed.\n");
 520                rval = -ENOMEM;
 521                goto done_free_fcport;
 522        }
 523
 524        sp->type = SRB_CT_CMD;
 525        sp->name = "bsg_ct";
 526        sp->iocbs = qla24xx_calc_ct_iocbs(req_sg_cnt + rsp_sg_cnt);
 527        sp->u.bsg_job = bsg_job;
 528        sp->free = qla2x00_bsg_sp_free;
 529        sp->done = qla2x00_bsg_job_done;
 530
 531        ql_dbg(ql_dbg_user, vha, 0x7016,
 532            "bsg rqst type: %s else type: %x - "
 533            "loop-id=%x portid=%02x%02x%02x.\n", type,
 534            (bsg_request->rqst_data.h_ct.preamble_word2 >> 16),
 535            fcport->loop_id, fcport->d_id.b.domain, fcport->d_id.b.area,
 536            fcport->d_id.b.al_pa);
 537
 538        rval = qla2x00_start_sp(sp);
 539        if (rval != QLA_SUCCESS) {
 540                ql_log(ql_log_warn, vha, 0x7017,
 541                    "qla2x00_start_sp failed=%d.\n", rval);
 542                qla2x00_rel_sp(sp);
 543                rval = -EIO;
 544                goto done_free_fcport;
 545        }
 546        return rval;
 547
 548done_free_fcport:
 549        kfree(fcport);
 550done_unmap_sg:
 551        dma_unmap_sg(&ha->pdev->dev, bsg_job->request_payload.sg_list,
 552                bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE);
 553        dma_unmap_sg(&ha->pdev->dev, bsg_job->reply_payload.sg_list,
 554                bsg_job->reply_payload.sg_cnt, DMA_FROM_DEVICE);
 555done:
 556        return rval;
 557}
 558
/*
 * Disable loopback mode: clear the internal/external loopback bits in
 * the port configuration and, when requested, wait for the firmware's
 * DCBX-complete (@wait) and port-up (@wait2) notifications.
 *
 * Returns 0 on success or when the adapter type needs no reset,
 * -EINVAL on set-port-config failure or notification timeout.
 */
static inline int
qla81xx_reset_loopback_mode(scsi_qla_host_t *vha, uint16_t *config,
			    int wait, int wait2)
{
	int ret = 0;
	int rval = 0;
	uint16_t new_config[4];
	struct qla_hw_data *ha = vha->hw;

	/* Port-config based loopback only exists on 81xx/8031/8044 parts. */
	if (!IS_QLA81XX(ha) && !IS_QLA8031(ha) && !IS_QLA8044(ha))
		goto done_reset_internal;

	memset(new_config, 0 , sizeof(new_config));
	/* Only act if one of the loopback modes is currently enabled. */
	if ((config[0] & INTERNAL_LOOPBACK_MASK) >> 1 ==
	    ENABLE_INTERNAL_LOOPBACK ||
	    (config[0] & INTERNAL_LOOPBACK_MASK) >> 1 ==
	    ENABLE_EXTERNAL_LOOPBACK) {
		new_config[0] = config[0] & ~INTERNAL_LOOPBACK_MASK;
		ql_dbg(ql_dbg_user, vha, 0x70bf, "new_config[0]=%02x\n",
		    (new_config[0] & INTERNAL_LOOPBACK_MASK));
		memcpy(&new_config[1], &config[1], sizeof(uint16_t) * 3) ;

		/* Arm the async-event handlers before issuing the command
		 * so the completions cannot be missed.
		 */
		ha->notify_dcbx_comp = wait;
		ha->notify_lb_portup_comp = wait2;

		ret = qla81xx_set_port_config(vha, new_config);
		if (ret != QLA_SUCCESS) {
			ql_log(ql_log_warn, vha, 0x7025,
			    "Set port config failed.\n");
			ha->notify_dcbx_comp = 0;
			ha->notify_lb_portup_comp = 0;
			rval = -EINVAL;
			goto done_reset_internal;
		}

		/* Wait for DCBX complete event */
		if (wait && !wait_for_completion_timeout(&ha->dcbx_comp,
			(DCBX_COMP_TIMEOUT * HZ))) {
			ql_dbg(ql_dbg_user, vha, 0x7026,
			    "DCBX completion not received.\n");
			ha->notify_dcbx_comp = 0;
			ha->notify_lb_portup_comp = 0;
			rval = -EINVAL;
			goto done_reset_internal;
		} else
			ql_dbg(ql_dbg_user, vha, 0x7027,
			    "DCBX completion received.\n");

		/* Wait for the link to come back up, if requested. */
		if (wait2 &&
		    !wait_for_completion_timeout(&ha->lb_portup_comp,
		    (LB_PORTUP_COMP_TIMEOUT * HZ))) {
			ql_dbg(ql_dbg_user, vha, 0x70c5,
			    "Port up completion not received.\n");
			ha->notify_lb_portup_comp = 0;
			rval = -EINVAL;
			goto done_reset_internal;
		} else
			ql_dbg(ql_dbg_user, vha, 0x70c6,
			    "Port up completion received.\n");

		/* Disarm the notification handlers again. */
		ha->notify_dcbx_comp = 0;
		ha->notify_lb_portup_comp = 0;
	}
done_reset_internal:
	return rval;
}
 626
/*
 * Set the port configuration to enable the internal or external loopback
 * depending on the loopback mode.
 *
 * @config:     current port configuration (4 words).
 * @new_config: filled in with the modified configuration; also used by
 *              the error path to undo the change.
 * @mode:       INTERNAL_LOOPBACK or EXTERNAL_LOOPBACK.
 *
 * Returns 0 on success, -EINVAL on command failure, DCBX timeout, or a
 * bad IDC completion status.  On timeout the loopback change is rolled
 * back; if the rollback also fails, a firmware dump is taken and an ISP
 * abort is scheduled.
 */
static inline int
qla81xx_set_loopback_mode(scsi_qla_host_t *vha, uint16_t *config,
	uint16_t *new_config, uint16_t mode)
{
	int ret = 0;
	int rval = 0;
	unsigned long rem_tmo = 0, current_tmo = 0;
	struct qla_hw_data *ha = vha->hw;

	/* Port-config based loopback only exists on 81xx/8031/8044 parts. */
	if (!IS_QLA81XX(ha) && !IS_QLA8031(ha) && !IS_QLA8044(ha))
		goto done_set_internal;

	if (mode == INTERNAL_LOOPBACK)
		new_config[0] = config[0] | (ENABLE_INTERNAL_LOOPBACK << 1);
	else if (mode == EXTERNAL_LOOPBACK)
		new_config[0] = config[0] | (ENABLE_EXTERNAL_LOOPBACK << 1);
	ql_dbg(ql_dbg_user, vha, 0x70be,
	     "new_config[0]=%02x\n", (new_config[0] & INTERNAL_LOOPBACK_MASK));

	memcpy(&new_config[1], &config[1], sizeof(uint16_t) * 3);

	/* Arm the DCBX notification before issuing the command. */
	ha->notify_dcbx_comp = 1;
	ret = qla81xx_set_port_config(vha, new_config);
	if (ret != QLA_SUCCESS) {
		ql_log(ql_log_warn, vha, 0x7021,
		    "set port config failed.\n");
		ha->notify_dcbx_comp = 0;
		rval = -EINVAL;
		goto done_set_internal;
	}

	/* Wait for DCBX complete event; the firmware may ask (via
	 * idc_extend_tmo) for more time, in which case we re-wait with
	 * the extended timeout.
	 */
	current_tmo = DCBX_COMP_TIMEOUT * HZ;
	while (1) {
		rem_tmo = wait_for_completion_timeout(&ha->dcbx_comp,
		    current_tmo);
		if (!ha->idc_extend_tmo || rem_tmo) {
			ha->idc_extend_tmo = 0;
			break;
		}
		current_tmo = ha->idc_extend_tmo * HZ;
		ha->idc_extend_tmo = 0;
	}

	if (!rem_tmo) {
		ql_dbg(ql_dbg_user, vha, 0x7022,
		    "DCBX completion not received.\n");
		ret = qla81xx_reset_loopback_mode(vha, new_config, 0, 0);
		/*
		 * If the reset of the loopback mode doesn't work take a FCoE
		 * dump and reset the chip.
		 */
		if (ret) {
			ha->isp_ops->fw_dump(vha, 0);
			set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
		}
		rval = -EINVAL;
	} else {
		if (ha->flags.idc_compl_status) {
			ql_dbg(ql_dbg_user, vha, 0x70c3,
			    "Bad status in IDC Completion AEN\n");
			rval = -EINVAL;
			ha->flags.idc_compl_status = 0;
		} else
			ql_dbg(ql_dbg_user, vha, 0x7023,
			    "DCBX completion received.\n");
	}

	/* Disarm notifications and clear any stale timeout extension. */
	ha->notify_dcbx_comp = 0;
	ha->idc_extend_tmo = 0;

done_set_internal:
	return rval;
}
 705
 706static int
 707qla2x00_process_loopback(struct bsg_job *bsg_job)
 708{
 709        struct fc_bsg_request *bsg_request = bsg_job->request;
 710        struct fc_bsg_reply *bsg_reply = bsg_job->reply;
 711        struct Scsi_Host *host = fc_bsg_to_shost(bsg_job);
 712        scsi_qla_host_t *vha = shost_priv(host);
 713        struct qla_hw_data *ha = vha->hw;
 714        int rval;
 715        uint8_t command_sent;
 716        char *type;
 717        struct msg_echo_lb elreq;
 718        uint16_t response[MAILBOX_REGISTER_COUNT];
 719        uint16_t config[4], new_config[4];
 720        uint8_t *fw_sts_ptr;
 721        uint8_t *req_data = NULL;
 722        dma_addr_t req_data_dma;
 723        uint32_t req_data_len;
 724        uint8_t *rsp_data = NULL;
 725        dma_addr_t rsp_data_dma;
 726        uint32_t rsp_data_len;
 727
 728        if (!vha->flags.online) {
 729                ql_log(ql_log_warn, vha, 0x7019, "Host is not online.\n");
 730                return -EIO;
 731        }
 732
 733        memset(&elreq, 0, sizeof(elreq));
 734
 735        elreq.req_sg_cnt = dma_map_sg(&ha->pdev->dev,
 736                bsg_job->request_payload.sg_list, bsg_job->request_payload.sg_cnt,
 737                DMA_TO_DEVICE);
 738
 739        if (!elreq.req_sg_cnt) {
 740                ql_log(ql_log_warn, vha, 0x701a,
 741                    "dma_map_sg returned %d for request.\n", elreq.req_sg_cnt);
 742                return -ENOMEM;
 743        }
 744
 745        elreq.rsp_sg_cnt = dma_map_sg(&ha->pdev->dev,
 746                bsg_job->reply_payload.sg_list, bsg_job->reply_payload.sg_cnt,
 747                DMA_FROM_DEVICE);
 748
 749        if (!elreq.rsp_sg_cnt) {
 750                ql_log(ql_log_warn, vha, 0x701b,
 751                    "dma_map_sg returned %d for reply.\n", elreq.rsp_sg_cnt);
 752                rval = -ENOMEM;
 753                goto done_unmap_req_sg;
 754        }
 755
 756        if ((elreq.req_sg_cnt !=  bsg_job->request_payload.sg_cnt) ||
 757                (elreq.rsp_sg_cnt != bsg_job->reply_payload.sg_cnt)) {
 758                ql_log(ql_log_warn, vha, 0x701c,
 759                    "dma mapping resulted in different sg counts, "
 760                    "request_sg_cnt: %x dma_request_sg_cnt: %x "
 761                    "reply_sg_cnt: %x dma_reply_sg_cnt: %x.\n",
 762                    bsg_job->request_payload.sg_cnt, elreq.req_sg_cnt,
 763                    bsg_job->reply_payload.sg_cnt, elreq.rsp_sg_cnt);
 764                rval = -EAGAIN;
 765                goto done_unmap_sg;
 766        }
 767        req_data_len = rsp_data_len = bsg_job->request_payload.payload_len;
 768        req_data = dma_alloc_coherent(&ha->pdev->dev, req_data_len,
 769                &req_data_dma, GFP_KERNEL);
 770        if (!req_data) {
 771                ql_log(ql_log_warn, vha, 0x701d,
 772                    "dma alloc failed for req_data.\n");
 773                rval = -ENOMEM;
 774                goto done_unmap_sg;
 775        }
 776
 777        rsp_data = dma_alloc_coherent(&ha->pdev->dev, rsp_data_len,
 778                &rsp_data_dma, GFP_KERNEL);
 779        if (!rsp_data) {
 780                ql_log(ql_log_warn, vha, 0x7004,
 781                    "dma alloc failed for rsp_data.\n");
 782                rval = -ENOMEM;
 783                goto done_free_dma_req;
 784        }
 785
 786        /* Copy the request buffer in req_data now */
 787        sg_copy_to_buffer(bsg_job->request_payload.sg_list,
 788                bsg_job->request_payload.sg_cnt, req_data, req_data_len);
 789
 790        elreq.send_dma = req_data_dma;
 791        elreq.rcv_dma = rsp_data_dma;
 792        elreq.transfer_size = req_data_len;
 793
 794        elreq.options = bsg_request->rqst_data.h_vendor.vendor_cmd[1];
 795        elreq.iteration_count =
 796            bsg_request->rqst_data.h_vendor.vendor_cmd[2];
 797
 798        if (atomic_read(&vha->loop_state) == LOOP_READY &&
 799            (ha->current_topology == ISP_CFG_F ||
 800            (le32_to_cpu(*(uint32_t *)req_data) == ELS_OPCODE_BYTE &&
 801             req_data_len == MAX_ELS_FRAME_PAYLOAD)) &&
 802            elreq.options == EXTERNAL_LOOPBACK) {
 803                type = "FC_BSG_HST_VENDOR_ECHO_DIAG";
 804                ql_dbg(ql_dbg_user, vha, 0x701e,
 805                    "BSG request type: %s.\n", type);
 806                command_sent = INT_DEF_LB_ECHO_CMD;
 807                rval = qla2x00_echo_test(vha, &elreq, response);
 808        } else {
 809                if (IS_QLA81XX(ha) || IS_QLA8031(ha) || IS_QLA8044(ha)) {
 810                        memset(config, 0, sizeof(config));
 811                        memset(new_config, 0, sizeof(new_config));
 812
 813                        if (qla81xx_get_port_config(vha, config)) {
 814                                ql_log(ql_log_warn, vha, 0x701f,
 815                                    "Get port config failed.\n");
 816                                rval = -EPERM;
 817                                goto done_free_dma_rsp;
 818                        }
 819
 820                        if ((config[0] & INTERNAL_LOOPBACK_MASK) != 0) {
 821                                ql_dbg(ql_dbg_user, vha, 0x70c4,
 822                                    "Loopback operation already in "
 823                                    "progress.\n");
 824                                rval = -EAGAIN;
 825                                goto done_free_dma_rsp;
 826                        }
 827
 828                        ql_dbg(ql_dbg_user, vha, 0x70c0,
 829                            "elreq.options=%04x\n", elreq.options);
 830
 831                        if (elreq.options == EXTERNAL_LOOPBACK)
 832                                if (IS_QLA8031(ha) || IS_QLA8044(ha))
 833                                        rval = qla81xx_set_loopback_mode(vha,
 834                                            config, new_config, elreq.options);
 835                                else
 836                                        rval = qla81xx_reset_loopback_mode(vha,
 837                                            config, 1, 0);
 838                        else
 839                                rval = qla81xx_set_loopback_mode(vha, config,
 840                                    new_config, elreq.options);
 841
 842                        if (rval) {
 843                                rval = -EPERM;
 844                                goto done_free_dma_rsp;
 845                        }
 846
 847                        type = "FC_BSG_HST_VENDOR_LOOPBACK";
 848                        ql_dbg(ql_dbg_user, vha, 0x7028,
 849                            "BSG request type: %s.\n", type);
 850
 851                        command_sent = INT_DEF_LB_LOOPBACK_CMD;
 852                        rval = qla2x00_loopback_test(vha, &elreq, response);
 853
 854                        if (response[0] == MBS_COMMAND_ERROR &&
 855                                        response[1] == MBS_LB_RESET) {
 856                                ql_log(ql_log_warn, vha, 0x7029,
 857                                    "MBX command error, Aborting ISP.\n");
 858                                set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
 859                                qla2xxx_wake_dpc(vha);
 860                                qla2x00_wait_for_chip_reset(vha);
 861                                /* Also reset the MPI */
 862                                if (IS_QLA81XX(ha)) {
 863                                        if (qla81xx_restart_mpi_firmware(vha) !=
 864                                            QLA_SUCCESS) {
 865                                                ql_log(ql_log_warn, vha, 0x702a,
 866                                                    "MPI reset failed.\n");
 867                                        }
 868                                }
 869
 870                                rval = -EIO;
 871                                goto done_free_dma_rsp;
 872                        }
 873
 874                        if (new_config[0]) {
 875                                int ret;
 876
 877                                /* Revert back to original port config
 878                                 * Also clear internal loopback
 879                                 */
 880                                ret = qla81xx_reset_loopback_mode(vha,
 881                                    new_config, 0, 1);
 882                                if (ret) {
 883                                        /*
 884                                         * If the reset of the loopback mode
 885                                         * doesn't work take FCoE dump and then
 886                                         * reset the chip.
 887                                         */
 888                                        ha->isp_ops->fw_dump(vha, 0);
 889                                        set_bit(ISP_ABORT_NEEDED,
 890                                            &vha->dpc_flags);
 891                                }
 892
 893                        }
 894
 895                } else {
 896                        type = "FC_BSG_HST_VENDOR_LOOPBACK";
 897                        ql_dbg(ql_dbg_user, vha, 0x702b,
 898                            "BSG request type: %s.\n", type);
 899                        command_sent = INT_DEF_LB_LOOPBACK_CMD;
 900                        rval = qla2x00_loopback_test(vha, &elreq, response);
 901                }
 902        }
 903
 904        if (rval) {
 905                ql_log(ql_log_warn, vha, 0x702c,
 906                    "Vendor request %s failed.\n", type);
 907
 908                rval = 0;
 909                bsg_reply->result = (DID_ERROR << 16);
 910                bsg_reply->reply_payload_rcv_len = 0;
 911        } else {
 912                ql_dbg(ql_dbg_user, vha, 0x702d,
 913                    "Vendor request %s completed.\n", type);
 914                bsg_reply->result = (DID_OK << 16);
 915                sg_copy_from_buffer(bsg_job->reply_payload.sg_list,
 916                        bsg_job->reply_payload.sg_cnt, rsp_data,
 917                        rsp_data_len);
 918        }
 919
 920        bsg_job->reply_len = sizeof(struct fc_bsg_reply) +
 921            sizeof(response) + sizeof(uint8_t);
 922        fw_sts_ptr = bsg_job->reply + sizeof(struct fc_bsg_reply);
 923        memcpy(bsg_job->reply + sizeof(struct fc_bsg_reply), response,
 924                        sizeof(response));
 925        fw_sts_ptr += sizeof(response);
 926        *fw_sts_ptr = command_sent;
 927
 928done_free_dma_rsp:
 929        dma_free_coherent(&ha->pdev->dev, rsp_data_len,
 930                rsp_data, rsp_data_dma);
 931done_free_dma_req:
 932        dma_free_coherent(&ha->pdev->dev, req_data_len,
 933                req_data, req_data_dma);
 934done_unmap_sg:
 935        dma_unmap_sg(&ha->pdev->dev,
 936            bsg_job->reply_payload.sg_list,
 937            bsg_job->reply_payload.sg_cnt, DMA_FROM_DEVICE);
 938done_unmap_req_sg:
 939        dma_unmap_sg(&ha->pdev->dev,
 940            bsg_job->request_payload.sg_list,
 941            bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE);
 942        if (!rval)
 943                bsg_job_done(bsg_job, bsg_reply->result,
 944                               bsg_reply->reply_payload_rcv_len);
 945        return rval;
 946}
 947
 948static int
 949qla84xx_reset(struct bsg_job *bsg_job)
 950{
 951        struct fc_bsg_request *bsg_request = bsg_job->request;
 952        struct Scsi_Host *host = fc_bsg_to_shost(bsg_job);
 953        struct fc_bsg_reply *bsg_reply = bsg_job->reply;
 954        scsi_qla_host_t *vha = shost_priv(host);
 955        struct qla_hw_data *ha = vha->hw;
 956        int rval = 0;
 957        uint32_t flag;
 958
 959        if (!IS_QLA84XX(ha)) {
 960                ql_dbg(ql_dbg_user, vha, 0x702f, "Not 84xx, exiting.\n");
 961                return -EINVAL;
 962        }
 963
 964        flag = bsg_request->rqst_data.h_vendor.vendor_cmd[1];
 965
 966        rval = qla84xx_reset_chip(vha, flag == A84_ISSUE_RESET_DIAG_FW);
 967
 968        if (rval) {
 969                ql_log(ql_log_warn, vha, 0x7030,
 970                    "Vendor request 84xx reset failed.\n");
 971                rval = (DID_ERROR << 16);
 972
 973        } else {
 974                ql_dbg(ql_dbg_user, vha, 0x7031,
 975                    "Vendor request 84xx reset completed.\n");
 976                bsg_reply->result = DID_OK;
 977                bsg_job_done(bsg_job, bsg_reply->result,
 978                               bsg_reply->reply_payload_rcv_len);
 979        }
 980
 981        return rval;
 982}
 983
 984static int
 985qla84xx_updatefw(struct bsg_job *bsg_job)
 986{
 987        struct fc_bsg_request *bsg_request = bsg_job->request;
 988        struct fc_bsg_reply *bsg_reply = bsg_job->reply;
 989        struct Scsi_Host *host = fc_bsg_to_shost(bsg_job);
 990        scsi_qla_host_t *vha = shost_priv(host);
 991        struct qla_hw_data *ha = vha->hw;
 992        struct verify_chip_entry_84xx *mn = NULL;
 993        dma_addr_t mn_dma, fw_dma;
 994        void *fw_buf = NULL;
 995        int rval = 0;
 996        uint32_t sg_cnt;
 997        uint32_t data_len;
 998        uint16_t options;
 999        uint32_t flag;
1000        uint32_t fw_ver;
1001
1002        if (!IS_QLA84XX(ha)) {
1003                ql_dbg(ql_dbg_user, vha, 0x7032,
1004                    "Not 84xx, exiting.\n");
1005                return -EINVAL;
1006        }
1007
1008        sg_cnt = dma_map_sg(&ha->pdev->dev, bsg_job->request_payload.sg_list,
1009                bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE);
1010        if (!sg_cnt) {
1011                ql_log(ql_log_warn, vha, 0x7033,
1012                    "dma_map_sg returned %d for request.\n", sg_cnt);
1013                return -ENOMEM;
1014        }
1015
1016        if (sg_cnt != bsg_job->request_payload.sg_cnt) {
1017                ql_log(ql_log_warn, vha, 0x7034,
1018                    "DMA mapping resulted in different sg counts, "
1019                    "request_sg_cnt: %x dma_request_sg_cnt: %x.\n",
1020                    bsg_job->request_payload.sg_cnt, sg_cnt);
1021                rval = -EAGAIN;
1022                goto done_unmap_sg;
1023        }
1024
1025        data_len = bsg_job->request_payload.payload_len;
1026        fw_buf = dma_alloc_coherent(&ha->pdev->dev, data_len,
1027                &fw_dma, GFP_KERNEL);
1028        if (!fw_buf) {
1029                ql_log(ql_log_warn, vha, 0x7035,
1030                    "DMA alloc failed for fw_buf.\n");
1031                rval = -ENOMEM;
1032                goto done_unmap_sg;
1033        }
1034
1035        sg_copy_to_buffer(bsg_job->request_payload.sg_list,
1036                bsg_job->request_payload.sg_cnt, fw_buf, data_len);
1037
1038        mn = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, &mn_dma);
1039        if (!mn) {
1040                ql_log(ql_log_warn, vha, 0x7036,
1041                    "DMA alloc failed for fw buffer.\n");
1042                rval = -ENOMEM;
1043                goto done_free_fw_buf;
1044        }
1045
1046        flag = bsg_request->rqst_data.h_vendor.vendor_cmd[1];
1047        fw_ver = le32_to_cpu(*((uint32_t *)((uint32_t *)fw_buf + 2)));
1048
1049        memset(mn, 0, sizeof(struct access_chip_84xx));
1050        mn->entry_type = VERIFY_CHIP_IOCB_TYPE;
1051        mn->entry_count = 1;
1052
1053        options = VCO_FORCE_UPDATE | VCO_END_OF_DATA;
1054        if (flag == A84_ISSUE_UPDATE_DIAGFW_CMD)
1055                options |= VCO_DIAG_FW;
1056
1057        mn->options = cpu_to_le16(options);
1058        mn->fw_ver =  cpu_to_le32(fw_ver);
1059        mn->fw_size =  cpu_to_le32(data_len);
1060        mn->fw_seq_size =  cpu_to_le32(data_len);
1061        mn->dseg_address[0] = cpu_to_le32(LSD(fw_dma));
1062        mn->dseg_address[1] = cpu_to_le32(MSD(fw_dma));
1063        mn->dseg_length = cpu_to_le32(data_len);
1064        mn->data_seg_cnt = cpu_to_le16(1);
1065
1066        rval = qla2x00_issue_iocb_timeout(vha, mn, mn_dma, 0, 120);
1067
1068        if (rval) {
1069                ql_log(ql_log_warn, vha, 0x7037,
1070                    "Vendor request 84xx updatefw failed.\n");
1071
1072                rval = (DID_ERROR << 16);
1073        } else {
1074                ql_dbg(ql_dbg_user, vha, 0x7038,
1075                    "Vendor request 84xx updatefw completed.\n");
1076
1077                bsg_job->reply_len = sizeof(struct fc_bsg_reply);
1078                bsg_reply->result = DID_OK;
1079        }
1080
1081        dma_pool_free(ha->s_dma_pool, mn, mn_dma);
1082
1083done_free_fw_buf:
1084        dma_free_coherent(&ha->pdev->dev, data_len, fw_buf, fw_dma);
1085
1086done_unmap_sg:
1087        dma_unmap_sg(&ha->pdev->dev, bsg_job->request_payload.sg_list,
1088                bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE);
1089
1090        if (!rval)
1091                bsg_job_done(bsg_job, bsg_reply->result,
1092                               bsg_reply->reply_payload_rcv_len);
1093        return rval;
1094}
1095
/*
 * qla84xx_mgmt_cmd() - BSG vendor command: issue an ACCESS_CHIP IOCB to an
 * ISP84xx for management operations (read/write chip memory, get info,
 * change config).
 *
 * The qla_bsg_a84_mgmt payload immediately follows the fc_bsg_request in
 * the bsg request buffer.  READ_MEM/GET_INFO transfer data chip->host via
 * the reply payload; WRITE_MEM transfers host->chip via the request
 * payload; CHNG_CONFIG moves no bulk data at all.  dma_direction tracks
 * which mapping (if any) must be undone on the way out.
 *
 * Returns 0 on success (job completed here), a negative errno on setup
 * failure, or (DID_ERROR << 16) if the IOCB fails.
 */
static int
qla84xx_mgmt_cmd(struct bsg_job *bsg_job)
{
	struct fc_bsg_request *bsg_request = bsg_job->request;
	struct fc_bsg_reply *bsg_reply = bsg_job->reply;
	struct Scsi_Host *host = fc_bsg_to_shost(bsg_job);
	scsi_qla_host_t *vha = shost_priv(host);
	struct qla_hw_data *ha = vha->hw;
	struct access_chip_84xx *mn = NULL;
	dma_addr_t mn_dma, mgmt_dma;
	void *mgmt_b = NULL;	/* bounce buffer for the bulk transfer */
	int rval = 0;
	struct qla_bsg_a84_mgmt *ql84_mgmt;
	uint32_t sg_cnt;
	uint32_t data_len = 0;
	uint32_t dma_direction = DMA_NONE;	/* which payload was mapped */

	if (!IS_QLA84XX(ha)) {
		ql_log(ql_log_warn, vha, 0x703a,
		    "Not 84xx, exiting.\n");
		return -EINVAL;
	}

	/* IOCB must come from the coherent pool so the ISP can DMA it. */
	mn = dma_pool_zalloc(ha->s_dma_pool, GFP_KERNEL, &mn_dma);
	if (!mn) {
		ql_log(ql_log_warn, vha, 0x703c,
		    "DMA alloc failed for fw buffer.\n");
		return -ENOMEM;
	}

	mn->entry_type = ACCESS_CHIP_IOCB_TYPE;
	mn->entry_count = 1;
	/* Management request follows the fc_bsg_request header in-line. */
	ql84_mgmt = (void *)bsg_request + sizeof(struct fc_bsg_request);
	switch (ql84_mgmt->mgmt.cmd) {
	case QLA84_MGMT_READ_MEM:
	case QLA84_MGMT_GET_INFO:
		/* Data flows chip -> host: map the reply payload. */
		sg_cnt = dma_map_sg(&ha->pdev->dev,
			bsg_job->reply_payload.sg_list,
			bsg_job->reply_payload.sg_cnt, DMA_FROM_DEVICE);
		if (!sg_cnt) {
			ql_log(ql_log_warn, vha, 0x703d,
			    "dma_map_sg returned %d for reply.\n", sg_cnt);
			rval = -ENOMEM;
			goto exit_mgmt;
		}

		dma_direction = DMA_FROM_DEVICE;

		/* Single-segment bounce buffer is used below, so a merged
		 * mapping would desynchronize the copy length. */
		if (sg_cnt != bsg_job->reply_payload.sg_cnt) {
			ql_log(ql_log_warn, vha, 0x703e,
			    "DMA mapping resulted in different sg counts, "
			    "reply_sg_cnt: %x dma_reply_sg_cnt: %x.\n",
			    bsg_job->reply_payload.sg_cnt, sg_cnt);
			rval = -EAGAIN;
			goto done_unmap_sg;
		}

		data_len = bsg_job->reply_payload.payload_len;

		mgmt_b = dma_alloc_coherent(&ha->pdev->dev, data_len,
		    &mgmt_dma, GFP_KERNEL);
		if (!mgmt_b) {
			ql_log(ql_log_warn, vha, 0x703f,
			    "DMA alloc failed for mgmt_b.\n");
			rval = -ENOMEM;
			goto done_unmap_sg;
		}

		if (ql84_mgmt->mgmt.cmd == QLA84_MGMT_READ_MEM) {
			mn->options = cpu_to_le16(ACO_DUMP_MEMORY);
			mn->parameter1 =
				cpu_to_le32(
				ql84_mgmt->mgmt.mgmtp.u.mem.start_addr);

		} else if (ql84_mgmt->mgmt.cmd == QLA84_MGMT_GET_INFO) {
			mn->options = cpu_to_le16(ACO_REQUEST_INFO);
			mn->parameter1 =
				cpu_to_le32(ql84_mgmt->mgmt.mgmtp.u.info.type);

			mn->parameter2 =
				cpu_to_le32(
				ql84_mgmt->mgmt.mgmtp.u.info.context);
		}
		break;

	case QLA84_MGMT_WRITE_MEM:
		/* Data flows host -> chip: map the request payload. */
		sg_cnt = dma_map_sg(&ha->pdev->dev,
			bsg_job->request_payload.sg_list,
			bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE);

		if (!sg_cnt) {
			ql_log(ql_log_warn, vha, 0x7040,
			    "dma_map_sg returned %d.\n", sg_cnt);
			rval = -ENOMEM;
			goto exit_mgmt;
		}

		dma_direction = DMA_TO_DEVICE;

		if (sg_cnt != bsg_job->request_payload.sg_cnt) {
			ql_log(ql_log_warn, vha, 0x7041,
			    "DMA mapping resulted in different sg counts, "
			    "request_sg_cnt: %x dma_request_sg_cnt: %x.\n",
			    bsg_job->request_payload.sg_cnt, sg_cnt);
			rval = -EAGAIN;
			goto done_unmap_sg;
		}

		data_len = bsg_job->request_payload.payload_len;
		mgmt_b = dma_alloc_coherent(&ha->pdev->dev, data_len,
			&mgmt_dma, GFP_KERNEL);
		if (!mgmt_b) {
			ql_log(ql_log_warn, vha, 0x7042,
			    "DMA alloc failed for mgmt_b.\n");
			rval = -ENOMEM;
			goto done_unmap_sg;
		}

		/* Stage the write data into the bounce buffer. */
		sg_copy_to_buffer(bsg_job->request_payload.sg_list,
			bsg_job->request_payload.sg_cnt, mgmt_b, data_len);

		mn->options = cpu_to_le16(ACO_LOAD_MEMORY);
		mn->parameter1 =
			cpu_to_le32(ql84_mgmt->mgmt.mgmtp.u.mem.start_addr);
		break;

	case QLA84_MGMT_CHNG_CONFIG:
		/* Pure parameter update -- no bulk data, no DMA mapping. */
		mn->options = cpu_to_le16(ACO_CHANGE_CONFIG_PARAM);
		mn->parameter1 =
			cpu_to_le32(ql84_mgmt->mgmt.mgmtp.u.config.id);

		mn->parameter2 =
			cpu_to_le32(ql84_mgmt->mgmt.mgmtp.u.config.param0);

		mn->parameter3 =
			cpu_to_le32(ql84_mgmt->mgmt.mgmtp.u.config.param1);
		break;

	default:
		rval = -EIO;
		goto exit_mgmt;
	}

	/* Every command except CHNG_CONFIG carries a single data segment
	 * pointing at the bounce buffer allocated above. */
	if (ql84_mgmt->mgmt.cmd != QLA84_MGMT_CHNG_CONFIG) {
		mn->total_byte_cnt = cpu_to_le32(ql84_mgmt->mgmt.len);
		mn->dseg_count = cpu_to_le16(1);
		mn->dseg_address[0] = cpu_to_le32(LSD(mgmt_dma));
		mn->dseg_address[1] = cpu_to_le32(MSD(mgmt_dma));
		mn->dseg_length = cpu_to_le32(ql84_mgmt->mgmt.len);
	}

	rval = qla2x00_issue_iocb(vha, mn, mn_dma, 0);

	if (rval) {
		ql_log(ql_log_warn, vha, 0x7043,
		    "Vendor request 84xx mgmt failed.\n");

		rval = (DID_ERROR << 16);

	} else {
		ql_dbg(ql_dbg_user, vha, 0x7044,
		    "Vendor request 84xx mgmt completed.\n");

		bsg_job->reply_len = sizeof(struct fc_bsg_reply);
		bsg_reply->result = DID_OK;

		/* For read-type commands, copy the data the chip deposited
		 * in the bounce buffer back into the reply payload. */
		if ((ql84_mgmt->mgmt.cmd == QLA84_MGMT_READ_MEM) ||
			(ql84_mgmt->mgmt.cmd == QLA84_MGMT_GET_INFO)) {
			bsg_reply->reply_payload_rcv_len =
				bsg_job->reply_payload.payload_len;

			sg_copy_from_buffer(bsg_job->reply_payload.sg_list,
				bsg_job->reply_payload.sg_cnt, mgmt_b,
				data_len);
		}
	}

done_unmap_sg:
	if (mgmt_b)
		dma_free_coherent(&ha->pdev->dev, data_len, mgmt_b, mgmt_dma);

	/* Undo whichever payload mapping (if any) was established above. */
	if (dma_direction == DMA_TO_DEVICE)
		dma_unmap_sg(&ha->pdev->dev, bsg_job->request_payload.sg_list,
			bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE);
	else if (dma_direction == DMA_FROM_DEVICE)
		dma_unmap_sg(&ha->pdev->dev, bsg_job->reply_payload.sg_list,
			bsg_job->reply_payload.sg_cnt, DMA_FROM_DEVICE);

exit_mgmt:
	dma_pool_free(ha->s_dma_pool, mn, mn_dma);

	/* Only complete the job ourselves on success. */
	if (!rval)
		bsg_job_done(bsg_job, bsg_reply->result,
			       bsg_reply->reply_payload_rcv_len);
	return rval;
}
1292
1293static int
1294qla24xx_iidma(struct bsg_job *bsg_job)
1295{
1296        struct fc_bsg_request *bsg_request = bsg_job->request;
1297        struct fc_bsg_reply *bsg_reply = bsg_job->reply;
1298        struct Scsi_Host *host = fc_bsg_to_shost(bsg_job);
1299        scsi_qla_host_t *vha = shost_priv(host);
1300        int rval = 0;
1301        struct qla_port_param *port_param = NULL;
1302        fc_port_t *fcport = NULL;
1303        int found = 0;
1304        uint16_t mb[MAILBOX_REGISTER_COUNT];
1305        uint8_t *rsp_ptr = NULL;
1306
1307        if (!IS_IIDMA_CAPABLE(vha->hw)) {
1308                ql_log(ql_log_info, vha, 0x7046, "iiDMA not supported.\n");
1309                return -EINVAL;
1310        }
1311
1312        port_param = (void *)bsg_request + sizeof(struct fc_bsg_request);
1313        if (port_param->fc_scsi_addr.dest_type != EXT_DEF_TYPE_WWPN) {
1314                ql_log(ql_log_warn, vha, 0x7048,
1315                    "Invalid destination type.\n");
1316                return -EINVAL;
1317        }
1318
1319        list_for_each_entry(fcport, &vha->vp_fcports, list) {
1320                if (fcport->port_type != FCT_TARGET)
1321                        continue;
1322
1323                if (memcmp(port_param->fc_scsi_addr.dest_addr.wwpn,
1324                        fcport->port_name, sizeof(fcport->port_name)))
1325                        continue;
1326
1327                found = 1;
1328                break;
1329        }
1330
1331        if (!found) {
1332                ql_log(ql_log_warn, vha, 0x7049,
1333                    "Failed to find port.\n");
1334                return -EINVAL;
1335        }
1336
1337        if (atomic_read(&fcport->state) != FCS_ONLINE) {
1338                ql_log(ql_log_warn, vha, 0x704a,
1339                    "Port is not online.\n");
1340                return -EINVAL;
1341        }
1342
1343        if (fcport->flags & FCF_LOGIN_NEEDED) {
1344                ql_log(ql_log_warn, vha, 0x704b,
1345                    "Remote port not logged in flags = 0x%x.\n", fcport->flags);
1346                return -EINVAL;
1347        }
1348
1349        if (port_param->mode)
1350                rval = qla2x00_set_idma_speed(vha, fcport->loop_id,
1351                        port_param->speed, mb);
1352        else
1353                rval = qla2x00_get_idma_speed(vha, fcport->loop_id,
1354                        &port_param->speed, mb);
1355
1356        if (rval) {
1357                ql_log(ql_log_warn, vha, 0x704c,
1358                    "iIDMA cmd failed for %8phN -- "
1359                    "%04x %x %04x %04x.\n", fcport->port_name,
1360                    rval, fcport->fp_speed, mb[0], mb[1]);
1361                rval = (DID_ERROR << 16);
1362        } else {
1363                if (!port_param->mode) {
1364                        bsg_job->reply_len = sizeof(struct fc_bsg_reply) +
1365                                sizeof(struct qla_port_param);
1366
1367                        rsp_ptr = ((uint8_t *)bsg_reply) +
1368                                sizeof(struct fc_bsg_reply);
1369
1370                        memcpy(rsp_ptr, port_param,
1371                                sizeof(struct qla_port_param));
1372                }
1373
1374                bsg_reply->result = DID_OK;
1375                bsg_job_done(bsg_job, bsg_reply->result,
1376                               bsg_reply->reply_payload_rcv_len);
1377        }
1378
1379        return rval;
1380}
1381
1382static int
1383qla2x00_optrom_setup(struct bsg_job *bsg_job, scsi_qla_host_t *vha,
1384        uint8_t is_update)
1385{
1386        struct fc_bsg_request *bsg_request = bsg_job->request;
1387        uint32_t start = 0;
1388        int valid = 0;
1389        struct qla_hw_data *ha = vha->hw;
1390
1391        if (unlikely(pci_channel_offline(ha->pdev)))
1392                return -EINVAL;
1393
1394        start = bsg_request->rqst_data.h_vendor.vendor_cmd[1];
1395        if (start > ha->optrom_size) {
1396                ql_log(ql_log_warn, vha, 0x7055,
1397                    "start %d > optrom_size %d.\n", start, ha->optrom_size);
1398                return -EINVAL;
1399        }
1400
1401        if (ha->optrom_state != QLA_SWAITING) {
1402                ql_log(ql_log_info, vha, 0x7056,
1403                    "optrom_state %d.\n", ha->optrom_state);
1404                return -EBUSY;
1405        }
1406
1407        ha->optrom_region_start = start;
1408        ql_dbg(ql_dbg_user, vha, 0x7057, "is_update=%d.\n", is_update);
1409        if (is_update) {
1410                if (ha->optrom_size == OPTROM_SIZE_2300 && start == 0)
1411                        valid = 1;
1412                else if (start == (ha->flt_region_boot * 4) ||
1413                    start == (ha->flt_region_fw * 4))
1414                        valid = 1;
1415                else if (IS_QLA24XX_TYPE(ha) || IS_QLA25XX(ha) ||
1416                    IS_CNA_CAPABLE(ha) || IS_QLA2031(ha) || IS_QLA27XX(ha))
1417                        valid = 1;
1418                if (!valid) {
1419                        ql_log(ql_log_warn, vha, 0x7058,
1420                            "Invalid start region 0x%x/0x%x.\n", start,
1421                            bsg_job->request_payload.payload_len);
1422                        return -EINVAL;
1423                }
1424
1425                ha->optrom_region_size = start +
1426                    bsg_job->request_payload.payload_len > ha->optrom_size ?
1427                    ha->optrom_size - start :
1428                    bsg_job->request_payload.payload_len;
1429                ha->optrom_state = QLA_SWRITING;
1430        } else {
1431                ha->optrom_region_size = start +
1432                    bsg_job->reply_payload.payload_len > ha->optrom_size ?
1433                    ha->optrom_size - start :
1434                    bsg_job->reply_payload.payload_len;
1435                ha->optrom_state = QLA_SREADING;
1436        }
1437
1438        ha->optrom_buffer = vzalloc(ha->optrom_region_size);
1439        if (!ha->optrom_buffer) {
1440                ql_log(ql_log_warn, vha, 0x7059,
1441                    "Read: Unable to allocate memory for optrom retrieval "
1442                    "(%x)\n", ha->optrom_region_size);
1443
1444                ha->optrom_state = QLA_SWAITING;
1445                return -ENOMEM;
1446        }
1447
1448        return 0;
1449}
1450
1451static int
1452qla2x00_read_optrom(struct bsg_job *bsg_job)
1453{
1454        struct fc_bsg_reply *bsg_reply = bsg_job->reply;
1455        struct Scsi_Host *host = fc_bsg_to_shost(bsg_job);
1456        scsi_qla_host_t *vha = shost_priv(host);
1457        struct qla_hw_data *ha = vha->hw;
1458        int rval = 0;
1459
1460        if (ha->flags.nic_core_reset_hdlr_active)
1461                return -EBUSY;
1462
1463        mutex_lock(&ha->optrom_mutex);
1464        rval = qla2x00_optrom_setup(bsg_job, vha, 0);
1465        if (rval) {
1466                mutex_unlock(&ha->optrom_mutex);
1467                return rval;
1468        }
1469
1470        ha->isp_ops->read_optrom(vha, ha->optrom_buffer,
1471            ha->optrom_region_start, ha->optrom_region_size);
1472
1473        sg_copy_from_buffer(bsg_job->reply_payload.sg_list,
1474            bsg_job->reply_payload.sg_cnt, ha->optrom_buffer,
1475            ha->optrom_region_size);
1476
1477        bsg_reply->reply_payload_rcv_len = ha->optrom_region_size;
1478        bsg_reply->result = DID_OK;
1479        vfree(ha->optrom_buffer);
1480        ha->optrom_buffer = NULL;
1481        ha->optrom_state = QLA_SWAITING;
1482        mutex_unlock(&ha->optrom_mutex);
1483        bsg_job_done(bsg_job, bsg_reply->result,
1484                       bsg_reply->reply_payload_rcv_len);
1485        return rval;
1486}
1487
1488static int
1489qla2x00_update_optrom(struct bsg_job *bsg_job)
1490{
1491        struct fc_bsg_reply *bsg_reply = bsg_job->reply;
1492        struct Scsi_Host *host = fc_bsg_to_shost(bsg_job);
1493        scsi_qla_host_t *vha = shost_priv(host);
1494        struct qla_hw_data *ha = vha->hw;
1495        int rval = 0;
1496
1497        mutex_lock(&ha->optrom_mutex);
1498        rval = qla2x00_optrom_setup(bsg_job, vha, 1);
1499        if (rval) {
1500                mutex_unlock(&ha->optrom_mutex);
1501                return rval;
1502        }
1503
1504        /* Set the isp82xx_no_md_cap not to capture minidump */
1505        ha->flags.isp82xx_no_md_cap = 1;
1506
1507        sg_copy_to_buffer(bsg_job->request_payload.sg_list,
1508            bsg_job->request_payload.sg_cnt, ha->optrom_buffer,
1509            ha->optrom_region_size);
1510
1511        ha->isp_ops->write_optrom(vha, ha->optrom_buffer,
1512            ha->optrom_region_start, ha->optrom_region_size);
1513
1514        bsg_reply->result = DID_OK;
1515        vfree(ha->optrom_buffer);
1516        ha->optrom_buffer = NULL;
1517        ha->optrom_state = QLA_SWAITING;
1518        mutex_unlock(&ha->optrom_mutex);
1519        bsg_job_done(bsg_job, bsg_reply->result,
1520                       bsg_reply->reply_payload_rcv_len);
1521        return rval;
1522}
1523
1524static int
1525qla2x00_update_fru_versions(struct bsg_job *bsg_job)
1526{
1527        struct fc_bsg_reply *bsg_reply = bsg_job->reply;
1528        struct Scsi_Host *host = fc_bsg_to_shost(bsg_job);
1529        scsi_qla_host_t *vha = shost_priv(host);
1530        struct qla_hw_data *ha = vha->hw;
1531        int rval = 0;
1532        uint8_t bsg[DMA_POOL_SIZE];
1533        struct qla_image_version_list *list = (void *)bsg;
1534        struct qla_image_version *image;
1535        uint32_t count;
1536        dma_addr_t sfp_dma;
1537        void *sfp = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, &sfp_dma);
1538        if (!sfp) {
1539                bsg_reply->reply_data.vendor_reply.vendor_rsp[0] =
1540                    EXT_STATUS_NO_MEMORY;
1541                goto done;
1542        }
1543
1544        sg_copy_to_buffer(bsg_job->request_payload.sg_list,
1545            bsg_job->request_payload.sg_cnt, list, sizeof(bsg));
1546
1547        image = list->version;
1548        count = list->count;
1549        while (count--) {
1550                memcpy(sfp, &image->field_info, sizeof(image->field_info));
1551                rval = qla2x00_write_sfp(vha, sfp_dma, sfp,
1552                    image->field_address.device, image->field_address.offset,
1553                    sizeof(image->field_info), image->field_address.option);
1554                if (rval) {
1555                        bsg_reply->reply_data.vendor_reply.vendor_rsp[0] =
1556                            EXT_STATUS_MAILBOX;
1557                        goto dealloc;
1558                }
1559                image++;
1560        }
1561
1562        bsg_reply->reply_data.vendor_reply.vendor_rsp[0] = 0;
1563
1564dealloc:
1565        dma_pool_free(ha->s_dma_pool, sfp, sfp_dma);
1566
1567done:
1568        bsg_job->reply_len = sizeof(struct fc_bsg_reply);
1569        bsg_reply->result = DID_OK << 16;
1570        bsg_job_done(bsg_job, bsg_reply->result,
1571                       bsg_reply->reply_payload_rcv_len);
1572
1573        return 0;
1574}
1575
1576static int
1577qla2x00_read_fru_status(struct bsg_job *bsg_job)
1578{
1579        struct fc_bsg_reply *bsg_reply = bsg_job->reply;
1580        struct Scsi_Host *host = fc_bsg_to_shost(bsg_job);
1581        scsi_qla_host_t *vha = shost_priv(host);
1582        struct qla_hw_data *ha = vha->hw;
1583        int rval = 0;
1584        uint8_t bsg[DMA_POOL_SIZE];
1585        struct qla_status_reg *sr = (void *)bsg;
1586        dma_addr_t sfp_dma;
1587        uint8_t *sfp = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, &sfp_dma);
1588        if (!sfp) {
1589                bsg_reply->reply_data.vendor_reply.vendor_rsp[0] =
1590                    EXT_STATUS_NO_MEMORY;
1591                goto done;
1592        }
1593
1594        sg_copy_to_buffer(bsg_job->request_payload.sg_list,
1595            bsg_job->request_payload.sg_cnt, sr, sizeof(*sr));
1596
1597        rval = qla2x00_read_sfp(vha, sfp_dma, sfp,
1598            sr->field_address.device, sr->field_address.offset,
1599            sizeof(sr->status_reg), sr->field_address.option);
1600        sr->status_reg = *sfp;
1601
1602        if (rval) {
1603                bsg_reply->reply_data.vendor_reply.vendor_rsp[0] =
1604                    EXT_STATUS_MAILBOX;
1605                goto dealloc;
1606        }
1607
1608        sg_copy_from_buffer(bsg_job->reply_payload.sg_list,
1609            bsg_job->reply_payload.sg_cnt, sr, sizeof(*sr));
1610
1611        bsg_reply->reply_data.vendor_reply.vendor_rsp[0] = 0;
1612
1613dealloc:
1614        dma_pool_free(ha->s_dma_pool, sfp, sfp_dma);
1615
1616done:
1617        bsg_job->reply_len = sizeof(struct fc_bsg_reply);
1618        bsg_reply->reply_payload_rcv_len = sizeof(*sr);
1619        bsg_reply->result = DID_OK << 16;
1620        bsg_job_done(bsg_job, bsg_reply->result,
1621                       bsg_reply->reply_payload_rcv_len);
1622
1623        return 0;
1624}
1625
1626static int
1627qla2x00_write_fru_status(struct bsg_job *bsg_job)
1628{
1629        struct fc_bsg_reply *bsg_reply = bsg_job->reply;
1630        struct Scsi_Host *host = fc_bsg_to_shost(bsg_job);
1631        scsi_qla_host_t *vha = shost_priv(host);
1632        struct qla_hw_data *ha = vha->hw;
1633        int rval = 0;
1634        uint8_t bsg[DMA_POOL_SIZE];
1635        struct qla_status_reg *sr = (void *)bsg;
1636        dma_addr_t sfp_dma;
1637        uint8_t *sfp = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, &sfp_dma);
1638        if (!sfp) {
1639                bsg_reply->reply_data.vendor_reply.vendor_rsp[0] =
1640                    EXT_STATUS_NO_MEMORY;
1641                goto done;
1642        }
1643
1644        sg_copy_to_buffer(bsg_job->request_payload.sg_list,
1645            bsg_job->request_payload.sg_cnt, sr, sizeof(*sr));
1646
1647        *sfp = sr->status_reg;
1648        rval = qla2x00_write_sfp(vha, sfp_dma, sfp,
1649            sr->field_address.device, sr->field_address.offset,
1650            sizeof(sr->status_reg), sr->field_address.option);
1651
1652        if (rval) {
1653                bsg_reply->reply_data.vendor_reply.vendor_rsp[0] =
1654                    EXT_STATUS_MAILBOX;
1655                goto dealloc;
1656        }
1657
1658        bsg_reply->reply_data.vendor_reply.vendor_rsp[0] = 0;
1659
1660dealloc:
1661        dma_pool_free(ha->s_dma_pool, sfp, sfp_dma);
1662
1663done:
1664        bsg_job->reply_len = sizeof(struct fc_bsg_reply);
1665        bsg_reply->result = DID_OK << 16;
1666        bsg_job_done(bsg_job, bsg_reply->result,
1667                       bsg_reply->reply_payload_rcv_len);
1668
1669        return 0;
1670}
1671
1672static int
1673qla2x00_write_i2c(struct bsg_job *bsg_job)
1674{
1675        struct fc_bsg_reply *bsg_reply = bsg_job->reply;
1676        struct Scsi_Host *host = fc_bsg_to_shost(bsg_job);
1677        scsi_qla_host_t *vha = shost_priv(host);
1678        struct qla_hw_data *ha = vha->hw;
1679        int rval = 0;
1680        uint8_t bsg[DMA_POOL_SIZE];
1681        struct qla_i2c_access *i2c = (void *)bsg;
1682        dma_addr_t sfp_dma;
1683        uint8_t *sfp = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, &sfp_dma);
1684        if (!sfp) {
1685                bsg_reply->reply_data.vendor_reply.vendor_rsp[0] =
1686                    EXT_STATUS_NO_MEMORY;
1687                goto done;
1688        }
1689
1690        sg_copy_to_buffer(bsg_job->request_payload.sg_list,
1691            bsg_job->request_payload.sg_cnt, i2c, sizeof(*i2c));
1692
1693        memcpy(sfp, i2c->buffer, i2c->length);
1694        rval = qla2x00_write_sfp(vha, sfp_dma, sfp,
1695            i2c->device, i2c->offset, i2c->length, i2c->option);
1696
1697        if (rval) {
1698                bsg_reply->reply_data.vendor_reply.vendor_rsp[0] =
1699                    EXT_STATUS_MAILBOX;
1700                goto dealloc;
1701        }
1702
1703        bsg_reply->reply_data.vendor_reply.vendor_rsp[0] = 0;
1704
1705dealloc:
1706        dma_pool_free(ha->s_dma_pool, sfp, sfp_dma);
1707
1708done:
1709        bsg_job->reply_len = sizeof(struct fc_bsg_reply);
1710        bsg_reply->result = DID_OK << 16;
1711        bsg_job_done(bsg_job, bsg_reply->result,
1712                       bsg_reply->reply_payload_rcv_len);
1713
1714        return 0;
1715}
1716
1717static int
1718qla2x00_read_i2c(struct bsg_job *bsg_job)
1719{
1720        struct fc_bsg_reply *bsg_reply = bsg_job->reply;
1721        struct Scsi_Host *host = fc_bsg_to_shost(bsg_job);
1722        scsi_qla_host_t *vha = shost_priv(host);
1723        struct qla_hw_data *ha = vha->hw;
1724        int rval = 0;
1725        uint8_t bsg[DMA_POOL_SIZE];
1726        struct qla_i2c_access *i2c = (void *)bsg;
1727        dma_addr_t sfp_dma;
1728        uint8_t *sfp = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, &sfp_dma);
1729        if (!sfp) {
1730                bsg_reply->reply_data.vendor_reply.vendor_rsp[0] =
1731                    EXT_STATUS_NO_MEMORY;
1732                goto done;
1733        }
1734
1735        sg_copy_to_buffer(bsg_job->request_payload.sg_list,
1736            bsg_job->request_payload.sg_cnt, i2c, sizeof(*i2c));
1737
1738        rval = qla2x00_read_sfp(vha, sfp_dma, sfp,
1739                i2c->device, i2c->offset, i2c->length, i2c->option);
1740
1741        if (rval) {
1742                bsg_reply->reply_data.vendor_reply.vendor_rsp[0] =
1743                    EXT_STATUS_MAILBOX;
1744                goto dealloc;
1745        }
1746
1747        memcpy(i2c->buffer, sfp, i2c->length);
1748        sg_copy_from_buffer(bsg_job->reply_payload.sg_list,
1749            bsg_job->reply_payload.sg_cnt, i2c, sizeof(*i2c));
1750
1751        bsg_reply->reply_data.vendor_reply.vendor_rsp[0] = 0;
1752
1753dealloc:
1754        dma_pool_free(ha->s_dma_pool, sfp, sfp_dma);
1755
1756done:
1757        bsg_job->reply_len = sizeof(struct fc_bsg_reply);
1758        bsg_reply->reply_payload_rcv_len = sizeof(*i2c);
1759        bsg_reply->result = DID_OK << 16;
1760        bsg_job_done(bsg_job, bsg_reply->result,
1761                       bsg_reply->reply_payload_rcv_len);
1762
1763        return 0;
1764}
1765
1766static int
1767qla24xx_process_bidir_cmd(struct bsg_job *bsg_job)
1768{
1769        struct fc_bsg_reply *bsg_reply = bsg_job->reply;
1770        struct Scsi_Host *host = fc_bsg_to_shost(bsg_job);
1771        scsi_qla_host_t *vha = shost_priv(host);
1772        struct qla_hw_data *ha = vha->hw;
1773        uint32_t rval = EXT_STATUS_OK;
1774        uint16_t req_sg_cnt = 0;
1775        uint16_t rsp_sg_cnt = 0;
1776        uint16_t nextlid = 0;
1777        uint32_t tot_dsds;
1778        srb_t *sp = NULL;
1779        uint32_t req_data_len = 0;
1780        uint32_t rsp_data_len = 0;
1781
1782        /* Check the type of the adapter */
1783        if (!IS_BIDI_CAPABLE(ha)) {
1784                ql_log(ql_log_warn, vha, 0x70a0,
1785                        "This adapter is not supported\n");
1786                rval = EXT_STATUS_NOT_SUPPORTED;
1787                goto done;
1788        }
1789
1790        if (test_bit(ISP_ABORT_NEEDED, &vha->dpc_flags) ||
1791                test_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags) ||
1792                test_bit(ISP_ABORT_RETRY, &vha->dpc_flags)) {
1793                rval =  EXT_STATUS_BUSY;
1794                goto done;
1795        }
1796
1797        /* Check if host is online */
1798        if (!vha->flags.online) {
1799                ql_log(ql_log_warn, vha, 0x70a1,
1800                        "Host is not online\n");
1801                rval = EXT_STATUS_DEVICE_OFFLINE;
1802                goto done;
1803        }
1804
1805        /* Check if cable is plugged in or not */
1806        if (vha->device_flags & DFLG_NO_CABLE) {
1807                ql_log(ql_log_warn, vha, 0x70a2,
1808                        "Cable is unplugged...\n");
1809                rval = EXT_STATUS_INVALID_CFG;
1810                goto done;
1811        }
1812
1813        /* Check if the switch is connected or not */
1814        if (ha->current_topology != ISP_CFG_F) {
1815                ql_log(ql_log_warn, vha, 0x70a3,
1816                        "Host is not connected to the switch\n");
1817                rval = EXT_STATUS_INVALID_CFG;
1818                goto done;
1819        }
1820
1821        /* Check if operating mode is P2P */
1822        if (ha->operating_mode != P2P) {
1823                ql_log(ql_log_warn, vha, 0x70a4,
1824                    "Host operating mode is not P2p\n");
1825                rval = EXT_STATUS_INVALID_CFG;
1826                goto done;
1827        }
1828
1829        mutex_lock(&ha->selflogin_lock);
1830        if (vha->self_login_loop_id == 0) {
1831                /* Initialize all required  fields of fcport */
1832                vha->bidir_fcport.vha = vha;
1833                vha->bidir_fcport.d_id.b.al_pa = vha->d_id.b.al_pa;
1834                vha->bidir_fcport.d_id.b.area = vha->d_id.b.area;
1835                vha->bidir_fcport.d_id.b.domain = vha->d_id.b.domain;
1836                vha->bidir_fcport.loop_id = vha->loop_id;
1837
1838                if (qla2x00_fabric_login(vha, &(vha->bidir_fcport), &nextlid)) {
1839                        ql_log(ql_log_warn, vha, 0x70a7,
1840                            "Failed to login port %06X for bidirectional IOCB\n",
1841                            vha->bidir_fcport.d_id.b24);
1842                        mutex_unlock(&ha->selflogin_lock);
1843                        rval = EXT_STATUS_MAILBOX;
1844                        goto done;
1845                }
1846                vha->self_login_loop_id = nextlid - 1;
1847
1848        }
1849        /* Assign the self login loop id to fcport */
1850        mutex_unlock(&ha->selflogin_lock);
1851
1852        vha->bidir_fcport.loop_id = vha->self_login_loop_id;
1853
1854        req_sg_cnt = dma_map_sg(&ha->pdev->dev,
1855                bsg_job->request_payload.sg_list,
1856                bsg_job->request_payload.sg_cnt,
1857                DMA_TO_DEVICE);
1858
1859        if (!req_sg_cnt) {
1860                rval = EXT_STATUS_NO_MEMORY;
1861                goto done;
1862        }
1863
1864        rsp_sg_cnt = dma_map_sg(&ha->pdev->dev,
1865                bsg_job->reply_payload.sg_list, bsg_job->reply_payload.sg_cnt,
1866                DMA_FROM_DEVICE);
1867
1868        if (!rsp_sg_cnt) {
1869                rval = EXT_STATUS_NO_MEMORY;
1870                goto done_unmap_req_sg;
1871        }
1872
1873        if ((req_sg_cnt !=  bsg_job->request_payload.sg_cnt) ||
1874                (rsp_sg_cnt != bsg_job->reply_payload.sg_cnt)) {
1875                ql_dbg(ql_dbg_user, vha, 0x70a9,
1876                    "Dma mapping resulted in different sg counts "
1877                    "[request_sg_cnt: %x dma_request_sg_cnt: %x reply_sg_cnt: "
1878                    "%x dma_reply_sg_cnt: %x]\n",
1879                    bsg_job->request_payload.sg_cnt, req_sg_cnt,
1880                    bsg_job->reply_payload.sg_cnt, rsp_sg_cnt);
1881                rval = EXT_STATUS_NO_MEMORY;
1882                goto done_unmap_sg;
1883        }
1884
1885        if (req_data_len != rsp_data_len) {
1886                rval = EXT_STATUS_BUSY;
1887                ql_log(ql_log_warn, vha, 0x70aa,
1888                    "req_data_len != rsp_data_len\n");
1889                goto done_unmap_sg;
1890        }
1891
1892        req_data_len = bsg_job->request_payload.payload_len;
1893        rsp_data_len = bsg_job->reply_payload.payload_len;
1894
1895
1896        /* Alloc SRB structure */
1897        sp = qla2x00_get_sp(vha, &(vha->bidir_fcport), GFP_KERNEL);
1898        if (!sp) {
1899                ql_dbg(ql_dbg_user, vha, 0x70ac,
1900                    "Alloc SRB structure failed\n");
1901                rval = EXT_STATUS_NO_MEMORY;
1902                goto done_unmap_sg;
1903        }
1904
1905        /*Populate srb->ctx with bidir ctx*/
1906        sp->u.bsg_job = bsg_job;
1907        sp->free = qla2x00_bsg_sp_free;
1908        sp->type = SRB_BIDI_CMD;
1909        sp->done = qla2x00_bsg_job_done;
1910
1911        /* Add the read and write sg count */
1912        tot_dsds = rsp_sg_cnt + req_sg_cnt;
1913
1914        rval = qla2x00_start_bidir(sp, vha, tot_dsds);
1915        if (rval != EXT_STATUS_OK)
1916                goto done_free_srb;
1917        /* the bsg request  will be completed in the interrupt handler */
1918        return rval;
1919
1920done_free_srb:
1921        mempool_free(sp, ha->srb_mempool);
1922done_unmap_sg:
1923        dma_unmap_sg(&ha->pdev->dev,
1924            bsg_job->reply_payload.sg_list,
1925            bsg_job->reply_payload.sg_cnt, DMA_FROM_DEVICE);
1926done_unmap_req_sg:
1927        dma_unmap_sg(&ha->pdev->dev,
1928            bsg_job->request_payload.sg_list,
1929            bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE);
1930done:
1931
1932        /* Return an error vendor specific response
1933         * and complete the bsg request
1934         */
1935        bsg_reply->reply_data.vendor_reply.vendor_rsp[0] = rval;
1936        bsg_job->reply_len = sizeof(struct fc_bsg_reply);
1937        bsg_reply->reply_payload_rcv_len = 0;
1938        bsg_reply->result = (DID_OK) << 16;
1939        bsg_job_done(bsg_job, bsg_reply->result,
1940                       bsg_reply->reply_payload_rcv_len);
1941        /* Always return success, vendor rsp carries correct status */
1942        return 0;
1943}
1944
/*
 * Vendor command (ISPFx00): submit a host-based management IOCB.
 *
 * The IOCB request parameters are embedded in the vendor command words
 * (struct qla_mt_iocb_rqst_fx00 starting at vendor_cmd[1]).  Request
 * and/or reply payloads are DMA mapped only when the corresponding
 * SRB_FXDISC_*_DMA_VALID flag is set.  On success the SRB is queued and
 * the bsg job completes from the interrupt path (qla2x00_bsg_job_done);
 * qla2x00_bsg_sp_free then unmaps the payloads.  On any failure this
 * routine unwinds its own resources and returns a negative errno.
 */
static int
qlafx00_mgmt_cmd(struct bsg_job *bsg_job)
{
	struct fc_bsg_request *bsg_request = bsg_job->request;
	struct Scsi_Host *host = fc_bsg_to_shost(bsg_job);
	scsi_qla_host_t *vha = shost_priv(host);
	struct qla_hw_data *ha = vha->hw;
	int rval = (DRIVER_ERROR << 16);
	struct qla_mt_iocb_rqst_fx00 *piocb_rqst;
	srb_t *sp;
	int req_sg_cnt = 0, rsp_sg_cnt = 0;
	struct fc_port *fcport;
	char  *type = "FC_BSG_HST_FX_MGMT";

	/* Copy the IOCB specific information */
	piocb_rqst = (struct qla_mt_iocb_rqst_fx00 *)
	    &bsg_request->rqst_data.h_vendor.vendor_cmd[1];

	/* Dump the vendor information */
	ql_dump_buffer(ql_dbg_user + ql_dbg_verbose , vha, 0x70cf,
	    (uint8_t *)piocb_rqst, sizeof(struct qla_mt_iocb_rqst_fx00));

	if (!vha->flags.online) {
		ql_log(ql_log_warn, vha, 0x70d0,
		    "Host is not online.\n");
		rval = -EIO;
		goto done;
	}

	/* Map the request payload only if the IOCB says it carries data. */
	if (piocb_rqst->flags & SRB_FXDISC_REQ_DMA_VALID) {
		req_sg_cnt = dma_map_sg(&ha->pdev->dev,
		    bsg_job->request_payload.sg_list,
		    bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE);
		if (!req_sg_cnt) {
			ql_log(ql_log_warn, vha, 0x70c7,
			    "dma_map_sg return %d for request\n", req_sg_cnt);
			rval = -ENOMEM;
			goto done;
		}
	}

	/* Likewise for the reply payload. */
	if (piocb_rqst->flags & SRB_FXDISC_RESP_DMA_VALID) {
		rsp_sg_cnt = dma_map_sg(&ha->pdev->dev,
		    bsg_job->reply_payload.sg_list,
		    bsg_job->reply_payload.sg_cnt, DMA_FROM_DEVICE);
		if (!rsp_sg_cnt) {
			ql_log(ql_log_warn, vha, 0x70c8,
			    "dma_map_sg return %d for reply\n", rsp_sg_cnt);
			rval = -ENOMEM;
			goto done_unmap_req_sg;
		}
	}

	ql_dbg(ql_dbg_user, vha, 0x70c9,
	    "request_sg_cnt: %x dma_request_sg_cnt: %x reply_sg_cnt:%x "
	    "dma_reply_sg_cnt: %x\n", bsg_job->request_payload.sg_cnt,
	    req_sg_cnt, bsg_job->reply_payload.sg_cnt, rsp_sg_cnt);

	/* Allocate a dummy fcport structure, since functions preparing the
	 * IOCB and mailbox command retrieves port specific information
	 * from fcport structure. For Host based ELS commands there will be
	 * no fcport structure allocated
	 */
	fcport = qla2x00_alloc_fcport(vha, GFP_KERNEL);
	if (!fcport) {
		ql_log(ql_log_warn, vha, 0x70ca,
		    "Failed to allocate fcport.\n");
		rval = -ENOMEM;
		goto done_unmap_rsp_sg;
	}

	/* Alloc SRB structure */
	sp = qla2x00_get_sp(vha, fcport, GFP_KERNEL);
	if (!sp) {
		ql_log(ql_log_warn, vha, 0x70cb,
		    "qla2x00_get_sp failed.\n");
		rval = -ENOMEM;
		goto done_free_fcport;
	}

	/* Initialize all required  fields of fcport */
	fcport->vha = vha;
	fcport->loop_id = piocb_rqst->dataword;

	sp->type = SRB_FXIOCB_BCMD;
	sp->name = "bsg_fx_mgmt";
	/* IOCB count depends on the total number of mapped sg entries. */
	sp->iocbs = qla24xx_calc_ct_iocbs(req_sg_cnt + rsp_sg_cnt);
	sp->u.bsg_job = bsg_job;
	sp->free = qla2x00_bsg_sp_free;
	sp->done = qla2x00_bsg_job_done;

	ql_dbg(ql_dbg_user, vha, 0x70cc,
	    "bsg rqst type: %s fx_mgmt_type: %x id=%x\n",
	    type, piocb_rqst->func_type, fcport->loop_id);

	rval = qla2x00_start_sp(sp);
	if (rval != QLA_SUCCESS) {
		ql_log(ql_log_warn, vha, 0x70cd,
		    "qla2x00_start_sp failed=%d.\n", rval);
		mempool_free(sp, ha->srb_mempool);
		rval = -EIO;
		goto done_free_fcport;
	}
	/* Queued successfully: completion happens in the interrupt path. */
	return rval;

done_free_fcport:
	kfree(fcport);

done_unmap_rsp_sg:
	/* Only undo the mappings that were actually established above. */
	if (piocb_rqst->flags & SRB_FXDISC_RESP_DMA_VALID)
		dma_unmap_sg(&ha->pdev->dev,
		    bsg_job->reply_payload.sg_list,
		    bsg_job->reply_payload.sg_cnt, DMA_FROM_DEVICE);
done_unmap_req_sg:
	if (piocb_rqst->flags & SRB_FXDISC_REQ_DMA_VALID)
		dma_unmap_sg(&ha->pdev->dev,
		    bsg_job->request_payload.sg_list,
		    bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE);

done:
	return rval;
}
2067
2068static int
2069qla26xx_serdes_op(struct bsg_job *bsg_job)
2070{
2071        struct fc_bsg_reply *bsg_reply = bsg_job->reply;
2072        struct Scsi_Host *host = fc_bsg_to_shost(bsg_job);
2073        scsi_qla_host_t *vha = shost_priv(host);
2074        int rval = 0;
2075        struct qla_serdes_reg sr;
2076
2077        memset(&sr, 0, sizeof(sr));
2078
2079        sg_copy_to_buffer(bsg_job->request_payload.sg_list,
2080            bsg_job->request_payload.sg_cnt, &sr, sizeof(sr));
2081
2082        switch (sr.cmd) {
2083        case INT_SC_SERDES_WRITE_REG:
2084                rval = qla2x00_write_serdes_word(vha, sr.addr, sr.val);
2085                bsg_reply->reply_payload_rcv_len = 0;
2086                break;
2087        case INT_SC_SERDES_READ_REG:
2088                rval = qla2x00_read_serdes_word(vha, sr.addr, &sr.val);
2089                sg_copy_from_buffer(bsg_job->reply_payload.sg_list,
2090                    bsg_job->reply_payload.sg_cnt, &sr, sizeof(sr));
2091                bsg_reply->reply_payload_rcv_len = sizeof(sr);
2092                break;
2093        default:
2094                ql_dbg(ql_dbg_user, vha, 0x708c,
2095                    "Unknown serdes cmd %x.\n", sr.cmd);
2096                rval = -EINVAL;
2097                break;
2098        }
2099
2100        bsg_reply->reply_data.vendor_reply.vendor_rsp[0] =
2101            rval ? EXT_STATUS_MAILBOX : 0;
2102
2103        bsg_job->reply_len = sizeof(struct fc_bsg_reply);
2104        bsg_reply->result = DID_OK << 16;
2105        bsg_job_done(bsg_job, bsg_reply->result,
2106                       bsg_reply->reply_payload_rcv_len);
2107        return 0;
2108}
2109
2110static int
2111qla8044_serdes_op(struct bsg_job *bsg_job)
2112{
2113        struct fc_bsg_reply *bsg_reply = bsg_job->reply;
2114        struct Scsi_Host *host = fc_bsg_to_shost(bsg_job);
2115        scsi_qla_host_t *vha = shost_priv(host);
2116        int rval = 0;
2117        struct qla_serdes_reg_ex sr;
2118
2119        memset(&sr, 0, sizeof(sr));
2120
2121        sg_copy_to_buffer(bsg_job->request_payload.sg_list,
2122            bsg_job->request_payload.sg_cnt, &sr, sizeof(sr));
2123
2124        switch (sr.cmd) {
2125        case INT_SC_SERDES_WRITE_REG:
2126                rval = qla8044_write_serdes_word(vha, sr.addr, sr.val);
2127                bsg_reply->reply_payload_rcv_len = 0;
2128                break;
2129        case INT_SC_SERDES_READ_REG:
2130                rval = qla8044_read_serdes_word(vha, sr.addr, &sr.val);
2131                sg_copy_from_buffer(bsg_job->reply_payload.sg_list,
2132                    bsg_job->reply_payload.sg_cnt, &sr, sizeof(sr));
2133                bsg_reply->reply_payload_rcv_len = sizeof(sr);
2134                break;
2135        default:
2136                ql_dbg(ql_dbg_user, vha, 0x7020,
2137                    "Unknown serdes cmd %x.\n", sr.cmd);
2138                rval = -EINVAL;
2139                break;
2140        }
2141
2142        bsg_reply->reply_data.vendor_reply.vendor_rsp[0] =
2143            rval ? EXT_STATUS_MAILBOX : 0;
2144
2145        bsg_job->reply_len = sizeof(struct fc_bsg_reply);
2146        bsg_reply->result = DID_OK << 16;
2147        bsg_job_done(bsg_job, bsg_reply->result,
2148                       bsg_reply->reply_payload_rcv_len);
2149        return 0;
2150}
2151
2152static int
2153qla27xx_get_flash_upd_cap(struct bsg_job *bsg_job)
2154{
2155        struct fc_bsg_reply *bsg_reply = bsg_job->reply;
2156        struct Scsi_Host *host = fc_bsg_to_shost(bsg_job);
2157        scsi_qla_host_t *vha = shost_priv(host);
2158        struct qla_hw_data *ha = vha->hw;
2159        struct qla_flash_update_caps cap;
2160
2161        if (!(IS_QLA27XX(ha)))
2162                return -EPERM;
2163
2164        memset(&cap, 0, sizeof(cap));
2165        cap.capabilities = (uint64_t)ha->fw_attributes_ext[1] << 48 |
2166                           (uint64_t)ha->fw_attributes_ext[0] << 32 |
2167                           (uint64_t)ha->fw_attributes_h << 16 |
2168                           (uint64_t)ha->fw_attributes;
2169
2170        sg_copy_from_buffer(bsg_job->reply_payload.sg_list,
2171            bsg_job->reply_payload.sg_cnt, &cap, sizeof(cap));
2172        bsg_reply->reply_payload_rcv_len = sizeof(cap);
2173
2174        bsg_reply->reply_data.vendor_reply.vendor_rsp[0] =
2175            EXT_STATUS_OK;
2176
2177        bsg_job->reply_len = sizeof(struct fc_bsg_reply);
2178        bsg_reply->result = DID_OK << 16;
2179        bsg_job_done(bsg_job, bsg_reply->result,
2180                       bsg_reply->reply_payload_rcv_len);
2181        return 0;
2182}
2183
2184static int
2185qla27xx_set_flash_upd_cap(struct bsg_job *bsg_job)
2186{
2187        struct fc_bsg_reply *bsg_reply = bsg_job->reply;
2188        struct Scsi_Host *host = fc_bsg_to_shost(bsg_job);
2189        scsi_qla_host_t *vha = shost_priv(host);
2190        struct qla_hw_data *ha = vha->hw;
2191        uint64_t online_fw_attr = 0;
2192        struct qla_flash_update_caps cap;
2193
2194        if (!(IS_QLA27XX(ha)))
2195                return -EPERM;
2196
2197        memset(&cap, 0, sizeof(cap));
2198        sg_copy_to_buffer(bsg_job->request_payload.sg_list,
2199            bsg_job->request_payload.sg_cnt, &cap, sizeof(cap));
2200
2201        online_fw_attr = (uint64_t)ha->fw_attributes_ext[1] << 48 |
2202                         (uint64_t)ha->fw_attributes_ext[0] << 32 |
2203                         (uint64_t)ha->fw_attributes_h << 16 |
2204                         (uint64_t)ha->fw_attributes;
2205
2206        if (online_fw_attr != cap.capabilities) {
2207                bsg_reply->reply_data.vendor_reply.vendor_rsp[0] =
2208                    EXT_STATUS_INVALID_PARAM;
2209                return -EINVAL;
2210        }
2211
2212        if (cap.outage_duration < MAX_LOOP_TIMEOUT)  {
2213                bsg_reply->reply_data.vendor_reply.vendor_rsp[0] =
2214                    EXT_STATUS_INVALID_PARAM;
2215                return -EINVAL;
2216        }
2217
2218        bsg_reply->reply_payload_rcv_len = 0;
2219
2220        bsg_reply->reply_data.vendor_reply.vendor_rsp[0] =
2221            EXT_STATUS_OK;
2222
2223        bsg_job->reply_len = sizeof(struct fc_bsg_reply);
2224        bsg_reply->result = DID_OK << 16;
2225        bsg_job_done(bsg_job, bsg_reply->result,
2226                       bsg_reply->reply_payload_rcv_len);
2227        return 0;
2228}
2229
2230static int
2231qla27xx_get_bbcr_data(struct bsg_job *bsg_job)
2232{
2233        struct fc_bsg_reply *bsg_reply = bsg_job->reply;
2234        struct Scsi_Host *host = fc_bsg_to_shost(bsg_job);
2235        scsi_qla_host_t *vha = shost_priv(host);
2236        struct qla_hw_data *ha = vha->hw;
2237        struct qla_bbcr_data bbcr;
2238        uint16_t loop_id, topo, sw_cap;
2239        uint8_t domain, area, al_pa, state;
2240        int rval;
2241
2242        if (!(IS_QLA27XX(ha)))
2243                return -EPERM;
2244
2245        memset(&bbcr, 0, sizeof(bbcr));
2246
2247        if (vha->flags.bbcr_enable)
2248                bbcr.status = QLA_BBCR_STATUS_ENABLED;
2249        else
2250                bbcr.status = QLA_BBCR_STATUS_DISABLED;
2251
2252        if (bbcr.status == QLA_BBCR_STATUS_ENABLED) {
2253                rval = qla2x00_get_adapter_id(vha, &loop_id, &al_pa,
2254                        &area, &domain, &topo, &sw_cap);
2255                if (rval != QLA_SUCCESS) {
2256                        bbcr.status = QLA_BBCR_STATUS_UNKNOWN;
2257                        bbcr.state = QLA_BBCR_STATE_OFFLINE;
2258                        bbcr.mbx1 = loop_id;
2259                        goto done;
2260                }
2261
2262                state = (vha->bbcr >> 12) & 0x1;
2263
2264                if (state) {
2265                        bbcr.state = QLA_BBCR_STATE_OFFLINE;
2266                        bbcr.offline_reason_code = QLA_BBCR_REASON_LOGIN_REJECT;
2267                } else {
2268                        bbcr.state = QLA_BBCR_STATE_ONLINE;
2269                        bbcr.negotiated_bbscn = (vha->bbcr >> 8) & 0xf;
2270                }
2271
2272                bbcr.configured_bbscn = vha->bbcr & 0xf;
2273        }
2274
2275done:
2276        sg_copy_from_buffer(bsg_job->reply_payload.sg_list,
2277                bsg_job->reply_payload.sg_cnt, &bbcr, sizeof(bbcr));
2278        bsg_reply->reply_payload_rcv_len = sizeof(bbcr);
2279
2280        bsg_reply->reply_data.vendor_reply.vendor_rsp[0] = EXT_STATUS_OK;
2281
2282        bsg_job->reply_len = sizeof(struct fc_bsg_reply);
2283        bsg_reply->result = DID_OK << 16;
2284        bsg_job_done(bsg_job, bsg_reply->result,
2285                       bsg_reply->reply_payload_rcv_len);
2286        return 0;
2287}
2288
/*
 * qla2x00_get_priv_stats() - BSG vendor command handler returning the
 * adapter's private link statistics to user space.
 *
 * Services both QL_VND_GET_PRIV_STATS and QL_VND_GET_PRIV_STATS_EX;
 * only the _EX variant carries an options word in vendor_cmd[1].
 *
 * Returns 0 once the request has been completed (the mailbox outcome is
 * reported through vendor_rsp[0]), or a negative errno when the request
 * could not be attempted at all.
 */
static int
qla2x00_get_priv_stats(struct bsg_job *bsg_job)
{
        struct fc_bsg_request *bsg_request = bsg_job->request;
        struct fc_bsg_reply *bsg_reply = bsg_job->reply;
        struct Scsi_Host *host = fc_bsg_to_shost(bsg_job);
        scsi_qla_host_t *vha = shost_priv(host);
        struct qla_hw_data *ha = vha->hw;
        /* Statistics are gathered against the physical (base) host. */
        struct scsi_qla_host *base_vha = pci_get_drvdata(ha->pdev);
        struct link_statistics *stats = NULL;
        dma_addr_t stats_dma;
        int rval;
        uint32_t *cmd = bsg_request->rqst_data.h_vendor.vendor_cmd;
        /* Plain GET_PRIV_STATS has no options word; default to 0. */
        uint options = cmd[0] == QL_VND_GET_PRIV_STATS_EX ? cmd[1] : 0;

        /* Refuse while the driver or the PCI device is going away. */
        if (test_bit(UNLOADING, &vha->dpc_flags))
                return -ENODEV;

        if (unlikely(pci_channel_offline(ha->pdev)))
                return -ENODEV;

        if (qla2x00_reset_active(vha))
                return -EBUSY;

        /* The stats mailbox command used below is FWI2-only. */
        if (!IS_FWI2_CAPABLE(ha))
                return -EPERM;

        /* Coherent buffer the firmware DMA-fills with the statistics. */
        stats = dma_zalloc_coherent(&ha->pdev->dev, sizeof(*stats),
                                    &stats_dma, GFP_KERNEL);
        if (!stats) {
                ql_log(ql_log_warn, vha, 0x70e2,
                    "Failed to allocate memory for stats.\n");
                return -ENOMEM;
        }

        rval = qla24xx_get_isp_stats(base_vha, stats, stats_dma, options);

        /* Copy the statistics out only if the firmware produced them. */
        if (rval == QLA_SUCCESS) {
                ql_dump_buffer(ql_dbg_user + ql_dbg_verbose, vha, 0x70e3,
                    (uint8_t *)stats, sizeof(*stats));
                sg_copy_from_buffer(bsg_job->reply_payload.sg_list,
                        bsg_job->reply_payload.sg_cnt, stats, sizeof(*stats));
        }

        /* Mailbox failure is reported in-band via vendor_rsp[0]. */
        bsg_reply->reply_payload_rcv_len = sizeof(*stats);
        bsg_reply->reply_data.vendor_reply.vendor_rsp[0] =
            rval ? EXT_STATUS_MAILBOX : EXT_STATUS_OK;

        bsg_job->reply_len = sizeof(*bsg_reply);
        bsg_reply->result = DID_OK << 16;
        bsg_job_done(bsg_job, bsg_reply->result,
                       bsg_reply->reply_payload_rcv_len);

        /* Free only after the job is completed; no further DMA occurs. */
        dma_free_coherent(&ha->pdev->dev, sizeof(*stats),
                stats, stats_dma);

        return 0;
}
2347
2348static int
2349qla2x00_do_dport_diagnostics(struct bsg_job *bsg_job)
2350{
2351        struct fc_bsg_reply *bsg_reply = bsg_job->reply;
2352        struct Scsi_Host *host = fc_bsg_to_shost(bsg_job);
2353        scsi_qla_host_t *vha = shost_priv(host);
2354        int rval;
2355        struct qla_dport_diag *dd;
2356
2357        if (!IS_QLA83XX(vha->hw) && !IS_QLA27XX(vha->hw))
2358                return -EPERM;
2359
2360        dd = kmalloc(sizeof(*dd), GFP_KERNEL);
2361        if (!dd) {
2362                ql_log(ql_log_warn, vha, 0x70db,
2363                    "Failed to allocate memory for dport.\n");
2364                return -ENOMEM;
2365        }
2366
2367        sg_copy_to_buffer(bsg_job->request_payload.sg_list,
2368            bsg_job->request_payload.sg_cnt, dd, sizeof(*dd));
2369
2370        rval = qla26xx_dport_diagnostics(
2371            vha, dd->buf, sizeof(dd->buf), dd->options);
2372        if (rval == QLA_SUCCESS) {
2373                sg_copy_from_buffer(bsg_job->reply_payload.sg_list,
2374                    bsg_job->reply_payload.sg_cnt, dd, sizeof(*dd));
2375        }
2376
2377        bsg_reply->reply_payload_rcv_len = sizeof(*dd);
2378        bsg_reply->reply_data.vendor_reply.vendor_rsp[0] =
2379            rval ? EXT_STATUS_MAILBOX : EXT_STATUS_OK;
2380
2381        bsg_job->reply_len = sizeof(*bsg_reply);
2382        bsg_reply->result = DID_OK << 16;
2383        bsg_job_done(bsg_job, bsg_reply->result,
2384                       bsg_reply->reply_payload_rcv_len);
2385
2386        kfree(dd);
2387
2388        return 0;
2389}
2390
/*
 * qla2x00_process_vendor_specific() - dispatch an FC_BSG_HST_VENDOR
 * request to the matching QLogic vendor-command handler.
 *
 * The command code lives in vendor_cmd[0] of the BSG request; each
 * handler owns completing the job (or returning an errno so the caller
 * can fail it). Returns the handler's result, or -ENOSYS for unknown
 * vendor commands.
 */
static int
qla2x00_process_vendor_specific(struct bsg_job *bsg_job)
{
        struct fc_bsg_request *bsg_request = bsg_job->request;

        switch (bsg_request->rqst_data.h_vendor.vendor_cmd[0]) {
        case QL_VND_LOOPBACK:
                return qla2x00_process_loopback(bsg_job);

        case QL_VND_A84_RESET:
                return qla84xx_reset(bsg_job);

        case QL_VND_A84_UPDATE_FW:
                return qla84xx_updatefw(bsg_job);

        case QL_VND_A84_MGMT_CMD:
                return qla84xx_mgmt_cmd(bsg_job);

        case QL_VND_IIDMA:
                return qla24xx_iidma(bsg_job);

        case QL_VND_FCP_PRIO_CFG_CMD:
                return qla24xx_proc_fcp_prio_cfg_cmd(bsg_job);

        case QL_VND_READ_FLASH:
                return qla2x00_read_optrom(bsg_job);

        case QL_VND_UPDATE_FLASH:
                return qla2x00_update_optrom(bsg_job);

        case QL_VND_SET_FRU_VERSION:
                return qla2x00_update_fru_versions(bsg_job);

        case QL_VND_READ_FRU_STATUS:
                return qla2x00_read_fru_status(bsg_job);

        case QL_VND_WRITE_FRU_STATUS:
                return qla2x00_write_fru_status(bsg_job);

        case QL_VND_WRITE_I2C:
                return qla2x00_write_i2c(bsg_job);

        case QL_VND_READ_I2C:
                return qla2x00_read_i2c(bsg_job);

        case QL_VND_DIAG_IO_CMD:
                return qla24xx_process_bidir_cmd(bsg_job);

        case QL_VND_FX00_MGMT_CMD:
                return qlafx00_mgmt_cmd(bsg_job);

        case QL_VND_SERDES_OP:
                return qla26xx_serdes_op(bsg_job);

        case QL_VND_SERDES_OP_EX:
                return qla8044_serdes_op(bsg_job);

        case QL_VND_GET_FLASH_UPDATE_CAPS:
                return qla27xx_get_flash_upd_cap(bsg_job);

        case QL_VND_SET_FLASH_UPDATE_CAPS:
                return qla27xx_set_flash_upd_cap(bsg_job);

        case QL_VND_GET_BBCR_DATA:
                return qla27xx_get_bbcr_data(bsg_job);

        /* Both stats commands share a handler; it keys off vendor_cmd[0]. */
        case QL_VND_GET_PRIV_STATS:
        case QL_VND_GET_PRIV_STATS_EX:
                return qla2x00_get_priv_stats(bsg_job);

        case QL_VND_DPORT_DIAGNOSTICS:
                return qla2x00_do_dport_diagnostics(bsg_job);

        default:
                return -ENOSYS;
        }
}
2468
2469int
2470qla24xx_bsg_request(struct bsg_job *bsg_job)
2471{
2472        struct fc_bsg_request *bsg_request = bsg_job->request;
2473        struct fc_bsg_reply *bsg_reply = bsg_job->reply;
2474        int ret = -EINVAL;
2475        struct fc_rport *rport;
2476        struct Scsi_Host *host;
2477        scsi_qla_host_t *vha;
2478
2479        /* In case no data transferred. */
2480        bsg_reply->reply_payload_rcv_len = 0;
2481
2482        if (bsg_request->msgcode == FC_BSG_RPT_ELS) {
2483                rport = fc_bsg_to_rport(bsg_job);
2484                host = rport_to_shost(rport);
2485                vha = shost_priv(host);
2486        } else {
2487                host = fc_bsg_to_shost(bsg_job);
2488                vha = shost_priv(host);
2489        }
2490
2491        if (qla2x00_reset_active(vha)) {
2492                ql_dbg(ql_dbg_user, vha, 0x709f,
2493                    "BSG: ISP abort active/needed -- cmd=%d.\n",
2494                    bsg_request->msgcode);
2495                return -EBUSY;
2496        }
2497
2498        ql_dbg(ql_dbg_user, vha, 0x7000,
2499            "Entered %s msgcode=0x%x.\n", __func__, bsg_request->msgcode);
2500
2501        switch (bsg_request->msgcode) {
2502        case FC_BSG_RPT_ELS:
2503        case FC_BSG_HST_ELS_NOLOGIN:
2504                ret = qla2x00_process_els(bsg_job);
2505                break;
2506        case FC_BSG_HST_CT:
2507                ret = qla2x00_process_ct(bsg_job);
2508                break;
2509        case FC_BSG_HST_VENDOR:
2510                ret = qla2x00_process_vendor_specific(bsg_job);
2511                break;
2512        case FC_BSG_HST_ADD_RPORT:
2513        case FC_BSG_HST_DEL_RPORT:
2514        case FC_BSG_RPT_CT:
2515        default:
2516                ql_log(ql_log_warn, vha, 0x705a, "Unsupported BSG request.\n");
2517                break;
2518        }
2519        return ret;
2520}
2521
/*
 * qla24xx_bsg_timeout() - abort a timed-out BSG job.
 *
 * Scans every request queue's outstanding-command table for the srb
 * that owns this bsg_job, removes it from the table, asks the firmware
 * to abort it, and frees the srb. Always returns 0; the job's result
 * is recorded in the bsg reply (-EIO on abort failure, -ENXIO if the
 * srb was not found, 0 on successful abort).
 */
int
qla24xx_bsg_timeout(struct bsg_job *bsg_job)
{
        struct fc_bsg_reply *bsg_reply = bsg_job->reply;
        scsi_qla_host_t *vha = shost_priv(fc_bsg_to_shost(bsg_job));
        struct qla_hw_data *ha = vha->hw;
        srb_t *sp;
        int cnt, que;
        unsigned long flags;
        struct req_que *req;

        /* find the bsg job from the active list of commands */
        spin_lock_irqsave(&ha->hardware_lock, flags);
        for (que = 0; que < ha->max_req_queues; que++) {
                req = ha->req_q_map[que];
                if (!req)
                        continue;

                /* Slot 0 is never used for commands; start at 1. */
                for (cnt = 1; cnt < req->num_outstanding_cmds; cnt++) {
                        sp = req->outstanding_cmds[cnt];
                        if (sp) {
                                /* Only BSG-originated srb types can own this job. */
                                if (((sp->type == SRB_CT_CMD) ||
                                        (sp->type == SRB_ELS_CMD_HST) ||
                                        (sp->type == SRB_FXIOCB_BCMD))
                                        && (sp->u.bsg_job == bsg_job)) {
                                        /*
                                         * Claim the slot before dropping the lock so
                                         * the completion path cannot also process sp,
                                         * then release the lock: abort_command issues
                                         * a sleeping mailbox command.
                                         */
                                        req->outstanding_cmds[cnt] = NULL;
                                        spin_unlock_irqrestore(&ha->hardware_lock, flags);
                                        if (ha->isp_ops->abort_command(sp)) {
                                                ql_log(ql_log_warn, vha, 0x7089,
                                                    "mbx abort_command "
                                                    "failed.\n");
                                                bsg_reply->result = -EIO;
                                        } else {
                                                ql_dbg(ql_dbg_user, vha, 0x708a,
                                                    "mbx abort_command "
                                                    "success.\n");
                                                bsg_reply->result = 0;
                                        }
                                        /* Reacquire for the common unlock at 'done'. */
                                        spin_lock_irqsave(&ha->hardware_lock, flags);
                                        goto done;
                                }
                        }
                }
        }
        spin_unlock_irqrestore(&ha->hardware_lock, flags);
        ql_log(ql_log_info, vha, 0x708b, "SRB not found to abort.\n");
        bsg_reply->result = -ENXIO;
        return 0;

done:
        spin_unlock_irqrestore(&ha->hardware_lock, flags);
        /* We own sp now (slot was cleared above); release it. */
        sp->free(sp);
        return 0;
}
2576