linux/drivers/scsi/be2iscsi/be_cmds.c
   1/**
   2 * Copyright (C) 2005 - 2013 Emulex
   3 * All rights reserved.
   4 *
   5 * This program is free software; you can redistribute it and/or
   6 * modify it under the terms of the GNU General Public License version 2
   7 * as published by the Free Software Foundation.  The full GNU General
   8 * Public License is included in this distribution in the file called COPYING.
   9 *
  10 * Contact Information:
  11 * linux-drivers@emulex.com
  12 *
  13 * Emulex
  14 * 3333 Susan Street
  15 * Costa Mesa, CA 92626
  16 */
  17
  18#include <scsi/iscsi_proto.h>
  19
  20#include "be_main.h"
  21#include "be.h"
  22#include "be_mgmt.h"
  23
  24int beiscsi_pci_soft_reset(struct beiscsi_hba *phba)
  25{
  26        u32 sreset;
  27        u8 *pci_reset_offset = 0;
  28        u8 *pci_online0_offset = 0;
  29        u8 *pci_online1_offset = 0;
  30        u32 pconline0 = 0;
  31        u32 pconline1 = 0;
  32        u32 i;
  33
  34        pci_reset_offset = (u8 *)phba->pci_va + BE2_SOFT_RESET;
  35        pci_online0_offset = (u8 *)phba->pci_va + BE2_PCI_ONLINE0;
  36        pci_online1_offset = (u8 *)phba->pci_va + BE2_PCI_ONLINE1;
  37        sreset = readl((void *)pci_reset_offset);
  38        sreset |= BE2_SET_RESET;
  39        writel(sreset, (void *)pci_reset_offset);
  40
  41        i = 0;
  42        while (sreset & BE2_SET_RESET) {
  43                if (i > 64)
  44                        break;
  45                msleep(100);
  46                sreset = readl((void *)pci_reset_offset);
  47                i++;
  48        }
  49
  50        if (sreset & BE2_SET_RESET) {
  51                printk(KERN_ERR DRV_NAME
  52                       " Soft Reset did not deassert\n");
  53                return -EIO;
  54        }
  55        pconline1 = BE2_MPU_IRAM_ONLINE;
  56        writel(pconline0, (void *)pci_online0_offset);
  57        writel(pconline1, (void *)pci_online1_offset);
  58
  59        sreset |= BE2_SET_RESET;
  60        writel(sreset, (void *)pci_reset_offset);
  61
  62        i = 0;
  63        while (sreset & BE2_SET_RESET) {
  64                if (i > 64)
  65                        break;
  66                msleep(1);
  67                sreset = readl((void *)pci_reset_offset);
  68                i++;
  69        }
  70        if (sreset & BE2_SET_RESET) {
  71                printk(KERN_ERR DRV_NAME
  72                       " MPU Online Soft Reset did not deassert\n");
  73                return -EIO;
  74        }
  75        return 0;
  76}
  77
  78int be_chk_reset_complete(struct beiscsi_hba *phba)
  79{
  80        unsigned int num_loop;
  81        u8 *mpu_sem = 0;
  82        u32 status;
  83
  84        num_loop = 1000;
  85        mpu_sem = (u8 *)phba->csr_va + MPU_EP_SEMAPHORE;
  86        msleep(5000);
  87
  88        while (num_loop) {
  89                status = readl((void *)mpu_sem);
  90
  91                if ((status & 0x80000000) || (status & 0x0000FFFF) == 0xC000)
  92                        break;
  93                msleep(60);
  94                num_loop--;
  95        }
  96
  97        if ((status & 0x80000000) || (!num_loop)) {
  98                beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
  99                            "BC_%d : Failed in be_chk_reset_complete "
 100                            "status = 0x%x\n", status);
 101                return -EIO;
 102        }
 103
 104        return 0;
 105}
 106
 107void be_mcc_notify(struct beiscsi_hba *phba)
 108{
 109        struct be_queue_info *mccq = &phba->ctrl.mcc_obj.q;
 110        u32 val = 0;
 111
 112        val |= mccq->id & DB_MCCQ_RING_ID_MASK;
 113        val |= 1 << DB_MCCQ_NUM_POSTED_SHIFT;
 114        iowrite32(val, phba->db_va + DB_MCCQ_OFFSET);
 115}
 116
 117unsigned int alloc_mcc_tag(struct beiscsi_hba *phba)
 118{
 119        unsigned int tag = 0;
 120
 121        if (phba->ctrl.mcc_tag_available) {
 122                tag = phba->ctrl.mcc_tag[phba->ctrl.mcc_alloc_index];
 123                phba->ctrl.mcc_tag[phba->ctrl.mcc_alloc_index] = 0;
 124                phba->ctrl.mcc_numtag[tag] = 0;
 125        }
 126        if (tag) {
 127                phba->ctrl.mcc_tag_available--;
 128                if (phba->ctrl.mcc_alloc_index == (MAX_MCC_CMD - 1))
 129                        phba->ctrl.mcc_alloc_index = 0;
 130                else
 131                        phba->ctrl.mcc_alloc_index++;
 132        }
 133        return tag;
 134}
 135
 136/*
 137 * beiscsi_mccq_compl()- Wait for completion of MBX
 138 * @phba: Driver private structure
 139 * @tag: Tag for the MBX Command
 140 * @wrb: the WRB used for the MBX Command
 141 * @cmd_hdr: IOCTL Hdr for the MBX Cmd
 142 *
 143 * Waits for MBX completion with the passed TAG.
 144 *
 145 * return
 146 * Success: 0
 147 * Failure: Non-Zero
 148 **/
 149int beiscsi_mccq_compl(struct beiscsi_hba *phba,
 150                uint32_t tag, struct be_mcc_wrb **wrb,
 151                void *cmd_hdr)
 152{
 153        int rc = 0;
 154        uint32_t mcc_tag_response;
 155        uint16_t status = 0, addl_status = 0, wrb_num = 0;
 156        struct be_mcc_wrb *temp_wrb;
 157        struct be_cmd_req_hdr *ioctl_hdr;
 158        struct be_cmd_resp_hdr *ioctl_resp_hdr;
 159        struct be_queue_info *mccq = &phba->ctrl.mcc_obj.q;
 160
 161        if (beiscsi_error(phba)) {
 162                free_mcc_tag(&phba->ctrl, tag);
 163                return -EIO;
 164        }
 165
 166        /* wait for the mccq completion */
 167        rc = wait_event_interruptible_timeout(
 168                                phba->ctrl.mcc_wait[tag],
 169                                phba->ctrl.mcc_numtag[tag],
 170                                msecs_to_jiffies(
 171                                BEISCSI_HOST_MBX_TIMEOUT));
 172
 173        if (rc <= 0) {
 174                beiscsi_log(phba, KERN_ERR,
 175                            BEISCSI_LOG_INIT | BEISCSI_LOG_EH |
 176                            BEISCSI_LOG_CONFIG,
 177                            "BC_%d : MBX Cmd Completion timed out\n");
 178                rc = -EBUSY;
 179
 180                /* decrement the mccq used count */
 181                atomic_dec(&phba->ctrl.mcc_obj.q.used);
 182
 183                goto release_mcc_tag;
 184        } else
 185                rc = 0;
 186
 187        mcc_tag_response = phba->ctrl.mcc_numtag[tag];
 188        status = (mcc_tag_response & CQE_STATUS_MASK);
 189        addl_status = ((mcc_tag_response & CQE_STATUS_ADDL_MASK) >>
 190                        CQE_STATUS_ADDL_SHIFT);
 191
 192        if (cmd_hdr) {
 193                ioctl_hdr = (struct be_cmd_req_hdr *)cmd_hdr;
 194        } else {
 195                wrb_num = (mcc_tag_response & CQE_STATUS_WRB_MASK) >>
 196                           CQE_STATUS_WRB_SHIFT;
 197                temp_wrb = (struct be_mcc_wrb *)queue_get_wrb(mccq, wrb_num);
 198                ioctl_hdr = embedded_payload(temp_wrb);
 199
 200                if (wrb)
 201                        *wrb = temp_wrb;
 202        }
 203
 204        if (status || addl_status) {
 205                beiscsi_log(phba, KERN_ERR,
 206                            BEISCSI_LOG_INIT | BEISCSI_LOG_EH |
 207                            BEISCSI_LOG_CONFIG,
 208                            "BC_%d : MBX Cmd Failed for "
 209                            "Subsys : %d Opcode : %d with "
 210                            "Status : %d and Extd_Status : %d\n",
 211                            ioctl_hdr->subsystem,
 212                            ioctl_hdr->opcode,
 213                            status, addl_status);
 214
 215                if (status == MCC_STATUS_INSUFFICIENT_BUFFER) {
 216                        ioctl_resp_hdr = (struct be_cmd_resp_hdr *) ioctl_hdr;
 217                        beiscsi_log(phba, KERN_WARNING,
 218                                    BEISCSI_LOG_INIT | BEISCSI_LOG_EH |
 219                                    BEISCSI_LOG_CONFIG,
 220                                    "BC_%d : Insufficient Buffer Error "
 221                                    "Resp_Len : %d Actual_Resp_Len : %d\n",
 222                                    ioctl_resp_hdr->response_length,
 223                                    ioctl_resp_hdr->actual_resp_len);
 224
 225                        rc = -EAGAIN;
 226                        goto release_mcc_tag;
 227                }
 228                rc = -EIO;
 229        }
 230
 231release_mcc_tag:
 232        /* Release the MCC entry */
 233        free_mcc_tag(&phba->ctrl, tag);
 234
 235        return rc;
 236}
 237
 238void free_mcc_tag(struct be_ctrl_info *ctrl, unsigned int tag)
 239{
 240        spin_lock(&ctrl->mbox_lock);
 241        tag = tag & 0x000000FF;
 242        ctrl->mcc_tag[ctrl->mcc_free_index] = tag;
 243        if (ctrl->mcc_free_index == (MAX_MCC_CMD - 1))
 244                ctrl->mcc_free_index = 0;
 245        else
 246                ctrl->mcc_free_index++;
 247        ctrl->mcc_tag_available++;
 248        spin_unlock(&ctrl->mbox_lock);
 249}
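
/*
 * Editorial sketch (not part of the original driver): the tag returned by
 * alloc_mcc_tag() ties an MCC WRB to its completion, and free_mcc_tag() is
 * invoked from beiscsi_mccq_compl() once the command finishes. A minimal,
 * illustration-only caller, modelled on be_cmd_set_vlan() further below,
 * would look roughly like this (payload setup omitted):
 */
#if 0	/* illustration only, not compiled */
static int example_issue_mcc_cmd(struct beiscsi_hba *phba)
{
	struct be_ctrl_info *ctrl = &phba->ctrl;
	struct be_mcc_wrb *wrb;
	unsigned int tag;

	spin_lock(&ctrl->mbox_lock);
	tag = alloc_mcc_tag(phba);
	if (!tag) {
		spin_unlock(&ctrl->mbox_lock);
		return -EBUSY;			/* no free tag right now */
	}

	wrb = wrb_from_mccq(phba);
	wrb->tag0 |= tag;
	/* be_wrb_hdr_prepare()/be_cmd_hdr_prepare() and payload setup here */

	be_mcc_notify(phba);			/* ring the MCCQ doorbell */
	spin_unlock(&ctrl->mbox_lock);

	/* sleeps until be_mcc_compl_process_isr() posts the completion */
	return beiscsi_mccq_compl(phba, tag, NULL, NULL);
}
#endif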
 250
 251bool is_link_state_evt(u32 trailer)
 252{
 253        return (((trailer >> ASYNC_TRAILER_EVENT_CODE_SHIFT) &
 254                  ASYNC_TRAILER_EVENT_CODE_MASK) ==
 255                  ASYNC_EVENT_CODE_LINK_STATE);
 256}
 257
 258static inline bool be_mcc_compl_is_new(struct be_mcc_compl *compl)
 259{
 260        if (compl->flags != 0) {
 261                compl->flags = le32_to_cpu(compl->flags);
 262                WARN_ON((compl->flags & CQE_FLAGS_VALID_MASK) == 0);
 263                return true;
 264        } else
 265                return false;
 266}
 267
 268static inline void be_mcc_compl_use(struct be_mcc_compl *compl)
 269{
 270        compl->flags = 0;
 271}
 272
 273/*
 274 * be_mcc_compl_process()- Check the MBX completion status
 275 * @ctrl: Function specific MBX data structure
 276 * @compl: Completion status of MBX Command
 277 *
 278 * Check the MBX completion status when the BMBX method is used
 279 *
 280 * return
 281 * Success: Zero
 282 * Failure: Non-Zero
 283 **/
 284static int be_mcc_compl_process(struct be_ctrl_info *ctrl,
 285                                struct be_mcc_compl *compl)
 286{
 287        u16 compl_status, extd_status;
 288        struct be_mcc_wrb *wrb = wrb_from_mbox(&ctrl->mbox_mem);
 289        struct beiscsi_hba *phba = pci_get_drvdata(ctrl->pdev);
 290        struct be_cmd_req_hdr *hdr = embedded_payload(wrb);
 291        struct be_cmd_resp_hdr *resp_hdr;
 292
 293        be_dws_le_to_cpu(compl, 4);
 294
 295        compl_status = (compl->status >> CQE_STATUS_COMPL_SHIFT) &
 296                                        CQE_STATUS_COMPL_MASK;
 297        if (compl_status != MCC_STATUS_SUCCESS) {
 298                extd_status = (compl->status >> CQE_STATUS_EXTD_SHIFT) &
 299                                                CQE_STATUS_EXTD_MASK;
 300
 301                beiscsi_log(phba, KERN_ERR,
 302                            BEISCSI_LOG_CONFIG | BEISCSI_LOG_MBOX,
 303                            "BC_%d : error in cmd completion: "
 304                            "Subsystem : %d Opcode : %d "
 305                            "status(compl/extd)=%d/%d\n",
 306                            hdr->subsystem, hdr->opcode,
 307                            compl_status, extd_status);
 308
 309                if (compl_status == MCC_STATUS_INSUFFICIENT_BUFFER) {
 310                        resp_hdr = (struct be_cmd_resp_hdr *) hdr;
 311                        if (resp_hdr->response_length)
 312                                return 0;
 313                }
 314                return -EBUSY;
 315        }
 316        return 0;
 317}
 318
 319int be_mcc_compl_process_isr(struct be_ctrl_info *ctrl,
 320                                    struct be_mcc_compl *compl)
 321{
 322        u16 compl_status, extd_status;
 323        unsigned short tag;
 324
 325        be_dws_le_to_cpu(compl, 4);
 326
 327        compl_status = (compl->status >> CQE_STATUS_COMPL_SHIFT) &
 328                                        CQE_STATUS_COMPL_MASK;
 329        /* The ctrl.mcc_numtag[tag] is filled with
 330         * [31] = valid, [30:24] = Rsvd, [23:16] = wrb, [15:8] = extd_status,
 331         * [7:0] = compl_status
 332         */
 333        tag = (compl->tag0 & 0x000000FF);
 334        extd_status = (compl->status >> CQE_STATUS_EXTD_SHIFT) &
 335                                        CQE_STATUS_EXTD_MASK;
 336
 337        ctrl->mcc_numtag[tag]  = 0x80000000;
 338        ctrl->mcc_numtag[tag] |= (compl->tag0 & 0x00FF0000);
 339        ctrl->mcc_numtag[tag] |= (extd_status & 0x000000FF) << 8;
 340        ctrl->mcc_numtag[tag] |= (compl_status & 0x000000FF);
 341        wake_up_interruptible(&ctrl->mcc_wait[tag]);
 342        return 0;
 343}
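
/*
 * Editorial example (illustration only): for a completion with
 * tag0 = 0x00170005 (WRB index 0x17, tag 5), extd_status = 0x02 and
 * compl_status = 0x01, the word stored above is
 *   0x80000000 | 0x00170000 | (0x02 << 8) | 0x01 = 0x80170201,
 * which beiscsi_mccq_compl() later unpacks again with the
 * CQE_STATUS_* masks and shifts.
 */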
 344
 345static struct be_mcc_compl *be_mcc_compl_get(struct beiscsi_hba *phba)
 346{
 347        struct be_queue_info *mcc_cq = &phba->ctrl.mcc_obj.cq;
 348        struct be_mcc_compl *compl = queue_tail_node(mcc_cq);
 349
 350        if (be_mcc_compl_is_new(compl)) {
 351                queue_tail_inc(mcc_cq);
 352                return compl;
 353        }
 354        return NULL;
 355}
 356
 357static void be2iscsi_fail_session(struct iscsi_cls_session *cls_session)
 358{
 359        iscsi_session_failure(cls_session->dd_data, ISCSI_ERR_CONN_FAILED);
 360}
 361
 362void beiscsi_async_link_state_process(struct beiscsi_hba *phba,
 363                struct be_async_event_link_state *evt)
 364{
 365        if ((evt->port_link_status == ASYNC_EVENT_LINK_DOWN) ||
 366            ((evt->port_link_status & ASYNC_EVENT_LOGICAL) &&
 367             (evt->port_fault != BEISCSI_PHY_LINK_FAULT_NONE))) {
 368                phba->state = BE_ADAPTER_LINK_DOWN;
 369
 370                beiscsi_log(phba, KERN_ERR,
 371                            BEISCSI_LOG_CONFIG | BEISCSI_LOG_INIT,
 372                            "BC_%d : Link Down on Port %d\n",
 373                            evt->physical_port);
 374
 375                iscsi_host_for_each_session(phba->shost,
 376                                            be2iscsi_fail_session);
 377        } else if ((evt->port_link_status & ASYNC_EVENT_LINK_UP) ||
 378                    ((evt->port_link_status & ASYNC_EVENT_LOGICAL) &&
 379                     (evt->port_fault == BEISCSI_PHY_LINK_FAULT_NONE))) {
 380                phba->state = BE_ADAPTER_LINK_UP;
 381
 382                beiscsi_log(phba, KERN_ERR,
 383                            BEISCSI_LOG_CONFIG | BEISCSI_LOG_INIT,
 384                            "BC_%d : Link UP on Port %d\n",
 385                            evt->physical_port);
 386        }
 387}
 388
 389static void beiscsi_cq_notify(struct beiscsi_hba *phba, u16 qid, bool arm,
 390                       u16 num_popped)
 391{
 392        u32 val = 0;
 393        val |= qid & DB_CQ_RING_ID_MASK;
 394        if (arm)
 395                val |= 1 << DB_CQ_REARM_SHIFT;
 396        val |= num_popped << DB_CQ_NUM_POPPED_SHIFT;
 397        iowrite32(val, phba->db_va + DB_CQ_OFFSET);
 398}
 399
 400
 401int beiscsi_process_mcc(struct beiscsi_hba *phba)
 402{
 403        struct be_mcc_compl *compl;
 404        int num = 0, status = 0;
 405        struct be_ctrl_info *ctrl = &phba->ctrl;
 406
 407        spin_lock_bh(&phba->ctrl.mcc_cq_lock);
 408        while ((compl = be_mcc_compl_get(phba))) {
 409                if (compl->flags & CQE_FLAGS_ASYNC_MASK) {
 410                        /* Interpret flags as an async trailer */
 411                        if (is_link_state_evt(compl->flags))
 412                                /* Interpret compl as an async link evt */
 413                                beiscsi_async_link_state_process(phba,
 414                                   (struct be_async_event_link_state *) compl);
 415                        else
 416                                beiscsi_log(phba, KERN_ERR,
 417                                            BEISCSI_LOG_CONFIG |
 418                                            BEISCSI_LOG_MBOX,
 419                                            "BC_%d : Unsupported Async Event, flags"
 420                                            " = 0x%08x\n", compl->flags);
 421
 422                } else if (compl->flags & CQE_FLAGS_COMPLETED_MASK) {
 423                                status = be_mcc_compl_process(ctrl, compl);
 424                                atomic_dec(&phba->ctrl.mcc_obj.q.used);
 425                }
 426                be_mcc_compl_use(compl);
 427                num++;
 428        }
 429
 430        if (num)
 431                beiscsi_cq_notify(phba, phba->ctrl.mcc_obj.cq.id, true, num);
 432
 433        spin_unlock_bh(&phba->ctrl.mcc_cq_lock);
 434        return status;
 435}
 436
 437/*
 438 * be_mcc_wait_compl()- Wait for MBX completion
 439 * @phba: driver private structure
 440 *
 441 * Wait until no pending MCC requests remain
 442 *
 443 * return
 444 * Success: 0
 445 * Failure: Non-Zero
 446 *
 447 **/
 448static int be_mcc_wait_compl(struct beiscsi_hba *phba)
 449{
 450        int i, status;
 451        for (i = 0; i < mcc_timeout; i++) {
 452                if (beiscsi_error(phba))
 453                        return -EIO;
 454
 455                status = beiscsi_process_mcc(phba);
 456                if (status)
 457                        return status;
 458
 459                if (atomic_read(&phba->ctrl.mcc_obj.q.used) == 0)
 460                        break;
 461                udelay(100);
 462        }
 463        if (i == mcc_timeout) {
 464                beiscsi_log(phba, KERN_ERR,
 465                            BEISCSI_LOG_CONFIG | BEISCSI_LOG_MBOX,
 466                            "BC_%d : FW Timed Out\n");
 467                phba->fw_timeout = true;
 468                beiscsi_ue_detect(phba);
 469                return -EBUSY;
 470        }
 471        return 0;
 472}
 473
 474/*
 475 * be_mcc_notify_wait()- Notify and wait for Compl
 476 * @phba: driver private structure
 477 *
 478 * Notify MCC requests and wait for completion
 479 *
 480 * return
 481 * Success: 0
 482 * Failure: Non-Zero
 483 **/
 484int be_mcc_notify_wait(struct beiscsi_hba *phba)
 485{
 486        be_mcc_notify(phba);
 487        return be_mcc_wait_compl(phba);
 488}
 489
 490/*
 491 * be_mbox_db_ready_wait()- Check ready status
 492 * @ctrl: Function specific MBX data structure
 493 *
 494 * Check the ready status of the FW before sending BMBX
 495 * commands to the adapter.
 496 *
 497 * return
 498 * Success: 0
 499 * Failure: Non-Zero
 500 **/
 501static int be_mbox_db_ready_wait(struct be_ctrl_info *ctrl)
 502{
 503#define BEISCSI_MBX_RDY_BIT_TIMEOUT     4000    /* 4sec */
 504        void __iomem *db = ctrl->db + MPU_MAILBOX_DB_OFFSET;
 505        struct beiscsi_hba *phba = pci_get_drvdata(ctrl->pdev);
 506        unsigned long timeout;
 507        bool read_flag = false;
 508        int ret = 0, i;
 509        u32 ready;
 510        DECLARE_WAIT_QUEUE_HEAD_ONSTACK(rdybit_check_q);
 511
 512        if (beiscsi_error(phba))
 513                return -EIO;
 514
 515        timeout = jiffies + (HZ * 110);
 516
 517        do {
 518                for (i = 0; i < BEISCSI_MBX_RDY_BIT_TIMEOUT; i++) {
 519                        ready = ioread32(db) & MPU_MAILBOX_DB_RDY_MASK;
 520                        if (ready) {
 521                                read_flag = true;
 522                                break;
 523                        }
 524                        mdelay(1);
 525                }
 526
 527                if (!read_flag) {
 528                        wait_event_timeout(rdybit_check_q,
 529                                          (read_flag != true),
 530                                           HZ * 5);
 531                }
 532        } while ((time_before(jiffies, timeout)) && !read_flag);
 533
 534        if (!read_flag) {
 535                beiscsi_log(phba, KERN_ERR,
 536                            BEISCSI_LOG_CONFIG | BEISCSI_LOG_MBOX,
 537                            "BC_%d : FW Timed Out\n");
 538                phba->fw_timeout = true;
 539                beiscsi_ue_detect(phba);
 540                ret = -EBUSY;
 541        }
 542
 543        return ret;
 544}
 545
 546/*
 547 * be_mbox_notify: Notify adapter of new BMBX command
 548 * @ctrl: Function specific MBX data structure
 549 *
 550 * Ring doorbell to inform adapter of a BMBX command
 551 * to process
 552 *
 553 * return
 554 * Success: 0
 555 * Failure: Non-Zero
 556 **/
 557int be_mbox_notify(struct be_ctrl_info *ctrl)
 558{
 559        int status;
 560        u32 val = 0;
 561        void __iomem *db = ctrl->db + MPU_MAILBOX_DB_OFFSET;
 562        struct be_dma_mem *mbox_mem = &ctrl->mbox_mem;
 563        struct be_mcc_mailbox *mbox = mbox_mem->va;
 564        struct be_mcc_compl *compl = &mbox->compl;
 565        struct beiscsi_hba *phba = pci_get_drvdata(ctrl->pdev);
 566
 567        status = be_mbox_db_ready_wait(ctrl);
 568        if (status)
 569                return status;
 570
 571        val &= ~MPU_MAILBOX_DB_RDY_MASK;
 572        val |= MPU_MAILBOX_DB_HI_MASK;
 573        val |= (upper_32_bits(mbox_mem->dma) >> 2) << 2;
 574        iowrite32(val, db);
 575
 576        status = be_mbox_db_ready_wait(ctrl);
 577        if (status)
 578                return status;
 579
 580        val = 0;
 581        val &= ~MPU_MAILBOX_DB_RDY_MASK;
 582        val &= ~MPU_MAILBOX_DB_HI_MASK;
 583        val |= (u32) (mbox_mem->dma >> 4) << 2;
 584        iowrite32(val, db);
 585
 586        status = be_mbox_db_ready_wait(ctrl);
 587        if (status)
 588                return status;
 589
 590        if (be_mcc_compl_is_new(compl)) {
 591                status = be_mcc_compl_process(ctrl, &mbox->compl);
 592                be_mcc_compl_use(compl);
 593                if (status) {
 594                        beiscsi_log(phba, KERN_ERR,
 595                                    BEISCSI_LOG_CONFIG | BEISCSI_LOG_MBOX,
 596                                    "BC_%d : After be_mcc_compl_process\n");
 597
 598                        return status;
 599                }
 600        } else {
 601                beiscsi_log(phba, KERN_ERR,
 602                            BEISCSI_LOG_CONFIG | BEISCSI_LOG_MBOX,
 603                            "BC_%d : Invalid Mailbox Completion\n");
 604
 605                return -EBUSY;
 606        }
 607        return 0;
 608}
 609
 610/*
 611 * Inserts the mailbox address into the doorbell in two steps and
 612 * polls on the mbox doorbell till a command completion (or a timeout) occurs
 613 */
 614static int be_mbox_notify_wait(struct beiscsi_hba *phba)
 615{
 616        int status;
 617        u32 val = 0;
 618        void __iomem *db = phba->ctrl.db + MPU_MAILBOX_DB_OFFSET;
 619        struct be_dma_mem *mbox_mem = &phba->ctrl.mbox_mem;
 620        struct be_mcc_mailbox *mbox = mbox_mem->va;
 621        struct be_mcc_compl *compl = &mbox->compl;
 622        struct be_ctrl_info *ctrl = &phba->ctrl;
 623
 624        status = be_mbox_db_ready_wait(ctrl);
 625        if (status)
 626                return status;
 627
 628        val |= MPU_MAILBOX_DB_HI_MASK;
 629        /* at bits 2 - 31 place mbox dma addr msb bits 34 - 63 */
 630        val |= (upper_32_bits(mbox_mem->dma) >> 2) << 2;
 631        iowrite32(val, db);
 632
 633        /* wait for ready to be set */
 634        status = be_mbox_db_ready_wait(ctrl);
 635        if (status != 0)
 636                return status;
 637
 638        val = 0;
 639        /* at bits 2 - 31 place mbox dma addr lsb bits 4 - 33 */
 640        val |= (u32)(mbox_mem->dma >> 4) << 2;
 641        iowrite32(val, db);
 642
 643        status = be_mbox_db_ready_wait(ctrl);
 644        if (status != 0)
 645                return status;
 646
 647        /* A cq entry has been made now */
 648        if (be_mcc_compl_is_new(compl)) {
 649                status = be_mcc_compl_process(ctrl, &mbox->compl);
 650                be_mcc_compl_use(compl);
 651                if (status)
 652                        return status;
 653        } else {
 654                beiscsi_log(phba, KERN_ERR,
 655                            BEISCSI_LOG_CONFIG | BEISCSI_LOG_MBOX,
 656                            "BC_%d : invalid mailbox completion\n");
 657
 658                return -EBUSY;
 659        }
 660        return 0;
 661}
 662
 663void be_wrb_hdr_prepare(struct be_mcc_wrb *wrb, int payload_len,
 664                                bool embedded, u8 sge_cnt)
 665{
 666        if (embedded)
 667                wrb->embedded |= MCC_WRB_EMBEDDED_MASK;
 668        else
 669                wrb->embedded |= (sge_cnt & MCC_WRB_SGE_CNT_MASK) <<
 670                                                MCC_WRB_SGE_CNT_SHIFT;
 671        wrb->payload_length = payload_len;
 672        be_dws_cpu_to_le(wrb, 8);
 673}
 674
 675void be_cmd_hdr_prepare(struct be_cmd_req_hdr *req_hdr,
 676                        u8 subsystem, u8 opcode, int cmd_len)
 677{
 678        req_hdr->opcode = opcode;
 679        req_hdr->subsystem = subsystem;
 680        req_hdr->request_length = cpu_to_le32(cmd_len - sizeof(*req_hdr));
 681        req_hdr->timeout = BEISCSI_FW_MBX_TIMEOUT;
 682}
 683
 684static void be_cmd_page_addrs_prepare(struct phys_addr *pages, u32 max_pages,
 685                                                        struct be_dma_mem *mem)
 686{
 687        int i, buf_pages;
 688        u64 dma = (u64) mem->dma;
 689
 690        buf_pages = min(PAGES_4K_SPANNED(mem->va, mem->size), max_pages);
 691        for (i = 0; i < buf_pages; i++) {
 692                pages[i].lo = cpu_to_le32(dma & 0xFFFFFFFF);
 693                pages[i].hi = cpu_to_le32(upper_32_bits(dma));
 694                dma += PAGE_SIZE_4K;
 695        }
 696}
 697
 698static u32 eq_delay_to_mult(u32 usec_delay)
 699{
 700#define MAX_INTR_RATE 651042
 701        const u32 round = 10;
 702        u32 multiplier;
 703
 704        if (usec_delay == 0)
 705                multiplier = 0;
 706        else {
 707                u32 interrupt_rate = 1000000 / usec_delay;
 708                if (interrupt_rate == 0)
 709                        multiplier = 1023;
 710                else {
 711                        multiplier = (MAX_INTR_RATE - interrupt_rate) * round;
 712                        multiplier /= interrupt_rate;
 713                        multiplier = (multiplier + round / 2) / round;
 714                        multiplier = min(multiplier, (u32) 1023);
 715                }
 716        }
 717        return multiplier;
 718}
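
/*
 * Editorial example (illustration only): for eq_delay = 100 usec the
 * interrupt rate is 1000000 / 100 = 10000 int/s, so
 *   multiplier = ((651042 - 10000) * 10) / 10000 = 641
 * and rounding gives (641 + 5) / 10 = 64, well under the 1023 cap.
 */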
 719
 720struct be_mcc_wrb *wrb_from_mbox(struct be_dma_mem *mbox_mem)
 721{
 722        return &((struct be_mcc_mailbox *)(mbox_mem->va))->wrb;
 723}
 724
 725struct be_mcc_wrb *wrb_from_mccq(struct beiscsi_hba *phba)
 726{
 727        struct be_queue_info *mccq = &phba->ctrl.mcc_obj.q;
 728        struct be_mcc_wrb *wrb;
 729
 730        WARN_ON(atomic_read(&mccq->used) >= mccq->len);
 731        wrb = queue_head_node(mccq);
 732        memset(wrb, 0, sizeof(*wrb));
 733        wrb->tag0 = (mccq->head & 0x000000FF) << 16;
 734        queue_head_inc(mccq);
 735        atomic_inc(&mccq->used);
 736        return wrb;
 737}
 738
 739
 740int beiscsi_cmd_eq_create(struct be_ctrl_info *ctrl,
 741                          struct be_queue_info *eq, int eq_delay)
 742{
 743        struct be_mcc_wrb *wrb = wrb_from_mbox(&ctrl->mbox_mem);
 744        struct be_cmd_req_eq_create *req = embedded_payload(wrb);
 745        struct be_cmd_resp_eq_create *resp = embedded_payload(wrb);
 746        struct be_dma_mem *q_mem = &eq->dma_mem;
 747        int status;
 748
 749        spin_lock(&ctrl->mbox_lock);
 750        memset(wrb, 0, sizeof(*wrb));
 751
 752        be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0);
 753
 754        be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
 755                        OPCODE_COMMON_EQ_CREATE, sizeof(*req));
 756
 757        req->num_pages = cpu_to_le16(PAGES_4K_SPANNED(q_mem->va, q_mem->size));
 758
 759        AMAP_SET_BITS(struct amap_eq_context, func, req->context,
 760                                                PCI_FUNC(ctrl->pdev->devfn));
 761        AMAP_SET_BITS(struct amap_eq_context, valid, req->context, 1);
 762        AMAP_SET_BITS(struct amap_eq_context, size, req->context, 0);
 763        AMAP_SET_BITS(struct amap_eq_context, count, req->context,
 764                                        __ilog2_u32(eq->len / 256));
 765        AMAP_SET_BITS(struct amap_eq_context, delaymult, req->context,
 766                                        eq_delay_to_mult(eq_delay));
 767        be_dws_cpu_to_le(req->context, sizeof(req->context));
 768
 769        be_cmd_page_addrs_prepare(req->pages, ARRAY_SIZE(req->pages), q_mem);
 770
 771        status = be_mbox_notify(ctrl);
 772        if (!status) {
 773                eq->id = le16_to_cpu(resp->eq_id);
 774                eq->created = true;
 775        }
 776        spin_unlock(&ctrl->mbox_lock);
 777        return status;
 778}
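
/*
 * Editorial example (illustration only): the "count" field above encodes the
 * EQ size as log2(len / 256), so a 1024-entry EQ is programmed with
 * __ilog2_u32(1024 / 256) = 2 and a 4096-entry EQ with
 * __ilog2_u32(4096 / 256) = 4.
 */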
 779
 780/**
 781 * be_cmd_fw_initialize()- Initialize FW
 782 * @ctrl: Pointer to function control structure
 783 *
 784 * Send FW initialize pattern for the function.
 785 *
 786 * return
 787 * Success: 0
 788 * Failure: Non-Zero value
 789 **/
 790int be_cmd_fw_initialize(struct be_ctrl_info *ctrl)
 791{
 792        struct be_mcc_wrb *wrb = wrb_from_mbox(&ctrl->mbox_mem);
 793        struct beiscsi_hba *phba = pci_get_drvdata(ctrl->pdev);
 794        int status;
 795        u8 *endian_check;
 796
 797        spin_lock(&ctrl->mbox_lock);
 798        memset(wrb, 0, sizeof(*wrb));
 799
 800        endian_check = (u8 *) wrb;
 801        *endian_check++ = 0xFF;
 802        *endian_check++ = 0x12;
 803        *endian_check++ = 0x34;
 804        *endian_check++ = 0xFF;
 805        *endian_check++ = 0xFF;
 806        *endian_check++ = 0x56;
 807        *endian_check++ = 0x78;
 808        *endian_check++ = 0xFF;
 809        be_dws_cpu_to_le(wrb, sizeof(*wrb));
 810
 811        status = be_mbox_notify(ctrl);
 812        if (status)
 813                beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
 814                            "BC_%d : be_cmd_fw_initialize Failed\n");
 815
 816        spin_unlock(&ctrl->mbox_lock);
 817        return status;
 818}
 819
 820/**
 821 * be_cmd_fw_uninit()- Uninitialize FW
 822 * @ctrl: Pointer to function control structure
 823 *
 824 * Send FW uninitialize pattern for the function
 825 *
 826 * return
 827 * Success: 0
 828 * Failure: Non-Zero value
 829 **/
 830int be_cmd_fw_uninit(struct be_ctrl_info *ctrl)
 831{
 832        struct be_mcc_wrb *wrb = wrb_from_mbox(&ctrl->mbox_mem);
 833        struct beiscsi_hba *phba = pci_get_drvdata(ctrl->pdev);
 834        int status;
 835        u8 *endian_check;
 836
 837        spin_lock(&ctrl->mbox_lock);
 838        memset(wrb, 0, sizeof(*wrb));
 839
 840        endian_check = (u8 *) wrb;
 841        *endian_check++ = 0xFF;
 842        *endian_check++ = 0xAA;
 843        *endian_check++ = 0xBB;
 844        *endian_check++ = 0xFF;
 845        *endian_check++ = 0xFF;
 846        *endian_check++ = 0xCC;
 847        *endian_check++ = 0xDD;
 848        *endian_check = 0xFF;
 849
 850        be_dws_cpu_to_le(wrb, sizeof(*wrb));
 851
 852        status = be_mbox_notify(ctrl);
 853        if (status)
 854                beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
 855                            "BC_%d : be_cmd_fw_uninit Failed\n");
 856
 857        spin_unlock(&ctrl->mbox_lock);
 858        return status;
 859}
 860
 861int beiscsi_cmd_cq_create(struct be_ctrl_info *ctrl,
 862                          struct be_queue_info *cq, struct be_queue_info *eq,
 863                          bool sol_evts, bool no_delay, int coalesce_wm)
 864{
 865        struct be_mcc_wrb *wrb = wrb_from_mbox(&ctrl->mbox_mem);
 866        struct be_cmd_req_cq_create *req = embedded_payload(wrb);
 867        struct be_cmd_resp_cq_create *resp = embedded_payload(wrb);
 868        struct beiscsi_hba *phba = pci_get_drvdata(ctrl->pdev);
 869        struct be_dma_mem *q_mem = &cq->dma_mem;
 870        void *ctxt = &req->context;
 871        int status;
 872
 873        spin_lock(&ctrl->mbox_lock);
 874        memset(wrb, 0, sizeof(*wrb));
 875
 876        be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0);
 877
 878        be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
 879                        OPCODE_COMMON_CQ_CREATE, sizeof(*req));
 880
 881        req->num_pages = cpu_to_le16(PAGES_4K_SPANNED(q_mem->va, q_mem->size));
 882        if (is_chip_be2_be3r(phba)) {
 883                AMAP_SET_BITS(struct amap_cq_context, coalescwm,
 884                              ctxt, coalesce_wm);
 885                AMAP_SET_BITS(struct amap_cq_context, nodelay, ctxt, no_delay);
 886                AMAP_SET_BITS(struct amap_cq_context, count, ctxt,
 887                              __ilog2_u32(cq->len / 256));
 888                AMAP_SET_BITS(struct amap_cq_context, valid, ctxt, 1);
 889                AMAP_SET_BITS(struct amap_cq_context, solevent, ctxt, sol_evts);
 890                AMAP_SET_BITS(struct amap_cq_context, eventable, ctxt, 1);
 891                AMAP_SET_BITS(struct amap_cq_context, eqid, ctxt, eq->id);
 892                AMAP_SET_BITS(struct amap_cq_context, armed, ctxt, 1);
 893                AMAP_SET_BITS(struct amap_cq_context, func, ctxt,
 894                              PCI_FUNC(ctrl->pdev->devfn));
 895        } else {
 896                req->hdr.version = MBX_CMD_VER2;
 897                req->page_size = 1;
 898                AMAP_SET_BITS(struct amap_cq_context_v2, coalescwm,
 899                              ctxt, coalesce_wm);
 900                AMAP_SET_BITS(struct amap_cq_context_v2, nodelay,
 901                              ctxt, no_delay);
 902                AMAP_SET_BITS(struct amap_cq_context_v2, count, ctxt,
 903                              __ilog2_u32(cq->len / 256));
 904                AMAP_SET_BITS(struct amap_cq_context_v2, valid, ctxt, 1);
 905                AMAP_SET_BITS(struct amap_cq_context_v2, eventable, ctxt, 1);
 906                AMAP_SET_BITS(struct amap_cq_context_v2, eqid, ctxt, eq->id);
 907                AMAP_SET_BITS(struct amap_cq_context_v2, armed, ctxt, 1);
 908        }
 909
 910        be_dws_cpu_to_le(ctxt, sizeof(req->context));
 911
 912        be_cmd_page_addrs_prepare(req->pages, ARRAY_SIZE(req->pages), q_mem);
 913
 914        status = be_mbox_notify(ctrl);
 915        if (!status) {
 916                cq->id = le16_to_cpu(resp->cq_id);
 917                cq->created = true;
 918        } else
 919                beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
 920                            "BC_%d : In be_cmd_cq_create, status=0x%08x\n",
 921                            status);
 922
 923        spin_unlock(&ctrl->mbox_lock);
 924
 925        return status;
 926}
 927
 928static u32 be_encoded_q_len(int q_len)
 929{
 930        u32 len_encoded = fls(q_len);   /* log2(len) + 1 */
 931        if (len_encoded == 16)
 932                len_encoded = 0;
 933        return len_encoded;
 934}
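
/*
 * Editorial example (illustration only): a 256-entry queue encodes as
 * fls(256) = 9, a 1024-entry queue as 11, and the maximum 32768 entries as
 * fls(32768) = 16, which is mapped to the special encoding 0.
 */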
 935
 936int beiscsi_cmd_mccq_create(struct beiscsi_hba *phba,
 937                        struct be_queue_info *mccq,
 938                        struct be_queue_info *cq)
 939{
 940        struct be_mcc_wrb *wrb;
 941        struct be_cmd_req_mcc_create *req;
 942        struct be_dma_mem *q_mem = &mccq->dma_mem;
 943        struct be_ctrl_info *ctrl;
 944        void *ctxt;
 945        int status;
 946
 947        spin_lock(&phba->ctrl.mbox_lock);
 948        ctrl = &phba->ctrl;
 949        wrb = wrb_from_mbox(&ctrl->mbox_mem);
 950        memset(wrb, 0, sizeof(*wrb));
 951        req = embedded_payload(wrb);
 952        ctxt = &req->context;
 953
 954        be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0);
 955
 956        be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
 957                        OPCODE_COMMON_MCC_CREATE, sizeof(*req));
 958
 959        req->num_pages = PAGES_4K_SPANNED(q_mem->va, q_mem->size);
 960
 961        AMAP_SET_BITS(struct amap_mcc_context, fid, ctxt,
 962                      PCI_FUNC(phba->pcidev->devfn));
 963        AMAP_SET_BITS(struct amap_mcc_context, valid, ctxt, 1);
 964        AMAP_SET_BITS(struct amap_mcc_context, ring_size, ctxt,
 965                be_encoded_q_len(mccq->len));
 966        AMAP_SET_BITS(struct amap_mcc_context, cq_id, ctxt, cq->id);
 967
 968        be_dws_cpu_to_le(ctxt, sizeof(req->context));
 969
 970        be_cmd_page_addrs_prepare(req->pages, ARRAY_SIZE(req->pages), q_mem);
 971
 972        status = be_mbox_notify_wait(phba);
 973        if (!status) {
 974                struct be_cmd_resp_mcc_create *resp = embedded_payload(wrb);
 975                mccq->id = le16_to_cpu(resp->id);
 976                mccq->created = true;
 977        }
 978        spin_unlock(&phba->ctrl.mbox_lock);
 979
 980        return status;
 981}
 982
 983int beiscsi_cmd_q_destroy(struct be_ctrl_info *ctrl, struct be_queue_info *q,
 984                          int queue_type)
 985{
 986        struct be_mcc_wrb *wrb = wrb_from_mbox(&ctrl->mbox_mem);
 987        struct be_cmd_req_q_destroy *req = embedded_payload(wrb);
 988        struct beiscsi_hba *phba = pci_get_drvdata(ctrl->pdev);
 989        u8 subsys = 0, opcode = 0;
 990        int status;
 991
 992        beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT,
 993                    "BC_%d : In beiscsi_cmd_q_destroy "
 994                    "queue_type : %d\n", queue_type);
 995
 996        spin_lock(&ctrl->mbox_lock);
 997        memset(wrb, 0, sizeof(*wrb));
 998        be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0);
 999
1000        switch (queue_type) {
1001        case QTYPE_EQ:
1002                subsys = CMD_SUBSYSTEM_COMMON;
1003                opcode = OPCODE_COMMON_EQ_DESTROY;
1004                break;
1005        case QTYPE_CQ:
1006                subsys = CMD_SUBSYSTEM_COMMON;
1007                opcode = OPCODE_COMMON_CQ_DESTROY;
1008                break;
1009        case QTYPE_MCCQ:
1010                subsys = CMD_SUBSYSTEM_COMMON;
1011                opcode = OPCODE_COMMON_MCC_DESTROY;
1012                break;
1013        case QTYPE_WRBQ:
1014                subsys = CMD_SUBSYSTEM_ISCSI;
1015                opcode = OPCODE_COMMON_ISCSI_WRBQ_DESTROY;
1016                break;
1017        case QTYPE_DPDUQ:
1018                subsys = CMD_SUBSYSTEM_ISCSI;
1019                opcode = OPCODE_COMMON_ISCSI_DEFQ_DESTROY;
1020                break;
1021        case QTYPE_SGL:
1022                subsys = CMD_SUBSYSTEM_ISCSI;
1023                opcode = OPCODE_COMMON_ISCSI_CFG_REMOVE_SGL_PAGES;
1024                break;
1025        default:
1026                spin_unlock(&ctrl->mbox_lock);
1027                BUG();
1028                return -ENXIO;
1029        }
1030        be_cmd_hdr_prepare(&req->hdr, subsys, opcode, sizeof(*req));
1031        if (queue_type != QTYPE_SGL)
1032                req->id = cpu_to_le16(q->id);
1033
1034        status = be_mbox_notify(ctrl);
1035
1036        spin_unlock(&ctrl->mbox_lock);
1037        return status;
1038}
1039
1040/**
1041 * be_cmd_create_default_pdu_queue()- Create DEFQ for the adapter
1042 * @ctrl: ptr to ctrl_info
1043 * @cq: Completion Queue
1044 * @dq: Default Queue
1045 * @length: ring size
1046 * @entry_size: size of each entry in DEFQ
1047 * @is_header: Header or Data DEFQ
1048 * @ulp_num: Bind to which ULP
1049 *
1050 * Create HDR/Data DEFQ for the passed ULP. Unsolicited PDUs are posted
1051 * on this queue by the FW.
1052 *
1053 * return
1054 *      Success: 0
1055 *      Failure: Non-Zero Value
1056 *
1057 **/
1058int be_cmd_create_default_pdu_queue(struct be_ctrl_info *ctrl,
1059                                    struct be_queue_info *cq,
1060                                    struct be_queue_info *dq, int length,
1061                                    int entry_size, uint8_t is_header,
1062                                    uint8_t ulp_num)
1063{
1064        struct be_mcc_wrb *wrb = wrb_from_mbox(&ctrl->mbox_mem);
1065        struct be_defq_create_req *req = embedded_payload(wrb);
1066        struct be_dma_mem *q_mem = &dq->dma_mem;
1067        struct beiscsi_hba *phba = pci_get_drvdata(ctrl->pdev);
1068        void *ctxt = &req->context;
1069        int status;
1070
1071        spin_lock(&ctrl->mbox_lock);
1072        memset(wrb, 0, sizeof(*wrb));
1073
1074        be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0);
1075
1076        be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ISCSI,
1077                           OPCODE_COMMON_ISCSI_DEFQ_CREATE, sizeof(*req));
1078
1079        req->num_pages = PAGES_4K_SPANNED(q_mem->va, q_mem->size);
1080        if (phba->fw_config.dual_ulp_aware) {
1081                req->ulp_num = ulp_num;
1082                req->dua_feature |= (1 << BEISCSI_DUAL_ULP_AWARE_BIT);
1083                req->dua_feature |= (1 << BEISCSI_BIND_Q_TO_ULP_BIT);
1084        }
1085
1086        if (is_chip_be2_be3r(phba)) {
1087                AMAP_SET_BITS(struct amap_be_default_pdu_context,
1088                              rx_pdid, ctxt, 0);
1089                AMAP_SET_BITS(struct amap_be_default_pdu_context,
1090                              rx_pdid_valid, ctxt, 1);
1091                AMAP_SET_BITS(struct amap_be_default_pdu_context,
1092                              pci_func_id, ctxt, PCI_FUNC(ctrl->pdev->devfn));
1093                AMAP_SET_BITS(struct amap_be_default_pdu_context,
1094                              ring_size, ctxt,
1095                              be_encoded_q_len(length /
1096                              sizeof(struct phys_addr)));
1097                AMAP_SET_BITS(struct amap_be_default_pdu_context,
1098                              default_buffer_size, ctxt, entry_size);
1099                AMAP_SET_BITS(struct amap_be_default_pdu_context,
1100                              cq_id_recv, ctxt, cq->id);
1101        } else {
1102                AMAP_SET_BITS(struct amap_default_pdu_context_ext,
1103                              rx_pdid, ctxt, 0);
1104                AMAP_SET_BITS(struct amap_default_pdu_context_ext,
1105                              rx_pdid_valid, ctxt, 1);
1106                AMAP_SET_BITS(struct amap_default_pdu_context_ext,
1107                              ring_size, ctxt,
1108                              be_encoded_q_len(length /
1109                              sizeof(struct phys_addr)));
1110                AMAP_SET_BITS(struct amap_default_pdu_context_ext,
1111                              default_buffer_size, ctxt, entry_size);
1112                AMAP_SET_BITS(struct amap_default_pdu_context_ext,
1113                              cq_id_recv, ctxt, cq->id);
1114        }
1115
1116        be_dws_cpu_to_le(ctxt, sizeof(req->context));
1117
1118        be_cmd_page_addrs_prepare(req->pages, ARRAY_SIZE(req->pages), q_mem);
1119
1120        status = be_mbox_notify(ctrl);
1121        if (!status) {
1122                struct be_ring *defq_ring;
1123                struct be_defq_create_resp *resp = embedded_payload(wrb);
1124
1125                dq->id = le16_to_cpu(resp->id);
1126                dq->created = true;
1127                if (is_header)
1128                        defq_ring = &phba->phwi_ctrlr->default_pdu_hdr[ulp_num];
1129                else
1130                        defq_ring = &phba->phwi_ctrlr->
1131                                    default_pdu_data[ulp_num];
1132
1133                defq_ring->id = dq->id;
1134
1135                if (!phba->fw_config.dual_ulp_aware) {
1136                        defq_ring->ulp_num = BEISCSI_ULP0;
1137                        defq_ring->doorbell_offset = DB_RXULP0_OFFSET;
1138                } else {
1139                        defq_ring->ulp_num = resp->ulp_num;
1140                        defq_ring->doorbell_offset = resp->doorbell_offset;
1141                }
1142        }
1143        spin_unlock(&ctrl->mbox_lock);
1144
1145        return status;
1146}
1147
1148/**
1149 * be_cmd_wrbq_create()- Create WRBQ
1150 * @ctrl: ptr to ctrl_info
1151 * @q_mem: memory details for the queue
1152 * @wrbq: queue info
1153 * @pwrb_context: ptr to wrb_context
1154 * @ulp_num: ULP on which the WRBQ is to be created
1155 *
1156 * Create WRBQ on the passed ULP_NUM.
1157 *
1158 **/
1159int be_cmd_wrbq_create(struct be_ctrl_info *ctrl,
1160                        struct be_dma_mem *q_mem,
1161                        struct be_queue_info *wrbq,
1162                        struct hwi_wrb_context *pwrb_context,
1163                        uint8_t ulp_num)
1164{
1165        struct be_mcc_wrb *wrb = wrb_from_mbox(&ctrl->mbox_mem);
1166        struct be_wrbq_create_req *req = embedded_payload(wrb);
1167        struct be_wrbq_create_resp *resp = embedded_payload(wrb);
1168        struct beiscsi_hba *phba = pci_get_drvdata(ctrl->pdev);
1169        int status;
1170
1171        spin_lock(&ctrl->mbox_lock);
1172        memset(wrb, 0, sizeof(*wrb));
1173
1174        be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0);
1175
1176        be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ISCSI,
1177                OPCODE_COMMON_ISCSI_WRBQ_CREATE, sizeof(*req));
1178        req->num_pages = PAGES_4K_SPANNED(q_mem->va, q_mem->size);
1179
1180        if (phba->fw_config.dual_ulp_aware) {
1181                req->ulp_num = ulp_num;
1182                req->dua_feature |= (1 << BEISCSI_DUAL_ULP_AWARE_BIT);
1183                req->dua_feature |= (1 << BEISCSI_BIND_Q_TO_ULP_BIT);
1184        }
1185
1186        be_cmd_page_addrs_prepare(req->pages, ARRAY_SIZE(req->pages), q_mem);
1187
1188        status = be_mbox_notify(ctrl);
1189        if (!status) {
1190                wrbq->id = le16_to_cpu(resp->cid);
1191                wrbq->created = true;
1192
1193                pwrb_context->cid = wrbq->id;
1194                if (!phba->fw_config.dual_ulp_aware) {
1195                        pwrb_context->doorbell_offset = DB_TXULP0_OFFSET;
1196                        pwrb_context->ulp_num = BEISCSI_ULP0;
1197                } else {
1198                        pwrb_context->ulp_num = resp->ulp_num;
1199                        pwrb_context->doorbell_offset = resp->doorbell_offset;
1200                }
1201        }
1202        spin_unlock(&ctrl->mbox_lock);
1203        return status;
1204}
1205
1206int be_cmd_iscsi_post_template_hdr(struct be_ctrl_info *ctrl,
1207                                    struct be_dma_mem *q_mem)
1208{
1209        struct be_mcc_wrb *wrb = wrb_from_mbox(&ctrl->mbox_mem);
1210        struct be_post_template_pages_req *req = embedded_payload(wrb);
1211        int status;
1212
1213        spin_lock(&ctrl->mbox_lock);
1214
1215        memset(wrb, 0, sizeof(*wrb));
1216        be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0);
1217        be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
1218                           OPCODE_COMMON_ADD_TEMPLATE_HEADER_BUFFERS,
1219                           sizeof(*req));
1220
1221        req->num_pages = PAGES_4K_SPANNED(q_mem->va, q_mem->size);
1222        req->type = BEISCSI_TEMPLATE_HDR_TYPE_ISCSI;
1223        be_cmd_page_addrs_prepare(req->pages, ARRAY_SIZE(req->pages), q_mem);
1224
1225        status = be_mbox_notify(ctrl);
1226        spin_unlock(&ctrl->mbox_lock);
1227        return status;
1228}
1229
1230int be_cmd_iscsi_remove_template_hdr(struct be_ctrl_info *ctrl)
1231{
1232        struct be_mcc_wrb *wrb = wrb_from_mbox(&ctrl->mbox_mem);
1233        struct be_remove_template_pages_req *req = embedded_payload(wrb);
1234        int status;
1235
1236        spin_lock(&ctrl->mbox_lock);
1237
1238        memset(wrb, 0, sizeof(*wrb));
1239        be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0);
1240        be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
1241                           OPCODE_COMMON_REMOVE_TEMPLATE_HEADER_BUFFERS,
1242                           sizeof(*req));
1243
1244        req->type = BEISCSI_TEMPLATE_HDR_TYPE_ISCSI;
1245
1246        status = be_mbox_notify(ctrl);
1247        spin_unlock(&ctrl->mbox_lock);
1248        return status;
1249}
1250
1251int be_cmd_iscsi_post_sgl_pages(struct be_ctrl_info *ctrl,
1252                                struct be_dma_mem *q_mem,
1253                                u32 page_offset, u32 num_pages)
1254{
1255        struct be_mcc_wrb *wrb = wrb_from_mbox(&ctrl->mbox_mem);
1256        struct be_post_sgl_pages_req *req = embedded_payload(wrb);
1257        struct beiscsi_hba *phba = pci_get_drvdata(ctrl->pdev);
1258        int status;
1259        unsigned int curr_pages;
1260        u32 internal_page_offset = 0;
1261        u32 temp_num_pages = num_pages;
1262
1263        if (num_pages == 0xff)
1264                num_pages = 1;
1265
1266        spin_lock(&ctrl->mbox_lock);
1267        do {
1268                memset(wrb, 0, sizeof(*wrb));
1269                be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0);
1270                be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ISCSI,
1271                                   OPCODE_COMMON_ISCSI_CFG_POST_SGL_PAGES,
1272                                   sizeof(*req));
1273                curr_pages = BE_NUMBER_OF_FIELD(struct be_post_sgl_pages_req,
1274                                                pages);
1275                req->num_pages = min(num_pages, curr_pages);
1276                req->page_offset = page_offset;
1277                be_cmd_page_addrs_prepare(req->pages, req->num_pages, q_mem);
1278                q_mem->dma = q_mem->dma + (req->num_pages * PAGE_SIZE);
1279                internal_page_offset += req->num_pages;
1280                page_offset += req->num_pages;
1281                num_pages -= req->num_pages;
1282
1283                if (temp_num_pages == 0xff)
1284                        req->num_pages = temp_num_pages;
1285
1286                status = be_mbox_notify(ctrl);
1287                if (status) {
1288                        beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
1289                                    "BC_%d : FW CMD to map iscsi frags failed.\n");
1290
1291                        goto error;
1292                }
1293        } while (num_pages > 0);
1294error:
1295        spin_unlock(&ctrl->mbox_lock);
1296        if (status != 0)
1297                beiscsi_cmd_q_destroy(ctrl, NULL, QTYPE_SGL);
1298        return status;
1299}
1300
1301int beiscsi_cmd_reset_function(struct beiscsi_hba  *phba)
1302{
1303        struct be_ctrl_info *ctrl = &phba->ctrl;
1304        struct be_mcc_wrb *wrb = wrb_from_mbox(&ctrl->mbox_mem);
1305        struct be_post_sgl_pages_req *req = embedded_payload(wrb);
1306        int status;
1307
1308        spin_lock(&ctrl->mbox_lock);
1309
1310        req = embedded_payload(wrb);
1311        be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0);
1312        be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
1313                           OPCODE_COMMON_FUNCTION_RESET, sizeof(*req));
1314        status = be_mbox_notify_wait(phba);
1315
1316        spin_unlock(&ctrl->mbox_lock);
1317        return status;
1318}
1319
1320/**
1321 * be_cmd_set_vlan()- Configure VLAN parameters on the adapter
1322 * @phba: device priv structure instance
1323 * @vlan_tag: TAG to be set
1324 *
1325 * Set the VLAN_TAG for the adapter or disable VLAN on the adapter
1326 *
1327 * returns
1328 *      TAG for the MBX Cmd
1329 **/
1330int be_cmd_set_vlan(struct beiscsi_hba *phba,
1331                     uint16_t vlan_tag)
1332{
1333        unsigned int tag = 0;
1334        struct be_mcc_wrb *wrb;
1335        struct be_cmd_set_vlan_req *req;
1336        struct be_ctrl_info *ctrl = &phba->ctrl;
1337
1338        spin_lock(&ctrl->mbox_lock);
1339        tag = alloc_mcc_tag(phba);
1340        if (!tag) {
1341                spin_unlock(&ctrl->mbox_lock);
1342                return tag;
1343        }
1344
1345        wrb = wrb_from_mccq(phba);
1346        req = embedded_payload(wrb);
1347        wrb->tag0 |= tag;
1348        be_wrb_hdr_prepare(wrb, sizeof(*wrb), true, 0);
1349        be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ISCSI,
1350                           OPCODE_COMMON_ISCSI_NTWK_SET_VLAN,
1351                           sizeof(*req));
1352
1353        req->interface_hndl = phba->interface_handle;
1354        req->vlan_priority = vlan_tag;
1355
1356        be_mcc_notify(phba);
1357        spin_unlock(&ctrl->mbox_lock);
1358
1359        return tag;
1360}
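
/*
 * Editorial note (not in the original file): be_cmd_set_vlan() only queues
 * the request and returns the MCC tag; a caller would typically wait on it,
 * e.g. (sketch, assuming the usual tag-based flow):
 *
 *	tag = be_cmd_set_vlan(phba, vlan_tag);
 *	if (!tag)
 *		return -EBUSY;
 *	rc = beiscsi_mccq_compl(phba, tag, NULL, NULL);
 *
 * so that the tag is released once the firmware completes the command.
 */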
1361