/* linux/drivers/net/ethernet/emulex/benet/be_cmds.c */
/*
 * Copyright (C) 2005 - 2013 Emulex
 * All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version 2
 * as published by the Free Software Foundation.  The full GNU General
 * Public License is included in this distribution in the file called COPYING.
 *
 * Contact Information:
 * linux-drivers@emulex.com
 *
 * Emulex
 * 3333 Susan Street
 * Costa Mesa, CA 92626
 */
  17
  18#include <linux/module.h>
  19#include "be.h"
  20#include "be_cmds.h"
  21
/* Table of privileged MCC commands: a command listed here may only be
 * issued when the function holds at least one of the privileges in
 * priv_mask. Consulted by be_cmd_allowed(); commands not listed are
 * always permitted.
 */
static struct be_cmd_priv_map cmd_priv_map[] = {
        {
                OPCODE_ETH_ACPI_WOL_MAGIC_CONFIG,
                CMD_SUBSYSTEM_ETH,
                BE_PRIV_LNKMGMT | BE_PRIV_VHADM |
                BE_PRIV_DEVCFG | BE_PRIV_DEVSEC
        },
        {
                OPCODE_COMMON_GET_FLOW_CONTROL,
                CMD_SUBSYSTEM_COMMON,
                BE_PRIV_LNKQUERY | BE_PRIV_VHADM |
                BE_PRIV_DEVCFG | BE_PRIV_DEVSEC
        },
        {
                OPCODE_COMMON_SET_FLOW_CONTROL,
                CMD_SUBSYSTEM_COMMON,
                BE_PRIV_LNKMGMT | BE_PRIV_VHADM |
                BE_PRIV_DEVCFG | BE_PRIV_DEVSEC
        },
        {
                OPCODE_ETH_GET_PPORT_STATS,
                CMD_SUBSYSTEM_ETH,
                BE_PRIV_LNKMGMT | BE_PRIV_VHADM |
                BE_PRIV_DEVCFG | BE_PRIV_DEVSEC
        },
        {
                OPCODE_COMMON_GET_PHY_DETAILS,
                CMD_SUBSYSTEM_COMMON,
                BE_PRIV_LNKMGMT | BE_PRIV_VHADM |
                BE_PRIV_DEVCFG | BE_PRIV_DEVSEC
        }
};
  54
  55static bool be_cmd_allowed(struct be_adapter *adapter, u8 opcode,
  56                           u8 subsystem)
  57{
  58        int i;
  59        int num_entries = sizeof(cmd_priv_map)/sizeof(struct be_cmd_priv_map);
  60        u32 cmd_privileges = adapter->cmd_privileges;
  61
  62        for (i = 0; i < num_entries; i++)
  63                if (opcode == cmd_priv_map[i].opcode &&
  64                    subsystem == cmd_priv_map[i].subsystem)
  65                        if (!(cmd_privileges & cmd_priv_map[i].priv_mask))
  66                                return false;
  67
  68        return true;
  69}
  70
/* Returns the in-WRB payload area used by embedded (non-SGL) commands */
static inline void *embedded_payload(struct be_mcc_wrb *wrb)
{
        return wrb->payload.embedded_payload;
}
  75
/* Ring the MCC queue doorbell to tell the FW that one new WRB has been
 * posted. Skipped entirely once the adapter is in an error state.
 */
static void be_mcc_notify(struct be_adapter *adapter)
{
        struct be_queue_info *mccq = &adapter->mcc_obj.q;
        u32 val = 0;

        if (be_error(adapter))
                return;

        val |= mccq->id & DB_MCCQ_RING_ID_MASK;
        val |= 1 << DB_MCCQ_NUM_POSTED_SHIFT;

        /* Make sure the posted WRB is visible in memory before the
         * doorbell write triggers the FW to read it */
        wmb();
        iowrite32(val, adapter->db + DB_MCCQ_OFFSET);
}
  90
  91/* To check if valid bit is set, check the entire word as we don't know
  92 * the endianness of the data (old entry is host endian while a new entry is
  93 * little endian) */
  94static inline bool be_mcc_compl_is_new(struct be_mcc_compl *compl)
  95{
  96        u32 flags;
  97
  98        if (compl->flags != 0) {
  99                flags = le32_to_cpu(compl->flags);
 100                if (flags & CQE_FLAGS_VALID_MASK) {
 101                        compl->flags = flags;
 102                        return true;
 103                }
 104        }
 105        return false;
 106}
 107
/* Need to reset the entire word that houses the valid bit */
static inline void be_mcc_compl_use(struct be_mcc_compl *compl)
{
        compl->flags = 0;
}
 113
 114static struct be_cmd_resp_hdr *be_decode_resp_hdr(u32 tag0, u32 tag1)
 115{
 116        unsigned long addr;
 117
 118        addr = tag1;
 119        addr = ((addr << 16) << 16) | tag0;
 120        return (void *)addr;
 121}
 122
/* Process one MCC completion entry: byte-swap it to host endian, recover
 * the originating request header from the tags, run opcode-specific
 * bookkeeping, and log/translate failures. Returns the MCC completion
 * status code from the FW.
 */
static int be_mcc_compl_process(struct be_adapter *adapter,
                                struct be_mcc_compl *compl)
{
        u16 compl_status, extd_status;
        struct be_cmd_resp_hdr *resp_hdr;
        u8 opcode = 0, subsystem = 0;

        /* Just swap the status to host endian; mcc tag is opaquely copied
         * from mcc_wrb */
        be_dws_le_to_cpu(compl, 4);

        compl_status = (compl->status >> CQE_STATUS_COMPL_SHIFT) &
                                CQE_STATUS_COMPL_MASK;

        /* tag0/tag1 carry the virtual address of the request header
         * (stashed there by be_wrb_cmd_hdr_prepare()) */
        resp_hdr = be_decode_resp_hdr(compl->tag0, compl->tag1);

        if (resp_hdr) {
                opcode = resp_hdr->opcode;
                subsystem = resp_hdr->subsystem;
        }

        /* Flash commands have a sleeper waiting on flash_compl; wake it
         * regardless of success or failure */
        if (((opcode == OPCODE_COMMON_WRITE_FLASHROM) ||
             (opcode == OPCODE_COMMON_WRITE_OBJECT)) &&
            (subsystem == CMD_SUBSYSTEM_COMMON)) {
                adapter->flash_status = compl_status;
                complete(&adapter->flash_compl);
        }

        if (compl_status == MCC_STATUS_SUCCESS) {
                if (((opcode == OPCODE_ETH_GET_STATISTICS) ||
                     (opcode == OPCODE_ETH_GET_PPORT_STATS)) &&
                    (subsystem == CMD_SUBSYSTEM_ETH)) {
                        be_parse_stats(adapter);
                        adapter->stats_cmd_sent = false;
                }
                if (opcode == OPCODE_COMMON_GET_CNTL_ADDITIONAL_ATTRIBUTES &&
                    subsystem == CMD_SUBSYSTEM_COMMON) {
                        struct be_cmd_resp_get_cntl_addnl_attribs *resp =
                                (void *)resp_hdr;
                        adapter->drv_stats.be_on_die_temperature =
                                resp->on_die_temperature;
                }
        } else {
                /* FW doesn't support the temperature query; stop polling */
                if (opcode == OPCODE_COMMON_GET_CNTL_ADDITIONAL_ATTRIBUTES)
                        adapter->be_get_temp_freq = 0;

                /* Not-supported / illegal-request failures are expected on
                 * some configs; report the status silently */
                if (compl_status == MCC_STATUS_NOT_SUPPORTED ||
                        compl_status == MCC_STATUS_ILLEGAL_REQUEST)
                        goto done;

                if (compl_status == MCC_STATUS_UNAUTHORIZED_REQUEST) {
                        dev_warn(&adapter->pdev->dev,
                                 "VF is not privileged to issue opcode %d-%d\n",
                                 opcode, subsystem);
                } else {
                        extd_status = (compl->status >> CQE_STATUS_EXTD_SHIFT) &
                                        CQE_STATUS_EXTD_MASK;
                        dev_err(&adapter->pdev->dev,
                                "opcode %d-%d failed:status %d-%d\n",
                                opcode, subsystem, compl_status, extd_status);
                }
        }
done:
        return compl_status;
}
 188
/* Link state evt is a string of bytes; no need for endian swapping */
static void be_async_link_state_process(struct be_adapter *adapter,
                struct be_async_event_link_state *evt)
{
        /* When link status changes, link speed must be re-queried from FW */
        adapter->phy.link_speed = -1;

        /* Ignore physical link event */
        if (lancer_chip(adapter) &&
            !(evt->port_link_status & LOGICAL_LINK_STATUS_MASK))
                return;

        /* For the initial link status do not rely on the ASYNC event as
         * it may not be received in some cases.
         */
        if (adapter->flags & BE_FLAGS_LINK_STATUS_INIT)
                be_link_status_update(adapter, evt->port_link_status);
}
 207
 208/* Grp5 CoS Priority evt */
 209static void be_async_grp5_cos_priority_process(struct be_adapter *adapter,
 210                struct be_async_event_grp5_cos_priority *evt)
 211{
 212        if (evt->valid) {
 213                adapter->vlan_prio_bmap = evt->available_priority_bmap;
 214                adapter->recommended_prio &= ~VLAN_PRIO_MASK;
 215                adapter->recommended_prio =
 216                        evt->reco_default_priority << VLAN_PRIO_SHIFT;
 217        }
 218}
 219
 220/* Grp5 QOS Speed evt: qos_link_speed is in units of 10 Mbps */
 221static void be_async_grp5_qos_speed_process(struct be_adapter *adapter,
 222                struct be_async_event_grp5_qos_link_speed *evt)
 223{
 224        if (adapter->phy.link_speed >= 0 &&
 225            evt->physical_port == adapter->port_num)
 226                adapter->phy.link_speed = le16_to_cpu(evt->qos_link_speed) * 10;
 227}
 228
 229/*Grp5 PVID evt*/
 230static void be_async_grp5_pvid_state_process(struct be_adapter *adapter,
 231                struct be_async_event_grp5_pvid_state *evt)
 232{
 233        if (evt->enabled)
 234                adapter->pvid = le16_to_cpu(evt->tag) & VLAN_VID_MASK;
 235        else
 236                adapter->pvid = 0;
 237}
 238
 239static void be_async_grp5_evt_process(struct be_adapter *adapter,
 240                u32 trailer, struct be_mcc_compl *evt)
 241{
 242        u8 event_type = 0;
 243
 244        event_type = (trailer >> ASYNC_TRAILER_EVENT_TYPE_SHIFT) &
 245                ASYNC_TRAILER_EVENT_TYPE_MASK;
 246
 247        switch (event_type) {
 248        case ASYNC_EVENT_COS_PRIORITY:
 249                be_async_grp5_cos_priority_process(adapter,
 250                (struct be_async_event_grp5_cos_priority *)evt);
 251        break;
 252        case ASYNC_EVENT_QOS_SPEED:
 253                be_async_grp5_qos_speed_process(adapter,
 254                (struct be_async_event_grp5_qos_link_speed *)evt);
 255        break;
 256        case ASYNC_EVENT_PVID_STATE:
 257                be_async_grp5_pvid_state_process(adapter,
 258                (struct be_async_event_grp5_pvid_state *)evt);
 259        break;
 260        default:
 261                dev_warn(&adapter->pdev->dev, "Unknown grp5 event!\n");
 262                break;
 263        }
 264}
 265
 266static void be_async_dbg_evt_process(struct be_adapter *adapter,
 267                u32 trailer, struct be_mcc_compl *cmp)
 268{
 269        u8 event_type = 0;
 270        struct be_async_event_qnq *evt = (struct be_async_event_qnq *) cmp;
 271
 272        event_type = (trailer >> ASYNC_TRAILER_EVENT_TYPE_SHIFT) &
 273                ASYNC_TRAILER_EVENT_TYPE_MASK;
 274
 275        switch (event_type) {
 276        case ASYNC_DEBUG_EVENT_TYPE_QNQ:
 277                if (evt->valid)
 278                        adapter->qnq_vid = le16_to_cpu(evt->vlan_tag);
 279                adapter->flags |= BE_FLAGS_QNQ_ASYNC_EVT_RCVD;
 280        break;
 281        default:
 282                dev_warn(&adapter->pdev->dev, "Unknown debug event\n");
 283        break;
 284        }
 285}
 286
 287static inline bool is_link_state_evt(u32 trailer)
 288{
 289        return ((trailer >> ASYNC_TRAILER_EVENT_CODE_SHIFT) &
 290                ASYNC_TRAILER_EVENT_CODE_MASK) ==
 291                                ASYNC_EVENT_CODE_LINK_STATE;
 292}
 293
 294static inline bool is_grp5_evt(u32 trailer)
 295{
 296        return (((trailer >> ASYNC_TRAILER_EVENT_CODE_SHIFT) &
 297                ASYNC_TRAILER_EVENT_CODE_MASK) ==
 298                                ASYNC_EVENT_CODE_GRP_5);
 299}
 300
 301static inline bool is_dbg_evt(u32 trailer)
 302{
 303        return (((trailer >> ASYNC_TRAILER_EVENT_CODE_SHIFT) &
 304                ASYNC_TRAILER_EVENT_CODE_MASK) ==
 305                                ASYNC_EVENT_CODE_QNQ);
 306}
 307
/* Pop the next new completion off the MCC CQ, or NULL if none pending.
 * The tail is only advanced when a new entry is actually consumed.
 */
static struct be_mcc_compl *be_mcc_compl_get(struct be_adapter *adapter)
{
        struct be_queue_info *mcc_cq = &adapter->mcc_obj.cq;
        struct be_mcc_compl *compl = queue_tail_node(mcc_cq);

        if (be_mcc_compl_is_new(compl)) {
                queue_tail_inc(mcc_cq);
                return compl;
        }
        return NULL;
}
 319
/* Arm the MCC CQ so async events generate interrupts; subsequent
 * be_process_mcc() calls will keep re-arming it (rearm_cq = true).
 */
void be_async_mcc_enable(struct be_adapter *adapter)
{
        spin_lock_bh(&adapter->mcc_cq_lock);

        be_cq_notify(adapter, adapter->mcc_obj.cq.id, true, 0);
        adapter->mcc_obj.rearm_cq = true;

        spin_unlock_bh(&adapter->mcc_cq_lock);
}
 329
/* Stop re-arming the MCC CQ and notify it un-armed, quiescing async
 * event interrupts.
 */
void be_async_mcc_disable(struct be_adapter *adapter)
{
        spin_lock_bh(&adapter->mcc_cq_lock);

        /* Clear the flag first so a concurrent be_process_mcc() does not
         * re-arm after our notify — NOTE(review): ordering inferred from
         * statement order; confirm against be_process_mcc() */
        adapter->mcc_obj.rearm_cq = false;
        be_cq_notify(adapter, adapter->mcc_obj.cq.id, false, 0);

        spin_unlock_bh(&adapter->mcc_cq_lock);
}
 339
/* Drain the MCC completion queue: dispatch async events, process command
 * completions, and re-notify the CQ for the entries consumed.
 * Returns the status of the last command completion processed (0 if only
 * async events were seen).
 * NOTE(review): takes mcc_cq_lock with plain spin_lock; callers appear to
 * run in BH context or disable BHs (see be_mcc_wait_compl) — confirm.
 */
int be_process_mcc(struct be_adapter *adapter)
{
        struct be_mcc_compl *compl;
        int num = 0, status = 0;
        struct be_mcc_obj *mcc_obj = &adapter->mcc_obj;

        spin_lock(&adapter->mcc_cq_lock);
        while ((compl = be_mcc_compl_get(adapter))) {
                if (compl->flags & CQE_FLAGS_ASYNC_MASK) {
                        /* Interpret flags as an async trailer */
                        if (is_link_state_evt(compl->flags))
                                be_async_link_state_process(adapter,
                                (struct be_async_event_link_state *) compl);
                        else if (is_grp5_evt(compl->flags))
                                be_async_grp5_evt_process(adapter,
                                compl->flags, compl);
                        else if (is_dbg_evt(compl->flags))
                                be_async_dbg_evt_process(adapter,
                                compl->flags, compl);
                } else if (compl->flags & CQE_FLAGS_COMPLETED_MASK) {
                                status = be_mcc_compl_process(adapter, compl);
                                /* One fewer outstanding WRB in the MCCQ */
                                atomic_dec(&mcc_obj->q.used);
                }
                /* Clear the valid word so this CQ slot can be reused */
                be_mcc_compl_use(compl);
                num++;
        }

        if (num)
                be_cq_notify(adapter, mcc_obj->cq.id, mcc_obj->rearm_cq, num);

        spin_unlock(&adapter->mcc_cq_lock);
        return status;
}
 373
/* Wait till no more pending mcc requests are present */
static int be_mcc_wait_compl(struct be_adapter *adapter)
{
#define mcc_timeout             120000 /* 12s timeout */
        int i, status = 0;
        struct be_mcc_obj *mcc_obj = &adapter->mcc_obj;

        for (i = 0; i < mcc_timeout; i++) {
                if (be_error(adapter))
                        return -EIO;

                /* NOTE(review): BHs are disabled around be_process_mcc(),
                 * presumably because its lock is also taken from BH
                 * context — confirm */
                local_bh_disable();
                status = be_process_mcc(adapter);
                local_bh_enable();

                /* Done once no WRBs remain outstanding in the MCCQ */
                if (atomic_read(&mcc_obj->q.used) == 0)
                        break;
                udelay(100);
        }
        if (i == mcc_timeout) {
                dev_err(&adapter->pdev->dev, "FW not responding\n");
                /* Mark FW dead so further commands are short-circuited */
                adapter->fw_timeout = true;
                return -EIO;
        }
        return status;
}
 400
/* Notify MCC requests and wait for completion */
static int be_mcc_notify_wait(struct be_adapter *adapter)
{
        int status;
        struct be_mcc_wrb *wrb;
        struct be_mcc_obj *mcc_obj = &adapter->mcc_obj;
        u16 index = mcc_obj->q.head;
        struct be_cmd_resp_hdr *resp;

        /* The most recently posted WRB sits just behind the queue head */
        index_dec(&index, mcc_obj->q.len);
        wrb = queue_index_node(&mcc_obj->q, index);

        /* Recover the request/response buffer address stashed in the
         * WRB tags by be_wrb_cmd_hdr_prepare() */
        resp = be_decode_resp_hdr(wrb->tag0, wrb->tag1);

        be_mcc_notify(adapter);

        status = be_mcc_wait_compl(adapter);
        if (status == -EIO)
                goto out;

        /* Report the FW status from the response header */
        status = resp->status;
out:
        return status;
}
 425
/* Poll the mailbox doorbell's ready bit.
 * Returns 0 when ready, -EIO if the adapter is already in error, and -1
 * on a dead register read or after ~4s without the bit setting (which
 * also flags the FW as timed out).
 */
static int be_mbox_db_ready_wait(struct be_adapter *adapter, void __iomem *db)
{
        int msecs = 0;
        u32 ready;

        do {
                if (be_error(adapter))
                        return -EIO;

                ready = ioread32(db);
                /* All-ones typically means the PCI device is gone */
                if (ready == 0xffffffff)
                        return -1;

                ready &= MPU_MAILBOX_DB_RDY_MASK;
                if (ready)
                        break;

                if (msecs > 4000) {
                        dev_err(&adapter->pdev->dev, "FW not responding\n");
                        adapter->fw_timeout = true;
                        be_detect_error(adapter);
                        return -1;
                }

                msleep(1);
                msecs++;
        } while (true);

        return 0;
}
 456
/*
 * Insert the mailbox address into the doorbell in two steps
 * Polls on the mbox doorbell till a command completion (or a timeout) occurs
 */
static int be_mbox_notify_wait(struct be_adapter *adapter)
{
        int status;
        u32 val = 0;
        void __iomem *db = adapter->db + MPU_MAILBOX_DB_OFFSET;
        struct be_dma_mem *mbox_mem = &adapter->mbox_mem;
        struct be_mcc_mailbox *mbox = mbox_mem->va;
        struct be_mcc_compl *compl = &mbox->compl;

        /* wait for ready to be set */
        status = be_mbox_db_ready_wait(adapter, db);
        if (status != 0)
                return status;

        /* Step 1: write the high half of the mailbox DMA address */
        val |= MPU_MAILBOX_DB_HI_MASK;
        /* at bits 2 - 31 place mbox dma addr msb bits 34 - 63 */
        val |= (upper_32_bits(mbox_mem->dma) >> 2) << 2;
        iowrite32(val, db);

        /* wait for ready to be set */
        status = be_mbox_db_ready_wait(adapter, db);
        if (status != 0)
                return status;

        /* Step 2: write the low half; this write kicks off the command */
        val = 0;
        /* at bits 2 - 31 place mbox dma addr lsb bits 4 - 33 */
        val |= (u32)(mbox_mem->dma >> 4) << 2;
        iowrite32(val, db);

        status = be_mbox_db_ready_wait(adapter, db);
        if (status != 0)
                return status;

        /* A cq entry has been made now */
        if (be_mcc_compl_is_new(compl)) {
                status = be_mcc_compl_process(adapter, &mbox->compl);
                be_mcc_compl_use(compl);
                if (status)
                        return status;
        } else {
                dev_err(&adapter->pdev->dev, "invalid mailbox completion\n");
                return -1;
        }
        return 0;
}
 506
/* Read the current POST (power-on self test) stage.
 * BEx chips expose the SLIPORT semaphore via a CSR register; newer chips
 * expose it through PCI config space.
 */
static u16 be_POST_stage_get(struct be_adapter *adapter)
{
        u32 sem;

        if (BEx_chip(adapter))
                sem  = ioread32(adapter->csr + SLIPORT_SEMAPHORE_OFFSET_BEx);
        else
                pci_read_config_dword(adapter->pdev,
                                      SLIPORT_SEMAPHORE_OFFSET_SH, &sem);

        return sem & POST_STAGE_MASK;
}
 519
 520int lancer_wait_ready(struct be_adapter *adapter)
 521{
 522#define SLIPORT_READY_TIMEOUT 30
 523        u32 sliport_status;
 524        int status = 0, i;
 525
 526        for (i = 0; i < SLIPORT_READY_TIMEOUT; i++) {
 527                sliport_status = ioread32(adapter->db + SLIPORT_STATUS_OFFSET);
 528                if (sliport_status & SLIPORT_STATUS_RDY_MASK)
 529                        break;
 530
 531                msleep(1000);
 532        }
 533
 534        if (i == SLIPORT_READY_TIMEOUT)
 535                status = -1;
 536
 537        return status;
 538}
 539
 540static bool lancer_provisioning_error(struct be_adapter *adapter)
 541{
 542        u32 sliport_status = 0, sliport_err1 = 0, sliport_err2 = 0;
 543        sliport_status = ioread32(adapter->db + SLIPORT_STATUS_OFFSET);
 544        if (sliport_status & SLIPORT_STATUS_ERR_MASK) {
 545                sliport_err1 = ioread32(adapter->db +
 546                                        SLIPORT_ERROR1_OFFSET);
 547                sliport_err2 = ioread32(adapter->db +
 548                                        SLIPORT_ERROR2_OFFSET);
 549
 550                if (sliport_err1 == SLIPORT_ERROR_NO_RESOURCE1 &&
 551                    sliport_err2 == SLIPORT_ERROR_NO_RESOURCE2)
 552                        return true;
 553        }
 554        return false;
 555}
 556
/* Bring a Lancer chip to the ready state, resetting it if the status
 * register asks for a reset. Returns 0 on success, -EAGAIN while the PF
 * has not yet provisioned resources (temporary), -1 on hard failure.
 */
int lancer_test_and_set_rdy_state(struct be_adapter *adapter)
{
        int status;
        u32 sliport_status, err, reset_needed;
        bool resource_error;

        /* Provisioning errors are transient; don't reset, just retry later */
        resource_error = lancer_provisioning_error(adapter);
        if (resource_error)
                return -EAGAIN;

        status = lancer_wait_ready(adapter);
        if (!status) {
                sliport_status = ioread32(adapter->db + SLIPORT_STATUS_OFFSET);
                err = sliport_status & SLIPORT_STATUS_ERR_MASK;
                reset_needed = sliport_status & SLIPORT_STATUS_RN_MASK;
                if (err && reset_needed) {
                        /* FW requested a reset: issue the initiate-physical
                         * reset and wait for it to complete */
                        iowrite32(SLI_PORT_CONTROL_IP_MASK,
                                  adapter->db + SLIPORT_CONTROL_OFFSET);

                        /* check adapter has corrected the error */
                        status = lancer_wait_ready(adapter);
                        sliport_status = ioread32(adapter->db +
                                                  SLIPORT_STATUS_OFFSET);
                        sliport_status &= (SLIPORT_STATUS_ERR_MASK |
                                                SLIPORT_STATUS_RN_MASK);
                        if (status || sliport_status)
                                status = -1;
                } else if (err || reset_needed) {
                        status = -1;
                }
        }
        /* Stop error recovery if error is not recoverable.
         * No resource error is temporary errors and will go away
         * when PF provisions resources.
         */
        resource_error = lancer_provisioning_error(adapter);
        if (resource_error)
                status = -EAGAIN;

        return status;
}
 598
/* Wait for the FW to finish POST.
 * Lancer chips use the SLIPORT ready bit; BE chips poll the POST stage for
 * up to ~60s. Returns 0 when ready, -EINTR if interrupted, negative on
 * timeout/failure.
 */
int be_fw_wait_ready(struct be_adapter *adapter)
{
        u16 stage;
        int status, timeout = 0;
        struct device *dev = &adapter->pdev->dev;

        if (lancer_chip(adapter)) {
                status = lancer_wait_ready(adapter);
                return status;
        }

        do {
                stage = be_POST_stage_get(adapter);
                if (stage == POST_STAGE_ARMFW_RDY)
                        return 0;

                dev_info(dev, "Waiting for POST, %ds elapsed\n",
                         timeout);
                /* msleep_interruptible returns non-zero if a signal
                 * interrupted the sleep */
                if (msleep_interruptible(2000)) {
                        dev_err(dev, "Waiting for POST aborted\n");
                        return -EINTR;
                }
                timeout += 2;
        } while (timeout < 60);

        dev_err(dev, "POST timeout; stage=0x%x\n", stage);
        return -1;
}
 627
 628
/* Returns the first scatter-gather entry of a non-embedded WRB */
static inline struct be_sge *nonembedded_sgl(struct be_mcc_wrb *wrb)
{
        return &wrb->payload.sgl[0];
}
 633
 634
/* Don't touch the hdr after it's prepared */
/* mem will be NULL for embedded commands */
static void be_wrb_cmd_hdr_prepare(struct be_cmd_req_hdr *req_hdr,
                                u8 subsystem, u8 opcode, int cmd_len,
                                struct be_mcc_wrb *wrb, struct be_dma_mem *mem)
{
        struct be_sge *sge;
        unsigned long addr = (unsigned long)req_hdr;
        u64 req_addr = addr;

        req_hdr->opcode = opcode;
        req_hdr->subsystem = subsystem;
        req_hdr->request_length = cpu_to_le32(cmd_len - sizeof(*req_hdr));
        req_hdr->version = 0;

        /* Stash the request-header address in the tags so the completion
         * path can recover it (see be_decode_resp_hdr()) */
        wrb->tag0 = req_addr & 0xFFFFFFFF;
        wrb->tag1 = upper_32_bits(req_addr);

        wrb->payload_length = cmd_len;
        if (mem) {
                /* Non-embedded command: payload lives in an external DMA
                 * buffer described by one SGE */
                wrb->embedded |= (1 & MCC_WRB_SGE_CNT_MASK) <<
                        MCC_WRB_SGE_CNT_SHIFT;
                sge = nonembedded_sgl(wrb);
                sge->pa_hi = cpu_to_le32(upper_32_bits(mem->dma));
                sge->pa_lo = cpu_to_le32(mem->dma & 0xFFFFFFFF);
                sge->len = cpu_to_le32(mem->size);
        } else
                wrb->embedded |= MCC_WRB_EMBEDDED_MASK;
        be_dws_cpu_to_le(wrb, 8);
}
 665
 666static void be_cmd_page_addrs_prepare(struct phys_addr *pages, u32 max_pages,
 667                        struct be_dma_mem *mem)
 668{
 669        int i, buf_pages = min(PAGES_4K_SPANNED(mem->va, mem->size), max_pages);
 670        u64 dma = (u64)mem->dma;
 671
 672        for (i = 0; i < buf_pages; i++) {
 673                pages[i].lo = cpu_to_le32(dma & 0xFFFFFFFF);
 674                pages[i].hi = cpu_to_le32(upper_32_bits(dma));
 675                dma += PAGE_SIZE_4K;
 676        }
 677}
 678
 679/* Converts interrupt delay in microseconds to multiplier value */
 680static u32 eq_delay_to_mult(u32 usec_delay)
 681{
 682#define MAX_INTR_RATE                   651042
 683        const u32 round = 10;
 684        u32 multiplier;
 685
 686        if (usec_delay == 0)
 687                multiplier = 0;
 688        else {
 689                u32 interrupt_rate = 1000000 / usec_delay;
 690                /* Max delay, corresponding to the lowest interrupt rate */
 691                if (interrupt_rate == 0)
 692                        multiplier = 1023;
 693                else {
 694                        multiplier = (MAX_INTR_RATE - interrupt_rate) * round;
 695                        multiplier /= interrupt_rate;
 696                        /* Round the multiplier to the closest value.*/
 697                        multiplier = (multiplier + round/2) / round;
 698                        multiplier = min(multiplier, (u32)1023);
 699                }
 700        }
 701        return multiplier;
 702}
 703
/* Returns the (zeroed) WRB inside the bootstrap mailbox.
 * Caller must hold mbox_lock — NOTE(review): inferred from callers in
 * this file; confirm.
 */
static inline struct be_mcc_wrb *wrb_from_mbox(struct be_adapter *adapter)
{
        struct be_dma_mem *mbox_mem = &adapter->mbox_mem;
        struct be_mcc_wrb *wrb
                = &((struct be_mcc_mailbox *)(mbox_mem->va))->wrb;
        memset(wrb, 0, sizeof(*wrb));
        return wrb;
}
 712
/* Reserve and return the next (zeroed) WRB from the MCC queue, advancing
 * the head and the outstanding-WRB count. Returns NULL if the queue has
 * not been created yet or is full.
 */
static struct be_mcc_wrb *wrb_from_mccq(struct be_adapter *adapter)
{
        struct be_queue_info *mccq = &adapter->mcc_obj.q;
        struct be_mcc_wrb *wrb;

        if (!mccq->created)
                return NULL;

        if (atomic_read(&mccq->used) >= mccq->len)
                return NULL;

        wrb = queue_head_node(mccq);
        queue_head_inc(mccq);
        atomic_inc(&mccq->used);
        memset(wrb, 0, sizeof(*wrb));
        return wrb;
}
 730
 731/* Tell fw we're about to start firing cmds by writing a
 732 * special pattern across the wrb hdr; uses mbox
 733 */
 734int be_cmd_fw_init(struct be_adapter *adapter)
 735{
 736        u8 *wrb;
 737        int status;
 738
 739        if (lancer_chip(adapter))
 740                return 0;
 741
 742        if (mutex_lock_interruptible(&adapter->mbox_lock))
 743                return -1;
 744
 745        wrb = (u8 *)wrb_from_mbox(adapter);
 746        *wrb++ = 0xFF;
 747        *wrb++ = 0x12;
 748        *wrb++ = 0x34;
 749        *wrb++ = 0xFF;
 750        *wrb++ = 0xFF;
 751        *wrb++ = 0x56;
 752        *wrb++ = 0x78;
 753        *wrb = 0xFF;
 754
 755        status = be_mbox_notify_wait(adapter);
 756
 757        mutex_unlock(&adapter->mbox_lock);
 758        return status;
 759}
 760
 761/* Tell fw we're done with firing cmds by writing a
 762 * special pattern across the wrb hdr; uses mbox
 763 */
 764int be_cmd_fw_clean(struct be_adapter *adapter)
 765{
 766        u8 *wrb;
 767        int status;
 768
 769        if (lancer_chip(adapter))
 770                return 0;
 771
 772        if (mutex_lock_interruptible(&adapter->mbox_lock))
 773                return -1;
 774
 775        wrb = (u8 *)wrb_from_mbox(adapter);
 776        *wrb++ = 0xFF;
 777        *wrb++ = 0xAA;
 778        *wrb++ = 0xBB;
 779        *wrb++ = 0xFF;
 780        *wrb++ = 0xFF;
 781        *wrb++ = 0xCC;
 782        *wrb++ = 0xDD;
 783        *wrb = 0xFF;
 784
 785        status = be_mbox_notify_wait(adapter);
 786
 787        mutex_unlock(&adapter->mbox_lock);
 788        return status;
 789}
 790
/* Create an event queue in the FW via the bootstrap mailbox.
 * On success stores the FW-assigned id in eq->id and marks it created.
 */
int be_cmd_eq_create(struct be_adapter *adapter,
                struct be_queue_info *eq, int eq_delay)
{
        struct be_mcc_wrb *wrb;
        struct be_cmd_req_eq_create *req;
        struct be_dma_mem *q_mem = &eq->dma_mem;
        int status;

        if (mutex_lock_interruptible(&adapter->mbox_lock))
                return -1;

        wrb = wrb_from_mbox(adapter);
        req = embedded_payload(wrb);

        be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
                OPCODE_COMMON_EQ_CREATE, sizeof(*req), wrb, NULL);

        req->num_pages =  cpu_to_le16(PAGES_4K_SPANNED(q_mem->va, q_mem->size));

        AMAP_SET_BITS(struct amap_eq_context, valid, req->context, 1);
        /* 4byte eqe*/
        AMAP_SET_BITS(struct amap_eq_context, size, req->context, 0);
        /* FW encodes the ring size as log2(len/256) */
        AMAP_SET_BITS(struct amap_eq_context, count, req->context,
                        __ilog2_u32(eq->len/256));
        AMAP_SET_BITS(struct amap_eq_context, delaymult, req->context,
                        eq_delay_to_mult(eq_delay));
        be_dws_cpu_to_le(req->context, sizeof(req->context));

        be_cmd_page_addrs_prepare(req->pages, ARRAY_SIZE(req->pages), q_mem);

        status = be_mbox_notify_wait(adapter);
        if (!status) {
                struct be_cmd_resp_eq_create *resp = embedded_payload(wrb);
                eq->id = le16_to_cpu(resp->eq_id);
                eq->created = true;
        }

        mutex_unlock(&adapter->mbox_lock);
        return status;
}
 831
/* Use MCC */
/* Query a MAC address from the FW: either the permanent (factory) MAC or
 * the MAC identified by (if_handle, pmac_id). On success copies ETH_ALEN
 * bytes into mac_addr.
 */
int be_cmd_mac_addr_query(struct be_adapter *adapter, u8 *mac_addr,
                          bool permanent, u32 if_handle, u32 pmac_id)
{
        struct be_mcc_wrb *wrb;
        struct be_cmd_req_mac_query *req;
        int status;

        spin_lock_bh(&adapter->mcc_lock);

        wrb = wrb_from_mccq(adapter);
        if (!wrb) {
                /* MCCQ full or not yet created */
                status = -EBUSY;
                goto err;
        }
        req = embedded_payload(wrb);

        be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
                OPCODE_COMMON_NTWK_MAC_QUERY, sizeof(*req), wrb, NULL);
        req->type = MAC_ADDRESS_TYPE_NETWORK;
        if (permanent) {
                req->permanent = 1;
        } else {
                req->if_id = cpu_to_le16((u16) if_handle);
                req->pmac_id = cpu_to_le32(pmac_id);
                req->permanent = 0;
        }

        status = be_mcc_notify_wait(adapter);
        if (!status) {
                struct be_cmd_resp_mac_query *resp = embedded_payload(wrb);
                memcpy(mac_addr, resp->mac.addr, ETH_ALEN);
        }

err:
        spin_unlock_bh(&adapter->mcc_lock);
        return status;
}
 870
/* Uses synchronous MCCQ */
/* Add a MAC address to interface if_id; on success the FW-assigned pmac id
 * is returned through *pmac_id. Returns -EPERM when the function lacks the
 * privilege for the operation.
 */
int be_cmd_pmac_add(struct be_adapter *adapter, u8 *mac_addr,
                u32 if_id, u32 *pmac_id, u32 domain)
{
        struct be_mcc_wrb *wrb;
        struct be_cmd_req_pmac_add *req;
        int status;

        spin_lock_bh(&adapter->mcc_lock);

        wrb = wrb_from_mccq(adapter);
        if (!wrb) {
                status = -EBUSY;
                goto err;
        }
        req = embedded_payload(wrb);

        be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
                OPCODE_COMMON_NTWK_PMAC_ADD, sizeof(*req), wrb, NULL);

        req->hdr.domain = domain;
        req->if_id = cpu_to_le32(if_id);
        memcpy(req->mac_address, mac_addr, ETH_ALEN);

        status = be_mcc_notify_wait(adapter);
        if (!status) {
                struct be_cmd_resp_pmac_add *resp = embedded_payload(wrb);
                *pmac_id = le32_to_cpu(resp->pmac_id);
        }

err:
        spin_unlock_bh(&adapter->mcc_lock);

        /* Translate the FW's unauthorized-request status to -EPERM */
        if (status == MCC_STATUS_UNAUTHORIZED_REQUEST)
                status = -EPERM;

        return status;
}
 909
 910/* Uses synchronous MCCQ */
 911int be_cmd_pmac_del(struct be_adapter *adapter, u32 if_id, int pmac_id, u32 dom)
 912{
 913        struct be_mcc_wrb *wrb;
 914        struct be_cmd_req_pmac_del *req;
 915        int status;
 916
 917        if (pmac_id == -1)
 918                return 0;
 919
 920        spin_lock_bh(&adapter->mcc_lock);
 921
 922        wrb = wrb_from_mccq(adapter);
 923        if (!wrb) {
 924                status = -EBUSY;
 925                goto err;
 926        }
 927        req = embedded_payload(wrb);
 928
 929        be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
 930                OPCODE_COMMON_NTWK_PMAC_DEL, sizeof(*req), wrb, NULL);
 931
 932        req->hdr.domain = dom;
 933        req->if_id = cpu_to_le32(if_id);
 934        req->pmac_id = cpu_to_le32(pmac_id);
 935
 936        status = be_mcc_notify_wait(adapter);
 937
 938err:
 939        spin_unlock_bh(&adapter->mcc_lock);
 940        return status;
 941}
 942
/* Uses Mbox */
/* Creates a completion queue (CQ) bound to the given event queue (EQ).
 * The CQ context layout differs per ASIC generation: BE2/BE3 use the v0
 * context, everything else (Lancer/Skyhawk) uses the v2 context.
 * Returns 0 on success; on success cq->id and cq->created are updated.
 */
int be_cmd_cq_create(struct be_adapter *adapter, struct be_queue_info *cq,
                struct be_queue_info *eq, bool no_delay, int coalesce_wm)
{
        struct be_mcc_wrb *wrb;
        struct be_cmd_req_cq_create *req;
        struct be_dma_mem *q_mem = &cq->dma_mem;
        void *ctxt;
        int status;

        if (mutex_lock_interruptible(&adapter->mbox_lock))
                return -1;

        wrb = wrb_from_mbox(adapter);
        req = embedded_payload(wrb);
        ctxt = &req->context;

        be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
                OPCODE_COMMON_CQ_CREATE, sizeof(*req), wrb, NULL);

        req->num_pages =  cpu_to_le16(PAGES_4K_SPANNED(q_mem->va, q_mem->size));

        if (BEx_chip(adapter)) {
                /* BE2/BE3: v0 context carries the coalescing watermark */
                AMAP_SET_BITS(struct amap_cq_context_be, coalescwm, ctxt,
                                                                coalesce_wm);
                AMAP_SET_BITS(struct amap_cq_context_be, nodelay,
                                                                ctxt, no_delay);
                /* Ring size is encoded as log2(len/256) */
                AMAP_SET_BITS(struct amap_cq_context_be, count, ctxt,
                                                __ilog2_u32(cq->len/256));
                AMAP_SET_BITS(struct amap_cq_context_be, valid, ctxt, 1);
                AMAP_SET_BITS(struct amap_cq_context_be, eventable, ctxt, 1);
                AMAP_SET_BITS(struct amap_cq_context_be, eqid, ctxt, eq->id);
        } else {
                /* Lancer/Skyhawk: v2 of the command/context */
                req->hdr.version = 2;
                req->page_size = 1; /* 1 for 4K */
                AMAP_SET_BITS(struct amap_cq_context_v2, nodelay, ctxt,
                                                                no_delay);
                AMAP_SET_BITS(struct amap_cq_context_v2, count, ctxt,
                                                __ilog2_u32(cq->len/256));
                AMAP_SET_BITS(struct amap_cq_context_v2, valid, ctxt, 1);
                AMAP_SET_BITS(struct amap_cq_context_v2, eventable,
                                                                ctxt, 1);
                AMAP_SET_BITS(struct amap_cq_context_v2, eqid,
                                                                ctxt, eq->id);
        }

        /* Context words go out to HW in little-endian */
        be_dws_cpu_to_le(ctxt, sizeof(req->context));

        be_cmd_page_addrs_prepare(req->pages, ARRAY_SIZE(req->pages), q_mem);

        status = be_mbox_notify_wait(adapter);
        if (!status) {
                struct be_cmd_resp_cq_create *resp = embedded_payload(wrb);
                cq->id = le16_to_cpu(resp->cq_id);
                cq->created = true;
        }

        mutex_unlock(&adapter->mbox_lock);

        return status;
}
1004
1005static u32 be_encoded_q_len(int q_len)
1006{
1007        u32 len_encoded = fls(q_len); /* log2(len) + 1 */
1008        if (len_encoded == 16)
1009                len_encoded = 0;
1010        return len_encoded;
1011}
1012
/* Creates the MCC queue using the extended MCC_CREATE_EXT command, which
 * additionally lets the driver subscribe to specific async event groups.
 * Uses the Mbox (the MCCQ itself does not exist yet).
 * On success mccq->id and mccq->created are updated.
 */
int be_cmd_mccq_ext_create(struct be_adapter *adapter,
                        struct be_queue_info *mccq,
                        struct be_queue_info *cq)
{
        struct be_mcc_wrb *wrb;
        struct be_cmd_req_mcc_ext_create *req;
        struct be_dma_mem *q_mem = &mccq->dma_mem;
        void *ctxt;
        int status;

        if (mutex_lock_interruptible(&adapter->mbox_lock))
                return -1;

        wrb = wrb_from_mbox(adapter);
        req = embedded_payload(wrb);
        ctxt = &req->context;

        be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
                        OPCODE_COMMON_MCC_CREATE_EXT, sizeof(*req), wrb, NULL);

        req->num_pages = cpu_to_le16(PAGES_4K_SPANNED(q_mem->va, q_mem->size));
        if (lancer_chip(adapter)) {
                /* Lancer uses v1 of the cmd and its own context layout */
                req->hdr.version = 1;
                req->cq_id = cpu_to_le16(cq->id);

                AMAP_SET_BITS(struct amap_mcc_context_lancer, ring_size, ctxt,
                                                be_encoded_q_len(mccq->len));
                AMAP_SET_BITS(struct amap_mcc_context_lancer, valid, ctxt, 1);
                AMAP_SET_BITS(struct amap_mcc_context_lancer, async_cq_id,
                                                                ctxt, cq->id);
                AMAP_SET_BITS(struct amap_mcc_context_lancer, async_cq_valid,
                                                                 ctxt, 1);

        } else {
                AMAP_SET_BITS(struct amap_mcc_context_be, valid, ctxt, 1);
                AMAP_SET_BITS(struct amap_mcc_context_be, ring_size, ctxt,
                                                be_encoded_q_len(mccq->len));
                AMAP_SET_BITS(struct amap_mcc_context_be, cq_id, ctxt, cq->id);
        }

        /* Subscribe to Link State and Group 5 Events(bits 1 and 5 set) */
        req->async_event_bitmap[0] = cpu_to_le32(0x00000022);
        /* Also subscribe to QnQ (priority-tag) async events */
        req->async_event_bitmap[0] |= cpu_to_le32(1 << ASYNC_EVENT_CODE_QNQ);
        be_dws_cpu_to_le(ctxt, sizeof(req->context));

        be_cmd_page_addrs_prepare(req->pages, ARRAY_SIZE(req->pages), q_mem);

        status = be_mbox_notify_wait(adapter);
        if (!status) {
                struct be_cmd_resp_mcc_create *resp = embedded_payload(wrb);
                mccq->id = le16_to_cpu(resp->id);
                mccq->created = true;
        }
        mutex_unlock(&adapter->mbox_lock);

        return status;
}
1070
/* Creates the MCC queue using the original (pre-EXT) MCC_CREATE command.
 * Fallback for firmware that does not support MCC_CREATE_EXT; offers no
 * async-event subscription control.  Uses the Mbox.
 * On success mccq->id and mccq->created are updated.
 */
int be_cmd_mccq_org_create(struct be_adapter *adapter,
                        struct be_queue_info *mccq,
                        struct be_queue_info *cq)
{
        struct be_mcc_wrb *wrb;
        struct be_cmd_req_mcc_create *req;
        struct be_dma_mem *q_mem = &mccq->dma_mem;
        void *ctxt;
        int status;

        if (mutex_lock_interruptible(&adapter->mbox_lock))
                return -1;

        wrb = wrb_from_mbox(adapter);
        req = embedded_payload(wrb);
        ctxt = &req->context;

        be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
                        OPCODE_COMMON_MCC_CREATE, sizeof(*req), wrb, NULL);

        req->num_pages = cpu_to_le16(PAGES_4K_SPANNED(q_mem->va, q_mem->size));

        AMAP_SET_BITS(struct amap_mcc_context_be, valid, ctxt, 1);
        AMAP_SET_BITS(struct amap_mcc_context_be, ring_size, ctxt,
                        be_encoded_q_len(mccq->len));
        AMAP_SET_BITS(struct amap_mcc_context_be, cq_id, ctxt, cq->id);

        /* Context words go out to HW in little-endian */
        be_dws_cpu_to_le(ctxt, sizeof(req->context));

        be_cmd_page_addrs_prepare(req->pages, ARRAY_SIZE(req->pages), q_mem);

        status = be_mbox_notify_wait(adapter);
        if (!status) {
                struct be_cmd_resp_mcc_create *resp = embedded_payload(wrb);
                mccq->id = le16_to_cpu(resp->id);
                mccq->created = true;
        }

        mutex_unlock(&adapter->mbox_lock);
        return status;
}
1112
1113int be_cmd_mccq_create(struct be_adapter *adapter,
1114                        struct be_queue_info *mccq,
1115                        struct be_queue_info *cq)
1116{
1117        int status;
1118
1119        status = be_cmd_mccq_ext_create(adapter, mccq, cq);
1120        if (status && !lancer_chip(adapter)) {
1121                dev_warn(&adapter->pdev->dev, "Upgrade to F/W ver 2.102.235.0 "
1122                        "or newer to avoid conflicting priorities between NIC "
1123                        "and FCoE traffic");
1124                status = be_cmd_mccq_org_create(adapter, mccq, cq);
1125        }
1126        return status;
1127}
1128
/* Creates a TX queue (ETH_TX_CREATE).  The command version depends on the
 * ASIC: Lancer uses v1, Skyhawk (and "SuperNIC" BEx functions) use v2;
 * v2 responses additionally report the doorbell offset to use.
 * Uses MCC.  On success txq->id, txq->created and txo->db_offset are set.
 */
int be_cmd_txq_create(struct be_adapter *adapter, struct be_tx_obj *txo)
{
        struct be_mcc_wrb *wrb;
        struct be_cmd_req_eth_tx_create *req;
        struct be_queue_info *txq = &txo->q;
        struct be_queue_info *cq = &txo->cq;
        struct be_dma_mem *q_mem = &txq->dma_mem;
        int status, ver = 0;

        spin_lock_bh(&adapter->mcc_lock);

        wrb = wrb_from_mccq(adapter);
        if (!wrb) {
                status = -EBUSY;
                goto err;
        }

        req = embedded_payload(wrb);

        be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ETH,
                OPCODE_ETH_TX_CREATE, sizeof(*req), wrb, NULL);

        if (lancer_chip(adapter)) {
                req->hdr.version = 1;
                req->if_id = cpu_to_le16(adapter->if_handle);
        } else if (BEx_chip(adapter)) {
                if (adapter->function_caps & BE_FUNCTION_CAPS_SUPER_NIC)
                        req->hdr.version = 2;
        } else { /* For SH */
                req->hdr.version = 2;
        }

        req->num_pages = PAGES_4K_SPANNED(q_mem->va, q_mem->size);
        req->ulp_num = BE_ULP1_NUM;
        req->type = BE_ETH_TX_RING_TYPE_STANDARD;
        req->cq_id = cpu_to_le16(cq->id);
        req->queue_size = be_encoded_q_len(txq->len);
        be_cmd_page_addrs_prepare(req->pages, ARRAY_SIZE(req->pages), q_mem);

        /* Snapshot the version now: the response overwrites the embedded
         * WRB payload that req points into.
         */
        ver = req->hdr.version;

        status = be_mcc_notify_wait(adapter);
        if (!status) {
                struct be_cmd_resp_eth_tx_create *resp = embedded_payload(wrb);
                txq->id = le16_to_cpu(resp->cid);
                if (ver == 2)
                        txo->db_offset = le32_to_cpu(resp->db_offset);
                else
                        txo->db_offset = DB_TXULP1_OFFSET;
                txq->created = true;
        }

err:
        spin_unlock_bh(&adapter->mcc_lock);

        return status;
}
1186
1187/* Uses MCC */
1188int be_cmd_rxq_create(struct be_adapter *adapter,
1189                struct be_queue_info *rxq, u16 cq_id, u16 frag_size,
1190                u32 if_id, u32 rss, u8 *rss_id)
1191{
1192        struct be_mcc_wrb *wrb;
1193        struct be_cmd_req_eth_rx_create *req;
1194        struct be_dma_mem *q_mem = &rxq->dma_mem;
1195        int status;
1196
1197        spin_lock_bh(&adapter->mcc_lock);
1198
1199        wrb = wrb_from_mccq(adapter);
1200        if (!wrb) {
1201                status = -EBUSY;
1202                goto err;
1203        }
1204        req = embedded_payload(wrb);
1205
1206        be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ETH,
1207                                OPCODE_ETH_RX_CREATE, sizeof(*req), wrb, NULL);
1208
1209        req->cq_id = cpu_to_le16(cq_id);
1210        req->frag_size = fls(frag_size) - 1;
1211        req->num_pages = 2;
1212        be_cmd_page_addrs_prepare(req->pages, ARRAY_SIZE(req->pages), q_mem);
1213        req->interface_id = cpu_to_le32(if_id);
1214        req->max_frame_size = cpu_to_le16(BE_MAX_JUMBO_FRAME_SIZE);
1215        req->rss_queue = cpu_to_le32(rss);
1216
1217        status = be_mcc_notify_wait(adapter);
1218        if (!status) {
1219                struct be_cmd_resp_eth_rx_create *resp = embedded_payload(wrb);
1220                rxq->id = le16_to_cpu(resp->id);
1221                rxq->created = true;
1222                *rss_id = resp->rss_id;
1223        }
1224
1225err:
1226        spin_unlock_bh(&adapter->mcc_lock);
1227        return status;
1228}
1229
/* Generic destroyer function for all types of queues
 * Uses Mbox
 */
/* Maps queue_type to the matching subsystem/opcode pair and issues the
 * destroy command.  BUG()s on an unknown queue_type (driver bug).
 * Note: q->created is cleared even if the firmware command fails, so the
 * driver will not attempt to destroy the queue again on teardown.
 */
int be_cmd_q_destroy(struct be_adapter *adapter, struct be_queue_info *q,
                int queue_type)
{
        struct be_mcc_wrb *wrb;
        struct be_cmd_req_q_destroy *req;
        u8 subsys = 0, opcode = 0;
        int status;

        if (mutex_lock_interruptible(&adapter->mbox_lock))
                return -1;

        wrb = wrb_from_mbox(adapter);
        req = embedded_payload(wrb);

        switch (queue_type) {
        case QTYPE_EQ:
                subsys = CMD_SUBSYSTEM_COMMON;
                opcode = OPCODE_COMMON_EQ_DESTROY;
                break;
        case QTYPE_CQ:
                subsys = CMD_SUBSYSTEM_COMMON;
                opcode = OPCODE_COMMON_CQ_DESTROY;
                break;
        case QTYPE_TXQ:
                subsys = CMD_SUBSYSTEM_ETH;
                opcode = OPCODE_ETH_TX_DESTROY;
                break;
        case QTYPE_RXQ:
                subsys = CMD_SUBSYSTEM_ETH;
                opcode = OPCODE_ETH_RX_DESTROY;
                break;
        case QTYPE_MCCQ:
                subsys = CMD_SUBSYSTEM_COMMON;
                opcode = OPCODE_COMMON_MCC_DESTROY;
                break;
        default:
                BUG();
        }

        be_wrb_cmd_hdr_prepare(&req->hdr, subsys, opcode, sizeof(*req), wrb,
                                NULL);
        req->id = cpu_to_le16(q->id);

        status = be_mbox_notify_wait(adapter);
        q->created = false;

        mutex_unlock(&adapter->mbox_lock);
        return status;
}
1282
1283/* Uses MCC */
1284int be_cmd_rxq_destroy(struct be_adapter *adapter, struct be_queue_info *q)
1285{
1286        struct be_mcc_wrb *wrb;
1287        struct be_cmd_req_q_destroy *req;
1288        int status;
1289
1290        spin_lock_bh(&adapter->mcc_lock);
1291
1292        wrb = wrb_from_mccq(adapter);
1293        if (!wrb) {
1294                status = -EBUSY;
1295                goto err;
1296        }
1297        req = embedded_payload(wrb);
1298
1299        be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ETH,
1300                        OPCODE_ETH_RX_DESTROY, sizeof(*req), wrb, NULL);
1301        req->id = cpu_to_le16(q->id);
1302
1303        status = be_mcc_notify_wait(adapter);
1304        q->created = false;
1305
1306err:
1307        spin_unlock_bh(&adapter->mcc_lock);
1308        return status;
1309}
1310
1311/* Create an rx filtering policy configuration on an i/f
1312 * Uses MCCQ
1313 */
1314int be_cmd_if_create(struct be_adapter *adapter, u32 cap_flags, u32 en_flags,
1315                     u32 *if_handle, u32 domain)
1316{
1317        struct be_mcc_wrb *wrb;
1318        struct be_cmd_req_if_create *req;
1319        int status;
1320
1321        spin_lock_bh(&adapter->mcc_lock);
1322
1323        wrb = wrb_from_mccq(adapter);
1324        if (!wrb) {
1325                status = -EBUSY;
1326                goto err;
1327        }
1328        req = embedded_payload(wrb);
1329
1330        be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
1331                OPCODE_COMMON_NTWK_INTERFACE_CREATE, sizeof(*req), wrb, NULL);
1332        req->hdr.domain = domain;
1333        req->capability_flags = cpu_to_le32(cap_flags);
1334        req->enable_flags = cpu_to_le32(en_flags);
1335
1336        req->pmac_invalid = true;
1337
1338        status = be_mcc_notify_wait(adapter);
1339        if (!status) {
1340                struct be_cmd_resp_if_create *resp = embedded_payload(wrb);
1341                *if_handle = le32_to_cpu(resp->interface_id);
1342        }
1343
1344err:
1345        spin_unlock_bh(&adapter->mcc_lock);
1346        return status;
1347}
1348
1349/* Uses MCCQ */
1350int be_cmd_if_destroy(struct be_adapter *adapter, int interface_id, u32 domain)
1351{
1352        struct be_mcc_wrb *wrb;
1353        struct be_cmd_req_if_destroy *req;
1354        int status;
1355
1356        if (interface_id == -1)
1357                return 0;
1358
1359        spin_lock_bh(&adapter->mcc_lock);
1360
1361        wrb = wrb_from_mccq(adapter);
1362        if (!wrb) {
1363                status = -EBUSY;
1364                goto err;
1365        }
1366        req = embedded_payload(wrb);
1367
1368        be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
1369                OPCODE_COMMON_NTWK_INTERFACE_DESTROY, sizeof(*req), wrb, NULL);
1370        req->hdr.domain = domain;
1371        req->interface_id = cpu_to_le32(interface_id);
1372
1373        status = be_mcc_notify_wait(adapter);
1374err:
1375        spin_unlock_bh(&adapter->mcc_lock);
1376        return status;
1377}
1378
/* Get stats is a non embedded command: the request is not embedded inside
 * WRB but is a separate dma memory block
 * Uses asynchronous MCC: the request is only posted here; the response is
 * consumed later via MCC completion processing.
 */
int be_cmd_get_stats(struct be_adapter *adapter, struct be_dma_mem *nonemb_cmd)
{
        struct be_mcc_wrb *wrb;
        struct be_cmd_req_hdr *hdr;
        int status = 0;

        spin_lock_bh(&adapter->mcc_lock);

        wrb = wrb_from_mccq(adapter);
        if (!wrb) {
                status = -EBUSY;
                goto err;
        }
        hdr = nonemb_cmd->va;

        be_wrb_cmd_hdr_prepare(hdr, CMD_SUBSYSTEM_ETH,
                OPCODE_ETH_GET_STATISTICS, nonemb_cmd->size, wrb, nonemb_cmd);

        /* BE2 is the only chip that does not support v1 of this cmd */
        if (!BE2_chip(adapter))
                hdr->version = 1;

        /* Fire-and-forget; flag that a stats cmd is in flight */
        be_mcc_notify(adapter);
        adapter->stats_cmd_sent = true;

err:
        spin_unlock_bh(&adapter->mcc_lock);
        return status;
}
1412
/* Lancer Stats */
/* Lancer equivalent of be_cmd_get_stats(): posts an asynchronous
 * GET_PPORT_STATS request into the caller-provided non-embedded DMA buffer.
 * Fails with -EPERM when the function lacks the required privilege.
 */
int lancer_cmd_get_pport_stats(struct be_adapter *adapter,
                                struct be_dma_mem *nonemb_cmd)
{

        struct be_mcc_wrb *wrb;
        struct lancer_cmd_req_pport_stats *req;
        int status = 0;

        /* Privilege check: some functions may not query pport stats */
        if (!be_cmd_allowed(adapter, OPCODE_ETH_GET_PPORT_STATS,
                            CMD_SUBSYSTEM_ETH))
                return -EPERM;

        spin_lock_bh(&adapter->mcc_lock);

        wrb = wrb_from_mccq(adapter);
        if (!wrb) {
                status = -EBUSY;
                goto err;
        }
        req = nonemb_cmd->va;

        be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ETH,
                        OPCODE_ETH_GET_PPORT_STATS, nonemb_cmd->size, wrb,
                        nonemb_cmd);

        req->cmd_params.params.pport_num = cpu_to_le16(adapter->hba_port_num);
        req->cmd_params.params.reset_stats = 0;	/* read, don't clear */

        /* Fire-and-forget; flag that a stats cmd is in flight */
        be_mcc_notify(adapter);
        adapter->stats_cmd_sent = true;

err:
        spin_unlock_bh(&adapter->mcc_lock);
        return status;
}
1449
1450static int be_mac_to_link_speed(int mac_speed)
1451{
1452        switch (mac_speed) {
1453        case PHY_LINK_SPEED_ZERO:
1454                return 0;
1455        case PHY_LINK_SPEED_10MBPS:
1456                return 10;
1457        case PHY_LINK_SPEED_100MBPS:
1458                return 100;
1459        case PHY_LINK_SPEED_1GBPS:
1460                return 1000;
1461        case PHY_LINK_SPEED_10GBPS:
1462                return 10000;
1463        }
1464        return 0;
1465}
1466
/* Uses synchronous mcc
 * Returns link_speed in Mbps
 */
/* Both output pointers are optional; *link_status is preset to LINK_DOWN so
 * callers see a sane value even if the command cannot be issued.
 */
int be_cmd_link_status_query(struct be_adapter *adapter, u16 *link_speed,
                             u8 *link_status, u32 dom)
{
        struct be_mcc_wrb *wrb;
        struct be_cmd_req_link_status *req;
        int status;

        spin_lock_bh(&adapter->mcc_lock);

        if (link_status)
                *link_status = LINK_DOWN;

        wrb = wrb_from_mccq(adapter);
        if (!wrb) {
                status = -EBUSY;
                goto err;
        }
        req = embedded_payload(wrb);

        be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
                OPCODE_COMMON_NTWK_LINK_STATUS_QUERY, sizeof(*req), wrb, NULL);

        /* BE2 is the only chip that does not support v1 of this cmd */
        if (!BE2_chip(adapter))
                req->hdr.version = 1;

        req->hdr.domain = dom;

        status = be_mcc_notify_wait(adapter);
        if (!status) {
                struct be_cmd_resp_link_status *resp = embedded_payload(wrb);
                if (link_speed) {
                        /* Prefer the explicit link_speed field (units of
                         * 10 Mbps); fall back to decoding mac_speed.
                         */
                        *link_speed = resp->link_speed ?
                                      le16_to_cpu(resp->link_speed) * 10 :
                                      be_mac_to_link_speed(resp->mac_speed);

                        if (!resp->logical_link_status)
                                *link_speed = 0;
                }
                if (link_status)
                        *link_status = resp->logical_link_status;
        }

err:
        spin_unlock_bh(&adapter->mcc_lock);
        return status;
}
1517
1518/* Uses synchronous mcc */
1519int be_cmd_get_die_temperature(struct be_adapter *adapter)
1520{
1521        struct be_mcc_wrb *wrb;
1522        struct be_cmd_req_get_cntl_addnl_attribs *req;
1523        int status;
1524
1525        spin_lock_bh(&adapter->mcc_lock);
1526
1527        wrb = wrb_from_mccq(adapter);
1528        if (!wrb) {
1529                status = -EBUSY;
1530                goto err;
1531        }
1532        req = embedded_payload(wrb);
1533
1534        be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
1535                OPCODE_COMMON_GET_CNTL_ADDITIONAL_ATTRIBUTES, sizeof(*req),
1536                wrb, NULL);
1537
1538        be_mcc_notify(adapter);
1539
1540err:
1541        spin_unlock_bh(&adapter->mcc_lock);
1542        return status;
1543}
1544
1545/* Uses synchronous mcc */
1546int be_cmd_get_reg_len(struct be_adapter *adapter, u32 *log_size)
1547{
1548        struct be_mcc_wrb *wrb;
1549        struct be_cmd_req_get_fat *req;
1550        int status;
1551
1552        spin_lock_bh(&adapter->mcc_lock);
1553
1554        wrb = wrb_from_mccq(adapter);
1555        if (!wrb) {
1556                status = -EBUSY;
1557                goto err;
1558        }
1559        req = embedded_payload(wrb);
1560
1561        be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
1562                OPCODE_COMMON_MANAGE_FAT, sizeof(*req), wrb, NULL);
1563        req->fat_operation = cpu_to_le32(QUERY_FAT);
1564        status = be_mcc_notify_wait(adapter);
1565        if (!status) {
1566                struct be_cmd_resp_get_fat *resp = embedded_payload(wrb);
1567                if (log_size && resp->log_size)
1568                        *log_size = le32_to_cpu(resp->log_size) -
1569                                        sizeof(u32);
1570        }
1571err:
1572        spin_unlock_bh(&adapter->mcc_lock);
1573        return status;
1574}
1575
1576void be_cmd_get_regs(struct be_adapter *adapter, u32 buf_len, void *buf)
1577{
1578        struct be_dma_mem get_fat_cmd;
1579        struct be_mcc_wrb *wrb;
1580        struct be_cmd_req_get_fat *req;
1581        u32 offset = 0, total_size, buf_size,
1582                                log_offset = sizeof(u32), payload_len;
1583        int status;
1584
1585        if (buf_len == 0)
1586                return;
1587
1588        total_size = buf_len;
1589
1590        get_fat_cmd.size = sizeof(struct be_cmd_req_get_fat) + 60*1024;
1591        get_fat_cmd.va = pci_alloc_consistent(adapter->pdev,
1592                        get_fat_cmd.size,
1593                        &get_fat_cmd.dma);
1594        if (!get_fat_cmd.va) {
1595                status = -ENOMEM;
1596                dev_err(&adapter->pdev->dev,
1597                "Memory allocation failure while retrieving FAT data\n");
1598                return;
1599        }
1600
1601        spin_lock_bh(&adapter->mcc_lock);
1602
1603        while (total_size) {
1604                buf_size = min(total_size, (u32)60*1024);
1605                total_size -= buf_size;
1606
1607                wrb = wrb_from_mccq(adapter);
1608                if (!wrb) {
1609                        status = -EBUSY;
1610                        goto err;
1611                }
1612                req = get_fat_cmd.va;
1613
1614                payload_len = sizeof(struct be_cmd_req_get_fat) + buf_size;
1615                be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
1616                                OPCODE_COMMON_MANAGE_FAT, payload_len, wrb,
1617                                &get_fat_cmd);
1618
1619                req->fat_operation = cpu_to_le32(RETRIEVE_FAT);
1620                req->read_log_offset = cpu_to_le32(log_offset);
1621                req->read_log_length = cpu_to_le32(buf_size);
1622                req->data_buffer_size = cpu_to_le32(buf_size);
1623
1624                status = be_mcc_notify_wait(adapter);
1625                if (!status) {
1626                        struct be_cmd_resp_get_fat *resp = get_fat_cmd.va;
1627                        memcpy(buf + offset,
1628                                resp->data_buffer,
1629                                le32_to_cpu(resp->read_log_length));
1630                } else {
1631                        dev_err(&adapter->pdev->dev, "FAT Table Retrieve error\n");
1632                        goto err;
1633                }
1634                offset += buf_size;
1635                log_offset += buf_size;
1636        }
1637err:
1638        pci_free_consistent(adapter->pdev, get_fat_cmd.size,
1639                        get_fat_cmd.va,
1640                        get_fat_cmd.dma);
1641        spin_unlock_bh(&adapter->mcc_lock);
1642}
1643
/* Uses synchronous mcc */
/* Copies the running firmware version string into fw_ver and, if fw_on_flash
 * is non-NULL, the on-flash version string into it.
 * NOTE(review): the strcpy()s assume the FW-provided strings are
 * NUL-terminated and that the callers' buffers are large enough (FW_VER_LEN)
 * -- confirm at the call sites.
 */
int be_cmd_get_fw_ver(struct be_adapter *adapter, char *fw_ver,
                        char *fw_on_flash)
{
        struct be_mcc_wrb *wrb;
        struct be_cmd_req_get_fw_version *req;
        int status;

        spin_lock_bh(&adapter->mcc_lock);

        wrb = wrb_from_mccq(adapter);
        if (!wrb) {
                status = -EBUSY;
                goto err;
        }

        req = embedded_payload(wrb);

        be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
                OPCODE_COMMON_GET_FW_VERSION, sizeof(*req), wrb, NULL);
        status = be_mcc_notify_wait(adapter);
        if (!status) {
                struct be_cmd_resp_get_fw_version *resp = embedded_payload(wrb);
                strcpy(fw_ver, resp->firmware_version_string);
                if (fw_on_flash)
                        strcpy(fw_on_flash, resp->fw_on_flash_version_string);
        }
err:
        spin_unlock_bh(&adapter->mcc_lock);
        return status;
}
1675
1676/* set the EQ delay interval of an EQ to specified value
1677 * Uses async mcc
1678 */
1679int be_cmd_modify_eqd(struct be_adapter *adapter, u32 eq_id, u32 eqd)
1680{
1681        struct be_mcc_wrb *wrb;
1682        struct be_cmd_req_modify_eq_delay *req;
1683        int status = 0;
1684
1685        spin_lock_bh(&adapter->mcc_lock);
1686
1687        wrb = wrb_from_mccq(adapter);
1688        if (!wrb) {
1689                status = -EBUSY;
1690                goto err;
1691        }
1692        req = embedded_payload(wrb);
1693
1694        be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
1695                OPCODE_COMMON_MODIFY_EQ_DELAY, sizeof(*req), wrb, NULL);
1696
1697        req->num_eq = cpu_to_le32(1);
1698        req->delay[0].eq_id = cpu_to_le32(eq_id);
1699        req->delay[0].phase = 0;
1700        req->delay[0].delay_multiplier = cpu_to_le32(eqd);
1701
1702        be_mcc_notify(adapter);
1703
1704err:
1705        spin_unlock_bh(&adapter->mcc_lock);
1706        return status;
1707}
1708
1709/* Uses sycnhronous mcc */
1710int be_cmd_vlan_config(struct be_adapter *adapter, u32 if_id, u16 *vtag_array,
1711                        u32 num, bool untagged, bool promiscuous)
1712{
1713        struct be_mcc_wrb *wrb;
1714        struct be_cmd_req_vlan_config *req;
1715        int status;
1716
1717        spin_lock_bh(&adapter->mcc_lock);
1718
1719        wrb = wrb_from_mccq(adapter);
1720        if (!wrb) {
1721                status = -EBUSY;
1722                goto err;
1723        }
1724        req = embedded_payload(wrb);
1725
1726        be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
1727                OPCODE_COMMON_NTWK_VLAN_CONFIG, sizeof(*req), wrb, NULL);
1728
1729        req->interface_id = if_id;
1730        req->promiscuous = promiscuous;
1731        req->untagged = untagged;
1732        req->num_vlan = num;
1733        if (!promiscuous) {
1734                memcpy(req->normal_vlan, vtag_array,
1735                        req->num_vlan * sizeof(vtag_array[0]));
1736        }
1737
1738        status = be_mcc_notify_wait(adapter);
1739
1740err:
1741        spin_unlock_bh(&adapter->mcc_lock);
1742        return status;
1743}
1744
/* Programs the RX filter of the i/f based on the netdev flags:
 * IFF_PROMISC toggles full promiscuity (unicast/vlan/mcast), IFF_ALLMULTI
 * enables mcast-promiscuity, otherwise the exact multicast list from the
 * netdev is programmed.  Uses synchronous mcc with a pre-allocated
 * non-embedded DMA buffer (adapter->rx_filter).
 */
int be_cmd_rx_filter(struct be_adapter *adapter, u32 flags, u32 value)
{
        struct be_mcc_wrb *wrb;
        struct be_dma_mem *mem = &adapter->rx_filter;
        struct be_cmd_req_rx_filter *req = mem->va;
        int status;

        spin_lock_bh(&adapter->mcc_lock);

        wrb = wrb_from_mccq(adapter);
        if (!wrb) {
                status = -EBUSY;
                goto err;
        }
        /* The DMA buffer is reused across calls; clear stale contents */
        memset(req, 0, sizeof(*req));
        be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
                                OPCODE_COMMON_NTWK_RX_FILTER, sizeof(*req),
                                wrb, mem);

        req->if_id = cpu_to_le32(adapter->if_handle);
        if (flags & IFF_PROMISC) {
                /* if_flags_mask selects which bits to change; if_flags gives
                 * their new values (set only when turning promisc ON)
                 */
                req->if_flags_mask = cpu_to_le32(BE_IF_FLAGS_PROMISCUOUS |
                                        BE_IF_FLAGS_VLAN_PROMISCUOUS |
                                        BE_IF_FLAGS_MCAST_PROMISCUOUS);
                if (value == ON)
                        req->if_flags = cpu_to_le32(BE_IF_FLAGS_PROMISCUOUS |
                                                BE_IF_FLAGS_VLAN_PROMISCUOUS |
                                                BE_IF_FLAGS_MCAST_PROMISCUOUS);
        } else if (flags & IFF_ALLMULTI) {
                req->if_flags_mask = req->if_flags =
                                cpu_to_le32(BE_IF_FLAGS_MCAST_PROMISCUOUS);
        } else {
                struct netdev_hw_addr *ha;
                int i = 0;

                req->if_flags_mask = req->if_flags =
                                cpu_to_le32(BE_IF_FLAGS_MULTICAST);

                /* Reset mcast promisc mode if already set by setting mask
                 * and not setting flags field
                 */
                req->if_flags_mask |=
                        cpu_to_le32(BE_IF_FLAGS_MCAST_PROMISCUOUS &
                                    adapter->if_cap_flags);

                req->mcast_num = cpu_to_le32(netdev_mc_count(adapter->netdev));
                netdev_for_each_mc_addr(ha, adapter->netdev)
                        memcpy(req->mcast_mac[i++].byte, ha->addr, ETH_ALEN);
        }

        status = be_mcc_notify_wait(adapter);
err:
        spin_unlock_bh(&adapter->mcc_lock);
        return status;
}
1800
1801/* Uses synchrounous mcc */
1802int be_cmd_set_flow_control(struct be_adapter *adapter, u32 tx_fc, u32 rx_fc)
1803{
1804        struct be_mcc_wrb *wrb;
1805        struct be_cmd_req_set_flow_control *req;
1806        int status;
1807
1808        if (!be_cmd_allowed(adapter, OPCODE_COMMON_SET_FLOW_CONTROL,
1809                            CMD_SUBSYSTEM_COMMON))
1810                return -EPERM;
1811
1812        spin_lock_bh(&adapter->mcc_lock);
1813
1814        wrb = wrb_from_mccq(adapter);
1815        if (!wrb) {
1816                status = -EBUSY;
1817                goto err;
1818        }
1819        req = embedded_payload(wrb);
1820
1821        be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
1822                OPCODE_COMMON_SET_FLOW_CONTROL, sizeof(*req), wrb, NULL);
1823
1824        req->tx_flow_control = cpu_to_le16((u16)tx_fc);
1825        req->rx_flow_control = cpu_to_le16((u16)rx_fc);
1826
1827        status = be_mcc_notify_wait(adapter);
1828
1829err:
1830        spin_unlock_bh(&adapter->mcc_lock);
1831        return status;
1832}
1833
1834/* Uses sycn mcc */
1835int be_cmd_get_flow_control(struct be_adapter *adapter, u32 *tx_fc, u32 *rx_fc)
1836{
1837        struct be_mcc_wrb *wrb;
1838        struct be_cmd_req_get_flow_control *req;
1839        int status;
1840
1841        if (!be_cmd_allowed(adapter, OPCODE_COMMON_GET_FLOW_CONTROL,
1842                            CMD_SUBSYSTEM_COMMON))
1843                return -EPERM;
1844
1845        spin_lock_bh(&adapter->mcc_lock);
1846
1847        wrb = wrb_from_mccq(adapter);
1848        if (!wrb) {
1849                status = -EBUSY;
1850                goto err;
1851        }
1852        req = embedded_payload(wrb);
1853
1854        be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
1855                OPCODE_COMMON_GET_FLOW_CONTROL, sizeof(*req), wrb, NULL);
1856
1857        status = be_mcc_notify_wait(adapter);
1858        if (!status) {
1859                struct be_cmd_resp_get_flow_control *resp =
1860                                                embedded_payload(wrb);
1861                *tx_fc = le16_to_cpu(resp->tx_flow_control);
1862                *rx_fc = le16_to_cpu(resp->rx_flow_control);
1863        }
1864
1865err:
1866        spin_unlock_bh(&adapter->mcc_lock);
1867        return status;
1868}
1869
/* Uses mbox */
/* Query firmware configuration: physical port number, function mode,
 * function capabilities and ASIC revision.  Sleeps on the mbox mutex, so
 * it must not be called from atomic context.  Returns 0 on success, -1 if
 * interrupted while acquiring the mailbox, else the mbox/FW status.
 */
int be_cmd_query_fw_cfg(struct be_adapter *adapter, u32 *port_num,
                        u32 *mode, u32 *caps, u16 *asic_rev)
{
        struct be_mcc_wrb *wrb;
        struct be_cmd_req_query_fw_cfg *req;
        int status;

        if (mutex_lock_interruptible(&adapter->mbox_lock))
                return -1;

        wrb = wrb_from_mbox(adapter);
        req = embedded_payload(wrb);

        be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
                OPCODE_COMMON_QUERY_FIRMWARE_CONFIG, sizeof(*req), wrb, NULL);

        status = be_mbox_notify_wait(adapter);
        if (!status) {
                struct be_cmd_resp_query_fw_cfg *resp = embedded_payload(wrb);
                *port_num = le32_to_cpu(resp->phys_port);
                *mode = le32_to_cpu(resp->function_mode);
                *caps = le32_to_cpu(resp->function_caps);
                /* Only the low byte of asic_revision is kept */
                *asic_rev = le32_to_cpu(resp->asic_revision) & 0xFF;
        }

        mutex_unlock(&adapter->mbox_lock);
        return status;
}
1899
/* Uses mbox */
/* Reset this PCI function.
 *
 * Lancer chips are reset through the SLIPORT control register instead of a
 * mailbox command: wait for the port to become ready, write
 * SLI_PORT_CONTROL_IP_MASK to request the reset, then poll for the ready
 * state again.  Other chips issue COMMON_FUNCTION_RESET over the mailbox.
 */
int be_cmd_reset_function(struct be_adapter *adapter)
{
        struct be_mcc_wrb *wrb;
        struct be_cmd_req_hdr *req;
        int status;

        if (lancer_chip(adapter)) {
                status = lancer_wait_ready(adapter);
                if (!status) {
                        iowrite32(SLI_PORT_CONTROL_IP_MASK,
                                  adapter->db + SLIPORT_CONTROL_OFFSET);
                        status = lancer_test_and_set_rdy_state(adapter);
                }
                if (status) {
                        /* The port never came back ready: nothing more the
                         * driver can do to recover it here.
                         */
                        dev_err(&adapter->pdev->dev,
                                "Adapter in non recoverable error\n");
                }
                return status;
        }

        if (mutex_lock_interruptible(&adapter->mbox_lock))
                return -1;

        wrb = wrb_from_mbox(adapter);
        req = embedded_payload(wrb);

        /* FUNCTION_RESET has no payload beyond the common header */
        be_wrb_cmd_hdr_prepare(req, CMD_SUBSYSTEM_COMMON,
                OPCODE_COMMON_FUNCTION_RESET, sizeof(*req), wrb, NULL);

        status = be_mbox_notify_wait(adapter);

        mutex_unlock(&adapter->mbox_lock);
        return status;
}
1935
/* Configure RSS: enabled hash types, CPU indirection table and hash key.
 * Uses the mailbox (sleeps; not for atomic context).
 * Assumes table_size is a power of 2 (the FW is given fls(table_size) - 1,
 * i.e. log2) — confirm at callers.
 */
int be_cmd_rss_config(struct be_adapter *adapter, u8 *rsstable,
                        u32 rss_hash_opts, u16 table_size)
{
        struct be_mcc_wrb *wrb;
        struct be_cmd_req_rss_config *req;
        /* Fixed (non-random) RSS hash key programmed into the NIC */
        u32 myhash[10] = {0x15d43fa5, 0x2534685a, 0x5f87693a, 0x5668494e,
                        0x33cf6a53, 0x383334c6, 0x76ac4257, 0x59b242b2,
                        0x3ea83c02, 0x4a110304};
        int status;

        if (mutex_lock_interruptible(&adapter->mbox_lock))
                return -1;

        wrb = wrb_from_mbox(adapter);
        req = embedded_payload(wrb);

        be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ETH,
                OPCODE_ETH_RSS_CONFIG, sizeof(*req), wrb, NULL);

        req->if_id = cpu_to_le32(adapter->if_handle);
        req->enable_rss = cpu_to_le16(rss_hash_opts);
        req->cpu_table_size_log2 = cpu_to_le16(fls(table_size) - 1);

        /* Lancer and Skyhawk require v1 of this command */
        if (lancer_chip(adapter) || skyhawk_chip(adapter))
                req->hdr.version = 1;

        memcpy(req->cpu_table, rsstable, table_size);
        memcpy(req->hash, myhash, sizeof(myhash));
        be_dws_cpu_to_le(req->hash, sizeof(req->hash));

        status = be_mbox_notify_wait(adapter);

        mutex_unlock(&adapter->mbox_lock);
        return status;
}
1971
1972/* Uses sync mcc */
1973int be_cmd_set_beacon_state(struct be_adapter *adapter, u8 port_num,
1974                        u8 bcn, u8 sts, u8 state)
1975{
1976        struct be_mcc_wrb *wrb;
1977        struct be_cmd_req_enable_disable_beacon *req;
1978        int status;
1979
1980        spin_lock_bh(&adapter->mcc_lock);
1981
1982        wrb = wrb_from_mccq(adapter);
1983        if (!wrb) {
1984                status = -EBUSY;
1985                goto err;
1986        }
1987        req = embedded_payload(wrb);
1988
1989        be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
1990                OPCODE_COMMON_ENABLE_DISABLE_BEACON, sizeof(*req), wrb, NULL);
1991
1992        req->port_num = port_num;
1993        req->beacon_state = state;
1994        req->beacon_duration = bcn;
1995        req->status_duration = sts;
1996
1997        status = be_mcc_notify_wait(adapter);
1998
1999err:
2000        spin_unlock_bh(&adapter->mcc_lock);
2001        return status;
2002}
2003
2004/* Uses sync mcc */
2005int be_cmd_get_beacon_state(struct be_adapter *adapter, u8 port_num, u32 *state)
2006{
2007        struct be_mcc_wrb *wrb;
2008        struct be_cmd_req_get_beacon_state *req;
2009        int status;
2010
2011        spin_lock_bh(&adapter->mcc_lock);
2012
2013        wrb = wrb_from_mccq(adapter);
2014        if (!wrb) {
2015                status = -EBUSY;
2016                goto err;
2017        }
2018        req = embedded_payload(wrb);
2019
2020        be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
2021                OPCODE_COMMON_GET_BEACON_STATE, sizeof(*req), wrb, NULL);
2022
2023        req->port_num = port_num;
2024
2025        status = be_mcc_notify_wait(adapter);
2026        if (!status) {
2027                struct be_cmd_resp_get_beacon_state *resp =
2028                                                embedded_payload(wrb);
2029                *state = resp->beacon_state;
2030        }
2031
2032err:
2033        spin_unlock_bh(&adapter->mcc_lock);
2034        return status;
2035}
2036
/* Write a chunk of data to a named flash object on Lancer.
 *
 * The request is embedded in the wrb; the data itself must already be in
 * *cmd immediately after the request structure (addr_low/high point there).
 * The command is fired asynchronously (be_mcc_notify) and the MCC completion
 * path signals adapter->flash_compl; we wait up to 60s for it.
 * data_size == 0 marks end-of-file for the object.
 *
 * On success *data_written and *change_status are filled in; on failure
 * *addn_status carries the FW's additional status.
 */
int lancer_cmd_write_object(struct be_adapter *adapter, struct be_dma_mem *cmd,
                            u32 data_size, u32 data_offset,
                            const char *obj_name, u32 *data_written,
                            u8 *change_status, u8 *addn_status)
{
        struct be_mcc_wrb *wrb;
        struct lancer_cmd_req_write_object *req;
        struct lancer_cmd_resp_write_object *resp;
        void *ctxt = NULL;
        int status;

        spin_lock_bh(&adapter->mcc_lock);
        adapter->flash_status = 0;

        wrb = wrb_from_mccq(adapter);
        if (!wrb) {
                status = -EBUSY;
                goto err_unlock;
        }

        req = embedded_payload(wrb);

        be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
                                OPCODE_COMMON_WRITE_OBJECT,
                                sizeof(struct lancer_cmd_req_write_object), wrb,
                                NULL);

        ctxt = &req->context;
        AMAP_SET_BITS(struct amap_lancer_write_obj_context,
                        write_length, ctxt, data_size);

        /* A zero-length write is the EOF marker for this object */
        if (data_size == 0)
                AMAP_SET_BITS(struct amap_lancer_write_obj_context,
                                eof, ctxt, 1);
        else
                AMAP_SET_BITS(struct amap_lancer_write_obj_context,
                                eof, ctxt, 0);

        be_dws_cpu_to_le(ctxt, sizeof(req->context));
        req->write_offset = cpu_to_le32(data_offset);
        /* NOTE(review): unbounded strcpy — obj_name is assumed to fit in
         * req->object_name; confirm at callers.
         */
        strcpy(req->object_name, obj_name);
        req->descriptor_count = cpu_to_le32(1);
        req->buf_len = cpu_to_le32(data_size);
        /* Data follows the request structure in the caller's DMA buffer */
        req->addr_low = cpu_to_le32((cmd->dma +
                                sizeof(struct lancer_cmd_req_write_object))
                                & 0xFFFFFFFF);
        req->addr_high = cpu_to_le32(upper_32_bits(cmd->dma +
                                sizeof(struct lancer_cmd_req_write_object)));

        /* Fire asynchronously; completion is reported via flash_compl */
        be_mcc_notify(adapter);
        spin_unlock_bh(&adapter->mcc_lock);

        if (!wait_for_completion_timeout(&adapter->flash_compl,
                                         msecs_to_jiffies(60000)))
                status = -1;
        else
                status = adapter->flash_status;

        /* NOTE(review): resp is read after mcc_lock was dropped, and on
         * timeout additional_status may not have been written by FW —
         * confirm the wrb cannot be reused before this point.
         */
        resp = embedded_payload(wrb);
        if (!status) {
                *data_written = le32_to_cpu(resp->actual_write_len);
                *change_status = resp->change_status;
        } else {
                *addn_status = resp->additional_status;
        }

        return status;

err_unlock:
        spin_unlock_bh(&adapter->mcc_lock);
        return status;
}
2109
2110int lancer_cmd_read_object(struct be_adapter *adapter, struct be_dma_mem *cmd,
2111                u32 data_size, u32 data_offset, const char *obj_name,
2112                u32 *data_read, u32 *eof, u8 *addn_status)
2113{
2114        struct be_mcc_wrb *wrb;
2115        struct lancer_cmd_req_read_object *req;
2116        struct lancer_cmd_resp_read_object *resp;
2117        int status;
2118
2119        spin_lock_bh(&adapter->mcc_lock);
2120
2121        wrb = wrb_from_mccq(adapter);
2122        if (!wrb) {
2123                status = -EBUSY;
2124                goto err_unlock;
2125        }
2126
2127        req = embedded_payload(wrb);
2128
2129        be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
2130                        OPCODE_COMMON_READ_OBJECT,
2131                        sizeof(struct lancer_cmd_req_read_object), wrb,
2132                        NULL);
2133
2134        req->desired_read_len = cpu_to_le32(data_size);
2135        req->read_offset = cpu_to_le32(data_offset);
2136        strcpy(req->object_name, obj_name);
2137        req->descriptor_count = cpu_to_le32(1);
2138        req->buf_len = cpu_to_le32(data_size);
2139        req->addr_low = cpu_to_le32((cmd->dma & 0xFFFFFFFF));
2140        req->addr_high = cpu_to_le32(upper_32_bits(cmd->dma));
2141
2142        status = be_mcc_notify_wait(adapter);
2143
2144        resp = embedded_payload(wrb);
2145        if (!status) {
2146                *data_read = le32_to_cpu(resp->actual_read_len);
2147                *eof = le32_to_cpu(resp->eof);
2148        } else {
2149                *addn_status = resp->additional_status;
2150        }
2151
2152err_unlock:
2153        spin_unlock_bh(&adapter->mcc_lock);
2154        return status;
2155}
2156
/* Issue a flashrom write operation; flash_type/flash_opcode select the
 * region and operation.  Request and data live in the caller's DMA buffer
 * *cmd.  The command is fired asynchronously (be_mcc_notify) and completion
 * is signalled via adapter->flash_compl; we wait up to 40s and return -1 on
 * timeout, else the status reported by the completion handler.
 */
int be_cmd_write_flashrom(struct be_adapter *adapter, struct be_dma_mem *cmd,
                        u32 flash_type, u32 flash_opcode, u32 buf_size)
{
        struct be_mcc_wrb *wrb;
        struct be_cmd_write_flashrom *req;
        int status;

        spin_lock_bh(&adapter->mcc_lock);
        adapter->flash_status = 0;

        wrb = wrb_from_mccq(adapter);
        if (!wrb) {
                status = -EBUSY;
                goto err_unlock;
        }
        req = cmd->va;

        be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
                OPCODE_COMMON_WRITE_FLASHROM, cmd->size, wrb, cmd);

        req->params.op_type = cpu_to_le32(flash_type);
        req->params.op_code = cpu_to_le32(flash_opcode);
        req->params.data_buf_size = cpu_to_le32(buf_size);

        /* Fire asynchronously; completion is reported via flash_compl */
        be_mcc_notify(adapter);
        spin_unlock_bh(&adapter->mcc_lock);

        if (!wait_for_completion_timeout(&adapter->flash_compl,
                        msecs_to_jiffies(40000)))
                status = -1;
        else
                status = adapter->flash_status;

        return status;

err_unlock:
        spin_unlock_bh(&adapter->mcc_lock);
        return status;
}
2196
2197int be_cmd_get_flash_crc(struct be_adapter *adapter, u8 *flashed_crc,
2198                         int offset)
2199{
2200        struct be_mcc_wrb *wrb;
2201        struct be_cmd_read_flash_crc *req;
2202        int status;
2203
2204        spin_lock_bh(&adapter->mcc_lock);
2205
2206        wrb = wrb_from_mccq(adapter);
2207        if (!wrb) {
2208                status = -EBUSY;
2209                goto err;
2210        }
2211        req = embedded_payload(wrb);
2212
2213        be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
2214                               OPCODE_COMMON_READ_FLASHROM, sizeof(*req),
2215                               wrb, NULL);
2216
2217        req->params.op_type = cpu_to_le32(OPTYPE_REDBOOT);
2218        req->params.op_code = cpu_to_le32(FLASHROM_OPER_REPORT);
2219        req->params.offset = cpu_to_le32(offset);
2220        req->params.data_buf_size = cpu_to_le32(0x4);
2221
2222        status = be_mcc_notify_wait(adapter);
2223        if (!status)
2224                memcpy(flashed_crc, req->crc, 4);
2225
2226err:
2227        spin_unlock_bh(&adapter->mcc_lock);
2228        return status;
2229}
2230
2231int be_cmd_enable_magic_wol(struct be_adapter *adapter, u8 *mac,
2232                                struct be_dma_mem *nonemb_cmd)
2233{
2234        struct be_mcc_wrb *wrb;
2235        struct be_cmd_req_acpi_wol_magic_config *req;
2236        int status;
2237
2238        spin_lock_bh(&adapter->mcc_lock);
2239
2240        wrb = wrb_from_mccq(adapter);
2241        if (!wrb) {
2242                status = -EBUSY;
2243                goto err;
2244        }
2245        req = nonemb_cmd->va;
2246
2247        be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ETH,
2248                OPCODE_ETH_ACPI_WOL_MAGIC_CONFIG, sizeof(*req), wrb,
2249                nonemb_cmd);
2250        memcpy(req->magic_mac, mac, ETH_ALEN);
2251
2252        status = be_mcc_notify_wait(adapter);
2253
2254err:
2255        spin_unlock_bh(&adapter->mcc_lock);
2256        return status;
2257}
2258
2259int be_cmd_set_loopback(struct be_adapter *adapter, u8 port_num,
2260                        u8 loopback_type, u8 enable)
2261{
2262        struct be_mcc_wrb *wrb;
2263        struct be_cmd_req_set_lmode *req;
2264        int status;
2265
2266        spin_lock_bh(&adapter->mcc_lock);
2267
2268        wrb = wrb_from_mccq(adapter);
2269        if (!wrb) {
2270                status = -EBUSY;
2271                goto err;
2272        }
2273
2274        req = embedded_payload(wrb);
2275
2276        be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_LOWLEVEL,
2277                        OPCODE_LOWLEVEL_SET_LOOPBACK_MODE, sizeof(*req), wrb,
2278                        NULL);
2279
2280        req->src_port = port_num;
2281        req->dest_port = port_num;
2282        req->loopback_type = loopback_type;
2283        req->loopback_state = enable;
2284
2285        status = be_mcc_notify_wait(adapter);
2286err:
2287        spin_unlock_bh(&adapter->mcc_lock);
2288        return status;
2289}
2290
2291int be_cmd_loopback_test(struct be_adapter *adapter, u32 port_num,
2292                u32 loopback_type, u32 pkt_size, u32 num_pkts, u64 pattern)
2293{
2294        struct be_mcc_wrb *wrb;
2295        struct be_cmd_req_loopback_test *req;
2296        int status;
2297
2298        spin_lock_bh(&adapter->mcc_lock);
2299
2300        wrb = wrb_from_mccq(adapter);
2301        if (!wrb) {
2302                status = -EBUSY;
2303                goto err;
2304        }
2305
2306        req = embedded_payload(wrb);
2307
2308        be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_LOWLEVEL,
2309                        OPCODE_LOWLEVEL_LOOPBACK_TEST, sizeof(*req), wrb, NULL);
2310        req->hdr.timeout = cpu_to_le32(4);
2311
2312        req->pattern = cpu_to_le64(pattern);
2313        req->src_port = cpu_to_le32(port_num);
2314        req->dest_port = cpu_to_le32(port_num);
2315        req->pkt_size = cpu_to_le32(pkt_size);
2316        req->num_pkts = cpu_to_le32(num_pkts);
2317        req->loopback_type = cpu_to_le32(loopback_type);
2318
2319        status = be_mcc_notify_wait(adapter);
2320        if (!status) {
2321                struct be_cmd_resp_loopback_test *resp = embedded_payload(wrb);
2322                status = le32_to_cpu(resp->status);
2323        }
2324
2325err:
2326        spin_unlock_bh(&adapter->mcc_lock);
2327        return status;
2328}
2329
2330int be_cmd_ddr_dma_test(struct be_adapter *adapter, u64 pattern,
2331                                u32 byte_cnt, struct be_dma_mem *cmd)
2332{
2333        struct be_mcc_wrb *wrb;
2334        struct be_cmd_req_ddrdma_test *req;
2335        int status;
2336        int i, j = 0;
2337
2338        spin_lock_bh(&adapter->mcc_lock);
2339
2340        wrb = wrb_from_mccq(adapter);
2341        if (!wrb) {
2342                status = -EBUSY;
2343                goto err;
2344        }
2345        req = cmd->va;
2346        be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_LOWLEVEL,
2347                        OPCODE_LOWLEVEL_HOST_DDR_DMA, cmd->size, wrb, cmd);
2348
2349        req->pattern = cpu_to_le64(pattern);
2350        req->byte_count = cpu_to_le32(byte_cnt);
2351        for (i = 0; i < byte_cnt; i++) {
2352                req->snd_buff[i] = (u8)(pattern >> (j*8));
2353                j++;
2354                if (j > 7)
2355                        j = 0;
2356        }
2357
2358        status = be_mcc_notify_wait(adapter);
2359
2360        if (!status) {
2361                struct be_cmd_resp_ddrdma_test *resp;
2362                resp = cmd->va;
2363                if ((memcmp(resp->rcv_buff, req->snd_buff, byte_cnt) != 0) ||
2364                                resp->snd_err) {
2365                        status = -1;
2366                }
2367        }
2368
2369err:
2370        spin_unlock_bh(&adapter->mcc_lock);
2371        return status;
2372}
2373
2374int be_cmd_get_seeprom_data(struct be_adapter *adapter,
2375                                struct be_dma_mem *nonemb_cmd)
2376{
2377        struct be_mcc_wrb *wrb;
2378        struct be_cmd_req_seeprom_read *req;
2379        int status;
2380
2381        spin_lock_bh(&adapter->mcc_lock);
2382
2383        wrb = wrb_from_mccq(adapter);
2384        if (!wrb) {
2385                status = -EBUSY;
2386                goto err;
2387        }
2388        req = nonemb_cmd->va;
2389
2390        be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
2391                        OPCODE_COMMON_SEEPROM_READ, sizeof(*req), wrb,
2392                        nonemb_cmd);
2393
2394        status = be_mcc_notify_wait(adapter);
2395
2396err:
2397        spin_unlock_bh(&adapter->mcc_lock);
2398        return status;
2399}
2400
2401int be_cmd_get_phy_info(struct be_adapter *adapter)
2402{
2403        struct be_mcc_wrb *wrb;
2404        struct be_cmd_req_get_phy_info *req;
2405        struct be_dma_mem cmd;
2406        int status;
2407
2408        if (!be_cmd_allowed(adapter, OPCODE_COMMON_GET_PHY_DETAILS,
2409                            CMD_SUBSYSTEM_COMMON))
2410                return -EPERM;
2411
2412        spin_lock_bh(&adapter->mcc_lock);
2413
2414        wrb = wrb_from_mccq(adapter);
2415        if (!wrb) {
2416                status = -EBUSY;
2417                goto err;
2418        }
2419        cmd.size = sizeof(struct be_cmd_req_get_phy_info);
2420        cmd.va = pci_alloc_consistent(adapter->pdev, cmd.size,
2421                                        &cmd.dma);
2422        if (!cmd.va) {
2423                dev_err(&adapter->pdev->dev, "Memory alloc failure\n");
2424                status = -ENOMEM;
2425                goto err;
2426        }
2427
2428        req = cmd.va;
2429
2430        be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
2431                        OPCODE_COMMON_GET_PHY_DETAILS, sizeof(*req),
2432                        wrb, &cmd);
2433
2434        status = be_mcc_notify_wait(adapter);
2435        if (!status) {
2436                struct be_phy_info *resp_phy_info =
2437                                cmd.va + sizeof(struct be_cmd_req_hdr);
2438                adapter->phy.phy_type = le16_to_cpu(resp_phy_info->phy_type);
2439                adapter->phy.interface_type =
2440                        le16_to_cpu(resp_phy_info->interface_type);
2441                adapter->phy.auto_speeds_supported =
2442                        le16_to_cpu(resp_phy_info->auto_speeds_supported);
2443                adapter->phy.fixed_speeds_supported =
2444                        le16_to_cpu(resp_phy_info->fixed_speeds_supported);
2445                adapter->phy.misc_params =
2446                        le32_to_cpu(resp_phy_info->misc_params);
2447        }
2448        pci_free_consistent(adapter->pdev, cmd.size,
2449                                cmd.va, cmd.dma);
2450err:
2451        spin_unlock_bh(&adapter->mcc_lock);
2452        return status;
2453}
2454
2455int be_cmd_set_qos(struct be_adapter *adapter, u32 bps, u32 domain)
2456{
2457        struct be_mcc_wrb *wrb;
2458        struct be_cmd_req_set_qos *req;
2459        int status;
2460
2461        spin_lock_bh(&adapter->mcc_lock);
2462
2463        wrb = wrb_from_mccq(adapter);
2464        if (!wrb) {
2465                status = -EBUSY;
2466                goto err;
2467        }
2468
2469        req = embedded_payload(wrb);
2470
2471        be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
2472                        OPCODE_COMMON_SET_QOS, sizeof(*req), wrb, NULL);
2473
2474        req->hdr.domain = domain;
2475        req->valid_bits = cpu_to_le32(BE_QOS_BITS_NIC);
2476        req->max_bps_nic = cpu_to_le32(bps);
2477
2478        status = be_mcc_notify_wait(adapter);
2479
2480err:
2481        spin_unlock_bh(&adapter->mcc_lock);
2482        return status;
2483}
2484
/* Query controller attributes over the mailbox and record the HBA physical
 * port number in adapter->hba_port_num.  Sleeps (mbox mutex + coherent DMA
 * allocation), so it must not be called from atomic context.  Returns 0 on
 * success, -1 if interrupted on the mutex, -ENOMEM/-EBUSY, or the mbox/FW
 * status.
 */
int be_cmd_get_cntl_attributes(struct be_adapter *adapter)
{
        struct be_mcc_wrb *wrb;
        struct be_cmd_req_cntl_attribs *req;
        struct be_cmd_resp_cntl_attribs *resp;
        int status;
        /* The DMA buffer must be big enough for whichever of request or
         * response is larger.
         */
        int payload_len = max(sizeof(*req), sizeof(*resp));
        struct mgmt_controller_attrib *attribs;
        struct be_dma_mem attribs_cmd;

        if (mutex_lock_interruptible(&adapter->mbox_lock))
                return -1;

        /* Zeroing attribs_cmd lets the err path test .va to decide
         * whether the buffer was ever allocated.
         */
        memset(&attribs_cmd, 0, sizeof(struct be_dma_mem));
        attribs_cmd.size = sizeof(struct be_cmd_resp_cntl_attribs);
        attribs_cmd.va = pci_alloc_consistent(adapter->pdev, attribs_cmd.size,
                                                &attribs_cmd.dma);
        if (!attribs_cmd.va) {
                dev_err(&adapter->pdev->dev,
                                "Memory allocation failure\n");
                status = -ENOMEM;
                goto err;
        }

        wrb = wrb_from_mbox(adapter);
        if (!wrb) {
                status = -EBUSY;
                goto err;
        }
        req = attribs_cmd.va;

        be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
                         OPCODE_COMMON_GET_CNTL_ATTRIBUTES, payload_len, wrb,
                        &attribs_cmd);

        status = be_mbox_notify_wait(adapter);
        if (!status) {
                /* Attributes follow the response header in the buffer */
                attribs = attribs_cmd.va + sizeof(struct be_cmd_resp_hdr);
                adapter->hba_port_num = attribs->hba_attribs.phy_port;
        }

err:
        mutex_unlock(&adapter->mbox_lock);
        if (attribs_cmd.va)
                pci_free_consistent(adapter->pdev, attribs_cmd.size,
                                    attribs_cmd.va, attribs_cmd.dma);
        return status;
}
2533
2534/* Uses mbox */
2535int be_cmd_req_native_mode(struct be_adapter *adapter)
2536{
2537        struct be_mcc_wrb *wrb;
2538        struct be_cmd_req_set_func_cap *req;
2539        int status;
2540
2541        if (mutex_lock_interruptible(&adapter->mbox_lock))
2542                return -1;
2543
2544        wrb = wrb_from_mbox(adapter);
2545        if (!wrb) {
2546                status = -EBUSY;
2547                goto err;
2548        }
2549
2550        req = embedded_payload(wrb);
2551
2552        be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
2553                OPCODE_COMMON_SET_DRIVER_FUNCTION_CAP, sizeof(*req), wrb, NULL);
2554
2555        req->valid_cap_flags = cpu_to_le32(CAPABILITY_SW_TIMESTAMPS |
2556                                CAPABILITY_BE3_NATIVE_ERX_API);
2557        req->cap_flags = cpu_to_le32(CAPABILITY_BE3_NATIVE_ERX_API);
2558
2559        status = be_mbox_notify_wait(adapter);
2560        if (!status) {
2561                struct be_cmd_resp_set_func_cap *resp = embedded_payload(wrb);
2562                adapter->be3_native = le32_to_cpu(resp->cap_flags) &
2563                                        CAPABILITY_BE3_NATIVE_ERX_API;
2564                if (!adapter->be3_native)
2565                        dev_warn(&adapter->pdev->dev,
2566                                 "adapter not in advanced mode\n");
2567        }
2568err:
2569        mutex_unlock(&adapter->mbox_lock);
2570        return status;
2571}
2572
2573/* Get privilege(s) for a function */
2574int be_cmd_get_fn_privileges(struct be_adapter *adapter, u32 *privilege,
2575                             u32 domain)
2576{
2577        struct be_mcc_wrb *wrb;
2578        struct be_cmd_req_get_fn_privileges *req;
2579        int status;
2580
2581        spin_lock_bh(&adapter->mcc_lock);
2582
2583        wrb = wrb_from_mccq(adapter);
2584        if (!wrb) {
2585                status = -EBUSY;
2586                goto err;
2587        }
2588
2589        req = embedded_payload(wrb);
2590
2591        be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
2592                               OPCODE_COMMON_GET_FN_PRIVILEGES, sizeof(*req),
2593                               wrb, NULL);
2594
2595        req->hdr.domain = domain;
2596
2597        status = be_mcc_notify_wait(adapter);
2598        if (!status) {
2599                struct be_cmd_resp_get_fn_privileges *resp =
2600                                                embedded_payload(wrb);
2601                *privilege = le32_to_cpu(resp->privilege_mask);
2602        }
2603
2604err:
2605        spin_unlock_bh(&adapter->mcc_lock);
2606        return status;
2607}
2608
/* Uses synchronous MCCQ */
/* Query the MAC list for a domain.  If the firmware reports an active
 * mac_id, *pmac_id_active is set true and *pmac_id receives the first such
 * id (mac is left untouched in that case).  Otherwise *pmac_id_active is
 * false and the first permanent MAC address is copied into mac.
 * Allocates its own DMA buffer (before taking mcc_lock) and frees it on
 * every path.  Returns 0, -ENOMEM, -EBUSY or the FW status.
 */
int be_cmd_get_mac_from_list(struct be_adapter *adapter, u8 *mac,
                             bool *pmac_id_active, u32 *pmac_id, u8 domain)
{
        struct be_mcc_wrb *wrb;
        struct be_cmd_req_get_mac_list *req;
        int status;
        int mac_count;
        struct be_dma_mem get_mac_list_cmd;
        int i;

        memset(&get_mac_list_cmd, 0, sizeof(struct be_dma_mem));
        get_mac_list_cmd.size = sizeof(struct be_cmd_resp_get_mac_list);
        get_mac_list_cmd.va = pci_alloc_consistent(adapter->pdev,
                        get_mac_list_cmd.size,
                        &get_mac_list_cmd.dma);

        if (!get_mac_list_cmd.va) {
                dev_err(&adapter->pdev->dev,
                                "Memory allocation failure during GET_MAC_LIST\n");
                return -ENOMEM;
        }

        spin_lock_bh(&adapter->mcc_lock);

        wrb = wrb_from_mccq(adapter);
        if (!wrb) {
                status = -EBUSY;
                goto out;
        }

        req = get_mac_list_cmd.va;

        be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
                               OPCODE_COMMON_GET_MAC_LIST,
                               get_mac_list_cmd.size, wrb, &get_mac_list_cmd);
        req->hdr.domain = domain;
        req->mac_type = MAC_ADDRESS_TYPE_NETWORK;
        req->perm_override = 1;

        status = be_mcc_notify_wait(adapter);
        if (!status) {
                struct be_cmd_resp_get_mac_list *resp =
                                                get_mac_list_cmd.va;
                mac_count = resp->true_mac_count + resp->pseudo_mac_count;
                /* Mac list returned could contain one or more active mac_ids
                 * or one or more true or pseudo permanent mac addresses.
                 * If an active mac_id is present, return first active mac_id
                 * found.
                 */
                for (i = 0; i < mac_count; i++) {
                        struct get_list_macaddr *mac_entry;
                        u16 mac_addr_size;
                        u32 mac_id;

                        mac_entry = &resp->macaddr_list[i];
                        mac_addr_size = le16_to_cpu(mac_entry->mac_addr_size);
                        /* mac_id is a 32 bit value and mac_addr size
                         * is 6 bytes
                         */
                        if (mac_addr_size == sizeof(u32)) {
                                *pmac_id_active = true;
                                mac_id = mac_entry->mac_addr_id.s_mac_id.mac_id;
                                *pmac_id = le32_to_cpu(mac_id);
                                goto out;
                        }
                }
                /* If no active mac_id found, return first mac addr */
                *pmac_id_active = false;
                memcpy(mac, resp->macaddr_list[0].mac_addr_id.macaddr,
                                                                ETH_ALEN);
        }

out:
        /* The buffer is freed on every exit path, including the early
         * goto from inside the loop above.
         */
        spin_unlock_bh(&adapter->mcc_lock);
        pci_free_consistent(adapter->pdev, get_mac_list_cmd.size,
                        get_mac_list_cmd.va, get_mac_list_cmd.dma);
        return status;
}
2688
2689/* Uses synchronous MCCQ */
2690int be_cmd_set_mac_list(struct be_adapter *adapter, u8 *mac_array,
2691                        u8 mac_count, u32 domain)
2692{
2693        struct be_mcc_wrb *wrb;
2694        struct be_cmd_req_set_mac_list *req;
2695        int status;
2696        struct be_dma_mem cmd;
2697
2698        memset(&cmd, 0, sizeof(struct be_dma_mem));
2699        cmd.size = sizeof(struct be_cmd_req_set_mac_list);
2700        cmd.va = dma_alloc_coherent(&adapter->pdev->dev, cmd.size,
2701                        &cmd.dma, GFP_KERNEL);
2702        if (!cmd.va)
2703                return -ENOMEM;
2704
2705        spin_lock_bh(&adapter->mcc_lock);
2706
2707        wrb = wrb_from_mccq(adapter);
2708        if (!wrb) {
2709                status = -EBUSY;
2710                goto err;
2711        }
2712
2713        req = cmd.va;
2714        be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
2715                                OPCODE_COMMON_SET_MAC_LIST, sizeof(*req),
2716                                wrb, &cmd);
2717
2718        req->hdr.domain = domain;
2719        req->mac_count = mac_count;
2720        if (mac_count)
2721                memcpy(req->mac, mac_array, ETH_ALEN*mac_count);
2722
2723        status = be_mcc_notify_wait(adapter);
2724
2725err:
2726        dma_free_coherent(&adapter->pdev->dev, cmd.size,
2727                                cmd.va, cmd.dma);
2728        spin_unlock_bh(&adapter->mcc_lock);
2729        return status;
2730}
2731
2732int be_cmd_set_hsw_config(struct be_adapter *adapter, u16 pvid,
2733                        u32 domain, u16 intf_id)
2734{
2735        struct be_mcc_wrb *wrb;
2736        struct be_cmd_req_set_hsw_config *req;
2737        void *ctxt;
2738        int status;
2739
2740        spin_lock_bh(&adapter->mcc_lock);
2741
2742        wrb = wrb_from_mccq(adapter);
2743        if (!wrb) {
2744                status = -EBUSY;
2745                goto err;
2746        }
2747
2748        req = embedded_payload(wrb);
2749        ctxt = &req->context;
2750
2751        be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
2752                        OPCODE_COMMON_SET_HSW_CONFIG, sizeof(*req), wrb, NULL);
2753
2754        req->hdr.domain = domain;
2755        AMAP_SET_BITS(struct amap_set_hsw_context, interface_id, ctxt, intf_id);
2756        if (pvid) {
2757                AMAP_SET_BITS(struct amap_set_hsw_context, pvid_valid, ctxt, 1);
2758                AMAP_SET_BITS(struct amap_set_hsw_context, pvid, ctxt, pvid);
2759        }
2760
2761        be_dws_cpu_to_le(req->context, sizeof(req->context));
2762        status = be_mcc_notify_wait(adapter);
2763
2764err:
2765        spin_unlock_bh(&adapter->mcc_lock);
2766        return status;
2767}
2768
2769/* Get Hyper switch config */
2770int be_cmd_get_hsw_config(struct be_adapter *adapter, u16 *pvid,
2771                        u32 domain, u16 intf_id)
2772{
2773        struct be_mcc_wrb *wrb;
2774        struct be_cmd_req_get_hsw_config *req;
2775        void *ctxt;
2776        int status;
2777        u16 vid;
2778
2779        spin_lock_bh(&adapter->mcc_lock);
2780
2781        wrb = wrb_from_mccq(adapter);
2782        if (!wrb) {
2783                status = -EBUSY;
2784                goto err;
2785        }
2786
2787        req = embedded_payload(wrb);
2788        ctxt = &req->context;
2789
2790        be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
2791                        OPCODE_COMMON_GET_HSW_CONFIG, sizeof(*req), wrb, NULL);
2792
2793        req->hdr.domain = domain;
2794        AMAP_SET_BITS(struct amap_get_hsw_req_context, interface_id, ctxt,
2795                                                                intf_id);
2796        AMAP_SET_BITS(struct amap_get_hsw_req_context, pvid_valid, ctxt, 1);
2797        be_dws_cpu_to_le(req->context, sizeof(req->context));
2798
2799        status = be_mcc_notify_wait(adapter);
2800        if (!status) {
2801                struct be_cmd_resp_get_hsw_config *resp =
2802                                                embedded_payload(wrb);
2803                be_dws_le_to_cpu(&resp->context,
2804                                                sizeof(resp->context));
2805                vid = AMAP_GET_BITS(struct amap_get_hsw_resp_context,
2806                                                        pvid, &resp->context);
2807                *pvid = le16_to_cpu(vid);
2808        }
2809
2810err:
2811        spin_unlock_bh(&adapter->mcc_lock);
2812        return status;
2813}
2814
2815int be_cmd_get_acpi_wol_cap(struct be_adapter *adapter)
2816{
2817        struct be_mcc_wrb *wrb;
2818        struct be_cmd_req_acpi_wol_magic_config_v1 *req;
2819        int status;
2820        int payload_len = sizeof(*req);
2821        struct be_dma_mem cmd;
2822
2823        if (!be_cmd_allowed(adapter, OPCODE_ETH_ACPI_WOL_MAGIC_CONFIG,
2824                            CMD_SUBSYSTEM_ETH))
2825                return -EPERM;
2826
2827        if (mutex_lock_interruptible(&adapter->mbox_lock))
2828                return -1;
2829
2830        memset(&cmd, 0, sizeof(struct be_dma_mem));
2831        cmd.size = sizeof(struct be_cmd_resp_acpi_wol_magic_config_v1);
2832        cmd.va = pci_alloc_consistent(adapter->pdev, cmd.size,
2833                                               &cmd.dma);
2834        if (!cmd.va) {
2835                dev_err(&adapter->pdev->dev,
2836                                "Memory allocation failure\n");
2837                status = -ENOMEM;
2838                goto err;
2839        }
2840
2841        wrb = wrb_from_mbox(adapter);
2842        if (!wrb) {
2843                status = -EBUSY;
2844                goto err;
2845        }
2846
2847        req = cmd.va;
2848
2849        be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ETH,
2850                               OPCODE_ETH_ACPI_WOL_MAGIC_CONFIG,
2851                               payload_len, wrb, &cmd);
2852
2853        req->hdr.version = 1;
2854        req->query_options = BE_GET_WOL_CAP;
2855
2856        status = be_mbox_notify_wait(adapter);
2857        if (!status) {
2858                struct be_cmd_resp_acpi_wol_magic_config_v1 *resp;
2859                resp = (struct be_cmd_resp_acpi_wol_magic_config_v1 *) cmd.va;
2860
2861                /* the command could succeed misleadingly on old f/w
2862                 * which is not aware of the V1 version. fake an error. */
2863                if (resp->hdr.response_length < payload_len) {
2864                        status = -1;
2865                        goto err;
2866                }
2867                adapter->wol_cap = resp->wol_settings;
2868        }
2869err:
2870        mutex_unlock(&adapter->mbox_lock);
2871        if (cmd.va)
2872                pci_free_consistent(adapter->pdev, cmd.size, cmd.va, cmd.dma);
2873        return status;
2874
2875}
2876int be_cmd_get_ext_fat_capabilites(struct be_adapter *adapter,
2877                                   struct be_dma_mem *cmd)
2878{
2879        struct be_mcc_wrb *wrb;
2880        struct be_cmd_req_get_ext_fat_caps *req;
2881        int status;
2882
2883        if (mutex_lock_interruptible(&adapter->mbox_lock))
2884                return -1;
2885
2886        wrb = wrb_from_mbox(adapter);
2887        if (!wrb) {
2888                status = -EBUSY;
2889                goto err;
2890        }
2891
2892        req = cmd->va;
2893        be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
2894                               OPCODE_COMMON_GET_EXT_FAT_CAPABILITES,
2895                               cmd->size, wrb, cmd);
2896        req->parameter_type = cpu_to_le32(1);
2897
2898        status = be_mbox_notify_wait(adapter);
2899err:
2900        mutex_unlock(&adapter->mbox_lock);
2901        return status;
2902}
2903
2904int be_cmd_set_ext_fat_capabilites(struct be_adapter *adapter,
2905                                   struct be_dma_mem *cmd,
2906                                   struct be_fat_conf_params *configs)
2907{
2908        struct be_mcc_wrb *wrb;
2909        struct be_cmd_req_set_ext_fat_caps *req;
2910        int status;
2911
2912        spin_lock_bh(&adapter->mcc_lock);
2913
2914        wrb = wrb_from_mccq(adapter);
2915        if (!wrb) {
2916                status = -EBUSY;
2917                goto err;
2918        }
2919
2920        req = cmd->va;
2921        memcpy(&req->set_params, configs, sizeof(struct be_fat_conf_params));
2922        be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
2923                               OPCODE_COMMON_SET_EXT_FAT_CAPABILITES,
2924                               cmd->size, wrb, cmd);
2925
2926        status = be_mcc_notify_wait(adapter);
2927err:
2928        spin_unlock_bh(&adapter->mcc_lock);
2929        return status;
2930}
2931
2932int be_cmd_query_port_name(struct be_adapter *adapter, u8 *port_name)
2933{
2934        struct be_mcc_wrb *wrb;
2935        struct be_cmd_req_get_port_name *req;
2936        int status;
2937
2938        if (!lancer_chip(adapter)) {
2939                *port_name = adapter->hba_port_num + '0';
2940                return 0;
2941        }
2942
2943        spin_lock_bh(&adapter->mcc_lock);
2944
2945        wrb = wrb_from_mccq(adapter);
2946        if (!wrb) {
2947                status = -EBUSY;
2948                goto err;
2949        }
2950
2951        req = embedded_payload(wrb);
2952
2953        be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
2954                               OPCODE_COMMON_GET_PORT_NAME, sizeof(*req), wrb,
2955                               NULL);
2956        req->hdr.version = 1;
2957
2958        status = be_mcc_notify_wait(adapter);
2959        if (!status) {
2960                struct be_cmd_resp_get_port_name *resp = embedded_payload(wrb);
2961                *port_name = resp->port_name[adapter->hba_port_num];
2962        } else {
2963                *port_name = adapter->hba_port_num + '0';
2964        }
2965err:
2966        spin_unlock_bh(&adapter->mcc_lock);
2967        return status;
2968}
2969
2970static struct be_nic_resource_desc *be_get_nic_desc(u8 *buf, u32 desc_count,
2971                                                    u32 max_buf_size)
2972{
2973        struct be_nic_resource_desc *desc = (struct be_nic_resource_desc *)buf;
2974        int i;
2975
2976        for (i = 0; i < desc_count; i++) {
2977                desc->desc_len = desc->desc_len ? : RESOURCE_DESC_SIZE;
2978                if (((void *)desc + desc->desc_len) >
2979                    (void *)(buf + max_buf_size))
2980                        return NULL;
2981
2982                if (desc->desc_type == NIC_RESOURCE_DESC_TYPE_V0 ||
2983                    desc->desc_type == NIC_RESOURCE_DESC_TYPE_V1)
2984                        return desc;
2985
2986                desc = (void *)desc + desc->desc_len;
2987        }
2988
2989        return NULL;
2990}
2991
2992/* Uses Mbox */
2993int be_cmd_get_func_config(struct be_adapter *adapter)
2994{
2995        struct be_mcc_wrb *wrb;
2996        struct be_cmd_req_get_func_config *req;
2997        int status;
2998        struct be_dma_mem cmd;
2999
3000        if (mutex_lock_interruptible(&adapter->mbox_lock))
3001                return -1;
3002
3003        memset(&cmd, 0, sizeof(struct be_dma_mem));
3004        cmd.size = sizeof(struct be_cmd_resp_get_func_config);
3005        cmd.va = pci_alloc_consistent(adapter->pdev, cmd.size,
3006                                      &cmd.dma);
3007        if (!cmd.va) {
3008                dev_err(&adapter->pdev->dev, "Memory alloc failure\n");
3009                status = -ENOMEM;
3010                goto err;
3011        }
3012
3013        wrb = wrb_from_mbox(adapter);
3014        if (!wrb) {
3015                status = -EBUSY;
3016                goto err;
3017        }
3018
3019        req = cmd.va;
3020
3021        be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
3022                               OPCODE_COMMON_GET_FUNC_CONFIG,
3023                               cmd.size, wrb, &cmd);
3024
3025        if (skyhawk_chip(adapter))
3026                req->hdr.version = 1;
3027
3028        status = be_mbox_notify_wait(adapter);
3029        if (!status) {
3030                struct be_cmd_resp_get_func_config *resp = cmd.va;
3031                u32 desc_count = le32_to_cpu(resp->desc_count);
3032                struct be_nic_resource_desc *desc;
3033
3034                desc = be_get_nic_desc(resp->func_param, desc_count,
3035                                       sizeof(resp->func_param));
3036                if (!desc) {
3037                        status = -EINVAL;
3038                        goto err;
3039                }
3040
3041                adapter->pf_number = desc->pf_num;
3042                adapter->max_pmac_cnt = le16_to_cpu(desc->unicast_mac_count);
3043                adapter->max_vlans = le16_to_cpu(desc->vlan_count);
3044                adapter->max_mcast_mac = le16_to_cpu(desc->mcast_mac_count);
3045                adapter->max_tx_queues = le16_to_cpu(desc->txq_count);
3046                adapter->max_rss_queues = le16_to_cpu(desc->rssq_count);
3047                adapter->max_rx_queues = le16_to_cpu(desc->rq_count);
3048
3049                adapter->max_event_queues = le16_to_cpu(desc->eq_count);
3050                adapter->if_cap_flags = le32_to_cpu(desc->cap_flags);
3051
3052                /* Clear flags that driver is not interested in */
3053                adapter->if_cap_flags &=  BE_IF_CAP_FLAGS_WANT;
3054        }
3055err:
3056        mutex_unlock(&adapter->mbox_lock);
3057        if (cmd.va)
3058                pci_free_consistent(adapter->pdev, cmd.size, cmd.va, cmd.dma);
3059        return status;
3060}
3061
3062/* Uses mbox */
3063int be_cmd_get_profile_config_mbox(struct be_adapter *adapter,
3064                                   u8 domain, struct be_dma_mem *cmd)
3065{
3066        struct be_mcc_wrb *wrb;
3067        struct be_cmd_req_get_profile_config *req;
3068        int status;
3069
3070        if (mutex_lock_interruptible(&adapter->mbox_lock))
3071                return -1;
3072        wrb = wrb_from_mbox(adapter);
3073
3074        req = cmd->va;
3075        be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
3076                               OPCODE_COMMON_GET_PROFILE_CONFIG,
3077                               cmd->size, wrb, cmd);
3078
3079        req->type = ACTIVE_PROFILE_TYPE;
3080        req->hdr.domain = domain;
3081        if (!lancer_chip(adapter))
3082                req->hdr.version = 1;
3083
3084        status = be_mbox_notify_wait(adapter);
3085
3086        mutex_unlock(&adapter->mbox_lock);
3087        return status;
3088}
3089
3090/* Uses sync mcc */
3091int be_cmd_get_profile_config_mccq(struct be_adapter *adapter,
3092                                   u8 domain, struct be_dma_mem *cmd)
3093{
3094        struct be_mcc_wrb *wrb;
3095        struct be_cmd_req_get_profile_config *req;
3096        int status;
3097
3098        spin_lock_bh(&adapter->mcc_lock);
3099
3100        wrb = wrb_from_mccq(adapter);
3101        if (!wrb) {
3102                status = -EBUSY;
3103                goto err;
3104        }
3105
3106        req = cmd->va;
3107        be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
3108                               OPCODE_COMMON_GET_PROFILE_CONFIG,
3109                               cmd->size, wrb, cmd);
3110
3111        req->type = ACTIVE_PROFILE_TYPE;
3112        req->hdr.domain = domain;
3113        if (!lancer_chip(adapter))
3114                req->hdr.version = 1;
3115
3116        status = be_mcc_notify_wait(adapter);
3117
3118err:
3119        spin_unlock_bh(&adapter->mcc_lock);
3120        return status;
3121}
3122
3123/* Uses sync mcc, if MCCQ is already created otherwise mbox */
3124int be_cmd_get_profile_config(struct be_adapter *adapter, u32 *cap_flags,
3125                              u16 *txq_count, u8 domain)
3126{
3127        struct be_queue_info *mccq = &adapter->mcc_obj.q;
3128        struct be_dma_mem cmd;
3129        int status;
3130
3131        memset(&cmd, 0, sizeof(struct be_dma_mem));
3132        if (!lancer_chip(adapter))
3133                cmd.size = sizeof(struct be_cmd_resp_get_profile_config_v1);
3134        else
3135                cmd.size = sizeof(struct be_cmd_resp_get_profile_config);
3136        cmd.va = pci_alloc_consistent(adapter->pdev, cmd.size,
3137                                      &cmd.dma);
3138        if (!cmd.va) {
3139                dev_err(&adapter->pdev->dev, "Memory alloc failure\n");
3140                return -ENOMEM;
3141        }
3142
3143        if (!mccq->created)
3144                status = be_cmd_get_profile_config_mbox(adapter, domain, &cmd);
3145        else
3146                status = be_cmd_get_profile_config_mccq(adapter, domain, &cmd);
3147        if (!status) {
3148                struct be_cmd_resp_get_profile_config *resp = cmd.va;
3149                u32 desc_count = le32_to_cpu(resp->desc_count);
3150                struct be_nic_resource_desc *desc;
3151
3152                desc = be_get_nic_desc(resp->func_param, desc_count,
3153                                       sizeof(resp->func_param));
3154
3155                if (!desc) {
3156                        status = -EINVAL;
3157                        goto err;
3158                }
3159                if (cap_flags)
3160                        *cap_flags = le32_to_cpu(desc->cap_flags);
3161                if (txq_count)
3162                        *txq_count = le32_to_cpu(desc->txq_count);
3163        }
3164err:
3165        if (cmd.va)
3166                pci_free_consistent(adapter->pdev, cmd.size,
3167                                    cmd.va, cmd.dma);
3168        return status;
3169}
3170
3171/* Uses sync mcc */
3172int be_cmd_set_profile_config(struct be_adapter *adapter, u32 bps,
3173                              u8 domain)
3174{
3175        struct be_mcc_wrb *wrb;
3176        struct be_cmd_req_set_profile_config *req;
3177        int status;
3178
3179        spin_lock_bh(&adapter->mcc_lock);
3180
3181        wrb = wrb_from_mccq(adapter);
3182        if (!wrb) {
3183                status = -EBUSY;
3184                goto err;
3185        }
3186
3187        req = embedded_payload(wrb);
3188
3189        be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
3190                               OPCODE_COMMON_SET_PROFILE_CONFIG, sizeof(*req),
3191                               wrb, NULL);
3192
3193        req->hdr.domain = domain;
3194        req->desc_count = cpu_to_le32(1);
3195
3196        req->nic_desc.desc_type = NIC_RESOURCE_DESC_TYPE_V0;
3197        req->nic_desc.desc_len = RESOURCE_DESC_SIZE;
3198        req->nic_desc.flags = (1 << QUN) | (1 << IMM) | (1 << NOSV);
3199        req->nic_desc.pf_num = adapter->pf_number;
3200        req->nic_desc.vf_num = domain;
3201
3202        /* Mark fields invalid */
3203        req->nic_desc.unicast_mac_count = 0xFFFF;
3204        req->nic_desc.mcc_count = 0xFFFF;
3205        req->nic_desc.vlan_count = 0xFFFF;
3206        req->nic_desc.mcast_mac_count = 0xFFFF;
3207        req->nic_desc.txq_count = 0xFFFF;
3208        req->nic_desc.rq_count = 0xFFFF;
3209        req->nic_desc.rssq_count = 0xFFFF;
3210        req->nic_desc.lro_count = 0xFFFF;
3211        req->nic_desc.cq_count = 0xFFFF;
3212        req->nic_desc.toe_conn_count = 0xFFFF;
3213        req->nic_desc.eq_count = 0xFFFF;
3214        req->nic_desc.link_param = 0xFF;
3215        req->nic_desc.bw_min = 0xFFFFFFFF;
3216        req->nic_desc.acpi_params = 0xFF;
3217        req->nic_desc.wol_param = 0x0F;
3218
3219        /* Change BW */
3220        req->nic_desc.bw_min = cpu_to_le32(bps);
3221        req->nic_desc.bw_max = cpu_to_le32(bps);
3222        status = be_mcc_notify_wait(adapter);
3223err:
3224        spin_unlock_bh(&adapter->mcc_lock);
3225        return status;
3226}
3227
3228int be_cmd_get_if_id(struct be_adapter *adapter, struct be_vf_cfg *vf_cfg,
3229                     int vf_num)
3230{
3231        struct be_mcc_wrb *wrb;
3232        struct be_cmd_req_get_iface_list *req;
3233        struct be_cmd_resp_get_iface_list *resp;
3234        int status;
3235
3236        spin_lock_bh(&adapter->mcc_lock);
3237
3238        wrb = wrb_from_mccq(adapter);
3239        if (!wrb) {
3240                status = -EBUSY;
3241                goto err;
3242        }
3243        req = embedded_payload(wrb);
3244
3245        be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
3246                               OPCODE_COMMON_GET_IFACE_LIST, sizeof(*resp),
3247                               wrb, NULL);
3248        req->hdr.domain = vf_num + 1;
3249
3250        status = be_mcc_notify_wait(adapter);
3251        if (!status) {
3252                resp = (struct be_cmd_resp_get_iface_list *)req;
3253                vf_cfg->if_handle = le32_to_cpu(resp->if_desc.if_id);
3254        }
3255
3256err:
3257        spin_unlock_bh(&adapter->mcc_lock);
3258        return status;
3259}
3260
3261static int lancer_wait_idle(struct be_adapter *adapter)
3262{
3263#define SLIPORT_IDLE_TIMEOUT 30
3264        u32 reg_val;
3265        int status = 0, i;
3266
3267        for (i = 0; i < SLIPORT_IDLE_TIMEOUT; i++) {
3268                reg_val = ioread32(adapter->db + PHYSDEV_CONTROL_OFFSET);
3269                if ((reg_val & PHYSDEV_CONTROL_INP_MASK) == 0)
3270                        break;
3271
3272                ssleep(1);
3273        }
3274
3275        if (i == SLIPORT_IDLE_TIMEOUT)
3276                status = -1;
3277
3278        return status;
3279}
3280
3281int lancer_physdev_ctrl(struct be_adapter *adapter, u32 mask)
3282{
3283        int status = 0;
3284
3285        status = lancer_wait_idle(adapter);
3286        if (status)
3287                return status;
3288
3289        iowrite32(mask, adapter->db + PHYSDEV_CONTROL_OFFSET);
3290
3291        return status;
3292}
3293
3294/* Routine to check whether dump image is present or not */
3295bool dump_present(struct be_adapter *adapter)
3296{
3297        u32 sliport_status = 0;
3298
3299        sliport_status = ioread32(adapter->db + SLIPORT_STATUS_OFFSET);
3300        return !!(sliport_status & SLIPORT_STATUS_DIP_MASK);
3301}
3302
3303int lancer_initiate_dump(struct be_adapter *adapter)
3304{
3305        int status;
3306
3307        /* give firmware reset and diagnostic dump */
3308        status = lancer_physdev_ctrl(adapter, PHYSDEV_CONTROL_FW_RESET_MASK |
3309                                     PHYSDEV_CONTROL_DD_MASK);
3310        if (status < 0) {
3311                dev_err(&adapter->pdev->dev, "Firmware reset failed\n");
3312                return status;
3313        }
3314
3315        status = lancer_wait_idle(adapter);
3316        if (status)
3317                return status;
3318
3319        if (!dump_present(adapter)) {
3320                dev_err(&adapter->pdev->dev, "Dump image not present\n");
3321                return -1;
3322        }
3323
3324        return 0;
3325}
3326
3327/* Uses sync mcc */
3328int be_cmd_enable_vf(struct be_adapter *adapter, u8 domain)
3329{
3330        struct be_mcc_wrb *wrb;
3331        struct be_cmd_enable_disable_vf *req;
3332        int status;
3333
3334        if (!lancer_chip(adapter))
3335                return 0;
3336
3337        spin_lock_bh(&adapter->mcc_lock);
3338
3339        wrb = wrb_from_mccq(adapter);
3340        if (!wrb) {
3341                status = -EBUSY;
3342                goto err;
3343        }
3344
3345        req = embedded_payload(wrb);
3346
3347        be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
3348                               OPCODE_COMMON_ENABLE_DISABLE_VF, sizeof(*req),
3349                               wrb, NULL);
3350
3351        req->hdr.domain = domain;
3352        req->enable = 1;
3353        status = be_mcc_notify_wait(adapter);
3354err:
3355        spin_unlock_bh(&adapter->mcc_lock);
3356        return status;
3357}
3358
3359int be_cmd_intr_set(struct be_adapter *adapter, bool intr_enable)
3360{
3361        struct be_mcc_wrb *wrb;
3362        struct be_cmd_req_intr_set *req;
3363        int status;
3364
3365        if (mutex_lock_interruptible(&adapter->mbox_lock))
3366                return -1;
3367
3368        wrb = wrb_from_mbox(adapter);
3369
3370        req = embedded_payload(wrb);
3371
3372        be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
3373                               OPCODE_COMMON_SET_INTERRUPT_ENABLE, sizeof(*req),
3374                               wrb, NULL);
3375
3376        req->intr_enabled = intr_enable;
3377
3378        status = be_mbox_notify_wait(adapter);
3379
3380        mutex_unlock(&adapter->mbox_lock);
3381        return status;
3382}
3383
3384int be_roce_mcc_cmd(void *netdev_handle, void *wrb_payload,
3385                        int wrb_payload_size, u16 *cmd_status, u16 *ext_status)
3386{
3387        struct be_adapter *adapter = netdev_priv(netdev_handle);
3388        struct be_mcc_wrb *wrb;
3389        struct be_cmd_req_hdr *hdr = (struct be_cmd_req_hdr *) wrb_payload;
3390        struct be_cmd_req_hdr *req;
3391        struct be_cmd_resp_hdr *resp;
3392        int status;
3393
3394        spin_lock_bh(&adapter->mcc_lock);
3395
3396        wrb = wrb_from_mccq(adapter);
3397        if (!wrb) {
3398                status = -EBUSY;
3399                goto err;
3400        }
3401        req = embedded_payload(wrb);
3402        resp = embedded_payload(wrb);
3403
3404        be_wrb_cmd_hdr_prepare(req, hdr->subsystem,
3405                               hdr->opcode, wrb_payload_size, wrb, NULL);
3406        memcpy(req, wrb_payload, wrb_payload_size);
3407        be_dws_cpu_to_le(req, wrb_payload_size);
3408
3409        status = be_mcc_notify_wait(adapter);
3410        if (cmd_status)
3411                *cmd_status = (status & 0xffff);
3412        if (ext_status)
3413                *ext_status = 0;
3414        memcpy(wrb_payload, resp, sizeof(*resp) + resp->response_length);
3415        be_dws_le_to_cpu(wrb_payload, sizeof(*resp) + resp->response_length);
3416err:
3417        spin_unlock_bh(&adapter->mcc_lock);
3418        return status;
3419}
3420EXPORT_SYMBOL(be_roce_mcc_cmd);
3421