/* linux/drivers/net/benet/be_cmds.c */
   1/*
   2 * Copyright (C) 2005 - 2010 ServerEngines
   3 * All rights reserved.
   4 *
   5 * This program is free software; you can redistribute it and/or
   6 * modify it under the terms of the GNU General Public License version 2
   7 * as published by the Free Software Foundation.  The full GNU General
   8 * Public License is included in this distribution in the file called COPYING.
   9 *
  10 * Contact Information:
  11 * linux-drivers@serverengines.com
  12 *
  13 * ServerEngines
  14 * 209 N. Fair Oaks Ave
  15 * Sunnyvale, CA 94085
  16 */
  17
  18#include "be.h"
  19#include "be_cmds.h"
  20
  21static void be_mcc_notify(struct be_adapter *adapter)
  22{
  23        struct be_queue_info *mccq = &adapter->mcc_obj.q;
  24        u32 val = 0;
  25
  26        val |= mccq->id & DB_MCCQ_RING_ID_MASK;
  27        val |= 1 << DB_MCCQ_NUM_POSTED_SHIFT;
  28
  29        wmb();
  30        iowrite32(val, adapter->db + DB_MCCQ_OFFSET);
  31}
  32
  33/* To check if valid bit is set, check the entire word as we don't know
  34 * the endianness of the data (old entry is host endian while a new entry is
  35 * little endian) */
  36static inline bool be_mcc_compl_is_new(struct be_mcc_compl *compl)
  37{
  38        if (compl->flags != 0) {
  39                compl->flags = le32_to_cpu(compl->flags);
  40                BUG_ON((compl->flags & CQE_FLAGS_VALID_MASK) == 0);
  41                return true;
  42        } else {
  43                return false;
  44        }
  45}
  46
/* Need to reset the entire word that houses the valid bit; a zero flags
 * word marks the entry as consumed for be_mcc_compl_is_new() */
static inline void be_mcc_compl_use(struct be_mcc_compl *compl)
{
	compl->flags = 0;
}
  52
/* Process one MCC command completion: swap the status words to host
 * endian, handle the special-cased opcodes (flashrom write wakes its
 * waiter, eth stats updates netdev counters) and warn on other failures.
 * Returns the base completion status extracted from the CQE.
 */
static int be_mcc_compl_process(struct be_adapter *adapter,
	struct be_mcc_compl *compl)
{
	u16 compl_status, extd_status;

	/* Just swap the status to host endian; mcc tag is opaquely copied
	 * from mcc_wrb */
	be_dws_le_to_cpu(compl, 4);

	compl_status = (compl->status >> CQE_STATUS_COMPL_SHIFT) &
				CQE_STATUS_COMPL_MASK;

	/* Flash write completions wake the sleeping issuer regardless of
	 * success or failure; the status is handed over via flash_status */
	if ((compl->tag0 == OPCODE_COMMON_WRITE_FLASHROM) &&
		(compl->tag1 == CMD_SUBSYSTEM_COMMON)) {
		adapter->flash_status = compl_status;
		complete(&adapter->flash_compl);
	}

	if (compl_status == MCC_STATUS_SUCCESS) {
		if (compl->tag0 == OPCODE_ETH_GET_STATISTICS) {
			struct be_cmd_resp_get_stats *resp =
						adapter->stats_cmd.va;
			be_dws_le_to_cpu(&resp->hw_stats,
						sizeof(resp->hw_stats));
			netdev_stats_update(adapter);
			adapter->stats_ioctl_sent = false;
		}
	} else if ((compl_status != MCC_STATUS_NOT_SUPPORTED) &&
		   (compl->tag0 != OPCODE_COMMON_NTWK_MAC_QUERY)) {
		/* NOTE(review): MAC query errors are deliberately not
		 * warned about here — presumably handled by the caller;
		 * confirm against be_cmd_mac_addr_query() users. */
		extd_status = (compl->status >> CQE_STATUS_EXTD_SHIFT) &
				CQE_STATUS_EXTD_MASK;
		dev_warn(&adapter->pdev->dev,
		"Error in cmd completion - opcode %d, compl %d, extd %d\n",
			compl->tag0, compl_status, extd_status);
	}
	return compl_status;
}
  90
  91/* Link state evt is a string of bytes; no need for endian swapping */
  92static void be_async_link_state_process(struct be_adapter *adapter,
  93                struct be_async_event_link_state *evt)
  94{
  95        be_link_status_update(adapter,
  96                evt->port_link_status == ASYNC_EVENT_LINK_UP);
  97}
  98
  99/* Grp5 CoS Priority evt */
 100static void be_async_grp5_cos_priority_process(struct be_adapter *adapter,
 101                struct be_async_event_grp5_cos_priority *evt)
 102{
 103        if (evt->valid) {
 104                adapter->vlan_prio_bmap = evt->available_priority_bmap;
 105                adapter->recommended_prio =
 106                        evt->reco_default_priority << VLAN_PRIO_SHIFT;
 107        }
 108}
 109
 110/* Grp5 QOS Speed evt */
 111static void be_async_grp5_qos_speed_process(struct be_adapter *adapter,
 112                struct be_async_event_grp5_qos_link_speed *evt)
 113{
 114        if (evt->physical_port == adapter->port_num) {
 115                /* qos_link_speed is in units of 10 Mbps */
 116                adapter->link_speed = evt->qos_link_speed * 10;
 117        }
 118}
 119
 120static void be_async_grp5_evt_process(struct be_adapter *adapter,
 121                u32 trailer, struct be_mcc_compl *evt)
 122{
 123        u8 event_type = 0;
 124
 125        event_type = (trailer >> ASYNC_TRAILER_EVENT_TYPE_SHIFT) &
 126                ASYNC_TRAILER_EVENT_TYPE_MASK;
 127
 128        switch (event_type) {
 129        case ASYNC_EVENT_COS_PRIORITY:
 130                be_async_grp5_cos_priority_process(adapter,
 131                (struct be_async_event_grp5_cos_priority *)evt);
 132        break;
 133        case ASYNC_EVENT_QOS_SPEED:
 134                be_async_grp5_qos_speed_process(adapter,
 135                (struct be_async_event_grp5_qos_link_speed *)evt);
 136        break;
 137        default:
 138                dev_warn(&adapter->pdev->dev, "Unknown grp5 event!\n");
 139                break;
 140        }
 141}
 142
 143static inline bool is_link_state_evt(u32 trailer)
 144{
 145        return ((trailer >> ASYNC_TRAILER_EVENT_CODE_SHIFT) &
 146                ASYNC_TRAILER_EVENT_CODE_MASK) ==
 147                                ASYNC_EVENT_CODE_LINK_STATE;
 148}
 149
 150static inline bool is_grp5_evt(u32 trailer)
 151{
 152        return (((trailer >> ASYNC_TRAILER_EVENT_CODE_SHIFT) &
 153                ASYNC_TRAILER_EVENT_CODE_MASK) ==
 154                                ASYNC_EVENT_CODE_GRP_5);
 155}
 156
 157static struct be_mcc_compl *be_mcc_compl_get(struct be_adapter *adapter)
 158{
 159        struct be_queue_info *mcc_cq = &adapter->mcc_obj.cq;
 160        struct be_mcc_compl *compl = queue_tail_node(mcc_cq);
 161
 162        if (be_mcc_compl_is_new(compl)) {
 163                queue_tail_inc(mcc_cq);
 164                return compl;
 165        }
 166        return NULL;
 167}
 168
/* Arm the MCC CQ for async event delivery and remember that it should be
 * re-armed on every subsequent notify.  The cq lock serializes against
 * be_process_mcc() draining the same CQ. */
void be_async_mcc_enable(struct be_adapter *adapter)
{
	spin_lock_bh(&adapter->mcc_cq_lock);

	be_cq_notify(adapter, adapter->mcc_obj.cq.id, true, 0);
	adapter->mcc_obj.rearm_cq = true;

	spin_unlock_bh(&adapter->mcc_cq_lock);
}
 178
/* Stop re-arming the MCC CQ; subsequent notifies will not request
 * further async events */
void be_async_mcc_disable(struct be_adapter *adapter)
{
	adapter->mcc_obj.rearm_cq = false;
}
 183
/* Drain the MCC completion queue: dispatch async events to their handlers,
 * process command completions (last command's status is returned through
 * *status), and return the number of CQ entries consumed so the caller
 * can re-arm the CQ with that count. */
int be_process_mcc(struct be_adapter *adapter, int *status)
{
	struct be_mcc_compl *compl;
	int num = 0;
	struct be_mcc_obj *mcc_obj = &adapter->mcc_obj;

	spin_lock_bh(&adapter->mcc_cq_lock);
	while ((compl = be_mcc_compl_get(adapter))) {
		if (compl->flags & CQE_FLAGS_ASYNC_MASK) {
			/* Interpret flags as an async trailer */
			if (is_link_state_evt(compl->flags))
				be_async_link_state_process(adapter,
				(struct be_async_event_link_state *) compl);
			else if (is_grp5_evt(compl->flags))
				be_async_grp5_evt_process(adapter,
				compl->flags, compl);
		} else if (compl->flags & CQE_FLAGS_COMPLETED_MASK) {
				/* One posted WRB has completed */
				*status = be_mcc_compl_process(adapter, compl);
				atomic_dec(&mcc_obj->q.used);
		}
		/* Clear the valid word so the entry can be reused */
		be_mcc_compl_use(compl);
		num++;
	}

	spin_unlock_bh(&adapter->mcc_cq_lock);
	return num;
}
 211
/* Wait till no more pending mcc requests are present */
static int be_mcc_wait_compl(struct be_adapter *adapter)
{
#define mcc_timeout		120000 /* 12s timeout */
	int i, num, status = 0;
	struct be_mcc_obj *mcc_obj = &adapter->mcc_obj;

	/* Poll the CQ every 100us until all posted WRBs complete or the
	 * 12s budget (120000 * 100us) runs out */
	for (i = 0; i < mcc_timeout; i++) {
		num = be_process_mcc(adapter, &status);
		if (num)
			be_cq_notify(adapter, mcc_obj->cq.id,
				mcc_obj->rearm_cq, num);

		if (atomic_read(&mcc_obj->q.used) == 0)
			break;
		udelay(100);
	}
	if (i == mcc_timeout) {
		dev_err(&adapter->pdev->dev, "mccq poll timed out\n");
		return -1;
	}
	/* Status of the last completion processed by be_process_mcc() */
	return status;
}
 235
 236/* Notify MCC requests and wait for completion */
 237static int be_mcc_notify_wait(struct be_adapter *adapter)
 238{
 239        be_mcc_notify(adapter);
 240        return be_mcc_wait_compl(adapter);
 241}
 242
/* Poll the mailbox doorbell until hw sets the ready bit.
 * Returns 0 when ready; -1 if the PCI device appears disconnected
 * (all-ones read) or after ~4s without the ready bit (a UE dump is
 * attempted on timeout).  Sleeps ~1ms between polls, so must be called
 * from process context. */
static int be_mbox_db_ready_wait(struct be_adapter *adapter, void __iomem *db)
{
	int msecs = 0;
	u32 ready;

	do {
		ready = ioread32(db);
		if (ready == 0xffffffff) {
			/* All-ones: typical signature of a surprise-removed
			 * PCI device */
			dev_err(&adapter->pdev->dev,
				"pci slot disconnected\n");
			return -1;
		}

		ready &= MPU_MAILBOX_DB_RDY_MASK;
		if (ready)
			break;

		if (msecs > 4000) {
			dev_err(&adapter->pdev->dev, "mbox poll timed out\n");
			be_detect_dump_ue(adapter);
			return -1;
		}

		set_current_state(TASK_INTERRUPTIBLE);
		schedule_timeout(msecs_to_jiffies(1));
		msecs++;
	} while (true);

	return 0;
}
 273
/*
 * Insert the mailbox address into the doorbell in two steps
 * Polls on the mbox doorbell till a command completion (or a timeout) occurs
 */
static int be_mbox_notify_wait(struct be_adapter *adapter)
{
	int status;
	u32 val = 0;
	void __iomem *db = adapter->db + MPU_MAILBOX_DB_OFFSET;
	struct be_dma_mem *mbox_mem = &adapter->mbox_mem;
	struct be_mcc_mailbox *mbox = mbox_mem->va;
	struct be_mcc_compl *compl = &mbox->compl;

	/* wait for ready to be set */
	status = be_mbox_db_ready_wait(adapter, db);
	if (status != 0)
		return status;

	/* Step 1: post the upper half of the mailbox dma address */
	val |= MPU_MAILBOX_DB_HI_MASK;
	/* at bits 2 - 31 place mbox dma addr msb bits 34 - 63 */
	val |= (upper_32_bits(mbox_mem->dma) >> 2) << 2;
	iowrite32(val, db);

	/* wait for ready to be set */
	status = be_mbox_db_ready_wait(adapter, db);
	if (status != 0)
		return status;

	/* Step 2: post the lower half of the mailbox dma address */
	val = 0;
	/* at bits 2 - 31 place mbox dma addr lsb bits 4 - 33 */
	val |= (u32)(mbox_mem->dma >> 4) << 2;
	iowrite32(val, db);

	status = be_mbox_db_ready_wait(adapter, db);
	if (status != 0)
		return status;

	/* A cq entry has been made now */
	if (be_mcc_compl_is_new(compl)) {
		status = be_mcc_compl_process(adapter, &mbox->compl);
		/* Clear the valid word so the slot can be reused */
		be_mcc_compl_use(compl);
		if (status)
			return status;
	} else {
		dev_err(&adapter->pdev->dev, "invalid mailbox completion\n");
		return -1;
	}
	return 0;
}
 323
 324static int be_POST_stage_get(struct be_adapter *adapter, u16 *stage)
 325{
 326        u32 sem;
 327
 328        if (lancer_chip(adapter))
 329                sem  = ioread32(adapter->db + MPU_EP_SEMAPHORE_IF_TYPE2_OFFSET);
 330        else
 331                sem  = ioread32(adapter->csr + MPU_EP_SEMAPHORE_OFFSET);
 332
 333        *stage = sem & EP_SEMAPHORE_POST_STAGE_MASK;
 334        if ((sem >> EP_SEMAPHORE_POST_ERR_SHIFT) & EP_SEMAPHORE_POST_ERR_MASK)
 335                return -1;
 336        else
 337                return 0;
 338}
 339
/* Wait for the adapter's power-on self test (POST) to reach the
 * ARMFW_RDY stage, polling every 2s for up to ~40s.
 * Returns 0 when firmware is ready, -1 on POST error or timeout.
 * Sleeps, so must be called from process context. */
int be_cmd_POST(struct be_adapter *adapter)
{
	u16 stage;
	int status, timeout = 0;

	do {
		status = be_POST_stage_get(adapter, &stage);
		if (status) {
			/* Error bit set in the semaphore register */
			dev_err(&adapter->pdev->dev, "POST error; stage=0x%x\n",
				stage);
			return -1;
		} else if (stage != POST_STAGE_ARMFW_RDY) {
			set_current_state(TASK_INTERRUPTIBLE);
			schedule_timeout(2 * HZ);
			timeout += 2;
		} else {
			return 0;
		}
	} while (timeout < 40);

	dev_err(&adapter->pdev->dev, "POST timeout; stage=0x%x\n", stage);
	return -1;
}
 363
/* Payload area of a WRB whose command is embedded in the WRB itself */
static inline void *embedded_payload(struct be_mcc_wrb *wrb)
{
	return wrb->payload.embedded_payload;
}
 368
/* First scatter-gather entry of a WRB whose payload lives in external
 * DMA memory (non-embedded command) */
static inline struct be_sge *nonembedded_sgl(struct be_mcc_wrb *wrb)
{
	return &wrb->payload.sgl[0];
}
 373
/* Don't touch the hdr after it's prepared */
/* Fills the WRB header: embedded flag or SGE count, payload length, and
 * the opcode in tag0 (echoed back in the completion for identification);
 * the first 8 dwords are converted to little endian at the end. */
static void be_wrb_hdr_prepare(struct be_mcc_wrb *wrb, int payload_len,
				bool embedded, u8 sge_cnt, u32 opcode)
{
	if (embedded)
		wrb->embedded |= MCC_WRB_EMBEDDED_MASK;
	else
		wrb->embedded |= (sge_cnt & MCC_WRB_SGE_CNT_MASK) <<
				MCC_WRB_SGE_CNT_SHIFT;
	wrb->payload_length = payload_len;
	wrb->tag0 = opcode;
	be_dws_cpu_to_le(wrb, 8);
}
 387
/* Don't touch the hdr after it's prepared */
/* Fills the common command request header; request_length excludes the
 * header itself.  Callers may bump 'version' afterwards (e.g. Lancer). */
static void be_cmd_hdr_prepare(struct be_cmd_req_hdr *req_hdr,
				u8 subsystem, u8 opcode, int cmd_len)
{
	req_hdr->opcode = opcode;
	req_hdr->subsystem = subsystem;
	req_hdr->request_length = cpu_to_le32(cmd_len - sizeof(*req_hdr));
	req_hdr->version = 0;
}
 397
 398static void be_cmd_page_addrs_prepare(struct phys_addr *pages, u32 max_pages,
 399                        struct be_dma_mem *mem)
 400{
 401        int i, buf_pages = min(PAGES_4K_SPANNED(mem->va, mem->size), max_pages);
 402        u64 dma = (u64)mem->dma;
 403
 404        for (i = 0; i < buf_pages; i++) {
 405                pages[i].lo = cpu_to_le32(dma & 0xFFFFFFFF);
 406                pages[i].hi = cpu_to_le32(upper_32_bits(dma));
 407                dma += PAGE_SIZE_4K;
 408        }
 409}
 410
 411/* Converts interrupt delay in microseconds to multiplier value */
 412static u32 eq_delay_to_mult(u32 usec_delay)
 413{
 414#define MAX_INTR_RATE                   651042
 415        const u32 round = 10;
 416        u32 multiplier;
 417
 418        if (usec_delay == 0)
 419                multiplier = 0;
 420        else {
 421                u32 interrupt_rate = 1000000 / usec_delay;
 422                /* Max delay, corresponding to the lowest interrupt rate */
 423                if (interrupt_rate == 0)
 424                        multiplier = 1023;
 425                else {
 426                        multiplier = (MAX_INTR_RATE - interrupt_rate) * round;
 427                        multiplier /= interrupt_rate;
 428                        /* Round the multiplier to the closest value.*/
 429                        multiplier = (multiplier + round/2) / round;
 430                        multiplier = min(multiplier, (u32)1023);
 431                }
 432        }
 433        return multiplier;
 434}
 435
 436static inline struct be_mcc_wrb *wrb_from_mbox(struct be_adapter *adapter)
 437{
 438        struct be_dma_mem *mbox_mem = &adapter->mbox_mem;
 439        struct be_mcc_wrb *wrb
 440                = &((struct be_mcc_mailbox *)(mbox_mem->va))->wrb;
 441        memset(wrb, 0, sizeof(*wrb));
 442        return wrb;
 443}
 444
/* Allocate the next free WRB slot at the MCC queue head, zeroed.
 * Returns NULL (after logging) when the queue is full.  Callers in this
 * file hold adapter->mcc_lock around this and the subsequent notify. */
static struct be_mcc_wrb *wrb_from_mccq(struct be_adapter *adapter)
{
	struct be_queue_info *mccq = &adapter->mcc_obj.q;
	struct be_mcc_wrb *wrb;

	if (atomic_read(&mccq->used) >= mccq->len) {
		dev_err(&adapter->pdev->dev, "Out of MCCQ wrbs\n");
		return NULL;
	}

	wrb = queue_head_node(mccq);
	queue_head_inc(mccq);
	/* 'used' is decremented when the completion is processed */
	atomic_inc(&mccq->used);
	memset(wrb, 0, sizeof(*wrb));
	return wrb;
}
 461
/* Tell fw we're about to start firing cmds by writing a
 * special pattern across the wrb hdr; uses mbox
 */
/* NOTE(review): unlike be_cmd_fw_clean() below, this does not bail out
 * on adapter->eeh_err — confirm that is intentional. */
int be_cmd_fw_init(struct be_adapter *adapter)
{
	u8 *wrb;
	int status;

	if (mutex_lock_interruptible(&adapter->mbox_lock))
		return -1;

	/* Magic "init" byte pattern expected by firmware */
	wrb = (u8 *)wrb_from_mbox(adapter);
	*wrb++ = 0xFF;
	*wrb++ = 0x12;
	*wrb++ = 0x34;
	*wrb++ = 0xFF;
	*wrb++ = 0xFF;
	*wrb++ = 0x56;
	*wrb++ = 0x78;
	*wrb = 0xFF;

	status = be_mbox_notify_wait(adapter);

	mutex_unlock(&adapter->mbox_lock);
	return status;
}
 488
/* Tell fw we're done with firing cmds by writing a
 * special pattern across the wrb hdr; uses mbox
 */
int be_cmd_fw_clean(struct be_adapter *adapter)
{
	u8 *wrb;
	int status;

	/* Don't touch the device after an EEH (PCI) error */
	if (adapter->eeh_err)
		return -EIO;

	if (mutex_lock_interruptible(&adapter->mbox_lock))
		return -1;

	/* Magic "clean" byte pattern expected by firmware */
	wrb = (u8 *)wrb_from_mbox(adapter);
	*wrb++ = 0xFF;
	*wrb++ = 0xAA;
	*wrb++ = 0xBB;
	*wrb++ = 0xFF;
	*wrb++ = 0xFF;
	*wrb++ = 0xCC;
	*wrb++ = 0xDD;
	*wrb = 0xFF;

	status = be_mbox_notify_wait(adapter);

	mutex_unlock(&adapter->mbox_lock);
	return status;
}
/* Create an event queue with the given delay (in usecs); uses mbox.
 * On success stores the hw-assigned id in eq->id and marks eq created. */
int be_cmd_eq_create(struct be_adapter *adapter,
		struct be_queue_info *eq, int eq_delay)
{
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_eq_create *req;
	struct be_dma_mem *q_mem = &eq->dma_mem;
	int status;

	if (mutex_lock_interruptible(&adapter->mbox_lock))
		return -1;

	wrb = wrb_from_mbox(adapter);
	req = embedded_payload(wrb);

	be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0, OPCODE_COMMON_EQ_CREATE);

	be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
		OPCODE_COMMON_EQ_CREATE, sizeof(*req));

	req->num_pages =  cpu_to_le16(PAGES_4K_SPANNED(q_mem->va, q_mem->size));

	AMAP_SET_BITS(struct amap_eq_context, valid, req->context, 1);
	/* 4byte eqe*/
	AMAP_SET_BITS(struct amap_eq_context, size, req->context, 0);
	/* count is encoded as log2(len/256) */
	AMAP_SET_BITS(struct amap_eq_context, count, req->context,
			__ilog2_u32(eq->len/256));
	AMAP_SET_BITS(struct amap_eq_context, delaymult, req->context,
			eq_delay_to_mult(eq_delay));
	be_dws_cpu_to_le(req->context, sizeof(req->context));

	be_cmd_page_addrs_prepare(req->pages, ARRAY_SIZE(req->pages), q_mem);

	status = be_mbox_notify_wait(adapter);
	if (!status) {
		struct be_cmd_resp_eq_create *resp = embedded_payload(wrb);
		eq->id = le16_to_cpu(resp->eq_id);
		eq->created = true;
	}

	mutex_unlock(&adapter->mbox_lock);
	return status;
}
 560
/* Uses mbox */
/* Query a MAC address: either the permanent (factory) MAC or the one
 * currently programmed on interface 'if_handle'.  On success copies
 * ETH_ALEN bytes into mac_addr. */
int be_cmd_mac_addr_query(struct be_adapter *adapter, u8 *mac_addr,
			u8 type, bool permanent, u32 if_handle)
{
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_mac_query *req;
	int status;

	if (mutex_lock_interruptible(&adapter->mbox_lock))
		return -1;

	wrb = wrb_from_mbox(adapter);
	req = embedded_payload(wrb);

	be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0,
			OPCODE_COMMON_NTWK_MAC_QUERY);

	be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
		OPCODE_COMMON_NTWK_MAC_QUERY, sizeof(*req));

	req->type = type;
	if (permanent) {
		req->permanent = 1;
	} else {
		/* if_id is only meaningful for the non-permanent query */
		req->if_id = cpu_to_le16((u16) if_handle);
		req->permanent = 0;
	}

	status = be_mbox_notify_wait(adapter);
	if (!status) {
		struct be_cmd_resp_mac_query *resp = embedded_payload(wrb);
		memcpy(mac_addr, resp->mac.addr, ETH_ALEN);
	}

	mutex_unlock(&adapter->mbox_lock);
	return status;
}
 598
/* Uses synchronous MCCQ */
/* Add a MAC address to interface if_id; on success the hw-assigned
 * pmac handle is returned through *pmac_id (needed later for delete). */
int be_cmd_pmac_add(struct be_adapter *adapter, u8 *mac_addr,
		u32 if_id, u32 *pmac_id)
{
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_pmac_add *req;
	int status;

	spin_lock_bh(&adapter->mcc_lock);

	wrb = wrb_from_mccq(adapter);
	if (!wrb) {
		status = -EBUSY;
		goto err;
	}
	req = embedded_payload(wrb);

	be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0,
			OPCODE_COMMON_NTWK_PMAC_ADD);

	be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
		OPCODE_COMMON_NTWK_PMAC_ADD, sizeof(*req));

	req->if_id = cpu_to_le32(if_id);
	memcpy(req->mac_address, mac_addr, ETH_ALEN);

	status = be_mcc_notify_wait(adapter);
	if (!status) {
		struct be_cmd_resp_pmac_add *resp = embedded_payload(wrb);
		*pmac_id = le32_to_cpu(resp->pmac_id);
	}

err:
	spin_unlock_bh(&adapter->mcc_lock);
	return status;
}
 635
/* Uses synchronous MCCQ */
/* Delete the MAC identified by pmac_id (from be_cmd_pmac_add) from
 * interface if_id. */
int be_cmd_pmac_del(struct be_adapter *adapter, u32 if_id, u32 pmac_id)
{
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_pmac_del *req;
	int status;

	spin_lock_bh(&adapter->mcc_lock);

	wrb = wrb_from_mccq(adapter);
	if (!wrb) {
		status = -EBUSY;
		goto err;
	}
	req = embedded_payload(wrb);

	be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0,
			OPCODE_COMMON_NTWK_PMAC_DEL);

	be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
		OPCODE_COMMON_NTWK_PMAC_DEL, sizeof(*req));

	req->if_id = cpu_to_le32(if_id);
	req->pmac_id = cpu_to_le32(pmac_id);

	status = be_mcc_notify_wait(adapter);

err:
	spin_unlock_bh(&adapter->mcc_lock);
	return status;
}
 667
/* Uses Mbox */
/* Create a completion queue bound to event queue 'eq'; the context
 * layout differs between Lancer and BE chips.  On success stores the
 * hw-assigned id in cq->id and marks cq created. */
int be_cmd_cq_create(struct be_adapter *adapter,
		struct be_queue_info *cq, struct be_queue_info *eq,
		bool sol_evts, bool no_delay, int coalesce_wm)
{
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_cq_create *req;
	struct be_dma_mem *q_mem = &cq->dma_mem;
	void *ctxt;
	int status;

	if (mutex_lock_interruptible(&adapter->mbox_lock))
		return -1;

	wrb = wrb_from_mbox(adapter);
	req = embedded_payload(wrb);
	ctxt = &req->context;

	be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0,
			OPCODE_COMMON_CQ_CREATE);

	be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
		OPCODE_COMMON_CQ_CREATE, sizeof(*req));

	req->num_pages =  cpu_to_le16(PAGES_4K_SPANNED(q_mem->va, q_mem->size));
	if (lancer_chip(adapter)) {
		/* Lancer uses a v1 request with its own context layout */
		req->hdr.version = 1;
		req->page_size = 1; /* 1 for 4K */
		AMAP_SET_BITS(struct amap_cq_context_lancer, coalescwm, ctxt,
								coalesce_wm);
		AMAP_SET_BITS(struct amap_cq_context_lancer, nodelay, ctxt,
								no_delay);
		AMAP_SET_BITS(struct amap_cq_context_lancer, count, ctxt,
						__ilog2_u32(cq->len/256));
		AMAP_SET_BITS(struct amap_cq_context_lancer, valid, ctxt, 1);
		AMAP_SET_BITS(struct amap_cq_context_lancer, eventable,
								ctxt, 1);
		AMAP_SET_BITS(struct amap_cq_context_lancer, eqid,
								ctxt, eq->id);
		AMAP_SET_BITS(struct amap_cq_context_lancer, armed, ctxt, 1);
	} else {
		AMAP_SET_BITS(struct amap_cq_context_be, coalescwm, ctxt,
								coalesce_wm);
		AMAP_SET_BITS(struct amap_cq_context_be, nodelay,
								ctxt, no_delay);
		AMAP_SET_BITS(struct amap_cq_context_be, count, ctxt,
						__ilog2_u32(cq->len/256));
		AMAP_SET_BITS(struct amap_cq_context_be, valid, ctxt, 1);
		AMAP_SET_BITS(struct amap_cq_context_be, solevent,
								ctxt, sol_evts);
		AMAP_SET_BITS(struct amap_cq_context_be, eventable, ctxt, 1);
		AMAP_SET_BITS(struct amap_cq_context_be, eqid, ctxt, eq->id);
		AMAP_SET_BITS(struct amap_cq_context_be, armed, ctxt, 1);
	}

	be_dws_cpu_to_le(ctxt, sizeof(req->context));

	be_cmd_page_addrs_prepare(req->pages, ARRAY_SIZE(req->pages), q_mem);

	status = be_mbox_notify_wait(adapter);
	if (!status) {
		struct be_cmd_resp_cq_create *resp = embedded_payload(wrb);
		cq->id = le16_to_cpu(resp->cq_id);
		cq->created = true;
	}

	mutex_unlock(&adapter->mbox_lock);

	return status;
}
 738
 739static u32 be_encoded_q_len(int q_len)
 740{
 741        u32 len_encoded = fls(q_len); /* log2(len) + 1 */
 742        if (len_encoded == 16)
 743                len_encoded = 0;
 744        return len_encoded;
 745}
 746
/* Create the MCC queue bound to completion queue 'cq'; uses mbox.
 * Subscribes to link-state and group-5 async events.  On success
 * stores the hw-assigned id in mccq->id and marks mccq created. */
int be_cmd_mccq_create(struct be_adapter *adapter,
			struct be_queue_info *mccq,
			struct be_queue_info *cq)
{
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_mcc_create *req;
	struct be_dma_mem *q_mem = &mccq->dma_mem;
	void *ctxt;
	int status;

	if (mutex_lock_interruptible(&adapter->mbox_lock))
		return -1;

	wrb = wrb_from_mbox(adapter);
	req = embedded_payload(wrb);
	ctxt = &req->context;

	be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0,
			OPCODE_COMMON_MCC_CREATE_EXT);

	be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
			OPCODE_COMMON_MCC_CREATE_EXT, sizeof(*req));

	req->num_pages = cpu_to_le16(PAGES_4K_SPANNED(q_mem->va, q_mem->size));
	if (lancer_chip(adapter)) {
		/* Lancer uses a v1 request with its own context layout */
		req->hdr.version = 1;
		req->cq_id = cpu_to_le16(cq->id);

		AMAP_SET_BITS(struct amap_mcc_context_lancer, ring_size, ctxt,
						be_encoded_q_len(mccq->len));
		AMAP_SET_BITS(struct amap_mcc_context_lancer, valid, ctxt, 1);
		AMAP_SET_BITS(struct amap_mcc_context_lancer, async_cq_id,
								ctxt, cq->id);
		AMAP_SET_BITS(struct amap_mcc_context_lancer, async_cq_valid,
								 ctxt, 1);

	} else {
		AMAP_SET_BITS(struct amap_mcc_context_be, valid, ctxt, 1);
		AMAP_SET_BITS(struct amap_mcc_context_be, ring_size, ctxt,
						be_encoded_q_len(mccq->len));
		AMAP_SET_BITS(struct amap_mcc_context_be, cq_id, ctxt, cq->id);
	}

	/* Subscribe to Link State and Group 5 Events(bits 1 and 5 set) */
	req->async_event_bitmap[0] = cpu_to_le32(0x00000022);
	be_dws_cpu_to_le(ctxt, sizeof(req->context));

	be_cmd_page_addrs_prepare(req->pages, ARRAY_SIZE(req->pages), q_mem);

	status = be_mbox_notify_wait(adapter);
	if (!status) {
		struct be_cmd_resp_mcc_create *resp = embedded_payload(wrb);
		mccq->id = le16_to_cpu(resp->id);
		mccq->created = true;
	}
	mutex_unlock(&adapter->mbox_lock);

	return status;
}
 806
/* Create an ethernet transmit queue bound to completion queue 'cq';
 * uses mbox.  On success stores the hw-assigned id in txq->id and
 * marks txq created. */
int be_cmd_txq_create(struct be_adapter *adapter,
			struct be_queue_info *txq,
			struct be_queue_info *cq)
{
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_eth_tx_create *req;
	struct be_dma_mem *q_mem = &txq->dma_mem;
	void *ctxt;
	int status;

	if (mutex_lock_interruptible(&adapter->mbox_lock))
		return -1;

	wrb = wrb_from_mbox(adapter);
	req = embedded_payload(wrb);
	ctxt = &req->context;

	be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0,
			OPCODE_ETH_TX_CREATE);

	be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ETH, OPCODE_ETH_TX_CREATE,
		sizeof(*req));

	req->num_pages = PAGES_4K_SPANNED(q_mem->va, q_mem->size);
	req->ulp_num = BE_ULP1_NUM;
	req->type = BE_ETH_TX_RING_TYPE_STANDARD;

	AMAP_SET_BITS(struct amap_tx_context, tx_ring_size, ctxt,
		be_encoded_q_len(txq->len));
	AMAP_SET_BITS(struct amap_tx_context, ctx_valid, ctxt, 1);
	AMAP_SET_BITS(struct amap_tx_context, cq_id_send, ctxt, cq->id);

	be_dws_cpu_to_le(ctxt, sizeof(req->context));

	be_cmd_page_addrs_prepare(req->pages, ARRAY_SIZE(req->pages), q_mem);

	status = be_mbox_notify_wait(adapter);
	if (!status) {
		struct be_cmd_resp_eth_tx_create *resp = embedded_payload(wrb);
		txq->id = le16_to_cpu(resp->cid);
		txq->created = true;
	}

	mutex_unlock(&adapter->mbox_lock);

	return status;
}
 854
/* Create an ethernet RX ring on interface @if_id, completing into @cq_id.
 * Uses the bootstrap mailbox. On success fills in rxq->id, marks the queue
 * created and returns the FW-assigned RSS queue id through @rss_id.
 */
int be_cmd_rxq_create(struct be_adapter *adapter,
		struct be_queue_info *rxq, u16 cq_id, u16 frag_size,
		u16 max_frame_size, u32 if_id, u32 rss, u8 *rss_id)
{
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_eth_rx_create *req;
	struct be_dma_mem *q_mem = &rxq->dma_mem;
	int status;

	if (mutex_lock_interruptible(&adapter->mbox_lock))
		return -1;

	wrb = wrb_from_mbox(adapter);
	req = embedded_payload(wrb);

	be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0,
			OPCODE_ETH_RX_CREATE);

	be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ETH, OPCODE_ETH_RX_CREATE,
		sizeof(*req));

	req->cq_id = cpu_to_le16(cq_id);
	/* FW wants log2 of the fragment size */
	req->frag_size = fls(frag_size) - 1;
	/* an RX ring is always described by exactly 2 pages here */
	req->num_pages = 2;
	be_cmd_page_addrs_prepare(req->pages, ARRAY_SIZE(req->pages), q_mem);
	req->interface_id = cpu_to_le32(if_id);
	req->max_frame_size = cpu_to_le16(max_frame_size);
	req->rss_queue = cpu_to_le32(rss);

	status = be_mbox_notify_wait(adapter);
	if (!status) {
		struct be_cmd_resp_eth_rx_create *resp = embedded_payload(wrb);
		rxq->id = le16_to_cpu(resp->id);
		rxq->created = true;
		*rss_id = resp->rss_id;
	}

	mutex_unlock(&adapter->mbox_lock);

	return status;
}
 897
 898/* Generic destroyer function for all types of queues
 899 * Uses Mbox
 900 */
 901int be_cmd_q_destroy(struct be_adapter *adapter, struct be_queue_info *q,
 902                int queue_type)
 903{
 904        struct be_mcc_wrb *wrb;
 905        struct be_cmd_req_q_destroy *req;
 906        u8 subsys = 0, opcode = 0;
 907        int status;
 908
 909        if (adapter->eeh_err)
 910                return -EIO;
 911
 912        if (mutex_lock_interruptible(&adapter->mbox_lock))
 913                return -1;
 914
 915        wrb = wrb_from_mbox(adapter);
 916        req = embedded_payload(wrb);
 917
 918        switch (queue_type) {
 919        case QTYPE_EQ:
 920                subsys = CMD_SUBSYSTEM_COMMON;
 921                opcode = OPCODE_COMMON_EQ_DESTROY;
 922                break;
 923        case QTYPE_CQ:
 924                subsys = CMD_SUBSYSTEM_COMMON;
 925                opcode = OPCODE_COMMON_CQ_DESTROY;
 926                break;
 927        case QTYPE_TXQ:
 928                subsys = CMD_SUBSYSTEM_ETH;
 929                opcode = OPCODE_ETH_TX_DESTROY;
 930                break;
 931        case QTYPE_RXQ:
 932                subsys = CMD_SUBSYSTEM_ETH;
 933                opcode = OPCODE_ETH_RX_DESTROY;
 934                break;
 935        case QTYPE_MCCQ:
 936                subsys = CMD_SUBSYSTEM_COMMON;
 937                opcode = OPCODE_COMMON_MCC_DESTROY;
 938                break;
 939        default:
 940                BUG();
 941        }
 942
 943        be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0, opcode);
 944
 945        be_cmd_hdr_prepare(&req->hdr, subsys, opcode, sizeof(*req));
 946        req->id = cpu_to_le16(q->id);
 947
 948        status = be_mbox_notify_wait(adapter);
 949
 950        mutex_unlock(&adapter->mbox_lock);
 951
 952        return status;
 953}
 954
/* Create an RX filtering policy configuration (interface) on the adapter.
 * Uses the bootstrap mailbox.
 * @pmac_invalid: when false, @mac is programmed as the primary MAC and the
 * resulting pmac handle is returned through @pmac_id.
 * On success the interface handle is returned through @if_handle.
 */
int be_cmd_if_create(struct be_adapter *adapter, u32 cap_flags, u32 en_flags,
		u8 *mac, bool pmac_invalid, u32 *if_handle, u32 *pmac_id,
		u32 domain)
{
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_if_create *req;
	int status;

	if (mutex_lock_interruptible(&adapter->mbox_lock))
		return -1;

	wrb = wrb_from_mbox(adapter);
	req = embedded_payload(wrb);

	be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0,
			OPCODE_COMMON_NTWK_INTERFACE_CREATE);

	be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
		OPCODE_COMMON_NTWK_INTERFACE_CREATE, sizeof(*req));

	req->hdr.domain = domain;
	req->capability_flags = cpu_to_le32(cap_flags);
	req->enable_flags = cpu_to_le32(en_flags);
	req->pmac_invalid = pmac_invalid;
	if (!pmac_invalid)
		memcpy(req->mac_addr, mac, ETH_ALEN);

	status = be_mbox_notify_wait(adapter);
	if (!status) {
		struct be_cmd_resp_if_create *resp = embedded_payload(wrb);
		*if_handle = le32_to_cpu(resp->interface_id);
		/* pmac_id is only valid when a MAC was programmed above */
		if (!pmac_invalid)
			*pmac_id = le32_to_cpu(resp->pmac_id);
	}

	mutex_unlock(&adapter->mbox_lock);
	return status;
}
 996
 997/* Uses mbox */
 998int be_cmd_if_destroy(struct be_adapter *adapter, u32 interface_id)
 999{
1000        struct be_mcc_wrb *wrb;
1001        struct be_cmd_req_if_destroy *req;
1002        int status;
1003
1004        if (adapter->eeh_err)
1005                return -EIO;
1006
1007        if (mutex_lock_interruptible(&adapter->mbox_lock))
1008                return -1;
1009
1010        wrb = wrb_from_mbox(adapter);
1011        req = embedded_payload(wrb);
1012
1013        be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0,
1014                        OPCODE_COMMON_NTWK_INTERFACE_DESTROY);
1015
1016        be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
1017                OPCODE_COMMON_NTWK_INTERFACE_DESTROY, sizeof(*req));
1018
1019        req->interface_id = cpu_to_le32(interface_id);
1020
1021        status = be_mbox_notify_wait(adapter);
1022
1023        mutex_unlock(&adapter->mbox_lock);
1024
1025        return status;
1026}
1027
/* Get stats is a non embedded command: the request is not embedded inside
 * WRB but is a separate dma memory block.
 * Uses asynchronous MCC: this only *submits* the request; the response is
 * processed later in the MCC completion handler. A zero return means the
 * command was queued, not that the stats have arrived.
 */
int be_cmd_get_stats(struct be_adapter *adapter, struct be_dma_mem *nonemb_cmd)
{
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_get_stats *req;
	struct be_sge *sge;
	int status = 0;

	spin_lock_bh(&adapter->mcc_lock);

	wrb = wrb_from_mccq(adapter);
	if (!wrb) {
		status = -EBUSY;
		goto err;
	}
	req = nonemb_cmd->va;
	sge = nonembedded_sgl(wrb);

	be_wrb_hdr_prepare(wrb, sizeof(*req), false, 1,
			OPCODE_ETH_GET_STATISTICS);

	be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ETH,
		OPCODE_ETH_GET_STATISTICS, sizeof(*req));
	/* point the WRB's scatter-gather entry at the external DMA buffer */
	sge->pa_hi = cpu_to_le32(upper_32_bits(nonemb_cmd->dma));
	sge->pa_lo = cpu_to_le32(nonemb_cmd->dma & 0xFFFFFFFF);
	sge->len = cpu_to_le32(nonemb_cmd->size);

	be_mcc_notify(adapter);
	/* flag that a stats request is in flight */
	adapter->stats_ioctl_sent = true;

err:
	spin_unlock_bh(&adapter->mcc_lock);
	return status;
}
1065
/* Query the physical link state. Uses synchronous MCC.
 * *link_up is set true only when FW reports a non-zero mac speed, in which
 * case *mac_speed and *link_speed are also filled in; otherwise those two
 * outputs are left untouched.
 */
int be_cmd_link_status_query(struct be_adapter *adapter,
			bool *link_up, u8 *mac_speed, u16 *link_speed)
{
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_link_status *req;
	int status;

	spin_lock_bh(&adapter->mcc_lock);

	wrb = wrb_from_mccq(adapter);
	if (!wrb) {
		status = -EBUSY;
		goto err;
	}
	req = embedded_payload(wrb);

	/* default to link-down; overwritten below on success */
	*link_up = false;

	be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0,
			OPCODE_COMMON_NTWK_LINK_STATUS_QUERY);

	be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
		OPCODE_COMMON_NTWK_LINK_STATUS_QUERY, sizeof(*req));

	status = be_mcc_notify_wait(adapter);
	if (!status) {
		struct be_cmd_resp_link_status *resp = embedded_payload(wrb);
		if (resp->mac_speed != PHY_LINK_SPEED_ZERO) {
			*link_up = true;
			*link_speed = le16_to_cpu(resp->link_speed);
			*mac_speed = resp->mac_speed;
		}
	}

err:
	spin_unlock_bh(&adapter->mcc_lock);
	return status;
}
1105
1106/* Uses Mbox */
1107int be_cmd_get_fw_ver(struct be_adapter *adapter, char *fw_ver)
1108{
1109        struct be_mcc_wrb *wrb;
1110        struct be_cmd_req_get_fw_version *req;
1111        int status;
1112
1113        if (mutex_lock_interruptible(&adapter->mbox_lock))
1114                return -1;
1115
1116        wrb = wrb_from_mbox(adapter);
1117        req = embedded_payload(wrb);
1118
1119        be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0,
1120                        OPCODE_COMMON_GET_FW_VERSION);
1121
1122        be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
1123                OPCODE_COMMON_GET_FW_VERSION, sizeof(*req));
1124
1125        status = be_mbox_notify_wait(adapter);
1126        if (!status) {
1127                struct be_cmd_resp_get_fw_version *resp = embedded_payload(wrb);
1128                strncpy(fw_ver, resp->firmware_version_string, FW_VER_LEN);
1129        }
1130
1131        mutex_unlock(&adapter->mbox_lock);
1132        return status;
1133}
1134
/* set the EQ delay interval of an EQ to specified value
 * Uses async mcc: the request is posted and not waited on, so a zero
 * return only means it was queued successfully.
 */
int be_cmd_modify_eqd(struct be_adapter *adapter, u32 eq_id, u32 eqd)
{
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_modify_eq_delay *req;
	int status = 0;

	spin_lock_bh(&adapter->mcc_lock);

	wrb = wrb_from_mccq(adapter);
	if (!wrb) {
		status = -EBUSY;
		goto err;
	}
	req = embedded_payload(wrb);

	be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0,
			OPCODE_COMMON_MODIFY_EQ_DELAY);

	be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
		OPCODE_COMMON_MODIFY_EQ_DELAY, sizeof(*req));

	/* the command can batch several EQs; only one is updated here */
	req->num_eq = cpu_to_le32(1);
	req->delay[0].eq_id = cpu_to_le32(eq_id);
	req->delay[0].phase = 0;
	req->delay[0].delay_multiplier = cpu_to_le32(eqd);

	be_mcc_notify(adapter);

err:
	spin_unlock_bh(&adapter->mcc_lock);
	return status;
}
1170
/* Configure the VLAN filter table of interface @if_id.
 * Uses synchronous MCC. When @promiscuous is set the vtag array is ignored
 * (all VLANs accepted); otherwise @num entries of @vtag_array are programmed.
 */
int be_cmd_vlan_config(struct be_adapter *adapter, u32 if_id, u16 *vtag_array,
			u32 num, bool untagged, bool promiscuous)
{
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_vlan_config *req;
	int status;

	spin_lock_bh(&adapter->mcc_lock);

	wrb = wrb_from_mccq(adapter);
	if (!wrb) {
		status = -EBUSY;
		goto err;
	}
	req = embedded_payload(wrb);

	be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0,
			OPCODE_COMMON_NTWK_VLAN_CONFIG);

	be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
		OPCODE_COMMON_NTWK_VLAN_CONFIG, sizeof(*req));

	/* NOTE(review): interface_id/num_vlan and the vtag entries are
	 * copied without cpu_to_le conversion, unlike other commands in
	 * this file — presumably the fields are byte-sized or the caller
	 * supplies LE values; TODO confirm against the request layout.
	 */
	req->interface_id = if_id;
	req->promiscuous = promiscuous;
	req->untagged = untagged;
	req->num_vlan = num;
	if (!promiscuous) {
		memcpy(req->normal_vlan, vtag_array,
			req->num_vlan * sizeof(vtag_array[0]));
	}

	status = be_mcc_notify_wait(adapter);

err:
	spin_unlock_bh(&adapter->mcc_lock);
	return status;
}
1209
1210/* Uses MCC for this command as it may be called in BH context
1211 * Uses synchronous mcc
1212 */
1213int be_cmd_promiscuous_config(struct be_adapter *adapter, u8 port_num, bool en)
1214{
1215        struct be_mcc_wrb *wrb;
1216        struct be_cmd_req_promiscuous_config *req;
1217        int status;
1218
1219        spin_lock_bh(&adapter->mcc_lock);
1220
1221        wrb = wrb_from_mccq(adapter);
1222        if (!wrb) {
1223                status = -EBUSY;
1224                goto err;
1225        }
1226        req = embedded_payload(wrb);
1227
1228        be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0, OPCODE_ETH_PROMISCUOUS);
1229
1230        be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ETH,
1231                OPCODE_ETH_PROMISCUOUS, sizeof(*req));
1232
1233        /* In FW versions X.102.149/X.101.487 and later,
1234         * the port setting associated only with the
1235         * issuing pci function will take effect
1236         */
1237        if (port_num)
1238                req->port1_promiscuous = en;
1239        else
1240                req->port0_promiscuous = en;
1241
1242        status = be_mcc_notify_wait(adapter);
1243
1244err:
1245        spin_unlock_bh(&adapter->mcc_lock);
1246        return status;
1247}
1248
/*
 * Program the multicast MAC filter of interface @if_id from @netdev's
 * multicast list. Uses MCC for this command as it may be called in BH
 * context. (netdev == NULL) => multicast promiscuous.
 * @mem supplies the non-embedded DMA buffer holding the request.
 */
int be_cmd_multicast_set(struct be_adapter *adapter, u32 if_id,
		struct net_device *netdev, struct be_dma_mem *mem)
{
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_mcast_mac_config *req = mem->va;
	struct be_sge *sge;
	int status;

	spin_lock_bh(&adapter->mcc_lock);

	wrb = wrb_from_mccq(adapter);
	if (!wrb) {
		status = -EBUSY;
		goto err;
	}
	sge = nonembedded_sgl(wrb);
	/* the DMA buffer is reused across calls; clear stale contents */
	memset(req, 0, sizeof(*req));

	be_wrb_hdr_prepare(wrb, sizeof(*req), false, 1,
			OPCODE_COMMON_NTWK_MULTICAST_SET);
	sge->pa_hi = cpu_to_le32(upper_32_bits(mem->dma));
	sge->pa_lo = cpu_to_le32(mem->dma & 0xFFFFFFFF);
	sge->len = cpu_to_le32(mem->size);

	be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
		OPCODE_COMMON_NTWK_MULTICAST_SET, sizeof(*req));

	req->interface_id = if_id;
	if (netdev) {
		int i;
		struct netdev_hw_addr *ha;

		/* NOTE(review): num_mac is not bounded here against the
		 * capacity of req->mac[]; presumably the caller limits the
		 * mc list size before calling — TODO confirm.
		 */
		req->num_mac = cpu_to_le16(netdev_mc_count(netdev));

		i = 0;
		netdev_for_each_mc_addr(ha, netdev)
			memcpy(req->mac[i++].byte, ha->addr, ETH_ALEN);
	} else {
		req->promiscuous = 1;
	}

	status = be_mcc_notify_wait(adapter);

err:
	spin_unlock_bh(&adapter->mcc_lock);
	return status;
}
1300
1301/* Uses synchrounous mcc */
1302int be_cmd_set_flow_control(struct be_adapter *adapter, u32 tx_fc, u32 rx_fc)
1303{
1304        struct be_mcc_wrb *wrb;
1305        struct be_cmd_req_set_flow_control *req;
1306        int status;
1307
1308        spin_lock_bh(&adapter->mcc_lock);
1309
1310        wrb = wrb_from_mccq(adapter);
1311        if (!wrb) {
1312                status = -EBUSY;
1313                goto err;
1314        }
1315        req = embedded_payload(wrb);
1316
1317        be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0,
1318                        OPCODE_COMMON_SET_FLOW_CONTROL);
1319
1320        be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
1321                OPCODE_COMMON_SET_FLOW_CONTROL, sizeof(*req));
1322
1323        req->tx_flow_control = cpu_to_le16((u16)tx_fc);
1324        req->rx_flow_control = cpu_to_le16((u16)rx_fc);
1325
1326        status = be_mcc_notify_wait(adapter);
1327
1328err:
1329        spin_unlock_bh(&adapter->mcc_lock);
1330        return status;
1331}
1332
/* Read the current TX/RX pause-frame (flow control) settings into
 * @tx_fc/@rx_fc. Uses synchronous MCC; the outputs are only written on
 * success.
 */
int be_cmd_get_flow_control(struct be_adapter *adapter, u32 *tx_fc, u32 *rx_fc)
{
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_get_flow_control *req;
	int status;

	spin_lock_bh(&adapter->mcc_lock);

	wrb = wrb_from_mccq(adapter);
	if (!wrb) {
		status = -EBUSY;
		goto err;
	}
	req = embedded_payload(wrb);

	be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0,
			OPCODE_COMMON_GET_FLOW_CONTROL);

	be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
		OPCODE_COMMON_GET_FLOW_CONTROL, sizeof(*req));

	status = be_mcc_notify_wait(adapter);
	if (!status) {
		struct be_cmd_resp_get_flow_control *resp =
						embedded_payload(wrb);
		*tx_fc = le16_to_cpu(resp->tx_flow_control);
		*rx_fc = le16_to_cpu(resp->rx_flow_control);
	}

err:
	spin_unlock_bh(&adapter->mcc_lock);
	return status;
}
1367
/* Query firmware configuration: physical port number, function mode and
 * function capabilities. Uses the bootstrap mailbox; outputs are only
 * written on success.
 */
int be_cmd_query_fw_cfg(struct be_adapter *adapter, u32 *port_num,
		u32 *mode, u32 *caps)
{
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_query_fw_cfg *req;
	int status;

	if (mutex_lock_interruptible(&adapter->mbox_lock))
		return -1;

	wrb = wrb_from_mbox(adapter);
	req = embedded_payload(wrb);

	be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0,
			OPCODE_COMMON_QUERY_FIRMWARE_CONFIG);

	be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
		OPCODE_COMMON_QUERY_FIRMWARE_CONFIG, sizeof(*req));

	status = be_mbox_notify_wait(adapter);
	if (!status) {
		struct be_cmd_resp_query_fw_cfg *resp = embedded_payload(wrb);
		*port_num = le32_to_cpu(resp->phys_port);
		*mode = le32_to_cpu(resp->function_mode);
		*caps = le32_to_cpu(resp->function_caps);
	}

	mutex_unlock(&adapter->mbox_lock);
	return status;
}
1399
1400/* Uses mbox */
1401int be_cmd_reset_function(struct be_adapter *adapter)
1402{
1403        struct be_mcc_wrb *wrb;
1404        struct be_cmd_req_hdr *req;
1405        int status;
1406
1407        if (mutex_lock_interruptible(&adapter->mbox_lock))
1408                return -1;
1409
1410        wrb = wrb_from_mbox(adapter);
1411        req = embedded_payload(wrb);
1412
1413        be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0,
1414                        OPCODE_COMMON_FUNCTION_RESET);
1415
1416        be_cmd_hdr_prepare(req, CMD_SUBSYSTEM_COMMON,
1417                OPCODE_COMMON_FUNCTION_RESET, sizeof(*req));
1418
1419        status = be_mbox_notify_wait(adapter);
1420
1421        mutex_unlock(&adapter->mbox_lock);
1422        return status;
1423}
1424
1425int be_cmd_rss_config(struct be_adapter *adapter, u8 *rsstable, u16 table_size)
1426{
1427        struct be_mcc_wrb *wrb;
1428        struct be_cmd_req_rss_config *req;
1429        u32 myhash[10];
1430        int status;
1431
1432        if (mutex_lock_interruptible(&adapter->mbox_lock))
1433                return -1;
1434
1435        wrb = wrb_from_mbox(adapter);
1436        req = embedded_payload(wrb);
1437
1438        be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0,
1439                OPCODE_ETH_RSS_CONFIG);
1440
1441        be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ETH,
1442                OPCODE_ETH_RSS_CONFIG, sizeof(*req));
1443
1444        req->if_id = cpu_to_le32(adapter->if_handle);
1445        req->enable_rss = cpu_to_le16(RSS_ENABLE_TCP_IPV4 | RSS_ENABLE_IPV4);
1446        req->cpu_table_size_log2 = cpu_to_le16(fls(table_size) - 1);
1447        memcpy(req->cpu_table, rsstable, table_size);
1448        memcpy(req->hash, myhash, sizeof(myhash));
1449        be_dws_cpu_to_le(req->hash, sizeof(req->hash));
1450
1451        status = be_mbox_notify_wait(adapter);
1452
1453        mutex_unlock(&adapter->mbox_lock);
1454        return status;
1455}
1456
1457/* Uses sync mcc */
1458int be_cmd_set_beacon_state(struct be_adapter *adapter, u8 port_num,
1459                        u8 bcn, u8 sts, u8 state)
1460{
1461        struct be_mcc_wrb *wrb;
1462        struct be_cmd_req_enable_disable_beacon *req;
1463        int status;
1464
1465        spin_lock_bh(&adapter->mcc_lock);
1466
1467        wrb = wrb_from_mccq(adapter);
1468        if (!wrb) {
1469                status = -EBUSY;
1470                goto err;
1471        }
1472        req = embedded_payload(wrb);
1473
1474        be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0,
1475                        OPCODE_COMMON_ENABLE_DISABLE_BEACON);
1476
1477        be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
1478                OPCODE_COMMON_ENABLE_DISABLE_BEACON, sizeof(*req));
1479
1480        req->port_num = port_num;
1481        req->beacon_state = state;
1482        req->beacon_duration = bcn;
1483        req->status_duration = sts;
1484
1485        status = be_mcc_notify_wait(adapter);
1486
1487err:
1488        spin_unlock_bh(&adapter->mcc_lock);
1489        return status;
1490}
1491
/* Read the current beacon LED state of @port_num into @state.
 * Uses synchronous MCC; *state is only written on success.
 */
int be_cmd_get_beacon_state(struct be_adapter *adapter, u8 port_num, u32 *state)
{
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_get_beacon_state *req;
	int status;

	spin_lock_bh(&adapter->mcc_lock);

	wrb = wrb_from_mccq(adapter);
	if (!wrb) {
		status = -EBUSY;
		goto err;
	}
	req = embedded_payload(wrb);

	be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0,
			OPCODE_COMMON_GET_BEACON_STATE);

	be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
		OPCODE_COMMON_GET_BEACON_STATE, sizeof(*req));

	req->port_num = port_num;

	status = be_mcc_notify_wait(adapter);
	if (!status) {
		struct be_cmd_resp_get_beacon_state *resp =
						embedded_payload(wrb);
		*state = resp->beacon_state;
	}

err:
	spin_unlock_bh(&adapter->mcc_lock);
	return status;
}
1527
/* Write a buffer to the adapter's flash ROM.
 * The request lives in the non-embedded DMA buffer @cmd. The command is
 * posted asynchronously on the MCC ring; its completion handler (matched
 * via wrb->tag1) fills adapter->flash_status and signals flash_compl.
 * The mcc_lock is released before waiting since the wait may take seconds.
 * Returns -1 on a 12s timeout, otherwise the FW status of the write.
 */
int be_cmd_write_flashrom(struct be_adapter *adapter, struct be_dma_mem *cmd,
			u32 flash_type, u32 flash_opcode, u32 buf_size)
{
	struct be_mcc_wrb *wrb;
	struct be_cmd_write_flashrom *req;
	struct be_sge *sge;
	int status;

	spin_lock_bh(&adapter->mcc_lock);
	adapter->flash_status = 0;

	wrb = wrb_from_mccq(adapter);
	if (!wrb) {
		status = -EBUSY;
		goto err_unlock;
	}
	req = cmd->va;
	sge = nonembedded_sgl(wrb);

	be_wrb_hdr_prepare(wrb, cmd->size, false, 1,
			OPCODE_COMMON_WRITE_FLASHROM);
	/* tag1 lets the MCC completion handler recognize this command */
	wrb->tag1 = CMD_SUBSYSTEM_COMMON;

	be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
		OPCODE_COMMON_WRITE_FLASHROM, cmd->size);
	sge->pa_hi = cpu_to_le32(upper_32_bits(cmd->dma));
	sge->pa_lo = cpu_to_le32(cmd->dma & 0xFFFFFFFF);
	sge->len = cpu_to_le32(cmd->size);

	req->params.op_type = cpu_to_le32(flash_type);
	req->params.op_code = cpu_to_le32(flash_opcode);
	req->params.data_buf_size = cpu_to_le32(buf_size);

	be_mcc_notify(adapter);
	spin_unlock_bh(&adapter->mcc_lock);

	if (!wait_for_completion_timeout(&adapter->flash_compl,
			msecs_to_jiffies(12000)))
		status = -1;
	else
		status = adapter->flash_status;

	return status;

err_unlock:
	spin_unlock_bh(&adapter->mcc_lock);
	return status;
}
1576
/* Read the 4-byte CRC stored at @offset in the redboot flash region into
 * @flashed_crc. Uses synchronous MCC.
 */
int be_cmd_get_flash_crc(struct be_adapter *adapter, u8 *flashed_crc,
			 int offset)
{
	struct be_mcc_wrb *wrb;
	struct be_cmd_write_flashrom *req;
	int status;

	spin_lock_bh(&adapter->mcc_lock);

	wrb = wrb_from_mccq(adapter);
	if (!wrb) {
		status = -EBUSY;
		goto err;
	}
	req = embedded_payload(wrb);

	/* the +4 reserves room for the 4 CRC bytes FW returns in data_buf */
	be_wrb_hdr_prepare(wrb, sizeof(*req)+4, true, 0,
			OPCODE_COMMON_READ_FLASHROM);

	be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
		OPCODE_COMMON_READ_FLASHROM, sizeof(*req)+4);

	req->params.op_type = cpu_to_le32(IMG_TYPE_REDBOOT);
	req->params.op_code = cpu_to_le32(FLASHROM_OPER_REPORT);
	req->params.offset = cpu_to_le32(offset);
	req->params.data_buf_size = cpu_to_le32(0x4);

	status = be_mcc_notify_wait(adapter);
	if (!status)
		memcpy(flashed_crc, req->params.data_buf, 4);

err:
	spin_unlock_bh(&adapter->mcc_lock);
	return status;
}
1612
/* Enable ACPI wake-on-LAN for magic packets addressed to @mac.
 * The request lives in the non-embedded DMA buffer @nonemb_cmd.
 * Uses synchronous MCC.
 */
int be_cmd_enable_magic_wol(struct be_adapter *adapter, u8 *mac,
				struct be_dma_mem *nonemb_cmd)
{
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_acpi_wol_magic_config *req;
	struct be_sge *sge;
	int status;

	spin_lock_bh(&adapter->mcc_lock);

	wrb = wrb_from_mccq(adapter);
	if (!wrb) {
		status = -EBUSY;
		goto err;
	}
	req = nonemb_cmd->va;
	sge = nonembedded_sgl(wrb);

	be_wrb_hdr_prepare(wrb, sizeof(*req), false, 1,
			OPCODE_ETH_ACPI_WOL_MAGIC_CONFIG);

	be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ETH,
		OPCODE_ETH_ACPI_WOL_MAGIC_CONFIG, sizeof(*req));
	memcpy(req->magic_mac, mac, ETH_ALEN);

	sge->pa_hi = cpu_to_le32(upper_32_bits(nonemb_cmd->dma));
	sge->pa_lo = cpu_to_le32(nonemb_cmd->dma & 0xFFFFFFFF);
	sge->len = cpu_to_le32(nonemb_cmd->size);

	status = be_mcc_notify_wait(adapter);

err:
	spin_unlock_bh(&adapter->mcc_lock);
	return status;
}
1648
1649int be_cmd_set_loopback(struct be_adapter *adapter, u8 port_num,
1650                        u8 loopback_type, u8 enable)
1651{
1652        struct be_mcc_wrb *wrb;
1653        struct be_cmd_req_set_lmode *req;
1654        int status;
1655
1656        spin_lock_bh(&adapter->mcc_lock);
1657
1658        wrb = wrb_from_mccq(adapter);
1659        if (!wrb) {
1660                status = -EBUSY;
1661                goto err;
1662        }
1663
1664        req = embedded_payload(wrb);
1665
1666        be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0,
1667                                OPCODE_LOWLEVEL_SET_LOOPBACK_MODE);
1668
1669        be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_LOWLEVEL,
1670                        OPCODE_LOWLEVEL_SET_LOOPBACK_MODE,
1671                        sizeof(*req));
1672
1673        req->src_port = port_num;
1674        req->dest_port = port_num;
1675        req->loopback_type = loopback_type;
1676        req->loopback_state = enable;
1677
1678        status = be_mcc_notify_wait(adapter);
1679err:
1680        spin_unlock_bh(&adapter->mcc_lock);
1681        return status;
1682}
1683
/* Run a FW-driven loopback test: send @num_pkts packets of @pkt_size bytes
 * carrying @pattern from @port_num back to itself. Uses synchronous MCC.
 * Returns the FW-reported test status (0 = pass) or a submission error.
 */
int be_cmd_loopback_test(struct be_adapter *adapter, u32 port_num,
		u32 loopback_type, u32 pkt_size, u32 num_pkts, u64 pattern)
{
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_loopback_test *req;
	int status;

	spin_lock_bh(&adapter->mcc_lock);

	wrb = wrb_from_mccq(adapter);
	if (!wrb) {
		status = -EBUSY;
		goto err;
	}

	req = embedded_payload(wrb);

	be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0,
				OPCODE_LOWLEVEL_LOOPBACK_TEST);

	be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_LOWLEVEL,
			OPCODE_LOWLEVEL_LOOPBACK_TEST, sizeof(*req));
	/* the test can take a while; give FW a longer command timeout */
	req->hdr.timeout = cpu_to_le32(4);

	req->pattern = cpu_to_le64(pattern);
	req->src_port = cpu_to_le32(port_num);
	req->dest_port = cpu_to_le32(port_num);
	req->pkt_size = cpu_to_le32(pkt_size);
	req->num_pkts = cpu_to_le32(num_pkts);
	req->loopback_type = cpu_to_le32(loopback_type);

	status = be_mcc_notify_wait(adapter);
	if (!status) {
		struct be_cmd_resp_loopback_test *resp = embedded_payload(wrb);
		/* report the test result, not just command delivery */
		status = le32_to_cpu(resp->status);
	}

err:
	spin_unlock_bh(&adapter->mcc_lock);
	return status;
}
1725
1726int be_cmd_ddr_dma_test(struct be_adapter *adapter, u64 pattern,
1727                                u32 byte_cnt, struct be_dma_mem *cmd)
1728{
1729        struct be_mcc_wrb *wrb;
1730        struct be_cmd_req_ddrdma_test *req;
1731        struct be_sge *sge;
1732        int status;
1733        int i, j = 0;
1734
1735        spin_lock_bh(&adapter->mcc_lock);
1736
1737        wrb = wrb_from_mccq(adapter);
1738        if (!wrb) {
1739                status = -EBUSY;
1740                goto err;
1741        }
1742        req = cmd->va;
1743        sge = nonembedded_sgl(wrb);
1744        be_wrb_hdr_prepare(wrb, cmd->size, false, 1,
1745                                OPCODE_LOWLEVEL_HOST_DDR_DMA);
1746        be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_LOWLEVEL,
1747                        OPCODE_LOWLEVEL_HOST_DDR_DMA, cmd->size);
1748
1749        sge->pa_hi = cpu_to_le32(upper_32_bits(cmd->dma));
1750        sge->pa_lo = cpu_to_le32(cmd->dma & 0xFFFFFFFF);
1751        sge->len = cpu_to_le32(cmd->size);
1752
1753        req->pattern = cpu_to_le64(pattern);
1754        req->byte_count = cpu_to_le32(byte_cnt);
1755        for (i = 0; i < byte_cnt; i++) {
1756                req->snd_buff[i] = (u8)(pattern >> (j*8));
1757                j++;
1758                if (j > 7)
1759                        j = 0;
1760        }
1761
1762        status = be_mcc_notify_wait(adapter);
1763
1764        if (!status) {
1765                struct be_cmd_resp_ddrdma_test *resp;
1766                resp = cmd->va;
1767                if ((memcmp(resp->rcv_buff, req->snd_buff, byte_cnt) != 0) ||
1768                                resp->snd_err) {
1769                        status = -1;
1770                }
1771        }
1772
1773err:
1774        spin_unlock_bh(&adapter->mcc_lock);
1775        return status;
1776}
1777
/* Read the adapter's SEEPROM contents into the non-embedded DMA buffer
 * @nonemb_cmd (request and response share the buffer). Uses synchronous MCC.
 */
int be_cmd_get_seeprom_data(struct be_adapter *adapter,
				struct be_dma_mem *nonemb_cmd)
{
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_seeprom_read *req;
	struct be_sge *sge;
	int status;

	spin_lock_bh(&adapter->mcc_lock);

	wrb = wrb_from_mccq(adapter);
	if (!wrb) {
		status = -EBUSY;
		goto err;
	}
	req = nonemb_cmd->va;
	sge = nonembedded_sgl(wrb);

	be_wrb_hdr_prepare(wrb, sizeof(*req), false, 1,
			OPCODE_COMMON_SEEPROM_READ);

	be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
			OPCODE_COMMON_SEEPROM_READ, sizeof(*req));

	sge->pa_hi = cpu_to_le32(upper_32_bits(nonemb_cmd->dma));
	sge->pa_lo = cpu_to_le32(nonemb_cmd->dma & 0xFFFFFFFF);
	sge->len = cpu_to_le32(nonemb_cmd->size);

	status = be_mcc_notify_wait(adapter);

err:
	spin_unlock_bh(&adapter->mcc_lock);
	return status;
}
1812
1813int be_cmd_get_phy_info(struct be_adapter *adapter, struct be_dma_mem *cmd)
1814{
1815        struct be_mcc_wrb *wrb;
1816        struct be_cmd_req_get_phy_info *req;
1817        struct be_sge *sge;
1818        int status;
1819
1820        spin_lock_bh(&adapter->mcc_lock);
1821
1822        wrb = wrb_from_mccq(adapter);
1823        if (!wrb) {
1824                status = -EBUSY;
1825                goto err;
1826        }
1827
1828        req = cmd->va;
1829        sge = nonembedded_sgl(wrb);
1830
1831        be_wrb_hdr_prepare(wrb, sizeof(*req), false, 1,
1832                                OPCODE_COMMON_GET_PHY_DETAILS);
1833
1834        be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
1835                        OPCODE_COMMON_GET_PHY_DETAILS,
1836                        sizeof(*req));
1837
1838        sge->pa_hi = cpu_to_le32(upper_32_bits(cmd->dma));
1839        sge->pa_lo = cpu_to_le32(cmd->dma & 0xFFFFFFFF);
1840        sge->len = cpu_to_le32(cmd->size);
1841
1842        status = be_mcc_notify_wait(adapter);
1843err:
1844        spin_unlock_bh(&adapter->mcc_lock);
1845        return status;
1846}
1847
1848int be_cmd_set_qos(struct be_adapter *adapter, u32 bps, u32 domain)
1849{
1850        struct be_mcc_wrb *wrb;
1851        struct be_cmd_req_set_qos *req;
1852        int status;
1853
1854        spin_lock_bh(&adapter->mcc_lock);
1855
1856        wrb = wrb_from_mccq(adapter);
1857        if (!wrb) {
1858                status = -EBUSY;
1859                goto err;
1860        }
1861
1862        req = embedded_payload(wrb);
1863
1864        be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0,
1865                                OPCODE_COMMON_SET_QOS);
1866
1867        be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
1868                        OPCODE_COMMON_SET_QOS, sizeof(*req));
1869
1870        req->hdr.domain = domain;
1871        req->valid_bits = BE_QOS_BITS_NIC;
1872        req->max_bps_nic = bps;
1873
1874        status = be_mcc_notify_wait(adapter);
1875
1876err:
1877        spin_unlock_bh(&adapter->mcc_lock);
1878        return status;
1879}
1880