/* linux/drivers/net/benet/be_cmds.c */
   1/*
   2 * Copyright (C) 2005 - 2011 Emulex
   3 * All rights reserved.
   4 *
   5 * This program is free software; you can redistribute it and/or
   6 * modify it under the terms of the GNU General Public License version 2
   7 * as published by the Free Software Foundation.  The full GNU General
   8 * Public License is included in this distribution in the file called COPYING.
   9 *
  10 * Contact Information:
  11 * linux-drivers@emulex.com
  12 *
  13 * Emulex
  14 * 3333 Susan Street
  15 * Costa Mesa, CA 92626
  16 */
  17
  18#include "be.h"
  19#include "be_cmds.h"
  20
/* Interval (in polling passes) between firmware temperature queries.
 * Must be a power of 2 or else MODULO will BUG_ON */
static int be_get_temp_freq = 32;
  23
/* Ring the MCCQ doorbell to tell the firmware that one new WRB has been
 * posted.  Refuses (with an info message) once an EEH error has been
 * detected on the PCI device, since the hardware can no longer be trusted.
 */
static void be_mcc_notify(struct be_adapter *adapter)
{
        struct be_queue_info *mccq = &adapter->mcc_obj.q;
        u32 val = 0;

        if (adapter->eeh_err) {
                dev_info(&adapter->pdev->dev,
                        "Error in Card Detected! Cannot issue commands\n");
                return;
        }

        val |= mccq->id & DB_MCCQ_RING_ID_MASK;
        val |= 1 << DB_MCCQ_NUM_POSTED_SHIFT;

        /* Make sure the WRB memory writes are visible before the doorbell */
        wmb();
        iowrite32(val, adapter->db + DB_MCCQ_OFFSET);
}
  41
  42/* To check if valid bit is set, check the entire word as we don't know
  43 * the endianness of the data (old entry is host endian while a new entry is
  44 * little endian) */
  45static inline bool be_mcc_compl_is_new(struct be_mcc_compl *compl)
  46{
  47        if (compl->flags != 0) {
  48                compl->flags = le32_to_cpu(compl->flags);
  49                BUG_ON((compl->flags & CQE_FLAGS_VALID_MASK) == 0);
  50                return true;
  51        } else {
  52                return false;
  53        }
  54}
  55
/* Mark a completion entry as consumed.  The entire word that houses the
 * valid bit must be reset so be_mcc_compl_is_new() won't see it again. */
static inline void be_mcc_compl_use(struct be_mcc_compl *compl)
{
        compl->flags = 0;
}
  61
/* Process one MCC command completion.
 *
 * Converts the entry to host endianness, wakes any thread waiting on a
 * flash command, and on success post-processes statistics responses
 * (per-generation endian swap + driver stats refresh).  Benign failure
 * statuses are silent; others are logged.  Returns the completion status.
 */
static int be_mcc_compl_process(struct be_adapter *adapter,
        struct be_mcc_compl *compl)
{
        u16 compl_status, extd_status;

        /* Just swap the status to host endian; mcc tag is opaquely copied
         * from mcc_wrb */
        be_dws_le_to_cpu(compl, 4);

        compl_status = (compl->status >> CQE_STATUS_COMPL_SHIFT) &
                                CQE_STATUS_COMPL_MASK;

        /* Flash cmds are waited on synchronously elsewhere; record status
         * and wake the waiter */
        if (((compl->tag0 == OPCODE_COMMON_WRITE_FLASHROM) ||
                (compl->tag0 == OPCODE_COMMON_WRITE_OBJECT)) &&
                (compl->tag1 == CMD_SUBSYSTEM_COMMON)) {
                adapter->flash_status = compl_status;
                complete(&adapter->flash_compl);
        }

        if (compl_status == MCC_STATUS_SUCCESS) {
                if (((compl->tag0 == OPCODE_ETH_GET_STATISTICS) ||
                         (compl->tag0 == OPCODE_ETH_GET_PPORT_STATS)) &&
                        (compl->tag1 == CMD_SUBSYSTEM_ETH)) {
                        /* The stats response layout depends on the ASIC
                         * generation; swap the matching struct in place */
                        if (adapter->generation == BE_GEN3) {
                                if (lancer_chip(adapter)) {
                                        struct lancer_cmd_resp_pport_stats
                                                *resp = adapter->stats_cmd.va;
                                        be_dws_le_to_cpu(&resp->pport_stats,
                                                sizeof(resp->pport_stats));
                                } else {
                                        struct be_cmd_resp_get_stats_v1 *resp =
                                                        adapter->stats_cmd.va;

                                be_dws_le_to_cpu(&resp->hw_stats,
                                                        sizeof(resp->hw_stats));
                                }
                        } else {
                                struct be_cmd_resp_get_stats_v0 *resp =
                                                        adapter->stats_cmd.va;

                                be_dws_le_to_cpu(&resp->hw_stats,
                                                        sizeof(resp->hw_stats));
                        }
                        be_parse_stats(adapter);
                        netdev_stats_update(adapter);
                        adapter->stats_cmd_sent = false;
                }
        } else {
                /* Expected/benign errors for some cmds; don't log them */
                if (compl_status == MCC_STATUS_NOT_SUPPORTED ||
                        compl_status == MCC_STATUS_ILLEGAL_REQUEST)
                        goto done;

                if (compl_status == MCC_STATUS_UNAUTHORIZED_REQUEST) {
                        dev_warn(&adapter->pdev->dev, "This domain(VM) is not "
                                "permitted to execute this cmd (opcode %d)\n",
                                compl->tag0);
                } else {
                        extd_status = (compl->status >> CQE_STATUS_EXTD_SHIFT) &
                                        CQE_STATUS_EXTD_MASK;
                        dev_err(&adapter->pdev->dev, "Cmd (opcode %d) failed:"
                                "status %d, extd-status %d\n",
                                compl->tag0, compl_status, extd_status);
                }
        }
done:
        return compl_status;
}
 129
 130/* Link state evt is a string of bytes; no need for endian swapping */
 131static void be_async_link_state_process(struct be_adapter *adapter,
 132                struct be_async_event_link_state *evt)
 133{
 134        be_link_status_update(adapter,
 135                evt->port_link_status == ASYNC_EVENT_LINK_UP);
 136}
 137
 138/* Grp5 CoS Priority evt */
 139static void be_async_grp5_cos_priority_process(struct be_adapter *adapter,
 140                struct be_async_event_grp5_cos_priority *evt)
 141{
 142        if (evt->valid) {
 143                adapter->vlan_prio_bmap = evt->available_priority_bmap;
 144                adapter->recommended_prio &= ~VLAN_PRIO_MASK;
 145                adapter->recommended_prio =
 146                        evt->reco_default_priority << VLAN_PRIO_SHIFT;
 147        }
 148}
 149
 150/* Grp5 QOS Speed evt */
 151static void be_async_grp5_qos_speed_process(struct be_adapter *adapter,
 152                struct be_async_event_grp5_qos_link_speed *evt)
 153{
 154        if (evt->physical_port == adapter->port_num) {
 155                /* qos_link_speed is in units of 10 Mbps */
 156                adapter->link_speed = evt->qos_link_speed * 10;
 157        }
 158}
 159
 160/*Grp5 PVID evt*/
 161static void be_async_grp5_pvid_state_process(struct be_adapter *adapter,
 162                struct be_async_event_grp5_pvid_state *evt)
 163{
 164        if (evt->enabled)
 165                adapter->pvid = le16_to_cpu(evt->tag);
 166        else
 167                adapter->pvid = 0;
 168}
 169
 170static void be_async_grp5_evt_process(struct be_adapter *adapter,
 171                u32 trailer, struct be_mcc_compl *evt)
 172{
 173        u8 event_type = 0;
 174
 175        event_type = (trailer >> ASYNC_TRAILER_EVENT_TYPE_SHIFT) &
 176                ASYNC_TRAILER_EVENT_TYPE_MASK;
 177
 178        switch (event_type) {
 179        case ASYNC_EVENT_COS_PRIORITY:
 180                be_async_grp5_cos_priority_process(adapter,
 181                (struct be_async_event_grp5_cos_priority *)evt);
 182        break;
 183        case ASYNC_EVENT_QOS_SPEED:
 184                be_async_grp5_qos_speed_process(adapter,
 185                (struct be_async_event_grp5_qos_link_speed *)evt);
 186        break;
 187        case ASYNC_EVENT_PVID_STATE:
 188                be_async_grp5_pvid_state_process(adapter,
 189                (struct be_async_event_grp5_pvid_state *)evt);
 190        break;
 191        default:
 192                dev_warn(&adapter->pdev->dev, "Unknown grp5 event!\n");
 193                break;
 194        }
 195}
 196
 197static inline bool is_link_state_evt(u32 trailer)
 198{
 199        return ((trailer >> ASYNC_TRAILER_EVENT_CODE_SHIFT) &
 200                ASYNC_TRAILER_EVENT_CODE_MASK) ==
 201                                ASYNC_EVENT_CODE_LINK_STATE;
 202}
 203
 204static inline bool is_grp5_evt(u32 trailer)
 205{
 206        return (((trailer >> ASYNC_TRAILER_EVENT_CODE_SHIFT) &
 207                ASYNC_TRAILER_EVENT_CODE_MASK) ==
 208                                ASYNC_EVENT_CODE_GRP_5);
 209}
 210
 211static struct be_mcc_compl *be_mcc_compl_get(struct be_adapter *adapter)
 212{
 213        struct be_queue_info *mcc_cq = &adapter->mcc_obj.cq;
 214        struct be_mcc_compl *compl = queue_tail_node(mcc_cq);
 215
 216        if (be_mcc_compl_is_new(compl)) {
 217                queue_tail_inc(mcc_cq);
 218                return compl;
 219        }
 220        return NULL;
 221}
 222
/* Arm the MCC CQ and flag that it should keep being re-armed, enabling
 * async event delivery.  Takes mcc_cq_lock to serialize with the
 * CQ-processing path. */
void be_async_mcc_enable(struct be_adapter *adapter)
{
        spin_lock_bh(&adapter->mcc_cq_lock);

        be_cq_notify(adapter, adapter->mcc_obj.cq.id, true, 0);
        adapter->mcc_obj.rearm_cq = true;

        spin_unlock_bh(&adapter->mcc_cq_lock);
}
 232
/* Stop re-arming the MCC CQ; async event delivery ceases after the
 * current processing pass completes. */
void be_async_mcc_disable(struct be_adapter *adapter)
{
        adapter->mcc_obj.rearm_cq = false;
}
 237
/* Drain the MCC completion queue under mcc_cq_lock (BH-safe): dispatch
 * async events (link-state and Grp5) and process command completions.
 * The status of the last command completion processed is returned via
 * *status.  Returns the number of CQ entries consumed.
 */
int be_process_mcc(struct be_adapter *adapter, int *status)
{
        struct be_mcc_compl *compl;
        int num = 0;
        struct be_mcc_obj *mcc_obj = &adapter->mcc_obj;

        spin_lock_bh(&adapter->mcc_cq_lock);
        while ((compl = be_mcc_compl_get(adapter))) {
                if (compl->flags & CQE_FLAGS_ASYNC_MASK) {
                        /* Interpret flags as an async trailer */
                        if (is_link_state_evt(compl->flags))
                                be_async_link_state_process(adapter,
                                (struct be_async_event_link_state *) compl);
                        else if (is_grp5_evt(compl->flags))
                                be_async_grp5_evt_process(adapter,
                                compl->flags, compl);
                } else if (compl->flags & CQE_FLAGS_COMPLETED_MASK) {
                                *status = be_mcc_compl_process(adapter, compl);
                                /* one less outstanding WRB in the MCCQ */
                                atomic_dec(&mcc_obj->q.used);
                }
                be_mcc_compl_use(compl);
                num++;
        }

        spin_unlock_bh(&adapter->mcc_cq_lock);
        return num;
}
 265
/* Wait till no more pending mcc requests are present.
 * Polls the MCC CQ (in 100us steps, up to 12s) until all posted WRBs have
 * completed, re-arming the CQ after each non-empty pass.  Returns the
 * status of the last completion, -EIO on EEH error, or -1 on timeout. */
static int be_mcc_wait_compl(struct be_adapter *adapter)
{
#define mcc_timeout             120000 /* 12s timeout */
        int i, num, status = 0;
        struct be_mcc_obj *mcc_obj = &adapter->mcc_obj;

        if (adapter->eeh_err)
                return -EIO;

        for (i = 0; i < mcc_timeout; i++) {
                num = be_process_mcc(adapter, &status);
                if (num)
                        be_cq_notify(adapter, mcc_obj->cq.id,
                                mcc_obj->rearm_cq, num);

                /* done when no WRBs remain outstanding */
                if (atomic_read(&mcc_obj->q.used) == 0)
                        break;
                udelay(100);
        }
        if (i == mcc_timeout) {
                dev_err(&adapter->pdev->dev, "mccq poll timed out\n");
                return -1;
        }
        return status;
}
 292
/* Notify MCC requests and wait for completion.
 * Rings the MCCQ doorbell and then polls until the queue drains;
 * returns the last completion status (see be_mcc_wait_compl). */
static int be_mcc_notify_wait(struct be_adapter *adapter)
{
        be_mcc_notify(adapter);
        return be_mcc_wait_compl(adapter);
}
 299
/* Poll the mailbox doorbell's ready bit (1ms steps, ~4s timeout).
 * Returns 0 when ready, -EIO if an EEH error was already flagged, and -1
 * on PCI disconnect (all-ones read) or timeout.  On timeout for BE chips
 * the UE (unrecoverable error) registers are dumped for diagnosis. */
static int be_mbox_db_ready_wait(struct be_adapter *adapter, void __iomem *db)
{
        int msecs = 0;
        u32 ready;

        if (adapter->eeh_err) {
                dev_err(&adapter->pdev->dev,
                        "Error detected in card.Cannot issue commands\n");
                return -EIO;
        }

        do {
                ready = ioread32(db);
                /* all-ones read means the device fell off the bus */
                if (ready == 0xffffffff) {
                        dev_err(&adapter->pdev->dev,
                                "pci slot disconnected\n");
                        return -1;
                }

                ready &= MPU_MAILBOX_DB_RDY_MASK;
                if (ready)
                        break;

                if (msecs > 4000) {
                        dev_err(&adapter->pdev->dev, "mbox poll timed out\n");
                        if (!lancer_chip(adapter))
                                be_detect_dump_ue(adapter);
                        return -1;
                }

                msleep(1);
                msecs++;
        } while (true);

        return 0;
}
 336
/*
 * Insert the mailbox address into the doorbell in two steps (high dword
 * first, then low), waiting for the ready bit before each write.
 * Polls on the mbox doorbell till a command completion (or a timeout)
 * occurs, then processes the completion placed in the mailbox.
 * Returns 0 on success, the completion status, or a negative error.
 */
static int be_mbox_notify_wait(struct be_adapter *adapter)
{
        int status;
        u32 val = 0;
        void __iomem *db = adapter->db + MPU_MAILBOX_DB_OFFSET;
        struct be_dma_mem *mbox_mem = &adapter->mbox_mem;
        struct be_mcc_mailbox *mbox = mbox_mem->va;
        struct be_mcc_compl *compl = &mbox->compl;

        /* wait for ready to be set */
        status = be_mbox_db_ready_wait(adapter, db);
        if (status != 0)
                return status;

        val |= MPU_MAILBOX_DB_HI_MASK;
        /* at bits 2 - 31 place mbox dma addr msb bits 34 - 63 */
        val |= (upper_32_bits(mbox_mem->dma) >> 2) << 2;
        iowrite32(val, db);

        /* wait for ready to be set */
        status = be_mbox_db_ready_wait(adapter, db);
        if (status != 0)
                return status;

        val = 0;
        /* at bits 2 - 31 place mbox dma addr lsb bits 4 - 33 */
        val |= (u32)(mbox_mem->dma >> 4) << 2;
        iowrite32(val, db);

        status = be_mbox_db_ready_wait(adapter, db);
        if (status != 0)
                return status;

        /* A cq entry has been made now */
        if (be_mcc_compl_is_new(compl)) {
                status = be_mcc_compl_process(adapter, &mbox->compl);
                be_mcc_compl_use(compl);
                if (status)
                        return status;
        } else {
                dev_err(&adapter->pdev->dev, "invalid mailbox completion\n");
                return -1;
        }
        return 0;
}
 386
 387static int be_POST_stage_get(struct be_adapter *adapter, u16 *stage)
 388{
 389        u32 sem;
 390
 391        if (lancer_chip(adapter))
 392                sem  = ioread32(adapter->db + MPU_EP_SEMAPHORE_IF_TYPE2_OFFSET);
 393        else
 394                sem  = ioread32(adapter->csr + MPU_EP_SEMAPHORE_OFFSET);
 395
 396        *stage = sem & EP_SEMAPHORE_POST_STAGE_MASK;
 397        if ((sem >> EP_SEMAPHORE_POST_ERR_SHIFT) & EP_SEMAPHORE_POST_ERR_MASK)
 398                return -1;
 399        else
 400                return 0;
 401}
 402
/* Wait for the firmware's power-on self test to finish.
 * Polls the POST stage every 2s for up to ~40s; returns 0 when the ARM
 * firmware reports ready, -EINTR if the sleep is interrupted, and -1 on
 * a POST error or timeout. */
int be_cmd_POST(struct be_adapter *adapter)
{
        u16 stage;
        int status, timeout = 0;
        struct device *dev = &adapter->pdev->dev;

        do {
                status = be_POST_stage_get(adapter, &stage);
                if (status) {
                        dev_err(dev, "POST error; stage=0x%x\n", stage);
                        return -1;
                } else if (stage != POST_STAGE_ARMFW_RDY) {
                        if (msleep_interruptible(2000)) {
                                dev_err(dev, "Waiting for POST aborted\n");
                                return -EINTR;
                        }
                        /* timeout counts elapsed seconds (2 per sleep) */
                        timeout += 2;
                } else {
                        return 0;
                }
        } while (timeout < 40);

        dev_err(dev, "POST timeout; stage=0x%x\n", stage);
        return -1;
}
 428
/* Payload area of a WRB when the command is embedded in the WRB itself */
static inline void *embedded_payload(struct be_mcc_wrb *wrb)
{
        return wrb->payload.embedded_payload;
}
 433
/* First scatter-gather entry of a WRB carrying a non-embedded command */
static inline struct be_sge *nonembedded_sgl(struct be_mcc_wrb *wrb)
{
        return &wrb->payload.sgl[0];
}
 438
/* Fill in the WRB header: embedded flag or SGE count, payload length and
 * the opcode (stashed in tag0 so the completion can be matched later),
 * then convert the header to little endian.
 * Don't touch the hdr after it's prepared */
static void be_wrb_hdr_prepare(struct be_mcc_wrb *wrb, int payload_len,
                                bool embedded, u8 sge_cnt, u32 opcode)
{
        if (embedded)
                wrb->embedded |= MCC_WRB_EMBEDDED_MASK;
        else
                wrb->embedded |= (sge_cnt & MCC_WRB_SGE_CNT_MASK) <<
                                MCC_WRB_SGE_CNT_SHIFT;
        wrb->payload_length = payload_len;
        wrb->tag0 = opcode;
        be_dws_cpu_to_le(wrb, 8);
}
 452
/* Fill in the common request header: subsystem, opcode and the request
 * length (payload size excluding this header).
 * Don't touch the hdr after it's prepared */
static void be_cmd_hdr_prepare(struct be_cmd_req_hdr *req_hdr,
                                u8 subsystem, u8 opcode, int cmd_len)
{
        req_hdr->opcode = opcode;
        req_hdr->subsystem = subsystem;
        req_hdr->request_length = cpu_to_le32(cmd_len - sizeof(*req_hdr));
        req_hdr->version = 0;
}
 462
 463static void be_cmd_page_addrs_prepare(struct phys_addr *pages, u32 max_pages,
 464                        struct be_dma_mem *mem)
 465{
 466        int i, buf_pages = min(PAGES_4K_SPANNED(mem->va, mem->size), max_pages);
 467        u64 dma = (u64)mem->dma;
 468
 469        for (i = 0; i < buf_pages; i++) {
 470                pages[i].lo = cpu_to_le32(dma & 0xFFFFFFFF);
 471                pages[i].hi = cpu_to_le32(upper_32_bits(dma));
 472                dma += PAGE_SIZE_4K;
 473        }
 474}
 475
 476/* Converts interrupt delay in microseconds to multiplier value */
 477static u32 eq_delay_to_mult(u32 usec_delay)
 478{
 479#define MAX_INTR_RATE                   651042
 480        const u32 round = 10;
 481        u32 multiplier;
 482
 483        if (usec_delay == 0)
 484                multiplier = 0;
 485        else {
 486                u32 interrupt_rate = 1000000 / usec_delay;
 487                /* Max delay, corresponding to the lowest interrupt rate */
 488                if (interrupt_rate == 0)
 489                        multiplier = 1023;
 490                else {
 491                        multiplier = (MAX_INTR_RATE - interrupt_rate) * round;
 492                        multiplier /= interrupt_rate;
 493                        /* Round the multiplier to the closest value.*/
 494                        multiplier = (multiplier + round/2) / round;
 495                        multiplier = min(multiplier, (u32)1023);
 496                }
 497        }
 498        return multiplier;
 499}
 500
/* Return the (zeroed) WRB embedded in the bootstrap mailbox.
 * Caller must hold mbox_lock. */
static inline struct be_mcc_wrb *wrb_from_mbox(struct be_adapter *adapter)
{
        struct be_dma_mem *mbox_mem = &adapter->mbox_mem;
        struct be_mcc_wrb *wrb
                = &((struct be_mcc_mailbox *)(mbox_mem->va))->wrb;
        memset(wrb, 0, sizeof(*wrb));
        return wrb;
}
 509
/* Allocate (and zero) the next free WRB slot from the MCCQ, bumping the
 * in-use count; returns NULL when the queue is full.
 * Caller must hold mcc_lock. */
static struct be_mcc_wrb *wrb_from_mccq(struct be_adapter *adapter)
{
        struct be_queue_info *mccq = &adapter->mcc_obj.q;
        struct be_mcc_wrb *wrb;

        if (atomic_read(&mccq->used) >= mccq->len) {
                dev_err(&adapter->pdev->dev, "Out of MCCQ wrbs\n");
                return NULL;
        }

        wrb = queue_head_node(mccq);
        queue_head_inc(mccq);
        atomic_inc(&mccq->used);
        memset(wrb, 0, sizeof(*wrb));
        return wrb;
}
 526
 527/* Tell fw we're about to start firing cmds by writing a
 528 * special pattern across the wrb hdr; uses mbox
 529 */
 530int be_cmd_fw_init(struct be_adapter *adapter)
 531{
 532        u8 *wrb;
 533        int status;
 534
 535        if (mutex_lock_interruptible(&adapter->mbox_lock))
 536                return -1;
 537
 538        wrb = (u8 *)wrb_from_mbox(adapter);
 539        *wrb++ = 0xFF;
 540        *wrb++ = 0x12;
 541        *wrb++ = 0x34;
 542        *wrb++ = 0xFF;
 543        *wrb++ = 0xFF;
 544        *wrb++ = 0x56;
 545        *wrb++ = 0x78;
 546        *wrb = 0xFF;
 547
 548        status = be_mbox_notify_wait(adapter);
 549
 550        mutex_unlock(&adapter->mbox_lock);
 551        return status;
 552}
 553
 554/* Tell fw we're done with firing cmds by writing a
 555 * special pattern across the wrb hdr; uses mbox
 556 */
 557int be_cmd_fw_clean(struct be_adapter *adapter)
 558{
 559        u8 *wrb;
 560        int status;
 561
 562        if (adapter->eeh_err)
 563                return -EIO;
 564
 565        if (mutex_lock_interruptible(&adapter->mbox_lock))
 566                return -1;
 567
 568        wrb = (u8 *)wrb_from_mbox(adapter);
 569        *wrb++ = 0xFF;
 570        *wrb++ = 0xAA;
 571        *wrb++ = 0xBB;
 572        *wrb++ = 0xFF;
 573        *wrb++ = 0xFF;
 574        *wrb++ = 0xCC;
 575        *wrb++ = 0xDD;
 576        *wrb = 0xFF;
 577
 578        status = be_mbox_notify_wait(adapter);
 579
 580        mutex_unlock(&adapter->mbox_lock);
 581        return status;
 582}
/* Create an event queue via the bootstrap mailbox.
 * Programs the EQ context (4-byte EQEs, encoded ring size, delay
 * multiplier derived from eq_delay in usecs) and, on success, records
 * the firmware-assigned EQ id in eq->id. */
int be_cmd_eq_create(struct be_adapter *adapter,
                struct be_queue_info *eq, int eq_delay)
{
        struct be_mcc_wrb *wrb;
        struct be_cmd_req_eq_create *req;
        struct be_dma_mem *q_mem = &eq->dma_mem;
        int status;

        if (mutex_lock_interruptible(&adapter->mbox_lock))
                return -1;

        wrb = wrb_from_mbox(adapter);
        req = embedded_payload(wrb);

        be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0, OPCODE_COMMON_EQ_CREATE);

        be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
                OPCODE_COMMON_EQ_CREATE, sizeof(*req));

        req->num_pages =  cpu_to_le16(PAGES_4K_SPANNED(q_mem->va, q_mem->size));

        AMAP_SET_BITS(struct amap_eq_context, valid, req->context, 1);
        /* 4byte eqe*/
        AMAP_SET_BITS(struct amap_eq_context, size, req->context, 0);
        AMAP_SET_BITS(struct amap_eq_context, count, req->context,
                        __ilog2_u32(eq->len/256));
        AMAP_SET_BITS(struct amap_eq_context, delaymult, req->context,
                        eq_delay_to_mult(eq_delay));
        be_dws_cpu_to_le(req->context, sizeof(req->context));

        be_cmd_page_addrs_prepare(req->pages, ARRAY_SIZE(req->pages), q_mem);

        status = be_mbox_notify_wait(adapter);
        if (!status) {
                struct be_cmd_resp_eq_create *resp = embedded_payload(wrb);
                eq->id = le16_to_cpu(resp->eq_id);
                eq->created = true;
        }

        mutex_unlock(&adapter->mbox_lock);
        return status;
}
 625
/* Uses mbox.
 * Query a MAC address of the given type: either the permanent (factory)
 * address or the one currently programmed on interface if_handle.
 * On success copies ETH_ALEN bytes into mac_addr. */
int be_cmd_mac_addr_query(struct be_adapter *adapter, u8 *mac_addr,
                        u8 type, bool permanent, u32 if_handle)
{
        struct be_mcc_wrb *wrb;
        struct be_cmd_req_mac_query *req;
        int status;

        if (mutex_lock_interruptible(&adapter->mbox_lock))
                return -1;

        wrb = wrb_from_mbox(adapter);
        req = embedded_payload(wrb);

        be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0,
                        OPCODE_COMMON_NTWK_MAC_QUERY);

        be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
                OPCODE_COMMON_NTWK_MAC_QUERY, sizeof(*req));

        req->type = type;
        if (permanent) {
                req->permanent = 1;
        } else {
                req->if_id = cpu_to_le16((u16) if_handle);
                req->permanent = 0;
        }

        status = be_mbox_notify_wait(adapter);
        if (!status) {
                struct be_cmd_resp_mac_query *resp = embedded_payload(wrb);
                memcpy(mac_addr, resp->mac.addr, ETH_ALEN);
        }

        mutex_unlock(&adapter->mbox_lock);
        return status;
}
 663
/* Uses synchronous MCCQ.
 * Program mac_addr onto interface if_id (in the given domain); on success
 * the firmware-assigned handle is returned through *pmac_id for later
 * deletion via be_cmd_pmac_del(). */
int be_cmd_pmac_add(struct be_adapter *adapter, u8 *mac_addr,
                u32 if_id, u32 *pmac_id, u32 domain)
{
        struct be_mcc_wrb *wrb;
        struct be_cmd_req_pmac_add *req;
        int status;

        spin_lock_bh(&adapter->mcc_lock);

        wrb = wrb_from_mccq(adapter);
        if (!wrb) {
                status = -EBUSY;
                goto err;
        }
        req = embedded_payload(wrb);

        be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0,
                        OPCODE_COMMON_NTWK_PMAC_ADD);

        be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
                OPCODE_COMMON_NTWK_PMAC_ADD, sizeof(*req));

        req->hdr.domain = domain;
        req->if_id = cpu_to_le32(if_id);
        memcpy(req->mac_address, mac_addr, ETH_ALEN);

        status = be_mcc_notify_wait(adapter);
        if (!status) {
                struct be_cmd_resp_pmac_add *resp = embedded_payload(wrb);
                *pmac_id = le32_to_cpu(resp->pmac_id);
        }

err:
        spin_unlock_bh(&adapter->mcc_lock);
        return status;
}
 701
/* Uses synchronous MCCQ.
 * Remove the MAC identified by pmac_id (as returned by be_cmd_pmac_add)
 * from interface if_id in domain dom. */
int be_cmd_pmac_del(struct be_adapter *adapter, u32 if_id, u32 pmac_id, u32 dom)
{
        struct be_mcc_wrb *wrb;
        struct be_cmd_req_pmac_del *req;
        int status;

        spin_lock_bh(&adapter->mcc_lock);

        wrb = wrb_from_mccq(adapter);
        if (!wrb) {
                status = -EBUSY;
                goto err;
        }
        req = embedded_payload(wrb);

        be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0,
                        OPCODE_COMMON_NTWK_PMAC_DEL);

        be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
                OPCODE_COMMON_NTWK_PMAC_DEL, sizeof(*req));

        req->hdr.domain = dom;
        req->if_id = cpu_to_le32(if_id);
        req->pmac_id = cpu_to_le32(pmac_id);

        status = be_mcc_notify_wait(adapter);

err:
        spin_unlock_bh(&adapter->mcc_lock);
        return status;
}
 734
/* Uses Mbox.
 * Create a completion queue attached to the given EQ.  The CQ context
 * layout differs between Lancer (v2 request) and BE chips; on success
 * the firmware-assigned CQ id is stored in cq->id. */
int be_cmd_cq_create(struct be_adapter *adapter,
                struct be_queue_info *cq, struct be_queue_info *eq,
                bool sol_evts, bool no_delay, int coalesce_wm)
{
        struct be_mcc_wrb *wrb;
        struct be_cmd_req_cq_create *req;
        struct be_dma_mem *q_mem = &cq->dma_mem;
        void *ctxt;
        int status;

        if (mutex_lock_interruptible(&adapter->mbox_lock))
                return -1;

        wrb = wrb_from_mbox(adapter);
        req = embedded_payload(wrb);
        ctxt = &req->context;

        be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0,
                        OPCODE_COMMON_CQ_CREATE);

        be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
                OPCODE_COMMON_CQ_CREATE, sizeof(*req));

        req->num_pages =  cpu_to_le16(PAGES_4K_SPANNED(q_mem->va, q_mem->size));
        if (lancer_chip(adapter)) {
                req->hdr.version = 2;
                req->page_size = 1; /* 1 for 4K */
                AMAP_SET_BITS(struct amap_cq_context_lancer, nodelay, ctxt,
                                                                no_delay);
                AMAP_SET_BITS(struct amap_cq_context_lancer, count, ctxt,
                                                __ilog2_u32(cq->len/256));
                AMAP_SET_BITS(struct amap_cq_context_lancer, valid, ctxt, 1);
                AMAP_SET_BITS(struct amap_cq_context_lancer, eventable,
                                                                ctxt, 1);
                AMAP_SET_BITS(struct amap_cq_context_lancer, eqid,
                                                                ctxt, eq->id);
                AMAP_SET_BITS(struct amap_cq_context_lancer, armed, ctxt, 1);
        } else {
                AMAP_SET_BITS(struct amap_cq_context_be, coalescwm, ctxt,
                                                                coalesce_wm);
                AMAP_SET_BITS(struct amap_cq_context_be, nodelay,
                                                                ctxt, no_delay);
                AMAP_SET_BITS(struct amap_cq_context_be, count, ctxt,
                                                __ilog2_u32(cq->len/256));
                AMAP_SET_BITS(struct amap_cq_context_be, valid, ctxt, 1);
                AMAP_SET_BITS(struct amap_cq_context_be, solevent,
                                                                ctxt, sol_evts);
                AMAP_SET_BITS(struct amap_cq_context_be, eventable, ctxt, 1);
                AMAP_SET_BITS(struct amap_cq_context_be, eqid, ctxt, eq->id);
                AMAP_SET_BITS(struct amap_cq_context_be, armed, ctxt, 1);
        }

        be_dws_cpu_to_le(ctxt, sizeof(req->context));

        be_cmd_page_addrs_prepare(req->pages, ARRAY_SIZE(req->pages), q_mem);

        status = be_mbox_notify_wait(adapter);
        if (!status) {
                struct be_cmd_resp_cq_create *resp = embedded_payload(wrb);
                cq->id = le16_to_cpu(resp->cq_id);
                cq->created = true;
        }

        mutex_unlock(&adapter->mbox_lock);

        return status;
}
 803
 804static u32 be_encoded_q_len(int q_len)
 805{
 806        u32 len_encoded = fls(q_len); /* log2(len) + 1 */
 807        if (len_encoded == 16)
 808                len_encoded = 0;
 809        return len_encoded;
 810}
 811
/* Create the MCC queue (extended variant) via the bootstrap mailbox,
 * binding it to the given CQ and subscribing to link-state and Grp5
 * async events.  On success stores the firmware-assigned id in mccq->id. */
int be_cmd_mccq_ext_create(struct be_adapter *adapter,
                        struct be_queue_info *mccq,
                        struct be_queue_info *cq)
{
        struct be_mcc_wrb *wrb;
        struct be_cmd_req_mcc_ext_create *req;
        struct be_dma_mem *q_mem = &mccq->dma_mem;
        void *ctxt;
        int status;

        if (mutex_lock_interruptible(&adapter->mbox_lock))
                return -1;

        wrb = wrb_from_mbox(adapter);
        req = embedded_payload(wrb);
        ctxt = &req->context;

        be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0,
                        OPCODE_COMMON_MCC_CREATE_EXT);

        be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
                        OPCODE_COMMON_MCC_CREATE_EXT, sizeof(*req));

        req->num_pages = cpu_to_le16(PAGES_4K_SPANNED(q_mem->va, q_mem->size));
        if (lancer_chip(adapter)) {
                req->hdr.version = 1;
                req->cq_id = cpu_to_le16(cq->id);

                AMAP_SET_BITS(struct amap_mcc_context_lancer, ring_size, ctxt,
                                                be_encoded_q_len(mccq->len));
                AMAP_SET_BITS(struct amap_mcc_context_lancer, valid, ctxt, 1);
                AMAP_SET_BITS(struct amap_mcc_context_lancer, async_cq_id,
                                                                ctxt, cq->id);
                AMAP_SET_BITS(struct amap_mcc_context_lancer, async_cq_valid,
                                                                 ctxt, 1);

        } else {
                AMAP_SET_BITS(struct amap_mcc_context_be, valid, ctxt, 1);
                AMAP_SET_BITS(struct amap_mcc_context_be, ring_size, ctxt,
                                                be_encoded_q_len(mccq->len));
                AMAP_SET_BITS(struct amap_mcc_context_be, cq_id, ctxt, cq->id);
        }

        /* Subscribe to Link State and Group 5 Events(bits 1 and 5 set) */
        req->async_event_bitmap[0] = cpu_to_le32(0x00000022);
        be_dws_cpu_to_le(ctxt, sizeof(req->context));

        be_cmd_page_addrs_prepare(req->pages, ARRAY_SIZE(req->pages), q_mem);

        status = be_mbox_notify_wait(adapter);
        if (!status) {
                struct be_cmd_resp_mcc_create *resp = embedded_payload(wrb);
                mccq->id = le16_to_cpu(resp->id);
                mccq->created = true;
        }
        mutex_unlock(&adapter->mbox_lock);

        return status;
}
 871
/* Create the MCC queue with the original (pre-ext) MCC_CREATE command.
 * Used as a fallback for firmware that does not support MCC_CREATE_EXT
 * (see be_cmd_mccq_create()).  Runs over the mailbox.  On success,
 * mccq->id and mccq->created are filled in from the response.
 */
int be_cmd_mccq_org_create(struct be_adapter *adapter,
                        struct be_queue_info *mccq,
                        struct be_queue_info *cq)
{
        struct be_mcc_wrb *wrb;
        struct be_cmd_req_mcc_create *req;
        struct be_dma_mem *q_mem = &mccq->dma_mem;
        void *ctxt;
        int status;

        if (mutex_lock_interruptible(&adapter->mbox_lock))
                return -1;

        wrb = wrb_from_mbox(adapter);
        req = embedded_payload(wrb);
        ctxt = &req->context;

        be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0,
                        OPCODE_COMMON_MCC_CREATE);

        be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
                        OPCODE_COMMON_MCC_CREATE, sizeof(*req));

        req->num_pages = cpu_to_le16(PAGES_4K_SPANNED(q_mem->va, q_mem->size));

        AMAP_SET_BITS(struct amap_mcc_context_be, valid, ctxt, 1);
        AMAP_SET_BITS(struct amap_mcc_context_be, ring_size, ctxt,
                        be_encoded_q_len(mccq->len));
        AMAP_SET_BITS(struct amap_mcc_context_be, cq_id, ctxt, cq->id);

        /* Context is built host-endian via AMAP; swap to LE for the h/w */
        be_dws_cpu_to_le(ctxt, sizeof(req->context));

        be_cmd_page_addrs_prepare(req->pages, ARRAY_SIZE(req->pages), q_mem);

        status = be_mbox_notify_wait(adapter);
        if (!status) {
                struct be_cmd_resp_mcc_create *resp = embedded_payload(wrb);
                mccq->id = le16_to_cpu(resp->id);
                mccq->created = true;
        }

        mutex_unlock(&adapter->mbox_lock);
        return status;
}
 916
 917int be_cmd_mccq_create(struct be_adapter *adapter,
 918                        struct be_queue_info *mccq,
 919                        struct be_queue_info *cq)
 920{
 921        int status;
 922
 923        status = be_cmd_mccq_ext_create(adapter, mccq, cq);
 924        if (status && !lancer_chip(adapter)) {
 925                dev_warn(&adapter->pdev->dev, "Upgrade to F/W ver 2.102.235.0 "
 926                        "or newer to avoid conflicting priorities between NIC "
 927                        "and FCoE traffic");
 928                status = be_cmd_mccq_org_create(adapter, mccq, cq);
 929        }
 930        return status;
 931}
 932
/* Create an ethernet TX queue bound to completion queue @cq.
 * Runs over the mailbox.  On success, txq->id and txq->created are
 * filled in from the response.
 */
int be_cmd_txq_create(struct be_adapter *adapter,
                        struct be_queue_info *txq,
                        struct be_queue_info *cq)
{
        struct be_mcc_wrb *wrb;
        struct be_cmd_req_eth_tx_create *req;
        struct be_dma_mem *q_mem = &txq->dma_mem;
        void *ctxt;
        int status;

        if (mutex_lock_interruptible(&adapter->mbox_lock))
                return -1;

        wrb = wrb_from_mbox(adapter);
        req = embedded_payload(wrb);
        ctxt = &req->context;

        be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0,
                        OPCODE_ETH_TX_CREATE);

        be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ETH, OPCODE_ETH_TX_CREATE,
                sizeof(*req));

        if (lancer_chip(adapter)) {
                /* Lancer needs a v1 request carrying the interface handle */
                req->hdr.version = 1;
                AMAP_SET_BITS(struct amap_tx_context, if_id, ctxt,
                                        adapter->if_handle);
        }

        req->num_pages = PAGES_4K_SPANNED(q_mem->va, q_mem->size);
        req->ulp_num = BE_ULP1_NUM;
        req->type = BE_ETH_TX_RING_TYPE_STANDARD;

        AMAP_SET_BITS(struct amap_tx_context, tx_ring_size, ctxt,
                be_encoded_q_len(txq->len));
        AMAP_SET_BITS(struct amap_tx_context, ctx_valid, ctxt, 1);
        AMAP_SET_BITS(struct amap_tx_context, cq_id_send, ctxt, cq->id);

        /* Context is built host-endian via AMAP; swap to LE for the h/w */
        be_dws_cpu_to_le(ctxt, sizeof(req->context));

        be_cmd_page_addrs_prepare(req->pages, ARRAY_SIZE(req->pages), q_mem);

        status = be_mbox_notify_wait(adapter);
        if (!status) {
                struct be_cmd_resp_eth_tx_create *resp = embedded_payload(wrb);
                txq->id = le16_to_cpu(resp->cid);
                txq->created = true;
        }

        mutex_unlock(&adapter->mbox_lock);

        return status;
}
 986
 987/* Uses MCC */
 988int be_cmd_rxq_create(struct be_adapter *adapter,
 989                struct be_queue_info *rxq, u16 cq_id, u16 frag_size,
 990                u16 max_frame_size, u32 if_id, u32 rss, u8 *rss_id)
 991{
 992        struct be_mcc_wrb *wrb;
 993        struct be_cmd_req_eth_rx_create *req;
 994        struct be_dma_mem *q_mem = &rxq->dma_mem;
 995        int status;
 996
 997        spin_lock_bh(&adapter->mcc_lock);
 998
 999        wrb = wrb_from_mccq(adapter);
1000        if (!wrb) {
1001                status = -EBUSY;
1002                goto err;
1003        }
1004        req = embedded_payload(wrb);
1005
1006        be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0,
1007                        OPCODE_ETH_RX_CREATE);
1008
1009        be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ETH, OPCODE_ETH_RX_CREATE,
1010                sizeof(*req));
1011
1012        req->cq_id = cpu_to_le16(cq_id);
1013        req->frag_size = fls(frag_size) - 1;
1014        req->num_pages = 2;
1015        be_cmd_page_addrs_prepare(req->pages, ARRAY_SIZE(req->pages), q_mem);
1016        req->interface_id = cpu_to_le32(if_id);
1017        req->max_frame_size = cpu_to_le16(max_frame_size);
1018        req->rss_queue = cpu_to_le32(rss);
1019
1020        status = be_mcc_notify_wait(adapter);
1021        if (!status) {
1022                struct be_cmd_resp_eth_rx_create *resp = embedded_payload(wrb);
1023                rxq->id = le16_to_cpu(resp->id);
1024                rxq->created = true;
1025                *rss_id = resp->rss_id;
1026        }
1027
1028err:
1029        spin_unlock_bh(&adapter->mcc_lock);
1030        return status;
1031}
1032
1033/* Generic destroyer function for all types of queues
1034 * Uses Mbox
1035 */
/* Destroy a queue of any type identified by @queue_type (QTYPE_*).
 * Runs over the mailbox.  On success, q->created is cleared.
 * Refuses to touch the h/w after an EEH error has been flagged.
 */
int be_cmd_q_destroy(struct be_adapter *adapter, struct be_queue_info *q,
                int queue_type)
{
        struct be_mcc_wrb *wrb;
        struct be_cmd_req_q_destroy *req;
        u8 subsys = 0, opcode = 0;
        int status;

        if (adapter->eeh_err)
                return -EIO;

        if (mutex_lock_interruptible(&adapter->mbox_lock))
                return -1;

        wrb = wrb_from_mbox(adapter);
        req = embedded_payload(wrb);

        /* Map the queue type to the matching subsystem/opcode pair */
        switch (queue_type) {
        case QTYPE_EQ:
                subsys = CMD_SUBSYSTEM_COMMON;
                opcode = OPCODE_COMMON_EQ_DESTROY;
                break;
        case QTYPE_CQ:
                subsys = CMD_SUBSYSTEM_COMMON;
                opcode = OPCODE_COMMON_CQ_DESTROY;
                break;
        case QTYPE_TXQ:
                subsys = CMD_SUBSYSTEM_ETH;
                opcode = OPCODE_ETH_TX_DESTROY;
                break;
        case QTYPE_RXQ:
                subsys = CMD_SUBSYSTEM_ETH;
                opcode = OPCODE_ETH_RX_DESTROY;
                break;
        case QTYPE_MCCQ:
                subsys = CMD_SUBSYSTEM_COMMON;
                opcode = OPCODE_COMMON_MCC_DESTROY;
                break;
        default:
                BUG();
        }

        be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0, opcode);

        be_cmd_hdr_prepare(&req->hdr, subsys, opcode, sizeof(*req));
        req->id = cpu_to_le16(q->id);

        status = be_mbox_notify_wait(adapter);
        if (!status)
                q->created = false;

        mutex_unlock(&adapter->mbox_lock);
        return status;
}
1090
1091/* Uses MCC */
/* Destroy an RX queue.  Unlike be_cmd_q_destroy() this goes over the MCC
 * queue (synchronous), so it can be issued while the MCC path is live.
 * On success, q->created is cleared.
 */
int be_cmd_rxq_destroy(struct be_adapter *adapter, struct be_queue_info *q)
{
        struct be_mcc_wrb *wrb;
        struct be_cmd_req_q_destroy *req;
        int status;

        spin_lock_bh(&adapter->mcc_lock);

        wrb = wrb_from_mccq(adapter);
        if (!wrb) {
                status = -EBUSY;
                goto err;
        }
        req = embedded_payload(wrb);

        be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0, OPCODE_ETH_RX_DESTROY);
        be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ETH, OPCODE_ETH_RX_DESTROY,
                sizeof(*req));
        req->id = cpu_to_le16(q->id);

        status = be_mcc_notify_wait(adapter);
        if (!status)
                q->created = false;

err:
        spin_unlock_bh(&adapter->mcc_lock);
        return status;
}
1120
1121/* Create an rx filtering policy configuration on an i/f
1122 * Uses mbox
1123 */
/* Create an rx filtering policy configuration on an i/f
 * Uses mbox.  @cap_flags/@en_flags are BE_IF_FLAGS_* capability/enable
 * masks; if @pmac_invalid is false, @mac is programmed as the primary MAC
 * and *pmac_id receives its handle.  *if_handle receives the new
 * interface id.  @domain selects the (virtual) function domain.
 */
int be_cmd_if_create(struct be_adapter *adapter, u32 cap_flags, u32 en_flags,
                u8 *mac, bool pmac_invalid, u32 *if_handle, u32 *pmac_id,
                u32 domain)
{
        struct be_mcc_wrb *wrb;
        struct be_cmd_req_if_create *req;
        int status;

        if (mutex_lock_interruptible(&adapter->mbox_lock))
                return -1;

        wrb = wrb_from_mbox(adapter);
        req = embedded_payload(wrb);

        be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0,
                        OPCODE_COMMON_NTWK_INTERFACE_CREATE);

        be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
                OPCODE_COMMON_NTWK_INTERFACE_CREATE, sizeof(*req));

        req->hdr.domain = domain;
        req->capability_flags = cpu_to_le32(cap_flags);
        req->enable_flags = cpu_to_le32(en_flags);
        req->pmac_invalid = pmac_invalid;
        if (!pmac_invalid)
                memcpy(req->mac_addr, mac, ETH_ALEN);

        status = be_mbox_notify_wait(adapter);
        if (!status) {
                struct be_cmd_resp_if_create *resp = embedded_payload(wrb);
                *if_handle = le32_to_cpu(resp->interface_id);
                if (!pmac_invalid)
                        *pmac_id = le32_to_cpu(resp->pmac_id);
        }

        mutex_unlock(&adapter->mbox_lock);
        return status;
}
1162
1163/* Uses mbox */
/* Destroy the network interface created by be_cmd_if_create().
 * Uses mbox.  Refuses to touch the h/w after an EEH error has been
 * flagged.
 */
int be_cmd_if_destroy(struct be_adapter *adapter, u32 interface_id, u32 domain)
{
        struct be_mcc_wrb *wrb;
        struct be_cmd_req_if_destroy *req;
        int status;

        if (adapter->eeh_err)
                return -EIO;

        if (mutex_lock_interruptible(&adapter->mbox_lock))
                return -1;

        wrb = wrb_from_mbox(adapter);
        req = embedded_payload(wrb);

        be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0,
                        OPCODE_COMMON_NTWK_INTERFACE_DESTROY);

        be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
                OPCODE_COMMON_NTWK_INTERFACE_DESTROY, sizeof(*req));

        req->hdr.domain = domain;
        req->interface_id = cpu_to_le32(interface_id);

        status = be_mbox_notify_wait(adapter);

        mutex_unlock(&adapter->mbox_lock);

        return status;
}
1194
1195/* Get stats is a non embedded command: the request is not embedded inside
1196 * WRB but is a separate dma memory block
1197 * Uses asynchronous MCC
1198 */
/* Get stats is a non embedded command: the request is not embedded inside
 * WRB but is a separate dma memory block
 * Uses asynchronous MCC: the command is posted and the function returns
 * without waiting; completion is handled elsewhere (stats_cmd_sent marks
 * the command as in flight).  Every be_get_temp_freq-th call also kicks
 * off a die-temperature query.
 */
int be_cmd_get_stats(struct be_adapter *adapter, struct be_dma_mem *nonemb_cmd)
{
        struct be_mcc_wrb *wrb;
        struct be_cmd_req_hdr *hdr;
        struct be_sge *sge;
        int status = 0;

        if (MODULO(adapter->work_counter, be_get_temp_freq) == 0)
                be_cmd_get_die_temperature(adapter);

        spin_lock_bh(&adapter->mcc_lock);

        wrb = wrb_from_mccq(adapter);
        if (!wrb) {
                status = -EBUSY;
                goto err;
        }
        hdr = nonemb_cmd->va;
        sge = nonembedded_sgl(wrb);

        be_wrb_hdr_prepare(wrb, nonemb_cmd->size, false, 1,
                        OPCODE_ETH_GET_STATISTICS);

        be_cmd_hdr_prepare(hdr, CMD_SUBSYSTEM_ETH,
                OPCODE_ETH_GET_STATISTICS, nonemb_cmd->size);

        /* BE3 firmware returns the v1 stats layout */
        if (adapter->generation == BE_GEN3)
                hdr->version = 1;

        /* NOTE(review): tag1 presumably lets the completion handler
         * recognize this as a stats command — confirm against the
         * MCC completion processing code */
        wrb->tag1 = CMD_SUBSYSTEM_ETH;
        sge->pa_hi = cpu_to_le32(upper_32_bits(nonemb_cmd->dma));
        sge->pa_lo = cpu_to_le32(nonemb_cmd->dma & 0xFFFFFFFF);
        sge->len = cpu_to_le32(nonemb_cmd->size);

        be_mcc_notify(adapter);
        adapter->stats_cmd_sent = true;

err:
        spin_unlock_bh(&adapter->mcc_lock);
        return status;
}
1240
1241/* Lancer Stats */
1242int lancer_cmd_get_pport_stats(struct be_adapter *adapter,
1243                                struct be_dma_mem *nonemb_cmd)
1244{
1245
1246        struct be_mcc_wrb *wrb;
1247        struct lancer_cmd_req_pport_stats *req;
1248        struct be_sge *sge;
1249        int status = 0;
1250
1251        spin_lock_bh(&adapter->mcc_lock);
1252
1253        wrb = wrb_from_mccq(adapter);
1254        if (!wrb) {
1255                status = -EBUSY;
1256                goto err;
1257        }
1258        req = nonemb_cmd->va;
1259        sge = nonembedded_sgl(wrb);
1260
1261        be_wrb_hdr_prepare(wrb, nonemb_cmd->size, false, 1,
1262                        OPCODE_ETH_GET_PPORT_STATS);
1263
1264        be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ETH,
1265                        OPCODE_ETH_GET_PPORT_STATS, nonemb_cmd->size);
1266
1267
1268        req->cmd_params.params.pport_num = cpu_to_le16(adapter->port_num);
1269        req->cmd_params.params.reset_stats = 0;
1270
1271        wrb->tag1 = CMD_SUBSYSTEM_ETH;
1272        sge->pa_hi = cpu_to_le32(upper_32_bits(nonemb_cmd->dma));
1273        sge->pa_lo = cpu_to_le32(nonemb_cmd->dma & 0xFFFFFFFF);
1274        sge->len = cpu_to_le32(nonemb_cmd->size);
1275
1276        be_mcc_notify(adapter);
1277        adapter->stats_cmd_sent = true;
1278
1279err:
1280        spin_unlock_bh(&adapter->mcc_lock);
1281        return status;
1282}
1283
1284/* Uses synchronous mcc */
1285int be_cmd_link_status_query(struct be_adapter *adapter,
1286                        bool *link_up, u8 *mac_speed, u16 *link_speed, u32 dom)
1287{
1288        struct be_mcc_wrb *wrb;
1289        struct be_cmd_req_link_status *req;
1290        int status;
1291
1292        spin_lock_bh(&adapter->mcc_lock);
1293
1294        wrb = wrb_from_mccq(adapter);
1295        if (!wrb) {
1296                status = -EBUSY;
1297                goto err;
1298        }
1299        req = embedded_payload(wrb);
1300
1301        *link_up = false;
1302
1303        be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0,
1304                        OPCODE_COMMON_NTWK_LINK_STATUS_QUERY);
1305
1306        be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
1307                OPCODE_COMMON_NTWK_LINK_STATUS_QUERY, sizeof(*req));
1308
1309        status = be_mcc_notify_wait(adapter);
1310        if (!status) {
1311                struct be_cmd_resp_link_status *resp = embedded_payload(wrb);
1312                if (resp->mac_speed != PHY_LINK_SPEED_ZERO) {
1313                        *link_up = true;
1314                        *link_speed = le16_to_cpu(resp->link_speed);
1315                        *mac_speed = resp->mac_speed;
1316                }
1317        }
1318
1319err:
1320        spin_unlock_bh(&adapter->mcc_lock);
1321        return status;
1322}
1323
1324/* Uses synchronous mcc */
1325int be_cmd_get_die_temperature(struct be_adapter *adapter)
1326{
1327        struct be_mcc_wrb *wrb;
1328        struct be_cmd_req_get_cntl_addnl_attribs *req;
1329        int status;
1330
1331        spin_lock_bh(&adapter->mcc_lock);
1332
1333        wrb = wrb_from_mccq(adapter);
1334        if (!wrb) {
1335                status = -EBUSY;
1336                goto err;
1337        }
1338        req = embedded_payload(wrb);
1339
1340        be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0,
1341                        OPCODE_COMMON_GET_CNTL_ADDITIONAL_ATTRIBUTES);
1342
1343        be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
1344                OPCODE_COMMON_GET_CNTL_ADDITIONAL_ATTRIBUTES, sizeof(*req));
1345
1346        status = be_mcc_notify_wait(adapter);
1347        if (!status) {
1348                struct be_cmd_resp_get_cntl_addnl_attribs *resp =
1349                                                embedded_payload(wrb);
1350                adapter->drv_stats.be_on_die_temperature =
1351                                                resp->on_die_temperature;
1352        }
1353        /* If IOCTL fails once, do not bother issuing it again */
1354        else
1355                be_get_temp_freq = 0;
1356
1357err:
1358        spin_unlock_bh(&adapter->mcc_lock);
1359        return status;
1360}
1361
1362/* Uses synchronous mcc */
1363int be_cmd_get_reg_len(struct be_adapter *adapter, u32 *log_size)
1364{
1365        struct be_mcc_wrb *wrb;
1366        struct be_cmd_req_get_fat *req;
1367        int status;
1368
1369        spin_lock_bh(&adapter->mcc_lock);
1370
1371        wrb = wrb_from_mccq(adapter);
1372        if (!wrb) {
1373                status = -EBUSY;
1374                goto err;
1375        }
1376        req = embedded_payload(wrb);
1377
1378        be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0,
1379                        OPCODE_COMMON_MANAGE_FAT);
1380
1381        be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
1382                OPCODE_COMMON_MANAGE_FAT, sizeof(*req));
1383        req->fat_operation = cpu_to_le32(QUERY_FAT);
1384        status = be_mcc_notify_wait(adapter);
1385        if (!status) {
1386                struct be_cmd_resp_get_fat *resp = embedded_payload(wrb);
1387                if (log_size && resp->log_size)
1388                        *log_size = le32_to_cpu(resp->log_size) -
1389                                        sizeof(u32);
1390        }
1391err:
1392        spin_unlock_bh(&adapter->mcc_lock);
1393        return status;
1394}
1395
1396void be_cmd_get_regs(struct be_adapter *adapter, u32 buf_len, void *buf)
1397{
1398        struct be_dma_mem get_fat_cmd;
1399        struct be_mcc_wrb *wrb;
1400        struct be_cmd_req_get_fat *req;
1401        struct be_sge *sge;
1402        u32 offset = 0, total_size, buf_size,
1403                                log_offset = sizeof(u32), payload_len;
1404        int status;
1405
1406        if (buf_len == 0)
1407                return;
1408
1409        total_size = buf_len;
1410
1411        get_fat_cmd.size = sizeof(struct be_cmd_req_get_fat) + 60*1024;
1412        get_fat_cmd.va = pci_alloc_consistent(adapter->pdev,
1413                        get_fat_cmd.size,
1414                        &get_fat_cmd.dma);
1415        if (!get_fat_cmd.va) {
1416                status = -ENOMEM;
1417                dev_err(&adapter->pdev->dev,
1418                "Memory allocation failure while retrieving FAT data\n");
1419                return;
1420        }
1421
1422        spin_lock_bh(&adapter->mcc_lock);
1423
1424        while (total_size) {
1425                buf_size = min(total_size, (u32)60*1024);
1426                total_size -= buf_size;
1427
1428                wrb = wrb_from_mccq(adapter);
1429                if (!wrb) {
1430                        status = -EBUSY;
1431                        goto err;
1432                }
1433                req = get_fat_cmd.va;
1434                sge = nonembedded_sgl(wrb);
1435
1436                payload_len = sizeof(struct be_cmd_req_get_fat) + buf_size;
1437                be_wrb_hdr_prepare(wrb, payload_len, false, 1,
1438                                OPCODE_COMMON_MANAGE_FAT);
1439
1440                be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
1441                                OPCODE_COMMON_MANAGE_FAT, payload_len);
1442
1443                sge->pa_hi = cpu_to_le32(upper_32_bits(get_fat_cmd.dma));
1444                sge->pa_lo = cpu_to_le32(get_fat_cmd.dma & 0xFFFFFFFF);
1445                sge->len = cpu_to_le32(get_fat_cmd.size);
1446
1447                req->fat_operation = cpu_to_le32(RETRIEVE_FAT);
1448                req->read_log_offset = cpu_to_le32(log_offset);
1449                req->read_log_length = cpu_to_le32(buf_size);
1450                req->data_buffer_size = cpu_to_le32(buf_size);
1451
1452                status = be_mcc_notify_wait(adapter);
1453                if (!status) {
1454                        struct be_cmd_resp_get_fat *resp = get_fat_cmd.va;
1455                        memcpy(buf + offset,
1456                                resp->data_buffer,
1457                                resp->read_log_length);
1458                } else {
1459                        dev_err(&adapter->pdev->dev, "FAT Table Retrieve error\n");
1460                        goto err;
1461                }
1462                offset += buf_size;
1463                log_offset += buf_size;
1464        }
1465err:
1466        pci_free_consistent(adapter->pdev, get_fat_cmd.size,
1467                        get_fat_cmd.va,
1468                        get_fat_cmd.dma);
1469        spin_unlock_bh(&adapter->mcc_lock);
1470}
1471
1472/* Uses Mbox */
1473int be_cmd_get_fw_ver(struct be_adapter *adapter, char *fw_ver)
1474{
1475        struct be_mcc_wrb *wrb;
1476        struct be_cmd_req_get_fw_version *req;
1477        int status;
1478
1479        if (mutex_lock_interruptible(&adapter->mbox_lock))
1480                return -1;
1481
1482        wrb = wrb_from_mbox(adapter);
1483        req = embedded_payload(wrb);
1484
1485        be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0,
1486                        OPCODE_COMMON_GET_FW_VERSION);
1487
1488        be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
1489                OPCODE_COMMON_GET_FW_VERSION, sizeof(*req));
1490
1491        status = be_mbox_notify_wait(adapter);
1492        if (!status) {
1493                struct be_cmd_resp_get_fw_version *resp = embedded_payload(wrb);
1494                strncpy(fw_ver, resp->firmware_version_string, FW_VER_LEN);
1495        }
1496
1497        mutex_unlock(&adapter->mbox_lock);
1498        return status;
1499}
1500
1501/* set the EQ delay interval of an EQ to specified value
1502 * Uses async mcc
1503 */
/* set the EQ delay interval of an EQ to specified value
 * Uses async mcc: the command is posted (be_mcc_notify) without waiting
 * for completion, so success here only means the request was queued.
 */
int be_cmd_modify_eqd(struct be_adapter *adapter, u32 eq_id, u32 eqd)
{
        struct be_mcc_wrb *wrb;
        struct be_cmd_req_modify_eq_delay *req;
        int status = 0;

        spin_lock_bh(&adapter->mcc_lock);

        wrb = wrb_from_mccq(adapter);
        if (!wrb) {
                status = -EBUSY;
                goto err;
        }
        req = embedded_payload(wrb);

        be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0,
                        OPCODE_COMMON_MODIFY_EQ_DELAY);

        be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
                OPCODE_COMMON_MODIFY_EQ_DELAY, sizeof(*req));

        /* A single EQ-delay entry for the requested EQ */
        req->num_eq = cpu_to_le32(1);
        req->delay[0].eq_id = cpu_to_le32(eq_id);
        req->delay[0].phase = 0;
        req->delay[0].delay_multiplier = cpu_to_le32(eqd);

        be_mcc_notify(adapter);

err:
        spin_unlock_bh(&adapter->mcc_lock);
        return status;
}
1536
1537/* Uses sycnhronous mcc */
/* Uses sycnhronous mcc.
 * Program the VLAN filter of interface @if_id: @num tags from
 * @vtag_array, with untagged and VLAN-promiscuous behavior controlled by
 * the corresponding flags.  When @promiscuous is set, the tag array is
 * ignored.
 */
int be_cmd_vlan_config(struct be_adapter *adapter, u32 if_id, u16 *vtag_array,
                        u32 num, bool untagged, bool promiscuous)
{
        struct be_mcc_wrb *wrb;
        struct be_cmd_req_vlan_config *req;
        int status;

        spin_lock_bh(&adapter->mcc_lock);

        wrb = wrb_from_mccq(adapter);
        if (!wrb) {
                status = -EBUSY;
                goto err;
        }
        req = embedded_payload(wrb);

        be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0,
                        OPCODE_COMMON_NTWK_VLAN_CONFIG);

        be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
                OPCODE_COMMON_NTWK_VLAN_CONFIG, sizeof(*req));

        req->interface_id = if_id;
        req->promiscuous = promiscuous;
        req->untagged = untagged;
        req->num_vlan = num;
        if (!promiscuous) {
                memcpy(req->normal_vlan, vtag_array,
                        req->num_vlan * sizeof(vtag_array[0]));
        }

        status = be_mcc_notify_wait(adapter);

err:
        spin_unlock_bh(&adapter->mcc_lock);
        return status;
}
1575
1576/* Uses MCC for this command as it may be called in BH context
1577 * Uses synchronous mcc
1578 */
/* Enable/disable promiscuous mode on the adapter's interface via a
 * non-embedded RX_FILTER command (the request lives in a DMA buffer
 * allocated and freed here).
 * Uses MCC for this command as it may be called in BH context.
 * Uses synchronous mcc.
 */
int be_cmd_promiscuous_config(struct be_adapter *adapter, bool en)
{
        struct be_mcc_wrb *wrb;
        struct be_cmd_req_rx_filter *req;
        struct be_dma_mem promiscous_cmd;
        struct be_sge *sge;
        int status;

        memset(&promiscous_cmd, 0, sizeof(struct be_dma_mem));
        promiscous_cmd.size = sizeof(struct be_cmd_req_rx_filter);
        promiscous_cmd.va = pci_alloc_consistent(adapter->pdev,
                                promiscous_cmd.size, &promiscous_cmd.dma);
        if (!promiscous_cmd.va) {
                dev_err(&adapter->pdev->dev,
                                "Memory allocation failure\n");
                return -ENOMEM;
        }

        spin_lock_bh(&adapter->mcc_lock);

        wrb = wrb_from_mccq(adapter);
        if (!wrb) {
                status = -EBUSY;
                goto err;
        }

        req = promiscous_cmd.va;
        sge = nonembedded_sgl(wrb);

        be_wrb_hdr_prepare(wrb, sizeof(*req), false, 1,
                                        OPCODE_COMMON_NTWK_RX_FILTER);
        be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
                        OPCODE_COMMON_NTWK_RX_FILTER, sizeof(*req));

        /* Only the PROMISCUOUS bit is masked in; setting it in if_flags
         * enables promiscuous mode, leaving it clear (with en == false)
         * disables it */
        req->if_id = cpu_to_le32(adapter->if_handle);
        req->if_flags_mask = cpu_to_le32(BE_IF_FLAGS_PROMISCUOUS);
        if (en)
                req->if_flags = cpu_to_le32(BE_IF_FLAGS_PROMISCUOUS);

        sge->pa_hi = cpu_to_le32(upper_32_bits(promiscous_cmd.dma));
        sge->pa_lo = cpu_to_le32(promiscous_cmd.dma & 0xFFFFFFFF);
        sge->len = cpu_to_le32(promiscous_cmd.size);

        status = be_mcc_notify_wait(adapter);

err:
        /* Free only after dropping the BH spinlock (atomic context) */
        spin_unlock_bh(&adapter->mcc_lock);
        pci_free_consistent(adapter->pdev, promiscous_cmd.size,
                        promiscous_cmd.va, promiscous_cmd.dma);
        return status;
}
1630
1631/*
1632 * Uses MCC for this command as it may be called in BH context
1633 * (mc == NULL) => multicast promiscuous
1634 */
/*
 * Program the multicast filter for interface @if_id from @netdev's
 * multicast list; caller supplies the DMA buffer @mem for this
 * non-embedded command.
 * Uses MCC for this command as it may be called in BH context
 * (mc == NULL) => multicast promiscuous
 */
int be_cmd_multicast_set(struct be_adapter *adapter, u32 if_id,
                struct net_device *netdev, struct be_dma_mem *mem)
{
        struct be_mcc_wrb *wrb;
        struct be_cmd_req_mcast_mac_config *req = mem->va;
        struct be_sge *sge;
        int status;

        spin_lock_bh(&adapter->mcc_lock);

        wrb = wrb_from_mccq(adapter);
        if (!wrb) {
                status = -EBUSY;
                goto err;
        }
        sge = nonembedded_sgl(wrb);
        memset(req, 0, sizeof(*req));

        be_wrb_hdr_prepare(wrb, sizeof(*req), false, 1,
                        OPCODE_COMMON_NTWK_MULTICAST_SET);
        sge->pa_hi = cpu_to_le32(upper_32_bits(mem->dma));
        sge->pa_lo = cpu_to_le32(mem->dma & 0xFFFFFFFF);
        sge->len = cpu_to_le32(mem->size);

        be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
                OPCODE_COMMON_NTWK_MULTICAST_SET, sizeof(*req));

        req->interface_id = if_id;
        if (netdev) {
                int i;
                struct netdev_hw_addr *ha;

                /* Copy the netdev's multicast list into the request */
                req->num_mac = cpu_to_le16(netdev_mc_count(netdev));

                i = 0;
                netdev_for_each_mc_addr(ha, netdev)
                        memcpy(req->mac[i++].byte, ha->addr, ETH_ALEN);
        } else {
                /* No list supplied: ask for multicast promiscuous */
                req->promiscuous = 1;
        }

        status = be_mcc_notify_wait(adapter);

err:
        spin_unlock_bh(&adapter->mcc_lock);
        return status;
}
1682
1683/* Uses synchrounous mcc */
/* Uses synchrounous mcc.
 * Set TX/RX pause-frame (flow control) behavior; non-zero enables the
 * respective direction.
 */
int be_cmd_set_flow_control(struct be_adapter *adapter, u32 tx_fc, u32 rx_fc)
{
        struct be_mcc_wrb *wrb;
        struct be_cmd_req_set_flow_control *req;
        int status;

        spin_lock_bh(&adapter->mcc_lock);

        wrb = wrb_from_mccq(adapter);
        if (!wrb) {
                status = -EBUSY;
                goto err;
        }
        req = embedded_payload(wrb);

        be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0,
                        OPCODE_COMMON_SET_FLOW_CONTROL);

        be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
                OPCODE_COMMON_SET_FLOW_CONTROL, sizeof(*req));

        req->tx_flow_control = cpu_to_le16((u16)tx_fc);
        req->rx_flow_control = cpu_to_le16((u16)rx_fc);

        status = be_mcc_notify_wait(adapter);

err:
        spin_unlock_bh(&adapter->mcc_lock);
        return status;
}
1714
1715/* Uses sycn mcc */
/* Uses sycn mcc.
 * Query the current TX/RX flow-control settings into *tx_fc and *rx_fc;
 * outputs are written only on success.
 */
int be_cmd_get_flow_control(struct be_adapter *adapter, u32 *tx_fc, u32 *rx_fc)
{
        struct be_mcc_wrb *wrb;
        struct be_cmd_req_get_flow_control *req;
        int status;

        spin_lock_bh(&adapter->mcc_lock);

        wrb = wrb_from_mccq(adapter);
        if (!wrb) {
                status = -EBUSY;
                goto err;
        }
        req = embedded_payload(wrb);

        be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0,
                        OPCODE_COMMON_GET_FLOW_CONTROL);

        be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
                OPCODE_COMMON_GET_FLOW_CONTROL, sizeof(*req));

        status = be_mcc_notify_wait(adapter);
        if (!status) {
                struct be_cmd_resp_get_flow_control *resp =
                                                embedded_payload(wrb);
                *tx_fc = le16_to_cpu(resp->tx_flow_control);
                *rx_fc = le16_to_cpu(resp->rx_flow_control);
        }

err:
        spin_unlock_bh(&adapter->mcc_lock);
        return status;
}
1749
1750/* Uses mbox */
1751int be_cmd_query_fw_cfg(struct be_adapter *adapter, u32 *port_num,
1752                u32 *mode, u32 *caps)
1753{
1754        struct be_mcc_wrb *wrb;
1755        struct be_cmd_req_query_fw_cfg *req;
1756        int status;
1757
1758        if (mutex_lock_interruptible(&adapter->mbox_lock))
1759                return -1;
1760
1761        wrb = wrb_from_mbox(adapter);
1762        req = embedded_payload(wrb);
1763
1764        be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0,
1765                        OPCODE_COMMON_QUERY_FIRMWARE_CONFIG);
1766
1767        be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
1768                OPCODE_COMMON_QUERY_FIRMWARE_CONFIG, sizeof(*req));
1769
1770        status = be_mbox_notify_wait(adapter);
1771        if (!status) {
1772                struct be_cmd_resp_query_fw_cfg *resp = embedded_payload(wrb);
1773                *port_num = le32_to_cpu(resp->phys_port);
1774                *mode = le32_to_cpu(resp->function_mode);
1775                *caps = le32_to_cpu(resp->function_caps);
1776        }
1777
1778        mutex_unlock(&adapter->mbox_lock);
1779        return status;
1780}
1781
1782/* Uses mbox */
1783int be_cmd_reset_function(struct be_adapter *adapter)
1784{
1785        struct be_mcc_wrb *wrb;
1786        struct be_cmd_req_hdr *req;
1787        int status;
1788
1789        if (mutex_lock_interruptible(&adapter->mbox_lock))
1790                return -1;
1791
1792        wrb = wrb_from_mbox(adapter);
1793        req = embedded_payload(wrb);
1794
1795        be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0,
1796                        OPCODE_COMMON_FUNCTION_RESET);
1797
1798        be_cmd_hdr_prepare(req, CMD_SUBSYSTEM_COMMON,
1799                OPCODE_COMMON_FUNCTION_RESET, sizeof(*req));
1800
1801        status = be_mbox_notify_wait(adapter);
1802
1803        mutex_unlock(&adapter->mbox_lock);
1804        return status;
1805}
1806
1807int be_cmd_rss_config(struct be_adapter *adapter, u8 *rsstable, u16 table_size)
1808{
1809        struct be_mcc_wrb *wrb;
1810        struct be_cmd_req_rss_config *req;
1811        u32 myhash[10] = {0x0123, 0x4567, 0x89AB, 0xCDEF, 0x01EF,
1812                        0x0123, 0x4567, 0x89AB, 0xCDEF, 0x01EF};
1813        int status;
1814
1815        if (mutex_lock_interruptible(&adapter->mbox_lock))
1816                return -1;
1817
1818        wrb = wrb_from_mbox(adapter);
1819        req = embedded_payload(wrb);
1820
1821        be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0,
1822                OPCODE_ETH_RSS_CONFIG);
1823
1824        be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ETH,
1825                OPCODE_ETH_RSS_CONFIG, sizeof(*req));
1826
1827        req->if_id = cpu_to_le32(adapter->if_handle);
1828        req->enable_rss = cpu_to_le16(RSS_ENABLE_TCP_IPV4 | RSS_ENABLE_IPV4);
1829        req->cpu_table_size_log2 = cpu_to_le16(fls(table_size) - 1);
1830        memcpy(req->cpu_table, rsstable, table_size);
1831        memcpy(req->hash, myhash, sizeof(myhash));
1832        be_dws_cpu_to_le(req->hash, sizeof(req->hash));
1833
1834        status = be_mbox_notify_wait(adapter);
1835
1836        mutex_unlock(&adapter->mbox_lock);
1837        return status;
1838}
1839
1840/* Uses sync mcc */
1841int be_cmd_set_beacon_state(struct be_adapter *adapter, u8 port_num,
1842                        u8 bcn, u8 sts, u8 state)
1843{
1844        struct be_mcc_wrb *wrb;
1845        struct be_cmd_req_enable_disable_beacon *req;
1846        int status;
1847
1848        spin_lock_bh(&adapter->mcc_lock);
1849
1850        wrb = wrb_from_mccq(adapter);
1851        if (!wrb) {
1852                status = -EBUSY;
1853                goto err;
1854        }
1855        req = embedded_payload(wrb);
1856
1857        be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0,
1858                        OPCODE_COMMON_ENABLE_DISABLE_BEACON);
1859
1860        be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
1861                OPCODE_COMMON_ENABLE_DISABLE_BEACON, sizeof(*req));
1862
1863        req->port_num = port_num;
1864        req->beacon_state = state;
1865        req->beacon_duration = bcn;
1866        req->status_duration = sts;
1867
1868        status = be_mcc_notify_wait(adapter);
1869
1870err:
1871        spin_unlock_bh(&adapter->mcc_lock);
1872        return status;
1873}
1874
1875/* Uses sync mcc */
1876int be_cmd_get_beacon_state(struct be_adapter *adapter, u8 port_num, u32 *state)
1877{
1878        struct be_mcc_wrb *wrb;
1879        struct be_cmd_req_get_beacon_state *req;
1880        int status;
1881
1882        spin_lock_bh(&adapter->mcc_lock);
1883
1884        wrb = wrb_from_mccq(adapter);
1885        if (!wrb) {
1886                status = -EBUSY;
1887                goto err;
1888        }
1889        req = embedded_payload(wrb);
1890
1891        be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0,
1892                        OPCODE_COMMON_GET_BEACON_STATE);
1893
1894        be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
1895                OPCODE_COMMON_GET_BEACON_STATE, sizeof(*req));
1896
1897        req->port_num = port_num;
1898
1899        status = be_mcc_notify_wait(adapter);
1900        if (!status) {
1901                struct be_cmd_resp_get_beacon_state *resp =
1902                                                embedded_payload(wrb);
1903                *state = resp->beacon_state;
1904        }
1905
1906err:
1907        spin_unlock_bh(&adapter->mcc_lock);
1908        return status;
1909}
1910
/* Writes a chunk of a flash object on Lancer chips.
 * @cmd holds the DMA buffer; the payload to write must be placed after
 * the request header within it (the SGE address below skips the header).
 * A zero @data_size marks end-of-file to the firmware.
 * Completion is asynchronous: the MCC completion handler fills in
 * adapter->flash_status and signals adapter->flash_compl.
 */
int lancer_cmd_write_object(struct be_adapter *adapter, struct be_dma_mem *cmd,
                        u32 data_size, u32 data_offset, const char *obj_name,
                        u32 *data_written, u8 *addn_status)
{
        struct be_mcc_wrb *wrb;
        struct lancer_cmd_req_write_object *req;
        struct lancer_cmd_resp_write_object *resp;
        void *ctxt = NULL;
        int status;

        spin_lock_bh(&adapter->mcc_lock);
        /* cleared here; set by the completion handler before flash_compl */
        adapter->flash_status = 0;

        wrb = wrb_from_mccq(adapter);
        if (!wrb) {
                status = -EBUSY;
                goto err_unlock;
        }

        req = embedded_payload(wrb);

        be_wrb_hdr_prepare(wrb, sizeof(struct lancer_cmd_req_write_object),
                        true, 1, OPCODE_COMMON_WRITE_OBJECT);
        /* tag1 lets the completion handler identify this command */
        wrb->tag1 = CMD_SUBSYSTEM_COMMON;

        be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
                                OPCODE_COMMON_WRITE_OBJECT,
                                sizeof(struct lancer_cmd_req_write_object));

        ctxt = &req->context;
        AMAP_SET_BITS(struct amap_lancer_write_obj_context,
                        write_length, ctxt, data_size);

        /* zero-length write signals EOF to the firmware */
        if (data_size == 0)
                AMAP_SET_BITS(struct amap_lancer_write_obj_context,
                                eof, ctxt, 1);
        else
                AMAP_SET_BITS(struct amap_lancer_write_obj_context,
                                eof, ctxt, 0);

        be_dws_cpu_to_le(ctxt, sizeof(req->context));
        req->write_offset = cpu_to_le32(data_offset);
        /* NOTE(review): unbounded strcpy into a fixed-size field; all
         * callers pass short driver-internal names — confirm obj_name
         * always fits req->object_name */
        strcpy(req->object_name, obj_name);
        req->descriptor_count = cpu_to_le32(1);
        req->buf_len = cpu_to_le32(data_size);
        /* data follows the request header inside the same DMA buffer */
        req->addr_low = cpu_to_le32((cmd->dma +
                                sizeof(struct lancer_cmd_req_write_object))
                                & 0xFFFFFFFF);
        req->addr_high = cpu_to_le32(upper_32_bits(cmd->dma +
                                sizeof(struct lancer_cmd_req_write_object)));

        be_mcc_notify(adapter);
        spin_unlock_bh(&adapter->mcc_lock);

        /* 12s firmware timeout; -1 (not an errno) flags the timeout */
        if (!wait_for_completion_timeout(&adapter->flash_compl,
                        msecs_to_jiffies(12000)))
                status = -1;
        else
                status = adapter->flash_status;

        /* NOTE(review): wrb/resp are read after mcc_lock was dropped and,
         * on timeout, possibly before FW completed — verify the wrb cannot
         * be reused concurrently */
        resp = embedded_payload(wrb);
        if (!status) {
                *data_written = le32_to_cpu(resp->actual_write_len);
        } else {
                *addn_status = resp->additional_status;
                status = resp->status;
        }

        return status;

err_unlock:
        spin_unlock_bh(&adapter->mcc_lock);
        return status;
}
1985
/* Issues a flashrom write operation using the non-embedded buffer @cmd.
 * Completion is asynchronous: the MCC completion handler stores the
 * result in adapter->flash_status and completes adapter->flash_compl.
 */
int be_cmd_write_flashrom(struct be_adapter *adapter, struct be_dma_mem *cmd,
                        u32 flash_type, u32 flash_opcode, u32 buf_size)
{
        struct be_mcc_wrb *wrb;
        struct be_cmd_write_flashrom *req;
        struct be_sge *sge;
        int status;

        spin_lock_bh(&adapter->mcc_lock);
        /* cleared here; set by the completion handler before flash_compl */
        adapter->flash_status = 0;

        wrb = wrb_from_mccq(adapter);
        if (!wrb) {
                status = -EBUSY;
                goto err_unlock;
        }
        /* request lives in the caller-supplied DMA buffer, not the wrb */
        req = cmd->va;
        sge = nonembedded_sgl(wrb);

        be_wrb_hdr_prepare(wrb, cmd->size, false, 1,
                        OPCODE_COMMON_WRITE_FLASHROM);
        /* tag1 lets the completion handler identify this command */
        wrb->tag1 = CMD_SUBSYSTEM_COMMON;

        be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
                OPCODE_COMMON_WRITE_FLASHROM, cmd->size);
        sge->pa_hi = cpu_to_le32(upper_32_bits(cmd->dma));
        sge->pa_lo = cpu_to_le32(cmd->dma & 0xFFFFFFFF);
        sge->len = cpu_to_le32(cmd->size);

        req->params.op_type = cpu_to_le32(flash_type);
        req->params.op_code = cpu_to_le32(flash_opcode);
        req->params.data_buf_size = cpu_to_le32(buf_size);

        be_mcc_notify(adapter);
        spin_unlock_bh(&adapter->mcc_lock);

        /* 12s firmware timeout; -1 (not an errno) flags the timeout */
        if (!wait_for_completion_timeout(&adapter->flash_compl,
                        msecs_to_jiffies(12000)))
                status = -1;
        else
                status = adapter->flash_status;

        return status;

err_unlock:
        spin_unlock_bh(&adapter->mcc_lock);
        return status;
}
2034
2035int be_cmd_get_flash_crc(struct be_adapter *adapter, u8 *flashed_crc,
2036                         int offset)
2037{
2038        struct be_mcc_wrb *wrb;
2039        struct be_cmd_write_flashrom *req;
2040        int status;
2041
2042        spin_lock_bh(&adapter->mcc_lock);
2043
2044        wrb = wrb_from_mccq(adapter);
2045        if (!wrb) {
2046                status = -EBUSY;
2047                goto err;
2048        }
2049        req = embedded_payload(wrb);
2050
2051        be_wrb_hdr_prepare(wrb, sizeof(*req)+4, true, 0,
2052                        OPCODE_COMMON_READ_FLASHROM);
2053
2054        be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
2055                OPCODE_COMMON_READ_FLASHROM, sizeof(*req)+4);
2056
2057        req->params.op_type = cpu_to_le32(IMG_TYPE_REDBOOT);
2058        req->params.op_code = cpu_to_le32(FLASHROM_OPER_REPORT);
2059        req->params.offset = cpu_to_le32(offset);
2060        req->params.data_buf_size = cpu_to_le32(0x4);
2061
2062        status = be_mcc_notify_wait(adapter);
2063        if (!status)
2064                memcpy(flashed_crc, req->params.data_buf, 4);
2065
2066err:
2067        spin_unlock_bh(&adapter->mcc_lock);
2068        return status;
2069}
2070
2071int be_cmd_enable_magic_wol(struct be_adapter *adapter, u8 *mac,
2072                                struct be_dma_mem *nonemb_cmd)
2073{
2074        struct be_mcc_wrb *wrb;
2075        struct be_cmd_req_acpi_wol_magic_config *req;
2076        struct be_sge *sge;
2077        int status;
2078
2079        spin_lock_bh(&adapter->mcc_lock);
2080
2081        wrb = wrb_from_mccq(adapter);
2082        if (!wrb) {
2083                status = -EBUSY;
2084                goto err;
2085        }
2086        req = nonemb_cmd->va;
2087        sge = nonembedded_sgl(wrb);
2088
2089        be_wrb_hdr_prepare(wrb, sizeof(*req), false, 1,
2090                        OPCODE_ETH_ACPI_WOL_MAGIC_CONFIG);
2091
2092        be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ETH,
2093                OPCODE_ETH_ACPI_WOL_MAGIC_CONFIG, sizeof(*req));
2094        memcpy(req->magic_mac, mac, ETH_ALEN);
2095
2096        sge->pa_hi = cpu_to_le32(upper_32_bits(nonemb_cmd->dma));
2097        sge->pa_lo = cpu_to_le32(nonemb_cmd->dma & 0xFFFFFFFF);
2098        sge->len = cpu_to_le32(nonemb_cmd->size);
2099
2100        status = be_mcc_notify_wait(adapter);
2101
2102err:
2103        spin_unlock_bh(&adapter->mcc_lock);
2104        return status;
2105}
2106
2107int be_cmd_set_loopback(struct be_adapter *adapter, u8 port_num,
2108                        u8 loopback_type, u8 enable)
2109{
2110        struct be_mcc_wrb *wrb;
2111        struct be_cmd_req_set_lmode *req;
2112        int status;
2113
2114        spin_lock_bh(&adapter->mcc_lock);
2115
2116        wrb = wrb_from_mccq(adapter);
2117        if (!wrb) {
2118                status = -EBUSY;
2119                goto err;
2120        }
2121
2122        req = embedded_payload(wrb);
2123
2124        be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0,
2125                                OPCODE_LOWLEVEL_SET_LOOPBACK_MODE);
2126
2127        be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_LOWLEVEL,
2128                        OPCODE_LOWLEVEL_SET_LOOPBACK_MODE,
2129                        sizeof(*req));
2130
2131        req->src_port = port_num;
2132        req->dest_port = port_num;
2133        req->loopback_type = loopback_type;
2134        req->loopback_state = enable;
2135
2136        status = be_mcc_notify_wait(adapter);
2137err:
2138        spin_unlock_bh(&adapter->mcc_lock);
2139        return status;
2140}
2141
2142int be_cmd_loopback_test(struct be_adapter *adapter, u32 port_num,
2143                u32 loopback_type, u32 pkt_size, u32 num_pkts, u64 pattern)
2144{
2145        struct be_mcc_wrb *wrb;
2146        struct be_cmd_req_loopback_test *req;
2147        int status;
2148
2149        spin_lock_bh(&adapter->mcc_lock);
2150
2151        wrb = wrb_from_mccq(adapter);
2152        if (!wrb) {
2153                status = -EBUSY;
2154                goto err;
2155        }
2156
2157        req = embedded_payload(wrb);
2158
2159        be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0,
2160                                OPCODE_LOWLEVEL_LOOPBACK_TEST);
2161
2162        be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_LOWLEVEL,
2163                        OPCODE_LOWLEVEL_LOOPBACK_TEST, sizeof(*req));
2164        req->hdr.timeout = cpu_to_le32(4);
2165
2166        req->pattern = cpu_to_le64(pattern);
2167        req->src_port = cpu_to_le32(port_num);
2168        req->dest_port = cpu_to_le32(port_num);
2169        req->pkt_size = cpu_to_le32(pkt_size);
2170        req->num_pkts = cpu_to_le32(num_pkts);
2171        req->loopback_type = cpu_to_le32(loopback_type);
2172
2173        status = be_mcc_notify_wait(adapter);
2174        if (!status) {
2175                struct be_cmd_resp_loopback_test *resp = embedded_payload(wrb);
2176                status = le32_to_cpu(resp->status);
2177        }
2178
2179err:
2180        spin_unlock_bh(&adapter->mcc_lock);
2181        return status;
2182}
2183
2184int be_cmd_ddr_dma_test(struct be_adapter *adapter, u64 pattern,
2185                                u32 byte_cnt, struct be_dma_mem *cmd)
2186{
2187        struct be_mcc_wrb *wrb;
2188        struct be_cmd_req_ddrdma_test *req;
2189        struct be_sge *sge;
2190        int status;
2191        int i, j = 0;
2192
2193        spin_lock_bh(&adapter->mcc_lock);
2194
2195        wrb = wrb_from_mccq(adapter);
2196        if (!wrb) {
2197                status = -EBUSY;
2198                goto err;
2199        }
2200        req = cmd->va;
2201        sge = nonembedded_sgl(wrb);
2202        be_wrb_hdr_prepare(wrb, cmd->size, false, 1,
2203                                OPCODE_LOWLEVEL_HOST_DDR_DMA);
2204        be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_LOWLEVEL,
2205                        OPCODE_LOWLEVEL_HOST_DDR_DMA, cmd->size);
2206
2207        sge->pa_hi = cpu_to_le32(upper_32_bits(cmd->dma));
2208        sge->pa_lo = cpu_to_le32(cmd->dma & 0xFFFFFFFF);
2209        sge->len = cpu_to_le32(cmd->size);
2210
2211        req->pattern = cpu_to_le64(pattern);
2212        req->byte_count = cpu_to_le32(byte_cnt);
2213        for (i = 0; i < byte_cnt; i++) {
2214                req->snd_buff[i] = (u8)(pattern >> (j*8));
2215                j++;
2216                if (j > 7)
2217                        j = 0;
2218        }
2219
2220        status = be_mcc_notify_wait(adapter);
2221
2222        if (!status) {
2223                struct be_cmd_resp_ddrdma_test *resp;
2224                resp = cmd->va;
2225                if ((memcmp(resp->rcv_buff, req->snd_buff, byte_cnt) != 0) ||
2226                                resp->snd_err) {
2227                        status = -1;
2228                }
2229        }
2230
2231err:
2232        spin_unlock_bh(&adapter->mcc_lock);
2233        return status;
2234}
2235
2236int be_cmd_get_seeprom_data(struct be_adapter *adapter,
2237                                struct be_dma_mem *nonemb_cmd)
2238{
2239        struct be_mcc_wrb *wrb;
2240        struct be_cmd_req_seeprom_read *req;
2241        struct be_sge *sge;
2242        int status;
2243
2244        spin_lock_bh(&adapter->mcc_lock);
2245
2246        wrb = wrb_from_mccq(adapter);
2247        if (!wrb) {
2248                status = -EBUSY;
2249                goto err;
2250        }
2251        req = nonemb_cmd->va;
2252        sge = nonembedded_sgl(wrb);
2253
2254        be_wrb_hdr_prepare(wrb, sizeof(*req), false, 1,
2255                        OPCODE_COMMON_SEEPROM_READ);
2256
2257        be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
2258                        OPCODE_COMMON_SEEPROM_READ, sizeof(*req));
2259
2260        sge->pa_hi = cpu_to_le32(upper_32_bits(nonemb_cmd->dma));
2261        sge->pa_lo = cpu_to_le32(nonemb_cmd->dma & 0xFFFFFFFF);
2262        sge->len = cpu_to_le32(nonemb_cmd->size);
2263
2264        status = be_mcc_notify_wait(adapter);
2265
2266err:
2267        spin_unlock_bh(&adapter->mcc_lock);
2268        return status;
2269}
2270
2271int be_cmd_get_phy_info(struct be_adapter *adapter, struct be_dma_mem *cmd)
2272{
2273        struct be_mcc_wrb *wrb;
2274        struct be_cmd_req_get_phy_info *req;
2275        struct be_sge *sge;
2276        int status;
2277
2278        spin_lock_bh(&adapter->mcc_lock);
2279
2280        wrb = wrb_from_mccq(adapter);
2281        if (!wrb) {
2282                status = -EBUSY;
2283                goto err;
2284        }
2285
2286        req = cmd->va;
2287        sge = nonembedded_sgl(wrb);
2288
2289        be_wrb_hdr_prepare(wrb, sizeof(*req), false, 1,
2290                                OPCODE_COMMON_GET_PHY_DETAILS);
2291
2292        be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
2293                        OPCODE_COMMON_GET_PHY_DETAILS,
2294                        sizeof(*req));
2295
2296        sge->pa_hi = cpu_to_le32(upper_32_bits(cmd->dma));
2297        sge->pa_lo = cpu_to_le32(cmd->dma & 0xFFFFFFFF);
2298        sge->len = cpu_to_le32(cmd->size);
2299
2300        status = be_mcc_notify_wait(adapter);
2301err:
2302        spin_unlock_bh(&adapter->mcc_lock);
2303        return status;
2304}
2305
2306int be_cmd_set_qos(struct be_adapter *adapter, u32 bps, u32 domain)
2307{
2308        struct be_mcc_wrb *wrb;
2309        struct be_cmd_req_set_qos *req;
2310        int status;
2311
2312        spin_lock_bh(&adapter->mcc_lock);
2313
2314        wrb = wrb_from_mccq(adapter);
2315        if (!wrb) {
2316                status = -EBUSY;
2317                goto err;
2318        }
2319
2320        req = embedded_payload(wrb);
2321
2322        be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0,
2323                                OPCODE_COMMON_SET_QOS);
2324
2325        be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
2326                        OPCODE_COMMON_SET_QOS, sizeof(*req));
2327
2328        req->hdr.domain = domain;
2329        req->valid_bits = cpu_to_le32(BE_QOS_BITS_NIC);
2330        req->max_bps_nic = cpu_to_le32(bps);
2331
2332        status = be_mcc_notify_wait(adapter);
2333
2334err:
2335        spin_unlock_bh(&adapter->mcc_lock);
2336        return status;
2337}
2338
2339int be_cmd_get_cntl_attributes(struct be_adapter *adapter)
2340{
2341        struct be_mcc_wrb *wrb;
2342        struct be_cmd_req_cntl_attribs *req;
2343        struct be_cmd_resp_cntl_attribs *resp;
2344        struct be_sge *sge;
2345        int status;
2346        int payload_len = max(sizeof(*req), sizeof(*resp));
2347        struct mgmt_controller_attrib *attribs;
2348        struct be_dma_mem attribs_cmd;
2349
2350        memset(&attribs_cmd, 0, sizeof(struct be_dma_mem));
2351        attribs_cmd.size = sizeof(struct be_cmd_resp_cntl_attribs);
2352        attribs_cmd.va = pci_alloc_consistent(adapter->pdev, attribs_cmd.size,
2353                                                &attribs_cmd.dma);
2354        if (!attribs_cmd.va) {
2355                dev_err(&adapter->pdev->dev,
2356                                "Memory allocation failure\n");
2357                return -ENOMEM;
2358        }
2359
2360        if (mutex_lock_interruptible(&adapter->mbox_lock))
2361                return -1;
2362
2363        wrb = wrb_from_mbox(adapter);
2364        if (!wrb) {
2365                status = -EBUSY;
2366                goto err;
2367        }
2368        req = attribs_cmd.va;
2369        sge = nonembedded_sgl(wrb);
2370
2371        be_wrb_hdr_prepare(wrb, payload_len, false, 1,
2372                        OPCODE_COMMON_GET_CNTL_ATTRIBUTES);
2373        be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
2374                         OPCODE_COMMON_GET_CNTL_ATTRIBUTES, payload_len);
2375        sge->pa_hi = cpu_to_le32(upper_32_bits(attribs_cmd.dma));
2376        sge->pa_lo = cpu_to_le32(attribs_cmd.dma & 0xFFFFFFFF);
2377        sge->len = cpu_to_le32(attribs_cmd.size);
2378
2379        status = be_mbox_notify_wait(adapter);
2380        if (!status) {
2381                attribs = attribs_cmd.va + sizeof(struct be_cmd_resp_hdr);
2382                adapter->hba_port_num = attribs->hba_attribs.phy_port;
2383        }
2384
2385err:
2386        mutex_unlock(&adapter->mbox_lock);
2387        pci_free_consistent(adapter->pdev, attribs_cmd.size, attribs_cmd.va,
2388                                        attribs_cmd.dma);
2389        return status;
2390}
2391
2392/* Uses mbox */
2393int be_cmd_req_native_mode(struct be_adapter *adapter)
2394{
2395        struct be_mcc_wrb *wrb;
2396        struct be_cmd_req_set_func_cap *req;
2397        int status;
2398
2399        if (mutex_lock_interruptible(&adapter->mbox_lock))
2400                return -1;
2401
2402        wrb = wrb_from_mbox(adapter);
2403        if (!wrb) {
2404                status = -EBUSY;
2405                goto err;
2406        }
2407
2408        req = embedded_payload(wrb);
2409
2410        be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0,
2411                OPCODE_COMMON_SET_DRIVER_FUNCTION_CAP);
2412
2413        be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
2414                OPCODE_COMMON_SET_DRIVER_FUNCTION_CAP, sizeof(*req));
2415
2416        req->valid_cap_flags = cpu_to_le32(CAPABILITY_SW_TIMESTAMPS |
2417                                CAPABILITY_BE3_NATIVE_ERX_API);
2418        req->cap_flags = cpu_to_le32(CAPABILITY_BE3_NATIVE_ERX_API);
2419
2420        status = be_mbox_notify_wait(adapter);
2421        if (!status) {
2422                struct be_cmd_resp_set_func_cap *resp = embedded_payload(wrb);
2423                adapter->be3_native = le32_to_cpu(resp->cap_flags) &
2424                                        CAPABILITY_BE3_NATIVE_ERX_API;
2425        }
2426err:
2427        mutex_unlock(&adapter->mbox_lock);
2428        return status;
2429}
2430