linux/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c
/* bnx2x_sp.c: Broadcom Everest network driver.
 *
 * Copyright (c) 2011-2013 Broadcom Corporation
 *
 * Unless you and Broadcom execute a separate written software license
 * agreement governing use of this software, this software is licensed to you
 * under the terms of the GNU General Public License version 2, available
 * at http://www.gnu.org/licenses/old-licenses/gpl-2.0.html (the "GPL").
 *
 * Notwithstanding the above, under no circumstances may you combine this
 * software in any way with any other Broadcom software provided under a
 * license other than the GPL, without Broadcom's express prior written
 * consent.
 *
 * Maintained by: Ariel Elior <ariel.elior@qlogic.com>
 * Written by: Vladislav Zolotarov
 *
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/module.h>
#include <linux/crc32.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/crc32c.h>
#include "bnx2x.h"
#include "bnx2x_cmn.h"
#include "bnx2x_sp.h"

#define BNX2X_MAX_EMUL_MULTI            16

/**** Exe Queue interfaces ****/

/**
 * bnx2x_exe_queue_init - init the Exe Queue object
 *
 * @bp:         driver handle
 * @o:          pointer to the object
 * @exe_len:    length
 * @owner:      pointer to the owner
 * @validate:   validate function pointer
 * @remove:     remove function pointer
 * @optimize:   optimize function pointer
 * @exec:       execute function pointer
 * @get:        get function pointer
 */
static inline void bnx2x_exe_queue_init(struct bnx2x *bp,
                                        struct bnx2x_exe_queue_obj *o,
                                        int exe_len,
                                        union bnx2x_qable_obj *owner,
                                        exe_q_validate validate,
                                        exe_q_remove remove,
                                        exe_q_optimize optimize,
                                        exe_q_execute exec,
                                        exe_q_get get)
{
        memset(o, 0, sizeof(*o));

        INIT_LIST_HEAD(&o->exe_queue);
        INIT_LIST_HEAD(&o->pending_comp);

        spin_lock_init(&o->lock);

        o->exe_chunk_len = exe_len;
        o->owner         = owner;

        /* Owner specific callbacks */
        o->validate      = validate;
        o->remove        = remove;
        o->optimize      = optimize;
        o->execute       = exec;
        o->get           = get;

        DP(BNX2X_MSG_SP, "Setup the execution queue with the chunk length of %d\n",
           exe_len);
}
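
/* A rough sketch of the execution queue lifecycle, as implied by the
 * helpers below (illustrative only, not a verbatim caller):
 *
 *      elem = bnx2x_exe_queue_alloc_elem(bp);
 *      ... fill elem->cmd_data and elem->cmd_len ...
 *      rc = bnx2x_exe_queue_add(bp, o, elem, false);
 *      rc = bnx2x_exe_queue_step(bp, o, &ramrod_flags);
 *      ... a ramrod completion later resets the pending list ...
 */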

static inline void bnx2x_exe_queue_free_elem(struct bnx2x *bp,
                                             struct bnx2x_exeq_elem *elem)
{
        DP(BNX2X_MSG_SP, "Deleting an exe_queue element\n");
        kfree(elem);
}

static inline int bnx2x_exe_queue_length(struct bnx2x_exe_queue_obj *o)
{
        struct bnx2x_exeq_elem *elem;
        int cnt = 0;

        spin_lock_bh(&o->lock);

        list_for_each_entry(elem, &o->exe_queue, link)
                cnt++;

        spin_unlock_bh(&o->lock);

        return cnt;
}

/**
 * bnx2x_exe_queue_add - add a new element to the execution queue
 *
 * @bp:         driver handle
 * @o:          queue
 * @elem:       new element to add
 * @restore:    true - do not optimize the command
 *
 * If the element is optimized or is illegal, frees it.
 *
 * Return: 0 on success, a positive value if the element was optimized
 * away, a negative error code otherwise.
 */
static inline int bnx2x_exe_queue_add(struct bnx2x *bp,
                                      struct bnx2x_exe_queue_obj *o,
                                      struct bnx2x_exeq_elem *elem,
                                      bool restore)
{
        int rc;

        spin_lock_bh(&o->lock);

        if (!restore) {
                /* Try to cancel this element against an opposite pending
                 * command (optimize)
                 */
                rc = o->optimize(bp, o->owner, elem);
                if (rc)
                        goto free_and_exit;

                /* Check if this request is ok */
                rc = o->validate(bp, o->owner, elem);
                if (rc) {
                        DP(BNX2X_MSG_SP, "Preamble failed: %d\n", rc);
                        goto free_and_exit;
                }
        }

        /* The command passed all checks - add it to the execution queue */
        list_add_tail(&elem->link, &o->exe_queue);

        spin_unlock_bh(&o->lock);

        return 0;

free_and_exit:
        bnx2x_exe_queue_free_elem(bp, elem);

        spin_unlock_bh(&o->lock);

        return rc;
}

static inline void __bnx2x_exe_queue_reset_pending(
        struct bnx2x *bp,
        struct bnx2x_exe_queue_obj *o)
{
        struct bnx2x_exeq_elem *elem;

        while (!list_empty(&o->pending_comp)) {
                elem = list_first_entry(&o->pending_comp,
                                        struct bnx2x_exeq_elem, link);

                list_del(&elem->link);
                bnx2x_exe_queue_free_elem(bp, elem);
        }
}

/**
 * bnx2x_exe_queue_step - execute one execution chunk atomically
 *
 * @bp:                 driver handle
 * @o:                  queue
 * @ramrod_flags:       ramrod flags passed to the execute callback
 *
 * (Should be called while holding the exe_queue->lock).
 *
 * Return: a negative error code on failure, 0 when the chunk completed
 * immediately, a positive value while completions are still outstanding.
 */
static inline int bnx2x_exe_queue_step(struct bnx2x *bp,
                                       struct bnx2x_exe_queue_obj *o,
                                       unsigned long *ramrod_flags)
{
        struct bnx2x_exeq_elem *elem, spacer;
        int cur_len = 0, rc;

        memset(&spacer, 0, sizeof(spacer));

        /* Next step should not be performed until the current is finished,
         * unless a DRV_CLEAR_ONLY bit is set. In this case we just want to
         * properly clear object internals without sending any command to the FW
         * which also implies there won't be any completion to clear the
         * 'pending' list.
         */
        if (!list_empty(&o->pending_comp)) {
                if (test_bit(RAMROD_DRV_CLR_ONLY, ramrod_flags)) {
                        DP(BNX2X_MSG_SP, "RAMROD_DRV_CLR_ONLY requested: resetting a pending_comp list\n");
                        __bnx2x_exe_queue_reset_pending(bp, o);
                } else {
                        return 1;
                }
        }

        /* Run through the pending commands list and create a next
         * execution chunk.
         */
        while (!list_empty(&o->exe_queue)) {
                elem = list_first_entry(&o->exe_queue, struct bnx2x_exeq_elem,
                                        link);
                WARN_ON(!elem->cmd_len);

                if (cur_len + elem->cmd_len <= o->exe_chunk_len) {
                        cur_len += elem->cmd_len;
                        /* Prevent from both lists being empty when moving an
                         * element. This will allow the call of
                         * bnx2x_exe_queue_empty() without locking.
                         */
                        list_add_tail(&spacer.link, &o->pending_comp);
                        mb();
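                        /* This barrier is assumed to pair with the mb() in
                         * bnx2x_exe_queue_empty(), so a lockless reader never
                         * observes both lists empty while an element is in
                         * flight between them.
                         */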
                        list_move_tail(&elem->link, &o->pending_comp);
                        list_del(&spacer.link);
                } else
                        break;
        }

        /* Sanity check */
        if (!cur_len)
                return 0;

        rc = o->execute(bp, o->owner, &o->pending_comp, ramrod_flags);
        if (rc < 0)
                /* In case of an error return the commands back to the queue
                 * and reset the pending_comp.
                 */
                list_splice_init(&o->pending_comp, &o->exe_queue);
        else if (!rc)
                /* If zero is returned, means there are no outstanding pending
                 * completions and we may dismiss the pending list.
                 */
                __bnx2x_exe_queue_reset_pending(bp, o);

        return rc;
}

static inline bool bnx2x_exe_queue_empty(struct bnx2x_exe_queue_obj *o)
{
        bool empty = list_empty(&o->exe_queue);

        /* Don't reorder!!! This pairs with the barrier in
         * bnx2x_exe_queue_step() above.
         */
        mb();

        return empty && list_empty(&o->pending_comp);
}

static inline struct bnx2x_exeq_elem *bnx2x_exe_queue_alloc_elem(
        struct bnx2x *bp)
{
        DP(BNX2X_MSG_SP, "Allocating a new exe_queue element\n");
        return kzalloc(sizeof(struct bnx2x_exeq_elem), GFP_ATOMIC);
}

/************************ raw_obj functions ***********************************/
static bool bnx2x_raw_check_pending(struct bnx2x_raw_obj *o)
{
        return !!test_bit(o->state, o->pstate);
}

static void bnx2x_raw_clear_pending(struct bnx2x_raw_obj *o)
{
        smp_mb__before_atomic();
        clear_bit(o->state, o->pstate);
        smp_mb__after_atomic();
}

static void bnx2x_raw_set_pending(struct bnx2x_raw_obj *o)
{
        smp_mb__before_atomic();
        set_bit(o->state, o->pstate);
        smp_mb__after_atomic();
}

/**
 * bnx2x_state_wait - wait until the given bit(state) is cleared
 *
 * @bp:         device handle
 * @state:      state which is to be cleared
 * @pstate:     pointer to the state buffer
 *
 */
static inline int bnx2x_state_wait(struct bnx2x *bp, int state,
                                   unsigned long *pstate)
{
        /* can take a while if any port is running */
        int cnt = 5000;

        if (CHIP_REV_IS_EMUL(bp))
                cnt *= 20;

        DP(BNX2X_MSG_SP, "waiting for state to become %d\n", state);

        might_sleep();
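        /* With cnt == 5000 and a 1-2 ms poll interval, the loop below
         * bounds the wait at roughly 5-10 seconds (20x that on emulation).
         */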
        while (cnt--) {
                if (!test_bit(state, pstate)) {
#ifdef BNX2X_STOP_ON_ERROR
                        DP(BNX2X_MSG_SP, "exit  (cnt %d)\n", 5000 - cnt);
#endif
                        return 0;
                }

                usleep_range(1000, 2000);

                if (bp->panic)
                        return -EIO;
        }

        /* timeout! */
        BNX2X_ERR("timeout waiting for state %d\n", state);
#ifdef BNX2X_STOP_ON_ERROR
        bnx2x_panic();
#endif

        return -EBUSY;
}

static int bnx2x_raw_wait(struct bnx2x *bp, struct bnx2x_raw_obj *raw)
{
        return bnx2x_state_wait(bp, raw->state, raw->pstate);
}

/***************** Classification verbs: Set/Del MAC/VLAN/VLAN-MAC ************/
/* credit handling callbacks */
static bool bnx2x_get_cam_offset_mac(struct bnx2x_vlan_mac_obj *o, int *offset)
{
        struct bnx2x_credit_pool_obj *mp = o->macs_pool;

        WARN_ON(!mp);

        return mp->get_entry(mp, offset);
}

static bool bnx2x_get_credit_mac(struct bnx2x_vlan_mac_obj *o)
{
        struct bnx2x_credit_pool_obj *mp = o->macs_pool;

        WARN_ON(!mp);

        return mp->get(mp, 1);
}

static bool bnx2x_get_cam_offset_vlan(struct bnx2x_vlan_mac_obj *o, int *offset)
{
        struct bnx2x_credit_pool_obj *vp = o->vlans_pool;

        WARN_ON(!vp);

        return vp->get_entry(vp, offset);
}

static bool bnx2x_get_credit_vlan(struct bnx2x_vlan_mac_obj *o)
{
        struct bnx2x_credit_pool_obj *vp = o->vlans_pool;

        WARN_ON(!vp);

        return vp->get(vp, 1);
}

static bool bnx2x_put_cam_offset_mac(struct bnx2x_vlan_mac_obj *o, int offset)
{
        struct bnx2x_credit_pool_obj *mp = o->macs_pool;

        return mp->put_entry(mp, offset);
}

static bool bnx2x_put_credit_mac(struct bnx2x_vlan_mac_obj *o)
{
        struct bnx2x_credit_pool_obj *mp = o->macs_pool;

        return mp->put(mp, 1);
}

static bool bnx2x_put_cam_offset_vlan(struct bnx2x_vlan_mac_obj *o, int offset)
{
        struct bnx2x_credit_pool_obj *vp = o->vlans_pool;

        return vp->put_entry(vp, offset);
}

static bool bnx2x_put_credit_vlan(struct bnx2x_vlan_mac_obj *o)
{
        struct bnx2x_credit_pool_obj *vp = o->vlans_pool;

        return vp->put(vp, 1);
}

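/* The vlan_mac "head" list below is protected by a small hand-rolled
 * reader/writer scheme layered on the execution queue spinlock: readers
 * bump o->head_reader under that lock, while a "writer" (an execution
 * step) may only run when there are no readers; otherwise the step is
 * recorded in o->head_exe_request/o->saved_ramrod_flags and replayed by
 * whoever unlocks last. (Summary of the helpers that follow.)
 */
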
/**
 * __bnx2x_vlan_mac_h_write_trylock - try getting the vlan mac writer lock
 *
 * @bp:         device handle
 * @o:          vlan_mac object
 *
 * @details Non-blocking implementation; should be called under execution
 *          queue lock.
 */
static int __bnx2x_vlan_mac_h_write_trylock(struct bnx2x *bp,
                                            struct bnx2x_vlan_mac_obj *o)
{
        if (o->head_reader) {
                DP(BNX2X_MSG_SP, "vlan_mac_lock writer - There are readers; Busy\n");
                return -EBUSY;
        }

        DP(BNX2X_MSG_SP, "vlan_mac_lock writer - Taken\n");
        return 0;
}

/**
 * __bnx2x_vlan_mac_h_exec_pending - execute a step which was pended earlier
 *
 * @bp:         device handle
 * @o:          vlan_mac object
 *
 * @details Should be called under execution queue lock; notice it might release
 *          and reclaim it during its run.
 */
static void __bnx2x_vlan_mac_h_exec_pending(struct bnx2x *bp,
                                            struct bnx2x_vlan_mac_obj *o)
{
        int rc;
        unsigned long ramrod_flags = o->saved_ramrod_flags;

        DP(BNX2X_MSG_SP, "vlan_mac_lock execute pending command with ramrod flags %lu\n",
           ramrod_flags);
        o->head_exe_request = false;
        o->saved_ramrod_flags = 0;
        rc = bnx2x_exe_queue_step(bp, &o->exe_queue, &ramrod_flags);
        if (rc != 0) {
                BNX2X_ERR("execution of pending commands failed with rc %d\n",
                          rc);
#ifdef BNX2X_STOP_ON_ERROR
                bnx2x_panic();
#endif
        }
}

/**
 * __bnx2x_vlan_mac_h_pend - Pend an execution step which couldn't run
 *
 * @bp:                 device handle
 * @o:                  vlan_mac object
 * @ramrod_flags:       ramrod flags of missed execution
 *
 * @details Should be called under execution queue lock.
 */
static void __bnx2x_vlan_mac_h_pend(struct bnx2x *bp,
                                    struct bnx2x_vlan_mac_obj *o,
                                    unsigned long ramrod_flags)
{
        o->head_exe_request = true;
        o->saved_ramrod_flags = ramrod_flags;
        DP(BNX2X_MSG_SP, "Placing pending execution with ramrod flags %lu\n",
           ramrod_flags);
}

/**
 * __bnx2x_vlan_mac_h_write_unlock - unlock the vlan mac head list writer lock
 *
 * @bp:                 device handle
 * @o:                  vlan_mac object
 *
 * @details Should be called under execution queue lock. Notice if a pending
 *          execution exists, it would perform it - possibly releasing and
 *          reclaiming the execution queue lock.
 */
static void __bnx2x_vlan_mac_h_write_unlock(struct bnx2x *bp,
                                            struct bnx2x_vlan_mac_obj *o)
{
        /* It's possible a new pending execution was added since this writer
         * executed. If so, execute again. [Ad infinitum]
         */
        while (o->head_exe_request) {
                DP(BNX2X_MSG_SP, "vlan_mac_lock - writer release encountered a pending request\n");
                __bnx2x_vlan_mac_h_exec_pending(bp, o);
        }
}
/**
 * __bnx2x_vlan_mac_h_read_lock - lock the vlan mac head list reader lock
 *
 * @bp:                 device handle
 * @o:                  vlan_mac object
 *
 * @details Should be called under the execution queue lock. May sleep. May
 *          release and reclaim execution queue lock during its run.
 */
static int __bnx2x_vlan_mac_h_read_lock(struct bnx2x *bp,
                                        struct bnx2x_vlan_mac_obj *o)
{
        /* If we got here, we're holding lock --> no WRITER exists */
        o->head_reader++;
        DP(BNX2X_MSG_SP, "vlan_mac_lock - locked reader - number %d\n",
           o->head_reader);

        return 0;
}

/**
 * bnx2x_vlan_mac_h_read_lock - lock the vlan mac head list reader lock
 *
 * @bp:                 device handle
 * @o:                  vlan_mac object
 *
 * @details May sleep. Claims and releases execution queue lock during its run.
 */
int bnx2x_vlan_mac_h_read_lock(struct bnx2x *bp,
                               struct bnx2x_vlan_mac_obj *o)
{
        int rc;

        spin_lock_bh(&o->exe_queue.lock);
        rc = __bnx2x_vlan_mac_h_read_lock(bp, o);
        spin_unlock_bh(&o->exe_queue.lock);

        return rc;
}

/**
 * __bnx2x_vlan_mac_h_read_unlock - unlock the vlan mac head list reader lock
 *
 * @bp:                 device handle
 * @o:                  vlan_mac object
 *
 * @details Should be called under execution queue lock. Notice if a pending
 *          execution exists, it would be performed if this was the last
 *          reader, possibly releasing and reclaiming the execution queue lock.
 */
static void __bnx2x_vlan_mac_h_read_unlock(struct bnx2x *bp,
                                           struct bnx2x_vlan_mac_obj *o)
{
        if (!o->head_reader) {
                BNX2X_ERR("Need to release vlan mac reader lock, but lock isn't taken\n");
#ifdef BNX2X_STOP_ON_ERROR
                bnx2x_panic();
#endif
        } else {
                o->head_reader--;
                DP(BNX2X_MSG_SP, "vlan_mac_lock - decreased readers to %d\n",
                   o->head_reader);
        }

        /* It's possible a new pending execution was added, and that this reader
         * was last - if so we need to execute the command.
         */
        if (!o->head_reader && o->head_exe_request) {
                DP(BNX2X_MSG_SP, "vlan_mac_lock - reader release encountered a pending request\n");

                /* Writer release will do the trick */
                __bnx2x_vlan_mac_h_write_unlock(bp, o);
        }
}

/**
 * bnx2x_vlan_mac_h_read_unlock - unlock the vlan mac head list reader lock
 *
 * @bp:                 device handle
 * @o:                  vlan_mac object
 *
 * @details Notice if a pending execution exists, it would be performed if this
 *          was the last reader. Claims and releases the execution queue lock
 *          during its run.
 */
void bnx2x_vlan_mac_h_read_unlock(struct bnx2x *bp,
                                  struct bnx2x_vlan_mac_obj *o)
{
        spin_lock_bh(&o->exe_queue.lock);
        __bnx2x_vlan_mac_h_read_unlock(bp, o);
        spin_unlock_bh(&o->exe_queue.lock);
}

static int bnx2x_get_n_elements(struct bnx2x *bp, struct bnx2x_vlan_mac_obj *o,
                                int n, u8 *base, u8 stride, u8 size)
{
        struct bnx2x_vlan_mac_registry_elem *pos;
        u8 *next = base;
        int counter = 0;
        int read_lock;

        DP(BNX2X_MSG_SP, "get_n_elements - taking vlan_mac_lock (reader)\n");
        read_lock = bnx2x_vlan_mac_h_read_lock(bp, o);
        if (read_lock != 0)
                BNX2X_ERR("get_n_elements failed to get vlan mac reader lock; Access without lock\n");

        /* traverse list */
        list_for_each_entry(pos, &o->head, link) {
                if (counter < n) {
                        memcpy(next, &pos->u, size);
                        counter++;
                        DP(BNX2X_MSG_SP, "copied element number %d to address %p element was:\n",
                           counter, next);
                        next += stride + size;
                }
        }

        if (read_lock == 0) {
                DP(BNX2X_MSG_SP, "get_n_elements - releasing vlan_mac_lock (reader)\n");
                bnx2x_vlan_mac_h_read_unlock(bp, o);
        }

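        /* Note: the return value is scaled by ETH_ALEN; in practice this
         * helper is used to export lists of MAC addresses.
         */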
        return counter * ETH_ALEN;
}

/* check_add() callbacks */
static int bnx2x_check_mac_add(struct bnx2x *bp,
                               struct bnx2x_vlan_mac_obj *o,
                               union bnx2x_classification_ramrod_data *data)
{
        struct bnx2x_vlan_mac_registry_elem *pos;

        DP(BNX2X_MSG_SP, "Checking MAC %pM for ADD command\n", data->mac.mac);

        if (!is_valid_ether_addr(data->mac.mac))
                return -EINVAL;

        /* Check if a requested MAC already exists */
        list_for_each_entry(pos, &o->head, link)
                if (ether_addr_equal(data->mac.mac, pos->u.mac.mac) &&
                    (data->mac.is_inner_mac == pos->u.mac.is_inner_mac))
                        return -EEXIST;

        return 0;
}

static int bnx2x_check_vlan_add(struct bnx2x *bp,
                                struct bnx2x_vlan_mac_obj *o,
                                union bnx2x_classification_ramrod_data *data)
{
        struct bnx2x_vlan_mac_registry_elem *pos;

        DP(BNX2X_MSG_SP, "Checking VLAN %d for ADD command\n", data->vlan.vlan);

        list_for_each_entry(pos, &o->head, link)
                if (data->vlan.vlan == pos->u.vlan.vlan)
                        return -EEXIST;

        return 0;
}

/* check_del() callbacks */
static struct bnx2x_vlan_mac_registry_elem *
        bnx2x_check_mac_del(struct bnx2x *bp,
                            struct bnx2x_vlan_mac_obj *o,
                            union bnx2x_classification_ramrod_data *data)
{
        struct bnx2x_vlan_mac_registry_elem *pos;

        DP(BNX2X_MSG_SP, "Checking MAC %pM for DEL command\n", data->mac.mac);

        list_for_each_entry(pos, &o->head, link)
                if (ether_addr_equal(data->mac.mac, pos->u.mac.mac) &&
                    (data->mac.is_inner_mac == pos->u.mac.is_inner_mac))
                        return pos;

        return NULL;
}

static struct bnx2x_vlan_mac_registry_elem *
        bnx2x_check_vlan_del(struct bnx2x *bp,
                             struct bnx2x_vlan_mac_obj *o,
                             union bnx2x_classification_ramrod_data *data)
{
        struct bnx2x_vlan_mac_registry_elem *pos;

        DP(BNX2X_MSG_SP, "Checking VLAN %d for DEL command\n", data->vlan.vlan);

        list_for_each_entry(pos, &o->head, link)
                if (data->vlan.vlan == pos->u.vlan.vlan)
                        return pos;

        return NULL;
}

/* check_move() callback */
static bool bnx2x_check_move(struct bnx2x *bp,
                             struct bnx2x_vlan_mac_obj *src_o,
                             struct bnx2x_vlan_mac_obj *dst_o,
                             union bnx2x_classification_ramrod_data *data)
{
        struct bnx2x_vlan_mac_registry_elem *pos;
        int rc;

        /* Check if we can delete the requested configuration from the first
         * object.
         */
        pos = src_o->check_del(bp, src_o, data);

        /* Check if the configuration can be added */
        rc = dst_o->check_add(bp, dst_o, data);

        /* If this classification cannot be added (is already set)
         * or can't be deleted - return an error.
         */
        if (rc || !pos)
                return false;

        return true;
}

static bool bnx2x_check_move_always_err(
        struct bnx2x *bp,
        struct bnx2x_vlan_mac_obj *src_o,
        struct bnx2x_vlan_mac_obj *dst_o,
        union bnx2x_classification_ramrod_data *data)
{
        return false;
}

static inline u8 bnx2x_vlan_mac_get_rx_tx_flag(struct bnx2x_vlan_mac_obj *o)
{
        struct bnx2x_raw_obj *raw = &o->raw;
        u8 rx_tx_flag = 0;

        if ((raw->obj_type == BNX2X_OBJ_TYPE_TX) ||
            (raw->obj_type == BNX2X_OBJ_TYPE_RX_TX))
                rx_tx_flag |= ETH_CLASSIFY_CMD_HEADER_TX_CMD;

        if ((raw->obj_type == BNX2X_OBJ_TYPE_RX) ||
            (raw->obj_type == BNX2X_OBJ_TYPE_RX_TX))
                rx_tx_flag |= ETH_CLASSIFY_CMD_HEADER_RX_CMD;

        return rx_tx_flag;
}

static void bnx2x_set_mac_in_nig(struct bnx2x *bp,
                                 bool add, unsigned char *dev_addr, int index)
{
        u32 wb_data[2];
        u32 reg_offset = BP_PORT(bp) ? NIG_REG_LLH1_FUNC_MEM :
                         NIG_REG_LLH0_FUNC_MEM;

        if (!IS_MF_SI(bp) && !IS_MF_AFEX(bp))
                return;

        if (index > BNX2X_LLH_CAM_MAX_PF_LINE)
                return;

        DP(BNX2X_MSG_SP, "Going to %s LLH configuration at entry %d\n",
                         (add ? "ADD" : "DELETE"), index);

        if (add) {
                /* LLH_FUNC_MEM is a u64 WB register */
                reg_offset += 8*index;

                wb_data[0] = ((dev_addr[2] << 24) | (dev_addr[3] << 16) |
                              (dev_addr[4] <<  8) |  dev_addr[5]);
                wb_data[1] = ((dev_addr[0] <<  8) |  dev_addr[1]);

                REG_WR_DMAE(bp, reg_offset, wb_data, 2);
        }

        REG_WR(bp, (BP_PORT(bp) ? NIG_REG_LLH1_FUNC_MEM_ENABLE :
                                  NIG_REG_LLH0_FUNC_MEM_ENABLE) + 4*index, add);
}
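
/* For illustration: with dev_addr == 00:11:22:33:44:55 the pair built
 * above is wb_data[0] == 0x22334455 (bytes 2..5) and wb_data[1] ==
 * 0x00000011 (bytes 0..1), matching the layout of the u64 wide-bus
 * LLH_FUNC_MEM entry.
 */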

/**
 * bnx2x_vlan_mac_set_cmd_hdr_e2 - set a header in a single classify ramrod
 *
 * @bp:         device handle
 * @o:          queue for which we want to configure this rule
 * @add:        if true the command is an ADD command, DEL otherwise
 * @opcode:     CLASSIFY_RULE_OPCODE_XXX
 * @hdr:        pointer to a header to setup
 *
 */
static inline void bnx2x_vlan_mac_set_cmd_hdr_e2(struct bnx2x *bp,
        struct bnx2x_vlan_mac_obj *o, bool add, int opcode,
        struct eth_classify_cmd_header *hdr)
{
        struct bnx2x_raw_obj *raw = &o->raw;

        hdr->client_id = raw->cl_id;
        hdr->func_id = raw->func_id;

        /* Rx or/and Tx (internal switching) configuration ? */
        hdr->cmd_general_data |=
                bnx2x_vlan_mac_get_rx_tx_flag(o);

        if (add)
                hdr->cmd_general_data |= ETH_CLASSIFY_CMD_HEADER_IS_ADD;

        hdr->cmd_general_data |=
                (opcode << ETH_CLASSIFY_CMD_HEADER_OPCODE_SHIFT);
}

/**
 * bnx2x_vlan_mac_set_rdata_hdr_e2 - set the classify ramrod data header
 *
 * @cid:        connection id
 * @type:       BNX2X_FILTER_XXX_PENDING
 * @hdr:        pointer to header to setup
 * @rule_cnt:   number of rules in this ramrod
 *
 * Currently we always configure one rule; the echo field is set to carry
 * the CID and the opcode type.
 */
static inline void bnx2x_vlan_mac_set_rdata_hdr_e2(u32 cid, int type,
                                struct eth_classify_header *hdr, int rule_cnt)
{
        hdr->echo = cpu_to_le32((cid & BNX2X_SWCID_MASK) |
                                (type << BNX2X_SWCID_SHIFT));
        hdr->rule_cnt = (u8)rule_cnt;
}
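
/* The echo value built above packs the software CID into the low bits and
 * the pending-command type above BNX2X_SWCID_SHIFT; the firmware echoes it
 * back in the completion, which is how the completion path can recover
 * both from a single 32-bit cookie.
 */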

/* hw_config() callbacks */
static void bnx2x_set_one_mac_e2(struct bnx2x *bp,
                                 struct bnx2x_vlan_mac_obj *o,
                                 struct bnx2x_exeq_elem *elem, int rule_idx,
                                 int cam_offset)
{
        struct bnx2x_raw_obj *raw = &o->raw;
        struct eth_classify_rules_ramrod_data *data =
                (struct eth_classify_rules_ramrod_data *)(raw->rdata);
        int rule_cnt = rule_idx + 1, cmd = elem->cmd_data.vlan_mac.cmd;
        union eth_classify_rule_cmd *rule_entry = &data->rules[rule_idx];
        bool add = (cmd == BNX2X_VLAN_MAC_ADD);
        unsigned long *vlan_mac_flags = &elem->cmd_data.vlan_mac.vlan_mac_flags;
        u8 *mac = elem->cmd_data.vlan_mac.u.mac.mac;

        /* Set LLH CAM entry: currently only iSCSI and ETH macs are
         * relevant. In addition, current implementation is tuned for a
         * single ETH MAC.
         *
         * When PF configuration of multiple unicast ETH MACs in switch
         * independent mode is required (NetQ, multiple netdev MACs,
         * etc.), consider better utilisation of the 8 per-function MAC
         * entries in the LLH register. There are also the
         * NIG_REG_P[01]_LLH_FUNC_MEM2 registers, which bring the total
         * number of CAM entries to 16.
         *
         * Currently we won't configure NIG for MACs other than a primary ETH
         * MAC and iSCSI L2 MAC.
         *
         * If this MAC is moving from one Queue to another, no need to change
         * NIG configuration.
         */
        if (cmd != BNX2X_VLAN_MAC_MOVE) {
                if (test_bit(BNX2X_ISCSI_ETH_MAC, vlan_mac_flags))
                        bnx2x_set_mac_in_nig(bp, add, mac,
                                             BNX2X_LLH_CAM_ISCSI_ETH_LINE);
                else if (test_bit(BNX2X_ETH_MAC, vlan_mac_flags))
                        bnx2x_set_mac_in_nig(bp, add, mac,
                                             BNX2X_LLH_CAM_ETH_LINE);
        }

        /* Reset the ramrod data buffer for the first rule */
        if (rule_idx == 0)
                memset(data, 0, sizeof(*data));

        /* Setup a command header */
        bnx2x_vlan_mac_set_cmd_hdr_e2(bp, o, add, CLASSIFY_RULE_OPCODE_MAC,
                                      &rule_entry->mac.header);

        DP(BNX2X_MSG_SP, "About to %s MAC %pM for Queue %d\n",
           (add ? "add" : "delete"), mac, raw->cl_id);

        /* Set a MAC itself */
        bnx2x_set_fw_mac_addr(&rule_entry->mac.mac_msb,
                              &rule_entry->mac.mac_mid,
                              &rule_entry->mac.mac_lsb, mac);
        rule_entry->mac.inner_mac =
                cpu_to_le16(elem->cmd_data.vlan_mac.u.mac.is_inner_mac);

        /* MOVE: Add a rule that will add this MAC to the target Queue */
        if (cmd == BNX2X_VLAN_MAC_MOVE) {
                rule_entry++;
                rule_cnt++;

                /* Setup ramrod data */
                bnx2x_vlan_mac_set_cmd_hdr_e2(bp,
                                              elem->cmd_data.vlan_mac.target_obj,
                                              true, CLASSIFY_RULE_OPCODE_MAC,
                                              &rule_entry->mac.header);

                /* Set a MAC itself */
                bnx2x_set_fw_mac_addr(&rule_entry->mac.mac_msb,
                                      &rule_entry->mac.mac_mid,
                                      &rule_entry->mac.mac_lsb, mac);
                rule_entry->mac.inner_mac =
                        cpu_to_le16(elem->cmd_data.vlan_mac.u.mac.is_inner_mac);
        }

        /* Set the ramrod data header */
        /* TODO: take this to the higher level in order to prevent multiple
         * writes
         */
        bnx2x_vlan_mac_set_rdata_hdr_e2(raw->cid, raw->state, &data->header,
                                        rule_cnt);
}

/**
 * bnx2x_vlan_mac_set_rdata_hdr_e1x - set a MAC configuration ramrod header
 *
 * @bp:         device handle
 * @o:          queue
 * @type:       BNX2X_FILTER_XXX_PENDING
 * @cam_offset: offset in cam memory
 * @hdr:        pointer to a header to setup
 *
 * E1/E1H
 */
static inline void bnx2x_vlan_mac_set_rdata_hdr_e1x(struct bnx2x *bp,
        struct bnx2x_vlan_mac_obj *o, int type, int cam_offset,
        struct mac_configuration_hdr *hdr)
{
        struct bnx2x_raw_obj *r = &o->raw;

        hdr->length = 1;
        hdr->offset = (u8)cam_offset;
        hdr->client_id = cpu_to_le16(0xff);
        hdr->echo = cpu_to_le32((r->cid & BNX2X_SWCID_MASK) |
                                (type << BNX2X_SWCID_SHIFT));
}

static inline void bnx2x_vlan_mac_set_cfg_entry_e1x(struct bnx2x *bp,
        struct bnx2x_vlan_mac_obj *o, bool add, int opcode, u8 *mac,
        u16 vlan_id, struct mac_configuration_entry *cfg_entry)
{
        struct bnx2x_raw_obj *r = &o->raw;
        u32 cl_bit_vec = (1 << r->cl_id);

        cfg_entry->clients_bit_vector = cpu_to_le32(cl_bit_vec);
        cfg_entry->pf_id = r->func_id;
        cfg_entry->vlan_id = cpu_to_le16(vlan_id);

        if (add) {
                SET_FLAG(cfg_entry->flags, MAC_CONFIGURATION_ENTRY_ACTION_TYPE,
                         T_ETH_MAC_COMMAND_SET);
                SET_FLAG(cfg_entry->flags,
                         MAC_CONFIGURATION_ENTRY_VLAN_FILTERING_MODE, opcode);

                /* Set a MAC in a ramrod data */
                bnx2x_set_fw_mac_addr(&cfg_entry->msb_mac_addr,
                                      &cfg_entry->middle_mac_addr,
                                      &cfg_entry->lsb_mac_addr, mac);
        } else
                SET_FLAG(cfg_entry->flags, MAC_CONFIGURATION_ENTRY_ACTION_TYPE,
                         T_ETH_MAC_COMMAND_INVALIDATE);
}

static inline void bnx2x_vlan_mac_set_rdata_e1x(struct bnx2x *bp,
        struct bnx2x_vlan_mac_obj *o, int type, int cam_offset, bool add,
        u8 *mac, u16 vlan_id, int opcode, struct mac_configuration_cmd *config)
{
        struct mac_configuration_entry *cfg_entry = &config->config_table[0];
        struct bnx2x_raw_obj *raw = &o->raw;

        bnx2x_vlan_mac_set_rdata_hdr_e1x(bp, o, type, cam_offset,
                                         &config->hdr);
        bnx2x_vlan_mac_set_cfg_entry_e1x(bp, o, add, opcode, mac, vlan_id,
                                         cfg_entry);

        DP(BNX2X_MSG_SP, "%s MAC %pM CLID %d CAM offset %d\n",
                         (add ? "setting" : "clearing"),
                         mac, raw->cl_id, cam_offset);
}

/**
 * bnx2x_set_one_mac_e1x - fill a single MAC rule ramrod data
 *
 * @bp:         device handle
 * @o:          bnx2x_vlan_mac_obj
 * @elem:       bnx2x_exeq_elem
 * @rule_idx:   rule_idx
 * @cam_offset: cam_offset
 */
static void bnx2x_set_one_mac_e1x(struct bnx2x *bp,
                                  struct bnx2x_vlan_mac_obj *o,
                                  struct bnx2x_exeq_elem *elem, int rule_idx,
                                  int cam_offset)
{
        struct bnx2x_raw_obj *raw = &o->raw;
        struct mac_configuration_cmd *config =
                (struct mac_configuration_cmd *)(raw->rdata);
        /* 57710 and 57711 do not support MOVE command,
         * so it's either ADD or DEL
         */
        bool add = (elem->cmd_data.vlan_mac.cmd == BNX2X_VLAN_MAC_ADD);

        /* Reset the ramrod data buffer */
        memset(config, 0, sizeof(*config));

        bnx2x_vlan_mac_set_rdata_e1x(bp, o, raw->state,
                                     cam_offset, add,
                                     elem->cmd_data.vlan_mac.u.mac.mac, 0,
                                     ETH_VLAN_FILTER_ANY_VLAN, config);
}

static void bnx2x_set_one_vlan_e2(struct bnx2x *bp,
                                  struct bnx2x_vlan_mac_obj *o,
                                  struct bnx2x_exeq_elem *elem, int rule_idx,
                                  int cam_offset)
{
        struct bnx2x_raw_obj *raw = &o->raw;
        struct eth_classify_rules_ramrod_data *data =
                (struct eth_classify_rules_ramrod_data *)(raw->rdata);
        int rule_cnt = rule_idx + 1;
        union eth_classify_rule_cmd *rule_entry = &data->rules[rule_idx];
        enum bnx2x_vlan_mac_cmd cmd = elem->cmd_data.vlan_mac.cmd;
        bool add = (cmd == BNX2X_VLAN_MAC_ADD);
        u16 vlan = elem->cmd_data.vlan_mac.u.vlan.vlan;

        /* Reset the ramrod data buffer for the first rule */
        if (rule_idx == 0)
                memset(data, 0, sizeof(*data));

        /* Set a rule header */
        bnx2x_vlan_mac_set_cmd_hdr_e2(bp, o, add, CLASSIFY_RULE_OPCODE_VLAN,
                                      &rule_entry->vlan.header);

        DP(BNX2X_MSG_SP, "About to %s VLAN %d\n", (add ? "add" : "delete"),
                         vlan);

        /* Set a VLAN itself */
        rule_entry->vlan.vlan = cpu_to_le16(vlan);

        /* MOVE: Add a rule that will add this VLAN to the target Queue */
        if (cmd == BNX2X_VLAN_MAC_MOVE) {
                rule_entry++;
                rule_cnt++;

                /* Setup ramrod data */
                bnx2x_vlan_mac_set_cmd_hdr_e2(bp,
                                              elem->cmd_data.vlan_mac.target_obj,
                                              true, CLASSIFY_RULE_OPCODE_VLAN,
                                              &rule_entry->vlan.header);

                /* Set a VLAN itself */
                rule_entry->vlan.vlan = cpu_to_le16(vlan);
        }

        /* Set the ramrod data header */
        /* TODO: take this to the higher level in order to prevent multiple
         * writes
         */
        bnx2x_vlan_mac_set_rdata_hdr_e2(raw->cid, raw->state, &data->header,
                                        rule_cnt);
}

/**
 * bnx2x_vlan_mac_restore - reconfigure the next MAC/VLAN/VLAN-MAC element
 *
 * @bp:         device handle
 * @p:          command parameters
 * @ppos:       pointer to the cookie
 *
 * Reconfigure the next MAC/VLAN/VLAN-MAC element from the
 * previously configured elements list.
 *
 * Of the command parameters, only the RAMROD_COMP_WAIT bit in ramrod_flags
 * is taken into account.
 *
 * The cookie should be given back in the next call to make the function
 * handle the next element. If *ppos is set to NULL it will restart the
 * iterator. If returned *ppos == NULL this means that the last element
 * has been handled.
 *
 */
static int bnx2x_vlan_mac_restore(struct bnx2x *bp,
                           struct bnx2x_vlan_mac_ramrod_params *p,
                           struct bnx2x_vlan_mac_registry_elem **ppos)
{
        struct bnx2x_vlan_mac_registry_elem *pos;
        struct bnx2x_vlan_mac_obj *o = p->vlan_mac_obj;

        /* If list is empty - there is nothing to do here */
        if (list_empty(&o->head)) {
                *ppos = NULL;
                return 0;
        }

        /* make a step... */
        if (*ppos == NULL)
                *ppos = list_first_entry(&o->head,
                                         struct bnx2x_vlan_mac_registry_elem,
                                         link);
        else
                *ppos = list_next_entry(*ppos, link);

        pos = *ppos;

        /* If it's the last step - return NULL */
        if (list_is_last(&pos->link, &o->head))
                *ppos = NULL;

        /* Prepare a 'user_req' */
        memcpy(&p->user_req.u, &pos->u, sizeof(pos->u));

        /* Set the command */
        p->user_req.cmd = BNX2X_VLAN_MAC_ADD;

        /* Set vlan_mac_flags */
        p->user_req.vlan_mac_flags = pos->vlan_mac_flags;

        /* Set a restore bit */
        __set_bit(RAMROD_RESTORE, &p->ramrod_flags);

        return bnx2x_config_vlan_mac(bp, p);
}
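
/* Illustrative use (a sketch, not a verbatim caller): replay the whole
 * registry by handing the cookie back until it comes out NULL.
 *
 *      struct bnx2x_vlan_mac_registry_elem *pos = NULL;
 *      int rc;
 *
 *      do {
 *              rc = bnx2x_vlan_mac_restore(bp, p, &pos);
 *      } while (rc >= 0 && pos);
 */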

/* bnx2x_exeq_get_mac/bnx2x_exeq_get_vlan/bnx2x_exeq_get_vlan_mac return a
 * pointer to an element matching specific criteria, or NULL if no such
 * element has been found.
 */
static struct bnx2x_exeq_elem *bnx2x_exeq_get_mac(
        struct bnx2x_exe_queue_obj *o,
        struct bnx2x_exeq_elem *elem)
{
        struct bnx2x_exeq_elem *pos;
        struct bnx2x_mac_ramrod_data *data = &elem->cmd_data.vlan_mac.u.mac;

        /* Check pending for execution commands */
        list_for_each_entry(pos, &o->exe_queue, link)
                if (!memcmp(&pos->cmd_data.vlan_mac.u.mac, data,
                              sizeof(*data)) &&
                    (pos->cmd_data.vlan_mac.cmd == elem->cmd_data.vlan_mac.cmd))
                        return pos;

        return NULL;
}

static struct bnx2x_exeq_elem *bnx2x_exeq_get_vlan(
        struct bnx2x_exe_queue_obj *o,
        struct bnx2x_exeq_elem *elem)
{
        struct bnx2x_exeq_elem *pos;
        struct bnx2x_vlan_ramrod_data *data = &elem->cmd_data.vlan_mac.u.vlan;

        /* Check pending for execution commands */
        list_for_each_entry(pos, &o->exe_queue, link)
                if (!memcmp(&pos->cmd_data.vlan_mac.u.vlan, data,
                              sizeof(*data)) &&
                    (pos->cmd_data.vlan_mac.cmd == elem->cmd_data.vlan_mac.cmd))
                        return pos;

        return NULL;
}

/**
 * bnx2x_validate_vlan_mac_add - check if an ADD command can be executed
 *
 * @bp:         device handle
 * @qo:         bnx2x_qable_obj
 * @elem:       bnx2x_exeq_elem
 *
 * Checks that the requested configuration can be added. If yes and if
 * requested, consumes a CAM credit.
 *
 * The 'validate' is run after the 'optimize'.
 *
 */
static inline int bnx2x_validate_vlan_mac_add(struct bnx2x *bp,
                                              union bnx2x_qable_obj *qo,
                                              struct bnx2x_exeq_elem *elem)
{
        struct bnx2x_vlan_mac_obj *o = &qo->vlan_mac;
        struct bnx2x_exe_queue_obj *exeq = &o->exe_queue;
        int rc;

        /* Check the registry */
        rc = o->check_add(bp, o, &elem->cmd_data.vlan_mac.u);
        if (rc) {
                DP(BNX2X_MSG_SP, "ADD command is not allowed considering current registry state.\n");
                return rc;
        }

        /* Check if there is a pending ADD command for this
         * MAC/VLAN/VLAN-MAC. Return an error if there is.
         */
        if (exeq->get(exeq, elem)) {
                DP(BNX2X_MSG_SP, "There is a pending ADD command already\n");
                return -EEXIST;
        }

        /* TODO: Check the pending MOVE from other objects where this
         * object is a destination object.
         */

        /* Consume the credit if not requested not to */
        if (!(test_bit(BNX2X_DONT_CONSUME_CAM_CREDIT,
                       &elem->cmd_data.vlan_mac.vlan_mac_flags) ||
            o->get_credit(o)))
                return -EINVAL;

        return 0;
}

/**
 * bnx2x_validate_vlan_mac_del - check if the DEL command can be executed
 *
 * @bp:         device handle
 * @qo:         qable object to check
 * @elem:       element that needs to be deleted
 *
 * Checks that the requested configuration can be deleted. If yes and if
 * requested, returns a CAM credit.
 *
 * The 'validate' is run after the 'optimize'.
 */
static inline int bnx2x_validate_vlan_mac_del(struct bnx2x *bp,
                                              union bnx2x_qable_obj *qo,
                                              struct bnx2x_exeq_elem *elem)
{
        struct bnx2x_vlan_mac_obj *o = &qo->vlan_mac;
        struct bnx2x_vlan_mac_registry_elem *pos;
        struct bnx2x_exe_queue_obj *exeq = &o->exe_queue;
        struct bnx2x_exeq_elem query_elem;

        /* If this classification can not be deleted (doesn't exist)
         * - return -EEXIST.
         */
        pos = o->check_del(bp, o, &elem->cmd_data.vlan_mac.u);
        if (!pos) {
                DP(BNX2X_MSG_SP, "DEL command is not allowed considering current registry state\n");
                return -EEXIST;
        }

        /* Check if there are pending DEL or MOVE commands for this
         * MAC/VLAN/VLAN-MAC. Return an error if so.
         */
        memcpy(&query_elem, elem, sizeof(query_elem));

        /* Check for MOVE commands */
        query_elem.cmd_data.vlan_mac.cmd = BNX2X_VLAN_MAC_MOVE;
        if (exeq->get(exeq, &query_elem)) {
                BNX2X_ERR("There is a pending MOVE command already\n");
                return -EINVAL;
        }

        /* Check for DEL commands */
        if (exeq->get(exeq, elem)) {
                DP(BNX2X_MSG_SP, "There is a pending DEL command already\n");
                return -EEXIST;
        }

        /* Return the credit to the credit pool if not requested not to */
        if (!(test_bit(BNX2X_DONT_CONSUME_CAM_CREDIT,
                       &elem->cmd_data.vlan_mac.vlan_mac_flags) ||
            o->put_credit(o))) {
                BNX2X_ERR("Failed to return a credit\n");
                return -EINVAL;
        }

        return 0;
}

/**
 * bnx2x_validate_vlan_mac_move - check if the MOVE command can be executed
 *
 * @bp:         device handle
 * @qo:         qable object to check (source)
 * @elem:       element that needs to be moved
 *
 * Checks that the requested configuration can be moved. If yes and if
 * requested, returns a CAM credit.
 *
 * The 'validate' is run after the 'optimize'.
 */
static inline int bnx2x_validate_vlan_mac_move(struct bnx2x *bp,
                                               union bnx2x_qable_obj *qo,
                                               struct bnx2x_exeq_elem *elem)
{
        struct bnx2x_vlan_mac_obj *src_o = &qo->vlan_mac;
        struct bnx2x_vlan_mac_obj *dest_o = elem->cmd_data.vlan_mac.target_obj;
        struct bnx2x_exeq_elem query_elem;
        struct bnx2x_exe_queue_obj *src_exeq = &src_o->exe_queue;
        struct bnx2x_exe_queue_obj *dest_exeq = &dest_o->exe_queue;

        /* Check if we can perform this operation based on the current registry
         * state.
         */
        if (!src_o->check_move(bp, src_o, dest_o,
                               &elem->cmd_data.vlan_mac.u)) {
                DP(BNX2X_MSG_SP, "MOVE command is not allowed considering current registry state\n");
                return -EINVAL;
        }

        /* Check if there is an already pending DEL or MOVE command for the
         * source object or ADD command for a destination object. Return an
         * error if so.
         */
        memcpy(&query_elem, elem, sizeof(query_elem));

        /* Check DEL on source */
        query_elem.cmd_data.vlan_mac.cmd = BNX2X_VLAN_MAC_DEL;
        if (src_exeq->get(src_exeq, &query_elem)) {
                BNX2X_ERR("There is a pending DEL command on the source queue already\n");
                return -EINVAL;
        }

        /* Check MOVE on source */
        if (src_exeq->get(src_exeq, elem)) {
                DP(BNX2X_MSG_SP, "There is a pending MOVE command already\n");
                return -EEXIST;
        }

        /* Check ADD on destination */
        query_elem.cmd_data.vlan_mac.cmd = BNX2X_VLAN_MAC_ADD;
        if (dest_exeq->get(dest_exeq, &query_elem)) {
                BNX2X_ERR("There is a pending ADD command on the destination queue already\n");
                return -EINVAL;
        }

        /* Consume the credit if not requested not to */
        if (!(test_bit(BNX2X_DONT_CONSUME_CAM_CREDIT_DEST,
                       &elem->cmd_data.vlan_mac.vlan_mac_flags) ||
            dest_o->get_credit(dest_o)))
                return -EINVAL;

        if (!(test_bit(BNX2X_DONT_CONSUME_CAM_CREDIT,
                       &elem->cmd_data.vlan_mac.vlan_mac_flags) ||
            src_o->put_credit(src_o))) {
                /* return the credit taken from dest... */
                dest_o->put_credit(dest_o);
                return -EINVAL;
        }

        return 0;
}

static int bnx2x_validate_vlan_mac(struct bnx2x *bp,
                                   union bnx2x_qable_obj *qo,
                                   struct bnx2x_exeq_elem *elem)
{
        switch (elem->cmd_data.vlan_mac.cmd) {
        case BNX2X_VLAN_MAC_ADD:
                return bnx2x_validate_vlan_mac_add(bp, qo, elem);
        case BNX2X_VLAN_MAC_DEL:
                return bnx2x_validate_vlan_mac_del(bp, qo, elem);
        case BNX2X_VLAN_MAC_MOVE:
                return bnx2x_validate_vlan_mac_move(bp, qo, elem);
        default:
                return -EINVAL;
        }
}

static int bnx2x_remove_vlan_mac(struct bnx2x *bp,
                                  union bnx2x_qable_obj *qo,
                                  struct bnx2x_exeq_elem *elem)
{
        int rc = 0;

        /* If consumption wasn't required, nothing to do */
        if (test_bit(BNX2X_DONT_CONSUME_CAM_CREDIT,
                     &elem->cmd_data.vlan_mac.vlan_mac_flags))
                return 0;

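        /* Undo whatever 'validate' did to the credit pool: ADD/MOVE
         * consumed a credit, so it is returned; DEL returned one, so it
         * is taken back.
         */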
        switch (elem->cmd_data.vlan_mac.cmd) {
        case BNX2X_VLAN_MAC_ADD:
        case BNX2X_VLAN_MAC_MOVE:
                rc = qo->vlan_mac.put_credit(&qo->vlan_mac);
                break;
        case BNX2X_VLAN_MAC_DEL:
                rc = qo->vlan_mac.get_credit(&qo->vlan_mac);
                break;
        default:
                return -EINVAL;
        }

        if (!rc)
                return -EINVAL;

        return 0;
}

/**
 * bnx2x_wait_vlan_mac - passively wait for 5 seconds until all work completes.
 *
 * @bp:         device handle
 * @o:          bnx2x_vlan_mac_obj
 *
 */
static int bnx2x_wait_vlan_mac(struct bnx2x *bp,
                               struct bnx2x_vlan_mac_obj *o)
{
        int cnt = 5000, rc;
        struct bnx2x_exe_queue_obj *exeq = &o->exe_queue;
        struct bnx2x_raw_obj *raw = &o->raw;

        while (cnt--) {
                /* Wait for the current command to complete */
                rc = raw->wait_comp(bp, raw);
                if (rc)
                        return rc;

                /* Wait until there are no pending commands */
                if (!bnx2x_exe_queue_empty(exeq))
                        usleep_range(1000, 2000);
                else
                        return 0;
        }

        return -EBUSY;
}

static int __bnx2x_vlan_mac_execute_step(struct bnx2x *bp,
                                         struct bnx2x_vlan_mac_obj *o,
                                         unsigned long *ramrod_flags)
{
        int rc = 0;

        spin_lock_bh(&o->exe_queue.lock);

        DP(BNX2X_MSG_SP, "vlan_mac_execute_step - trying to take writer lock\n");
        rc = __bnx2x_vlan_mac_h_write_trylock(bp, o);

        if (rc != 0) {
                __bnx2x_vlan_mac_h_pend(bp, o, *ramrod_flags);

                /* The calling function should not differentiate between this
                 * case and the case in which there is already a pending ramrod.
                 */
                rc = 1;
        } else {
                rc = bnx2x_exe_queue_step(bp, &o->exe_queue, ramrod_flags);
        }
        spin_unlock_bh(&o->exe_queue.lock);

        return rc;
}

/**
 * bnx2x_complete_vlan_mac - complete one VLAN-MAC ramrod
 *
 * @bp:                 device handle
 * @o:                  bnx2x_vlan_mac_obj
 * @cqe:                completion element
 * @ramrod_flags:       if RAMROD_CONT is set, the next bulk of pending
 *                      commands will be executed
 *
 * Return: 0 if all work completed, a positive value if more work is
 * pending, a negative error code otherwise.
 */
static int bnx2x_complete_vlan_mac(struct bnx2x *bp,
                                   struct bnx2x_vlan_mac_obj *o,
                                   union event_ring_elem *cqe,
                                   unsigned long *ramrod_flags)
{
        struct bnx2x_raw_obj *r = &o->raw;
        int rc;

        /* Clearing the pending list & raw state should be done
         * atomically (the execution flow assumes they represent the same
         * state).
         */
        spin_lock_bh(&o->exe_queue.lock);

        /* Reset pending list */
        __bnx2x_exe_queue_reset_pending(bp, &o->exe_queue);

        /* Clear pending */
        r->clear_pending(r);

        spin_unlock_bh(&o->exe_queue.lock);

        /* If ramrod failed this is most likely a SW bug */
        if (cqe->message.error)
                return -EINVAL;

        /* Run the next bulk of pending commands if requested */
        if (test_bit(RAMROD_CONT, ramrod_flags)) {
                rc = __bnx2x_vlan_mac_execute_step(bp, o, ramrod_flags);

                if (rc < 0)
                        return rc;
        }

        /* If there is more work to do return PENDING */
        if (!bnx2x_exe_queue_empty(&o->exe_queue))
                return 1;

        return 0;
}
1469
1470/**
1471 * bnx2x_optimize_vlan_mac - optimize ADD and DEL commands.
1472 *
1473 * @bp:         device handle
1474 * @o:          bnx2x_qable_obj
1475 * @elem:       bnx2x_exeq_elem
1476 */
1477static int bnx2x_optimize_vlan_mac(struct bnx2x *bp,
1478                                   union bnx2x_qable_obj *qo,
1479                                   struct bnx2x_exeq_elem *elem)
1480{
1481        struct bnx2x_exeq_elem query, *pos;
1482        struct bnx2x_vlan_mac_obj *o = &qo->vlan_mac;
1483        struct bnx2x_exe_queue_obj *exeq = &o->exe_queue;
1484
1485        memcpy(&query, elem, sizeof(query));
1486
1487        switch (elem->cmd_data.vlan_mac.cmd) {
1488        case BNX2X_VLAN_MAC_ADD:
1489                query.cmd_data.vlan_mac.cmd = BNX2X_VLAN_MAC_DEL;
1490                break;
1491        case BNX2X_VLAN_MAC_DEL:
1492                query.cmd_data.vlan_mac.cmd = BNX2X_VLAN_MAC_ADD;
1493                break;
1494        default:
1495                /* Don't handle anything other than ADD or DEL */
1496                return 0;
1497        }
1498
1499        /* If we found the appropriate element - delete it */
1500        pos = exeq->get(exeq, &query);
1501        if (pos) {
1502
1503                /* Return the credit of the optimized command */
1504                if (!test_bit(BNX2X_DONT_CONSUME_CAM_CREDIT,
1505                              &pos->cmd_data.vlan_mac.vlan_mac_flags)) {
1506                        if (query.cmd_data.vlan_mac.cmd ==
1507                            BNX2X_VLAN_MAC_ADD) {
1508                                if (!o->put_credit(o)) {
1509                                        BNX2X_ERR("Failed to return the credit for the optimized ADD command\n");
1510                                        return -EINVAL;
1511                                }
1512                        } else if (!o->get_credit(o)) { /* VLAN_MAC_DEL */
1513                                BNX2X_ERR("Failed to recover the credit from the optimized DEL command\n");
1514                                return -EINVAL;
1515                        }
1514                }
1515
1516                DP(BNX2X_MSG_SP, "Optimizing %s command\n",
1517                           (elem->cmd_data.vlan_mac.cmd == BNX2X_VLAN_MAC_ADD) ?
1518                           "ADD" : "DEL");
1519
1520                list_del(&pos->link);
1521                bnx2x_exe_queue_free_elem(bp, pos);
1522                return 1;
1523        }
1524
1525        return 0;
1526}
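
/* Illustrative example (editor's sketch, not a driver code path): if an ADD
 * for some MAC is still pending in the execution queue and a DEL for the same
 * MAC arrives, the query above flips the DEL into an ADD, exeq->get() finds
 * the pending ADD, and the pair is optimized away: the pending element is
 * removed and freed, the CAM credit reserved by the ADD is returned via
 * o->put_credit(), and neither command generates a ramrod.
 */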
1527
1528/**
1529 * bnx2x_vlan_mac_get_registry_elem - prepare a registry element
1530 *
1531 * @bp:   device handle
1532 * @o:        vlan_mac object the element belongs to
1533 * @elem:     execution queue element carrying the command
1534 * @restore:  true if this is a RESTORE flow
1535 * @re:       output: the prepared registry element
1536 *
1537 * prepare a registry element according to the current command request.
1538 */
1539static inline int bnx2x_vlan_mac_get_registry_elem(
1540        struct bnx2x *bp,
1541        struct bnx2x_vlan_mac_obj *o,
1542        struct bnx2x_exeq_elem *elem,
1543        bool restore,
1544        struct bnx2x_vlan_mac_registry_elem **re)
1545{
1546        enum bnx2x_vlan_mac_cmd cmd = elem->cmd_data.vlan_mac.cmd;
1547        struct bnx2x_vlan_mac_registry_elem *reg_elem;
1548
1549        /* Allocate a new registry element if needed. */
1550        if (!restore &&
1551            ((cmd == BNX2X_VLAN_MAC_ADD) || (cmd == BNX2X_VLAN_MAC_MOVE))) {
1552                reg_elem = kzalloc(sizeof(*reg_elem), GFP_ATOMIC);
1553                if (!reg_elem)
1554                        return -ENOMEM;
1555
1556                /* Get a new CAM offset */
1557                if (!o->get_cam_offset(o, &reg_elem->cam_offset)) {
1558                        /* This shall never happen, because we have checked the
1559                         * CAM availability in the 'validate'.
1560                         */
1561                        WARN_ON(1);
1562                        kfree(reg_elem);
1563                        return -EINVAL;
1564                }
1565
1566                DP(BNX2X_MSG_SP, "Got cam offset %d\n", reg_elem->cam_offset);
1567
1568                /* Set a VLAN-MAC data */
1569                memcpy(&reg_elem->u, &elem->cmd_data.vlan_mac.u,
1570                          sizeof(reg_elem->u));
1571
1572                /* Copy the flags (needed for DEL and RESTORE flows) */
1573                reg_elem->vlan_mac_flags =
1574                        elem->cmd_data.vlan_mac.vlan_mac_flags;
1575        } else /* DEL, RESTORE */
1576                reg_elem = o->check_del(bp, o, &elem->cmd_data.vlan_mac.u);
1577
1578        *re = reg_elem;
1579        return 0;
1580}
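
/* Summary of the flows above (editor's sketch): for ADD and MOVE outside a
 * RESTORE a fresh registry element is kzalloc'ed and a CAM offset reserved
 * for it; for DEL, and for any command during RESTORE, the already-existing
 * element is looked up in the registry via o->check_del() instead.
 */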
1581
1582/**
1583 * bnx2x_execute_vlan_mac - execute vlan mac command
1584 *
1585 * @bp:                 device handle
1586 * @qo:                 queueable object (the vlan_mac object)
1587 * @exe_chunk:          chunk of execution queue elements to configure
1588 * @ramrod_flags:       execution flags (e.g. RAMROD_RESTORE, RAMROD_DRV_CLR_ONLY)
1589 *
1590 * go and send a ramrod!
1591 */
1592static int bnx2x_execute_vlan_mac(struct bnx2x *bp,
1593                                  union bnx2x_qable_obj *qo,
1594                                  struct list_head *exe_chunk,
1595                                  unsigned long *ramrod_flags)
1596{
1597        struct bnx2x_exeq_elem *elem;
1598        struct bnx2x_vlan_mac_obj *o = &qo->vlan_mac, *cam_obj;
1599        struct bnx2x_raw_obj *r = &o->raw;
1600        int rc, idx = 0;
1601        bool restore = test_bit(RAMROD_RESTORE, ramrod_flags);
1602        bool drv_only = test_bit(RAMROD_DRV_CLR_ONLY, ramrod_flags);
1603        struct bnx2x_vlan_mac_registry_elem *reg_elem;
1604        enum bnx2x_vlan_mac_cmd cmd;
1605
1606        /* If DRIVER_ONLY execution is requested, clean up the registry
1607         * and exit. Otherwise send a ramrod to FW.
1608         */
1609        if (!drv_only) {
1610                WARN_ON(r->check_pending(r));
1611
1612                /* Set pending */
1613                r->set_pending(r);
1614
1615                /* Fill the ramrod data */
1616                list_for_each_entry(elem, exe_chunk, link) {
1617                        cmd = elem->cmd_data.vlan_mac.cmd;
1618                        /* In a MOVE command the entry is added to the
1619                         * target object, so run the CAM search on it.
1620                         */
1621                        if (cmd == BNX2X_VLAN_MAC_MOVE)
1622                                cam_obj = elem->cmd_data.vlan_mac.target_obj;
1623                        else
1624                                cam_obj = o;
1625
1626                        rc = bnx2x_vlan_mac_get_registry_elem(bp, cam_obj,
1627                                                              elem, restore,
1628                                                              &reg_elem);
1629                        if (rc)
1630                                goto error_exit;
1631
1632                        WARN_ON(!reg_elem);
1633
1634                        /* Push a new entry into the registry */
1635                        if (!restore &&
1636                            ((cmd == BNX2X_VLAN_MAC_ADD) ||
1637                            (cmd == BNX2X_VLAN_MAC_MOVE)))
1638                                list_add(&reg_elem->link, &cam_obj->head);
1639
1640                        /* Configure a single command in a ramrod data buffer */
1641                        o->set_one_rule(bp, o, elem, idx,
1642                                        reg_elem->cam_offset);
1643
1644                        /* MOVE command consumes 2 entries in the ramrod data */
1645                        if (cmd == BNX2X_VLAN_MAC_MOVE)
1646                                idx += 2;
1647                        else
1648                                idx++;
1649                }
1650
1651                /* No need for an explicit memory barrier here as long as we
1652                 * ensure the ordering of writing to the SPQ element
1653                 * and updating of the SPQ producer which involves a memory
1654                 * read. If the memory read is removed we will have to put a
1655                 * full memory barrier there (inside bnx2x_sp_post()).
1656                 */
1657
1658                rc = bnx2x_sp_post(bp, o->ramrod_cmd, r->cid,
1659                                   U64_HI(r->rdata_mapping),
1660                                   U64_LO(r->rdata_mapping),
1661                                   ETH_CONNECTION_TYPE);
1662                if (rc)
1663                        goto error_exit;
1664        }
1665
1666        /* Now, when we are done with the ramrod - clean up the registry */
1667        list_for_each_entry(elem, exe_chunk, link) {
1668                cmd = elem->cmd_data.vlan_mac.cmd;
1669                if ((cmd == BNX2X_VLAN_MAC_DEL) ||
1670                    (cmd == BNX2X_VLAN_MAC_MOVE)) {
1671                        reg_elem = o->check_del(bp, o,
1672                                                &elem->cmd_data.vlan_mac.u);
1673
1674                        WARN_ON(!reg_elem);
1675
1676                        o->put_cam_offset(o, reg_elem->cam_offset);
1677                        list_del(&reg_elem->link);
1678                        kfree(reg_elem);
1679                }
1680        }
1681
1682        if (!drv_only)
1683                return 1;
1684        else
1685                return 0;
1686
1687error_exit:
1688        r->clear_pending(r);
1689
1690        /* Cleanup a registry in case of a failure */
1691        list_for_each_entry(elem, exe_chunk, link) {
1692                cmd = elem->cmd_data.vlan_mac.cmd;
1693
1694                if (cmd == BNX2X_VLAN_MAC_MOVE)
1695                        cam_obj = elem->cmd_data.vlan_mac.target_obj;
1696                else
1697                        cam_obj = o;
1698
1699                        /* Delete all the entries newly added above */
1700                if (!restore &&
1701                    ((cmd == BNX2X_VLAN_MAC_ADD) ||
1702                    (cmd == BNX2X_VLAN_MAC_MOVE))) {
1703                        reg_elem = o->check_del(bp, cam_obj,
1704                                                &elem->cmd_data.vlan_mac.u);
1705                        if (reg_elem) {
1706                                list_del(&reg_elem->link);
1707                                kfree(reg_elem);
1708                        }
1709                }
1710        }
1711
1712        return rc;
1713}
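
/* Illustrative ramrod-data layout (editor's sketch): for an exe_chunk of
 * {ADD, MOVE, DEL} the rules are written at idx 0, 1 and 3 respectively -
 * the MOVE at idx 1 also consumes idx 2, since it describes both the removal
 * from the source object and the addition to the target object.
 */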
1714
1715static inline int bnx2x_vlan_mac_push_new_cmd(
1716        struct bnx2x *bp,
1717        struct bnx2x_vlan_mac_ramrod_params *p)
1718{
1719        struct bnx2x_exeq_elem *elem;
1720        struct bnx2x_vlan_mac_obj *o = p->vlan_mac_obj;
1721        bool restore = test_bit(RAMROD_RESTORE, &p->ramrod_flags);
1722
1723        /* Allocate the execution queue element */
1724        elem = bnx2x_exe_queue_alloc_elem(bp);
1725        if (!elem)
1726                return -ENOMEM;
1727
1728        /* Set the command 'length' */
1729        switch (p->user_req.cmd) {
1730        case BNX2X_VLAN_MAC_MOVE:
1731                elem->cmd_len = 2;
1732                break;
1733        default:
1734                elem->cmd_len = 1;
1735        }
1736
1737        /* Fill the object specific info */
1738        memcpy(&elem->cmd_data.vlan_mac, &p->user_req, sizeof(p->user_req));
1739
1740        /* Try to add a new command to the pending list */
1741        return bnx2x_exe_queue_add(bp, &o->exe_queue, elem, restore);
1742}
1743
1744/**
1745 * bnx2x_config_vlan_mac - configure VLAN/MAC/VLAN_MAC filtering rules.
1746 *
1747 * @bp:   device handle
1748 * @p:    command parameters (vlan_mac object, user request, ramrod flags)
1749 *
1750 */
1751int bnx2x_config_vlan_mac(struct bnx2x *bp,
1752                           struct bnx2x_vlan_mac_ramrod_params *p)
1753{
1754        int rc = 0;
1755        struct bnx2x_vlan_mac_obj *o = p->vlan_mac_obj;
1756        unsigned long *ramrod_flags = &p->ramrod_flags;
1757        bool cont = test_bit(RAMROD_CONT, ramrod_flags);
1758        struct bnx2x_raw_obj *raw = &o->raw;
1759
1760        /*
1761         * Add new elements to the execution list for commands that require it.
1762         */
1763        if (!cont) {
1764                rc = bnx2x_vlan_mac_push_new_cmd(bp, p);
1765                if (rc)
1766                        return rc;
1767        }
1768
1769        /* If nothing will be executed further in this iteration we want to
1770         * return PENDING if there are pending commands
1771         */
1772        if (!bnx2x_exe_queue_empty(&o->exe_queue))
1773                rc = 1;
1774
1775        if (test_bit(RAMROD_DRV_CLR_ONLY, ramrod_flags)) {
1776                DP(BNX2X_MSG_SP, "RAMROD_DRV_CLR_ONLY requested: clearing a pending bit.\n");
1777                raw->clear_pending(raw);
1778        }
1779
1780        /* Execute commands if required */
1781        if (cont || test_bit(RAMROD_EXEC, ramrod_flags) ||
1782            test_bit(RAMROD_COMP_WAIT, ramrod_flags)) {
1783                rc = __bnx2x_vlan_mac_execute_step(bp, p->vlan_mac_obj,
1784                                                   &p->ramrod_flags);
1785                if (rc < 0)
1786                        return rc;
1787        }
1788
1789        /* RAMROD_COMP_WAIT is a superset of RAMROD_EXEC. If it was set
1790         * then the user wants to wait until the last command is done.
1791         */
1792        if (test_bit(RAMROD_COMP_WAIT, &p->ramrod_flags)) {
1793                /* Wait for at most the current exe_queue length in iterations, plus
1794                 * one (for the current pending command).
1795                 */
1796                int max_iterations = bnx2x_exe_queue_length(&o->exe_queue) + 1;
1797
1798                while (!bnx2x_exe_queue_empty(&o->exe_queue) &&
1799                       max_iterations--) {
1800
1801                        /* Wait for the current command to complete */
1802                        rc = raw->wait_comp(bp, raw);
1803                        if (rc)
1804                                return rc;
1805
1806                        /* Make a next step */
1807                        rc = __bnx2x_vlan_mac_execute_step(bp,
1808                                                           p->vlan_mac_obj,
1809                                                           &p->ramrod_flags);
1810                        if (rc < 0)
1811                                return rc;
1812                }
1813
1814                return 0;
1815        }
1816
1817        return rc;
1818}
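
/* Illustrative usage (editor's sketch; variable names are hypothetical and
 * real callers live outside this file, e.g. in bnx2x_cmn.c):
 *
 *	struct bnx2x_vlan_mac_ramrod_params p;
 *
 *	memset(&p, 0, sizeof(p));
 *	p.vlan_mac_obj = mac_obj;
 *	p.user_req.cmd = BNX2X_VLAN_MAC_ADD;
 *	memcpy(p.user_req.u.mac.mac, mac, ETH_ALEN);
 *	__set_bit(RAMROD_COMP_WAIT, &p.ramrod_flags);
 *	rc = bnx2x_config_vlan_mac(bp, &p);
 *
 * rc == 0 means configured, rc > 0 means still pending, rc < 0 is a failure.
 */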
1819
1820/**
1821 * bnx2x_vlan_mac_del_all - delete elements with given vlan_mac_flags spec
1822 *
1823 * @bp:                 device handle
1824 * @o:                  vlan_mac object to delete entries from
1825 * @vlan_mac_flags:     flags specifying which elements to delete
1826 * @ramrod_flags:       execution flags to be used for this deletion
1827 *
1828 * Returns 0 if the last operation has completed successfully and there are
1829 * no more elements left, a positive value if the last operation has
1830 * completed successfully and there are more previously configured elements,
1831 * or a negative value if the current operation has failed.
1832 */
1833static int bnx2x_vlan_mac_del_all(struct bnx2x *bp,
1834                                  struct bnx2x_vlan_mac_obj *o,
1835                                  unsigned long *vlan_mac_flags,
1836                                  unsigned long *ramrod_flags)
1837{
1838        struct bnx2x_vlan_mac_registry_elem *pos = NULL;
1839        struct bnx2x_vlan_mac_ramrod_params p;
1840        struct bnx2x_exe_queue_obj *exeq = &o->exe_queue;
1841        struct bnx2x_exeq_elem *exeq_pos, *exeq_pos_n;
1842        unsigned long flags;
1843        int read_lock;
1844        int rc = 0;
1845
1846        /* Clear pending commands first */
1847
1848        spin_lock_bh(&exeq->lock);
1849
1850        list_for_each_entry_safe(exeq_pos, exeq_pos_n, &exeq->exe_queue, link) {
1851                flags = exeq_pos->cmd_data.vlan_mac.vlan_mac_flags;
1852                if (BNX2X_VLAN_MAC_CMP_FLAGS(flags) ==
1853                    BNX2X_VLAN_MAC_CMP_FLAGS(*vlan_mac_flags)) {
1854                        rc = exeq->remove(bp, exeq->owner, exeq_pos);
1855                        if (rc) {
1856                                BNX2X_ERR("Failed to remove command\n");
1857                                spin_unlock_bh(&exeq->lock);
1858                                return rc;
1859                        }
1860                        list_del(&exeq_pos->link);
1861                        bnx2x_exe_queue_free_elem(bp, exeq_pos);
1862                }
1863        }
1864
1865        spin_unlock_bh(&exeq->lock);
1866
1867        /* Prepare a command request */
1868        memset(&p, 0, sizeof(p));
1869        p.vlan_mac_obj = o;
1870        p.ramrod_flags = *ramrod_flags;
1871        p.user_req.cmd = BNX2X_VLAN_MAC_DEL;
1872
1873        /* Add all but the last VLAN-MAC to the execution queue without actually
1874         * executing anything.
1875         */
1876        __clear_bit(RAMROD_COMP_WAIT, &p.ramrod_flags);
1877        __clear_bit(RAMROD_EXEC, &p.ramrod_flags);
1878        __clear_bit(RAMROD_CONT, &p.ramrod_flags);
1879
1880        DP(BNX2X_MSG_SP, "vlan_mac_del_all -- taking vlan_mac_lock (reader)\n");
1881        read_lock = bnx2x_vlan_mac_h_read_lock(bp, o);
1882        if (read_lock != 0)
1883                return read_lock;
1884
1885        list_for_each_entry(pos, &o->head, link) {
1886                flags = pos->vlan_mac_flags;
1887                if (BNX2X_VLAN_MAC_CMP_FLAGS(flags) ==
1888                    BNX2X_VLAN_MAC_CMP_FLAGS(*vlan_mac_flags)) {
1889                        p.user_req.vlan_mac_flags = pos->vlan_mac_flags;
1890                        memcpy(&p.user_req.u, &pos->u, sizeof(pos->u));
1891                        rc = bnx2x_config_vlan_mac(bp, &p);
1892                        if (rc < 0) {
1893                                BNX2X_ERR("Failed to add a new DEL command\n");
1894                                bnx2x_vlan_mac_h_read_unlock(bp, o);
1895                                return rc;
1896                        }
1897                }
1898        }
1899
1900        DP(BNX2X_MSG_SP, "vlan_mac_del_all -- releasing vlan_mac_lock (reader)\n");
1901        bnx2x_vlan_mac_h_read_unlock(bp, o);
1902
1903        p.ramrod_flags = *ramrod_flags;
1904        __set_bit(RAMROD_CONT, &p.ramrod_flags);
1905
1906        return bnx2x_config_vlan_mac(bp, &p);
1907}
1908
1909static inline void bnx2x_init_raw_obj(struct bnx2x_raw_obj *raw, u8 cl_id,
1910        u32 cid, u8 func_id, void *rdata, dma_addr_t rdata_mapping, int state,
1911        unsigned long *pstate, bnx2x_obj_type type)
1912{
1913        raw->func_id = func_id;
1914        raw->cid = cid;
1915        raw->cl_id = cl_id;
1916        raw->rdata = rdata;
1917        raw->rdata_mapping = rdata_mapping;
1918        raw->state = state;
1919        raw->pstate = pstate;
1920        raw->obj_type = type;
1921        raw->check_pending = bnx2x_raw_check_pending;
1922        raw->clear_pending = bnx2x_raw_clear_pending;
1923        raw->set_pending = bnx2x_raw_set_pending;
1924        raw->wait_comp = bnx2x_raw_wait;
1925}
1926
1927static inline void bnx2x_init_vlan_mac_common(struct bnx2x_vlan_mac_obj *o,
1928        u8 cl_id, u32 cid, u8 func_id, void *rdata, dma_addr_t rdata_mapping,
1929        int state, unsigned long *pstate, bnx2x_obj_type type,
1930        struct bnx2x_credit_pool_obj *macs_pool,
1931        struct bnx2x_credit_pool_obj *vlans_pool)
1932{
1933        INIT_LIST_HEAD(&o->head);
1934        o->head_reader = 0;
1935        o->head_exe_request = false;
1936        o->saved_ramrod_flags = 0;
1937
1938        o->macs_pool = macs_pool;
1939        o->vlans_pool = vlans_pool;
1940
1941        o->delete_all = bnx2x_vlan_mac_del_all;
1942        o->restore = bnx2x_vlan_mac_restore;
1943        o->complete = bnx2x_complete_vlan_mac;
1944        o->wait = bnx2x_wait_vlan_mac;
1945
1946        bnx2x_init_raw_obj(&o->raw, cl_id, cid, func_id, rdata, rdata_mapping,
1947                           state, pstate, type);
1948}
1949
1950void bnx2x_init_mac_obj(struct bnx2x *bp,
1951                        struct bnx2x_vlan_mac_obj *mac_obj,
1952                        u8 cl_id, u32 cid, u8 func_id, void *rdata,
1953                        dma_addr_t rdata_mapping, int state,
1954                        unsigned long *pstate, bnx2x_obj_type type,
1955                        struct bnx2x_credit_pool_obj *macs_pool)
1956{
1957        union bnx2x_qable_obj *qable_obj = (union bnx2x_qable_obj *)mac_obj;
1958
1959        bnx2x_init_vlan_mac_common(mac_obj, cl_id, cid, func_id, rdata,
1960                                   rdata_mapping, state, pstate, type,
1961                                   macs_pool, NULL);
1962
1963        /* CAM credit pool handling */
1964        mac_obj->get_credit = bnx2x_get_credit_mac;
1965        mac_obj->put_credit = bnx2x_put_credit_mac;
1966        mac_obj->get_cam_offset = bnx2x_get_cam_offset_mac;
1967        mac_obj->put_cam_offset = bnx2x_put_cam_offset_mac;
1968
1969        if (CHIP_IS_E1x(bp)) {
1970                mac_obj->set_one_rule      = bnx2x_set_one_mac_e1x;
1971                mac_obj->check_del         = bnx2x_check_mac_del;
1972                mac_obj->check_add         = bnx2x_check_mac_add;
1973                mac_obj->check_move        = bnx2x_check_move_always_err;
1974                mac_obj->ramrod_cmd        = RAMROD_CMD_ID_ETH_SET_MAC;
1975
1976                /* Exe Queue */
1977                bnx2x_exe_queue_init(bp,
1978                                     &mac_obj->exe_queue, 1, qable_obj,
1979                                     bnx2x_validate_vlan_mac,
1980                                     bnx2x_remove_vlan_mac,
1981                                     bnx2x_optimize_vlan_mac,
1982                                     bnx2x_execute_vlan_mac,
1983                                     bnx2x_exeq_get_mac);
1984        } else {
1985                mac_obj->set_one_rule      = bnx2x_set_one_mac_e2;
1986                mac_obj->check_del         = bnx2x_check_mac_del;
1987                mac_obj->check_add         = bnx2x_check_mac_add;
1988                mac_obj->check_move        = bnx2x_check_move;
1989                mac_obj->ramrod_cmd        =
1990                        RAMROD_CMD_ID_ETH_CLASSIFICATION_RULES;
1991                mac_obj->get_n_elements    = bnx2x_get_n_elements;
1992
1993                /* Exe Queue */
1994                bnx2x_exe_queue_init(bp,
1995                                     &mac_obj->exe_queue, CLASSIFY_RULES_COUNT,
1996                                     qable_obj, bnx2x_validate_vlan_mac,
1997                                     bnx2x_remove_vlan_mac,
1998                                     bnx2x_optimize_vlan_mac,
1999                                     bnx2x_execute_vlan_mac,
2000                                     bnx2x_exeq_get_mac);
2001        }
2002}
2003
2004void bnx2x_init_vlan_obj(struct bnx2x *bp,
2005                         struct bnx2x_vlan_mac_obj *vlan_obj,
2006                         u8 cl_id, u32 cid, u8 func_id, void *rdata,
2007                         dma_addr_t rdata_mapping, int state,
2008                         unsigned long *pstate, bnx2x_obj_type type,
2009                         struct bnx2x_credit_pool_obj *vlans_pool)
2010{
2011        union bnx2x_qable_obj *qable_obj = (union bnx2x_qable_obj *)vlan_obj;
2012
2013        bnx2x_init_vlan_mac_common(vlan_obj, cl_id, cid, func_id, rdata,
2014                                   rdata_mapping, state, pstate, type, NULL,
2015                                   vlans_pool);
2016
2017        vlan_obj->get_credit = bnx2x_get_credit_vlan;
2018        vlan_obj->put_credit = bnx2x_put_credit_vlan;
2019        vlan_obj->get_cam_offset = bnx2x_get_cam_offset_vlan;
2020        vlan_obj->put_cam_offset = bnx2x_put_cam_offset_vlan;
2021
2022        if (CHIP_IS_E1x(bp)) {
2023                BNX2X_ERR("Chips other than E2 and newer are not supported\n");
2024                BUG();
2025        } else {
2026                vlan_obj->set_one_rule      = bnx2x_set_one_vlan_e2;
2027                vlan_obj->check_del         = bnx2x_check_vlan_del;
2028                vlan_obj->check_add         = bnx2x_check_vlan_add;
2029                vlan_obj->check_move        = bnx2x_check_move;
2030                vlan_obj->ramrod_cmd        =
2031                        RAMROD_CMD_ID_ETH_CLASSIFICATION_RULES;
2032                vlan_obj->get_n_elements    = bnx2x_get_n_elements;
2033
2034                /* Exe Queue */
2035                bnx2x_exe_queue_init(bp,
2036                                     &vlan_obj->exe_queue, CLASSIFY_RULES_COUNT,
2037                                     qable_obj, bnx2x_validate_vlan_mac,
2038                                     bnx2x_remove_vlan_mac,
2039                                     bnx2x_optimize_vlan_mac,
2040                                     bnx2x_execute_vlan_mac,
2041                                     bnx2x_exeq_get_vlan);
2042        }
2043}
2044
2045/* RX_MODE verbs: DROP_ALL/ACCEPT_ALL/ACCEPT_ALL_MULTI/ACCEPT_ALL_VLAN/NORMAL */
2046static inline void __storm_memset_mac_filters(struct bnx2x *bp,
2047                        struct tstorm_eth_mac_filter_config *mac_filters,
2048                        u16 pf_id)
2049{
2050        size_t size = sizeof(struct tstorm_eth_mac_filter_config);
2051
2052        u32 addr = BAR_TSTRORM_INTMEM +
2053                        TSTORM_MAC_FILTER_CONFIG_OFFSET(pf_id);
2054
2055        __storm_memset_struct(bp, addr, size, (u32 *)mac_filters);
2056}
2057
2058static int bnx2x_set_rx_mode_e1x(struct bnx2x *bp,
2059                                 struct bnx2x_rx_mode_ramrod_params *p)
2060{
2061        /* update the bp MAC filter structure */
2062        u32 mask = (1 << p->cl_id);
2063
2064        struct tstorm_eth_mac_filter_config *mac_filters =
2065                (struct tstorm_eth_mac_filter_config *)p->rdata;
2066
2067        /* initial setting is drop-all */
2068        u8 drop_all_ucast = 1, drop_all_mcast = 1;
2069        u8 accp_all_ucast = 0, accp_all_bcast = 0, accp_all_mcast = 0;
2070        u8 unmatched_unicast = 0;
2071
2072        /* In e1x only the rx accept flags are taken into account, since
2073         * tx switching isn't enabled. */
2074        if (test_bit(BNX2X_ACCEPT_UNICAST, &p->rx_accept_flags))
2075                /* accept matched ucast */
2076                drop_all_ucast = 0;
2077
2078        if (test_bit(BNX2X_ACCEPT_MULTICAST, &p->rx_accept_flags))
2079                /* accept matched mcast */
2080                drop_all_mcast = 0;
2081
2082        if (test_bit(BNX2X_ACCEPT_ALL_UNICAST, &p->rx_accept_flags)) {
2083                /* accept all ucast */
2084                drop_all_ucast = 0;
2085                accp_all_ucast = 1;
2086        }
2087        if (test_bit(BNX2X_ACCEPT_ALL_MULTICAST, &p->rx_accept_flags)) {
2088                /* accept all mcast */
2089                drop_all_mcast = 0;
2090                accp_all_mcast = 1;
2091        }
2092        if (test_bit(BNX2X_ACCEPT_BROADCAST, &p->rx_accept_flags))
2093                /* accept (all) bcast */
2094                accp_all_bcast = 1;
2095        if (test_bit(BNX2X_ACCEPT_UNMATCHED, &p->rx_accept_flags))
2096                /* accept unmatched unicasts */
2097                unmatched_unicast = 1;
2098
2099        mac_filters->ucast_drop_all = drop_all_ucast ?
2100                mac_filters->ucast_drop_all | mask :
2101                mac_filters->ucast_drop_all & ~mask;
2102
2103        mac_filters->mcast_drop_all = drop_all_mcast ?
2104                mac_filters->mcast_drop_all | mask :
2105                mac_filters->mcast_drop_all & ~mask;
2106
2107        mac_filters->ucast_accept_all = accp_all_ucast ?
2108                mac_filters->ucast_accept_all | mask :
2109                mac_filters->ucast_accept_all & ~mask;
2110
2111        mac_filters->mcast_accept_all = accp_all_mcast ?
2112                mac_filters->mcast_accept_all | mask :
2113                mac_filters->mcast_accept_all & ~mask;
2114
2115        mac_filters->bcast_accept_all = accp_all_bcast ?
2116                mac_filters->bcast_accept_all | mask :
2117                mac_filters->bcast_accept_all & ~mask;
2118
2119        mac_filters->unmatched_unicast = unmatched_unicast ?
2120                mac_filters->unmatched_unicast | mask :
2121                mac_filters->unmatched_unicast & ~mask;
2122
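        /* Worked example (illustrative): with p->cl_id == 3, mask == 0x08;
         * a drop_all_ucast of 1 ORs 0x08 into ucast_drop_all, while 0 ANDs
         * it out with ~0x08 - each client owns exactly one bit in each
         * filter field.
         */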
2123        DP(BNX2X_MSG_SP, "drop_ucast 0x%x\ndrop_mcast 0x%x\naccp_ucast 0x%x\n"
2124                         "accp_mcast 0x%x\naccp_bcast 0x%x\n",
2125           mac_filters->ucast_drop_all, mac_filters->mcast_drop_all,
2126           mac_filters->ucast_accept_all, mac_filters->mcast_accept_all,
2127           mac_filters->bcast_accept_all);
2128
2129        /* write the MAC filter structure */
2130        __storm_memset_mac_filters(bp, mac_filters, p->func_id);
2131
2132        /* The operation is completed */
2133        clear_bit(p->state, p->pstate);
2134        smp_mb__after_atomic();
2135
2136        return 0;
2137}
2138
2139/* Setup ramrod data */
2140static inline void bnx2x_rx_mode_set_rdata_hdr_e2(u32 cid,
2141                                struct eth_classify_header *hdr,
2142                                u8 rule_cnt)
2143{
2144        hdr->echo = cpu_to_le32(cid);
2145        hdr->rule_cnt = rule_cnt;
2146}
2147
2148static inline void bnx2x_rx_mode_set_cmd_state_e2(struct bnx2x *bp,
2149                                unsigned long *accept_flags,
2150                                struct eth_filter_rules_cmd *cmd,
2151                                bool clear_accept_all)
2152{
2153        u16 state;
2154
2155        /* start with 'drop-all' */
2156        state = ETH_FILTER_RULES_CMD_UCAST_DROP_ALL |
2157                ETH_FILTER_RULES_CMD_MCAST_DROP_ALL;
2158
2159        if (test_bit(BNX2X_ACCEPT_UNICAST, accept_flags))
2160                state &= ~ETH_FILTER_RULES_CMD_UCAST_DROP_ALL;
2161
2162        if (test_bit(BNX2X_ACCEPT_MULTICAST, accept_flags))
2163                state &= ~ETH_FILTER_RULES_CMD_MCAST_DROP_ALL;
2164
2165        if (test_bit(BNX2X_ACCEPT_ALL_UNICAST, accept_flags)) {
2166                state &= ~ETH_FILTER_RULES_CMD_UCAST_DROP_ALL;
2167                state |= ETH_FILTER_RULES_CMD_UCAST_ACCEPT_ALL;
2168        }
2169
2170        if (test_bit(BNX2X_ACCEPT_ALL_MULTICAST, accept_flags)) {
2171                state |= ETH_FILTER_RULES_CMD_MCAST_ACCEPT_ALL;
2172                state &= ~ETH_FILTER_RULES_CMD_MCAST_DROP_ALL;
2173        }
2174
2175        if (test_bit(BNX2X_ACCEPT_BROADCAST, accept_flags))
2176                state |= ETH_FILTER_RULES_CMD_BCAST_ACCEPT_ALL;
2177
2178        if (test_bit(BNX2X_ACCEPT_UNMATCHED, accept_flags)) {
2179                state &= ~ETH_FILTER_RULES_CMD_UCAST_DROP_ALL;
2180                state |= ETH_FILTER_RULES_CMD_UCAST_ACCEPT_UNMATCHED;
2181        }
2182
2183        if (test_bit(BNX2X_ACCEPT_ANY_VLAN, accept_flags))
2184                state |= ETH_FILTER_RULES_CMD_ACCEPT_ANY_VLAN;
2185
2186        /* Clear ACCEPT_ALL_XXX flags for FCoE L2 Queue */
2187        if (clear_accept_all) {
2188                state &= ~ETH_FILTER_RULES_CMD_MCAST_ACCEPT_ALL;
2189                state &= ~ETH_FILTER_RULES_CMD_BCAST_ACCEPT_ALL;
2190                state &= ~ETH_FILTER_RULES_CMD_UCAST_ACCEPT_ALL;
2191                state &= ~ETH_FILTER_RULES_CMD_UCAST_ACCEPT_UNMATCHED;
2192        }
2193
2194        cmd->state = cpu_to_le16(state);
2195}
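
/* Worked example (editor's illustration): an accept_flags mask with only
 * BNX2X_ACCEPT_UNICAST and BNX2X_ACCEPT_BROADCAST set yields
 * state == ETH_FILTER_RULES_CMD_MCAST_DROP_ALL |
 *          ETH_FILTER_RULES_CMD_BCAST_ACCEPT_ALL:
 * matched unicast passes, all multicast is dropped, broadcast is accepted.
 */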
2196
2197static int bnx2x_set_rx_mode_e2(struct bnx2x *bp,
2198                                struct bnx2x_rx_mode_ramrod_params *p)
2199{
2200        struct eth_filter_rules_ramrod_data *data = p->rdata;
2201        int rc;
2202        u8 rule_idx = 0;
2203
2204        /* Reset the ramrod data buffer */
2205        memset(data, 0, sizeof(*data));
2206
2207        /* Setup ramrod data */
2208
2209        /* Tx (internal switching) */
2210        if (test_bit(RAMROD_TX, &p->ramrod_flags)) {
2211                data->rules[rule_idx].client_id = p->cl_id;
2212                data->rules[rule_idx].func_id = p->func_id;
2213
2214                data->rules[rule_idx].cmd_general_data =
2215                        ETH_FILTER_RULES_CMD_TX_CMD;
2216
2217                bnx2x_rx_mode_set_cmd_state_e2(bp, &p->tx_accept_flags,
2218                                               &(data->rules[rule_idx++]),
2219                                               false);
2220        }
2221
2222        /* Rx */
2223        if (test_bit(RAMROD_RX, &p->ramrod_flags)) {
2224                data->rules[rule_idx].client_id = p->cl_id;
2225                data->rules[rule_idx].func_id = p->func_id;
2226
2227                data->rules[rule_idx].cmd_general_data =
2228                        ETH_FILTER_RULES_CMD_RX_CMD;
2229
2230                bnx2x_rx_mode_set_cmd_state_e2(bp, &p->rx_accept_flags,
2231                                               &(data->rules[rule_idx++]),
2232                                               false);
2233        }
2234
2235        /* If FCoE Queue configuration has been requested, configure the Rx and
2236         * internal switching modes for this queue in separate rules.
2237         *
2238         * The FCoE queue shall never be set to ACCEPT_ALL packets of any sort:
2239         * MCAST_ALL, UCAST_ALL, BCAST_ALL and UNMATCHED.
2240         */
2241        if (test_bit(BNX2X_RX_MODE_FCOE_ETH, &p->rx_mode_flags)) {
2242                /*  Tx (internal switching) */
2243                if (test_bit(RAMROD_TX, &p->ramrod_flags)) {
2244                        data->rules[rule_idx].client_id = bnx2x_fcoe(bp, cl_id);
2245                        data->rules[rule_idx].func_id = p->func_id;
2246
2247                        data->rules[rule_idx].cmd_general_data =
2248                                                ETH_FILTER_RULES_CMD_TX_CMD;
2249
2250                        bnx2x_rx_mode_set_cmd_state_e2(bp, &p->tx_accept_flags,
2251                                                       &(data->rules[rule_idx]),
2252                                                       true);
2253                        rule_idx++;
2254                }
2255
2256                /* Rx */
2257                if (test_bit(RAMROD_RX, &p->ramrod_flags)) {
2258                        data->rules[rule_idx].client_id = bnx2x_fcoe(bp, cl_id);
2259                        data->rules[rule_idx].func_id = p->func_id;
2260
2261                        data->rules[rule_idx].cmd_general_data =
2262                                                ETH_FILTER_RULES_CMD_RX_CMD;
2263
2264                        bnx2x_rx_mode_set_cmd_state_e2(bp, &p->rx_accept_flags,
2265                                                       &(data->rules[rule_idx]),
2266                                                       true);
2267                        rule_idx++;
2268                }
2269        }
2270
2271        /* Set the ramrod header (most importantly - number of rules to
2272         * configure).
2273         */
2274        bnx2x_rx_mode_set_rdata_hdr_e2(p->cid, &data->header, rule_idx);
2275
2276        DP(BNX2X_MSG_SP, "About to configure %d rules, rx_accept_flags 0x%lx, tx_accept_flags 0x%lx\n",
2277                         data->header.rule_cnt, p->rx_accept_flags,
2278                         p->tx_accept_flags);
2279
2280        /* No need for an explicit memory barrier here as long as we
2281         * ensure the ordering of writing to the SPQ element
2282         * and updating of the SPQ producer which involves a memory
2283         * read. If the memory read is removed we will have to put a
2284         * full memory barrier there (inside bnx2x_sp_post()).
2285         */
2286
2287        /* Send a ramrod */
2288        rc = bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_FILTER_RULES, p->cid,
2289                           U64_HI(p->rdata_mapping),
2290                           U64_LO(p->rdata_mapping),
2291                           ETH_CONNECTION_TYPE);
2292        if (rc)
2293                return rc;
2294
2295        /* Ramrod completion is pending */
2296        return 1;
2297}
2298
2299static int bnx2x_wait_rx_mode_comp_e2(struct bnx2x *bp,
2300                                      struct bnx2x_rx_mode_ramrod_params *p)
2301{
2302        return bnx2x_state_wait(bp, p->state, p->pstate);
2303}
2304
2305static int bnx2x_empty_rx_mode_wait(struct bnx2x *bp,
2306                                    struct bnx2x_rx_mode_ramrod_params *p)
2307{
2308        /* Do nothing */
2309        return 0;
2310}
2311
2312int bnx2x_config_rx_mode(struct bnx2x *bp,
2313                         struct bnx2x_rx_mode_ramrod_params *p)
2314{
2315        int rc;
2316
2317        /* Configure the new classification in the chip */
2318        rc = p->rx_mode_obj->config_rx_mode(bp, p);
2319        if (rc < 0)
2320                return rc;
2321
2322        /* Wait for a ramrod completion if it was requested */
2323        if (test_bit(RAMROD_COMP_WAIT, &p->ramrod_flags)) {
2324                rc = p->rx_mode_obj->wait_comp(bp, p);
2325                if (rc)
2326                        return rc;
2327        }
2328
2329        return rc;
2330}
2331
2332void bnx2x_init_rx_mode_obj(struct bnx2x *bp,
2333                            struct bnx2x_rx_mode_obj *o)
2334{
2335        if (CHIP_IS_E1x(bp)) {
2336                o->wait_comp      = bnx2x_empty_rx_mode_wait;
2337                o->config_rx_mode = bnx2x_set_rx_mode_e1x;
2338        } else {
2339                o->wait_comp      = bnx2x_wait_rx_mode_comp_e2;
2340                o->config_rx_mode = bnx2x_set_rx_mode_e2;
2341        }
2342}
2343
2344/********************* Multicast verbs: SET, CLEAR ****************************/
2345static inline u8 bnx2x_mcast_bin_from_mac(u8 *mac)
2346{
2347        return (crc32c_le(0, mac, ETH_ALEN) >> 24) & 0xff;
2348}
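
/* Note (editor's): the bin is the top byte of the little-endian CRC32C of
 * the 6-byte MAC, so every multicast MAC maps to exactly one of 256 bins in
 * the approximate-match vector, and distinct MACs may share a bin.
 */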
2349
2350struct bnx2x_mcast_mac_elem {
2351        struct list_head link;
2352        u8 mac[ETH_ALEN];
2353        u8 pad[2]; /* For a natural alignment of the following buffer */
2354};
2355
2356struct bnx2x_pending_mcast_cmd {
2357        struct list_head link;
2358        int type; /* BNX2X_MCAST_CMD_X */
2359        union {
2360                struct list_head macs_head;
2361                u32 macs_num; /* Needed for DEL command */
2362                int next_bin; /* Needed for RESTORE flow with aprox match */
2363        } data;
2364
2365        bool done; /* set to true when the command has been handled. In
2366                    * practice it is only used for 57712, where one pending
2367                    * command may be handled in several operations. Since for
2368                    * the other chips every command is completed in a single
2369                    * ramrod, there is no need for this field there.
2370                    */
2371};
2372
2373static int bnx2x_mcast_wait(struct bnx2x *bp,
2374                            struct bnx2x_mcast_obj *o)
2375{
2376        if (bnx2x_state_wait(bp, o->sched_state, o->raw.pstate) ||
2377                        o->raw.wait_comp(bp, &o->raw))
2378                return -EBUSY;
2379
2380        return 0;
2381}
2382
2383static int bnx2x_mcast_enqueue_cmd(struct bnx2x *bp,
2384                                   struct bnx2x_mcast_obj *o,
2385                                   struct bnx2x_mcast_ramrod_params *p,
2386                                   enum bnx2x_mcast_cmd cmd)
2387{
2388        int total_sz;
2389        struct bnx2x_pending_mcast_cmd *new_cmd;
2390        struct bnx2x_mcast_mac_elem *cur_mac = NULL;
2391        struct bnx2x_mcast_list_elem *pos;
2392        int macs_list_len = ((cmd == BNX2X_MCAST_CMD_ADD) ?
2393                             p->mcast_list_len : 0);
2394
2395        /* If the command is empty ("handle pending commands only"), break */
2396        if (!p->mcast_list_len)
2397                return 0;
2398
2399        total_sz = sizeof(*new_cmd) +
2400                macs_list_len * sizeof(struct bnx2x_mcast_mac_elem);
2401
2402        /* Add mcast is called under spin_lock, thus we allocate with GFP_ATOMIC */
2403        new_cmd = kzalloc(total_sz, GFP_ATOMIC);
2404
2405        if (!new_cmd)
2406                return -ENOMEM;
2407
2408        DP(BNX2X_MSG_SP, "About to enqueue a new %d command. macs_list_len=%d\n",
2409           cmd, macs_list_len);
2410
2411        INIT_LIST_HEAD(&new_cmd->data.macs_head);
2412
2413        new_cmd->type = cmd;
2414        new_cmd->done = false;
2415
2416        switch (cmd) {
2417        case BNX2X_MCAST_CMD_ADD:
2418                cur_mac = (struct bnx2x_mcast_mac_elem *)
2419                          ((u8 *)new_cmd + sizeof(*new_cmd));
2420
2421                /* Push the MACs of the current command into the pending command
2422                 * MACs list: FIFO
2423                 */
2424                list_for_each_entry(pos, &p->mcast_list, link) {
2425                        memcpy(cur_mac->mac, pos->mac, ETH_ALEN);
2426                        list_add_tail(&cur_mac->link, &new_cmd->data.macs_head);
2427                        cur_mac++;
2428                }
2429
2430                break;
2431
2432        case BNX2X_MCAST_CMD_DEL:
2433                new_cmd->data.macs_num = p->mcast_list_len;
2434                break;
2435
2436        case BNX2X_MCAST_CMD_RESTORE:
2437                new_cmd->data.next_bin = 0;
2438                break;
2439
2440        default:
2441                kfree(new_cmd);
2442                BNX2X_ERR("Unknown command: %d\n", cmd);
2443                return -EINVAL;
2444        }
2445
2446        /* Push the new pending command to the tail of the pending list: FIFO */
2447        list_add_tail(&new_cmd->link, &o->pending_cmds_head);
2448
2449        o->set_sched(o);
2450
2451        return 1;
2452}
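
/* Layout of the single allocation above (editor's sketch):
 *
 *	new_cmd ---> +---------------------------------+
 *	             | struct bnx2x_pending_mcast_cmd  |
 *	cur_mac ---> +---------------------------------+
 *	             | macs_list_len *                 |
 *	             | struct bnx2x_mcast_mac_elem     |
 *	             +---------------------------------+
 *
 * The trailing array is only used for ADD commands; DEL and RESTORE keep
 * their state in data.macs_num / data.next_bin instead.
 */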
2453
2454/**
2455 * bnx2x_mcast_get_next_bin - get the next set bin (index)
2456 *
2457 * @o:          multicast object
2458 * @last:       index to start looking from (inclusive)
2459 *
2460 * returns the next found (set) bin or a negative value if none is found.
2461 */
2462static inline int bnx2x_mcast_get_next_bin(struct bnx2x_mcast_obj *o, int last)
2463{
2464        int i, j, inner_start = last % BIT_VEC64_ELEM_SZ;
2465
2466        for (i = last / BIT_VEC64_ELEM_SZ; i < BNX2X_MCAST_VEC_SZ; i++) {
2467                if (o->registry.aprox_match.vec[i])
2468                        for (j = inner_start; j < BIT_VEC64_ELEM_SZ; j++) {
2469                                int cur_bit = j + BIT_VEC64_ELEM_SZ * i;
2470                                if (BIT_VEC64_TEST_BIT(o->registry.aprox_match.
2471                                                       vec, cur_bit)) {
2472                                        return cur_bit;
2473                                }
2474                        }
2475                inner_start = 0;
2476        }
2477
2478        /* None found */
2479        return -1;
2480}
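
/* Worked example (illustrative): with bins 3 and 70 set,
 * bnx2x_mcast_get_next_bin(o, 0) returns 3, while
 * bnx2x_mcast_get_next_bin(o, 4) scans the rest of vec[0] in vain, resets
 * inner_start to 0 and returns bit 6 of vec[1], i.e. bin 70.
 */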
2481
2482/**
2483 * bnx2x_mcast_clear_first_bin - find the first set bin and clear it
2484 *
2485 * @o:          multicast object
2486 *
2487 * returns the index of the found bin or -1 if none is found
2488 */
2489static inline int bnx2x_mcast_clear_first_bin(struct bnx2x_mcast_obj *o)
2490{
2491        int cur_bit = bnx2x_mcast_get_next_bin(o, 0);
2492
2493        if (cur_bit >= 0)
2494                BIT_VEC64_CLEAR_BIT(o->registry.aprox_match.vec, cur_bit);
2495
2496        return cur_bit;
2497}
2498
2499static inline u8 bnx2x_mcast_get_rx_tx_flag(struct bnx2x_mcast_obj *o)
2500{
2501        struct bnx2x_raw_obj *raw = &o->raw;
2502        u8 rx_tx_flag = 0;
2503
2504        if ((raw->obj_type == BNX2X_OBJ_TYPE_TX) ||
2505            (raw->obj_type == BNX2X_OBJ_TYPE_RX_TX))
2506                rx_tx_flag |= ETH_MULTICAST_RULES_CMD_TX_CMD;
2507
2508        if ((raw->obj_type == BNX2X_OBJ_TYPE_RX) ||
2509            (raw->obj_type == BNX2X_OBJ_TYPE_RX_TX))
2510                rx_tx_flag |= ETH_MULTICAST_RULES_CMD_RX_CMD;
2511
2512        return rx_tx_flag;
2513}
2514
2515static void bnx2x_mcast_set_one_rule_e2(struct bnx2x *bp,
2516                                        struct bnx2x_mcast_obj *o, int idx,
2517                                        union bnx2x_mcast_config_data *cfg_data,
2518                                        enum bnx2x_mcast_cmd cmd)
2519{
2520        struct bnx2x_raw_obj *r = &o->raw;
2521        struct eth_multicast_rules_ramrod_data *data =
2522                (struct eth_multicast_rules_ramrod_data *)(r->rdata);
2523        u8 func_id = r->func_id;
2524        u8 rx_tx_add_flag = bnx2x_mcast_get_rx_tx_flag(o);
2525        int bin;
2526
2527        if ((cmd == BNX2X_MCAST_CMD_ADD) || (cmd == BNX2X_MCAST_CMD_RESTORE))
2528                rx_tx_add_flag |= ETH_MULTICAST_RULES_CMD_IS_ADD;
2529
2530        data->rules[idx].cmd_general_data |= rx_tx_add_flag;
2531
2532        /* Get a bin and update a bins' vector */
2533        switch (cmd) {
2534        case BNX2X_MCAST_CMD_ADD:
2535                bin = bnx2x_mcast_bin_from_mac(cfg_data->mac);
2536                BIT_VEC64_SET_BIT(o->registry.aprox_match.vec, bin);
2537                break;
2538
2539        case BNX2X_MCAST_CMD_DEL:
2540                /* If there were no more bins to clear
2541                 * (bnx2x_mcast_clear_first_bin() returned -1) we end up
2542                 * "clearing" a dummy (0xff) bin.
2543                 * See bnx2x_mcast_validate_e2() for an explanation of when
2544                 * this may happen.
2545                 */
2546                bin = bnx2x_mcast_clear_first_bin(o);
2547                break;
2548
2549        case BNX2X_MCAST_CMD_RESTORE:
2550                bin = cfg_data->bin;
2551                break;
2552
2553        default:
2554                BNX2X_ERR("Unknown command: %d\n", cmd);
2555                return;
2556        }
2557
2558        DP(BNX2X_MSG_SP, "%s bin %d\n",
2559                         ((rx_tx_add_flag & ETH_MULTICAST_RULES_CMD_IS_ADD) ?
2560                         "Setting"  : "Clearing"), bin);
2561
2562        data->rules[idx].bin_id    = (u8)bin;
2563        data->rules[idx].func_id   = func_id;
2564        data->rules[idx].engine_id = o->engine_id;
2565}
2566
2567/**
2568 * bnx2x_mcast_handle_restore_cmd_e2 - restore configuration from the registry
2569 *
2570 * @bp:         device handle
2571 * @o:          multicast object
2572 * @start_bin:  index in the registry to start from (inclusive)
2573 * @rdata_idx:  index in the ramrod data to start from
2574 *
2575 * returns last handled bin index or -1 if all bins have been handled
2576 */
2577static inline int bnx2x_mcast_handle_restore_cmd_e2(
2578        struct bnx2x *bp, struct bnx2x_mcast_obj *o, int start_bin,
2579        int *rdata_idx)
2580{
2581        int cur_bin, cnt = *rdata_idx;
2582        union bnx2x_mcast_config_data cfg_data = {NULL};
2583
2584        /* go through the registry and configure the bins from it */
2585        for (cur_bin = bnx2x_mcast_get_next_bin(o, start_bin); cur_bin >= 0;
2586            cur_bin = bnx2x_mcast_get_next_bin(o, cur_bin + 1)) {
2587
2588                cfg_data.bin = (u8)cur_bin;
2589                o->set_one_rule(bp, o, cnt, &cfg_data,
2590                                BNX2X_MCAST_CMD_RESTORE);
2591
2592                cnt++;
2593
2594                DP(BNX2X_MSG_SP, "About to configure a bin %d\n", cur_bin);
2595
2596                /* Break if we reached the maximum number
2597                 * of rules.
2598                 */
2599                if (cnt >= o->max_cmd_len)
2600                        break;
2601        }
2602
2603        *rdata_idx = cnt;
2604
2605        return cur_bin;
2606}
2607
2608static inline void bnx2x_mcast_hdl_pending_add_e2(struct bnx2x *bp,
2609        struct bnx2x_mcast_obj *o, struct bnx2x_pending_mcast_cmd *cmd_pos,
2610        int *line_idx)
2611{
2612        struct bnx2x_mcast_mac_elem *pmac_pos, *pmac_pos_n;
2613        int cnt = *line_idx;
2614        union bnx2x_mcast_config_data cfg_data = {NULL};
2615
2616        list_for_each_entry_safe(pmac_pos, pmac_pos_n, &cmd_pos->data.macs_head,
2617                                 link) {
2618
2619                cfg_data.mac = &pmac_pos->mac[0];
2620                o->set_one_rule(bp, o, cnt, &cfg_data, cmd_pos->type);
2621
2622                cnt++;
2623
2624                DP(BNX2X_MSG_SP, "About to configure %pM mcast MAC\n",
2625                   pmac_pos->mac);
2626
2627                list_del(&pmac_pos->link);
2628
2629                /* Break if we reached the maximum number
2630                 * of rules.
2631                 */
2632                if (cnt >= o->max_cmd_len)
2633                        break;
2634        }
2635
2636        *line_idx = cnt;
2637
2638        /* if no more MACs to configure - we are done */
2639        if (list_empty(&cmd_pos->data.macs_head))
2640                cmd_pos->done = true;
2641}
2642
2643static inline void bnx2x_mcast_hdl_pending_del_e2(struct bnx2x *bp,
2644        struct bnx2x_mcast_obj *o, struct bnx2x_pending_mcast_cmd *cmd_pos,
2645        int *line_idx)
2646{
2647        int cnt = *line_idx;
2648
2649        while (cmd_pos->data.macs_num) {
2650                o->set_one_rule(bp, o, cnt, NULL, cmd_pos->type);
2651
2652                cnt++;
2653
2654                cmd_pos->data.macs_num--;
2655
2656                DP(BNX2X_MSG_SP, "Deleting MAC. %d left, cnt is %d\n",
2657                   cmd_pos->data.macs_num, cnt);
2658
2659                /* Break if we reached the maximum
2660                 * number of rules.
2661                 */
2662                if (cnt >= o->max_cmd_len)
2663                        break;
2664        }
2665
2666        *line_idx = cnt;
2667
2668        /* If we deleted all the requested MACs - we are done */
2669        if (!cmd_pos->data.macs_num)
2670                cmd_pos->done = true;
2671}
2672
2673static inline void bnx2x_mcast_hdl_pending_restore_e2(struct bnx2x *bp,
2674        struct bnx2x_mcast_obj *o, struct bnx2x_pending_mcast_cmd *cmd_pos,
2675        int *line_idx)
2676{
2677        cmd_pos->data.next_bin = o->hdl_restore(bp, o, cmd_pos->data.next_bin,
2678                                                line_idx);
2679
2680        if (cmd_pos->data.next_bin < 0)
2681                /* If o->hdl_restore returned -1 we are done */
2682                cmd_pos->done = true;
2683        else
2684                /* Start from the next bin next time */
2685                cmd_pos->data.next_bin++;
2686}
2687
2688static inline int bnx2x_mcast_handle_pending_cmds_e2(struct bnx2x *bp,
2689                                struct bnx2x_mcast_ramrod_params *p)
2690{
2691        struct bnx2x_pending_mcast_cmd *cmd_pos, *cmd_pos_n;
2692        int cnt = 0;
2693        struct bnx2x_mcast_obj *o = p->mcast_obj;
2694
2695        list_for_each_entry_safe(cmd_pos, cmd_pos_n, &o->pending_cmds_head,
2696                                 link) {
2697                switch (cmd_pos->type) {
2698                case BNX2X_MCAST_CMD_ADD:
2699                        bnx2x_mcast_hdl_pending_add_e2(bp, o, cmd_pos, &cnt);
2700                        break;
2701
2702                case BNX2X_MCAST_CMD_DEL:
2703                        bnx2x_mcast_hdl_pending_del_e2(bp, o, cmd_pos, &cnt);
2704                        break;
2705
2706                case BNX2X_MCAST_CMD_RESTORE:
2707                        bnx2x_mcast_hdl_pending_restore_e2(bp, o, cmd_pos,
2708                                                           &cnt);
2709                        break;
2710
2711                default:
2712                        BNX2X_ERR("Unknown command: %d\n", cmd_pos->type);
2713                        return -EINVAL;
2714                }
2715
2716                /* If the command has been completed - remove it from the list
2717                 * and free the memory
2718                 */
2719                if (cmd_pos->done) {
2720                        list_del(&cmd_pos->link);
2721                        kfree(cmd_pos);
2722                }
2723
2724                /* Break if we reached the maximum number of rules */
2725                if (cnt >= o->max_cmd_len)
2726                        break;
2727        }
2728
2729        return cnt;
2730}
2731
2732static inline void bnx2x_mcast_hdl_add(struct bnx2x *bp,
2733        struct bnx2x_mcast_obj *o, struct bnx2x_mcast_ramrod_params *p,
2734        int *line_idx)
2735{
2736        struct bnx2x_mcast_list_elem *mlist_pos;
2737        union bnx2x_mcast_config_data cfg_data = {NULL};
2738        int cnt = *line_idx;
2739
2740        list_for_each_entry(mlist_pos, &p->mcast_list, link) {
2741                cfg_data.mac = mlist_pos->mac;
2742                o->set_one_rule(bp, o, cnt, &cfg_data, BNX2X_MCAST_CMD_ADD);
2743
2744                cnt++;
2745
2746                DP(BNX2X_MSG_SP, "About to configure %pM mcast MAC\n",
2747                   mlist_pos->mac);
2748        }
2749
2750        *line_idx = cnt;
2751}
2752
2753static inline void bnx2x_mcast_hdl_del(struct bnx2x *bp,
2754        struct bnx2x_mcast_obj *o, struct bnx2x_mcast_ramrod_params *p,
2755        int *line_idx)
2756{
2757        int cnt = *line_idx, i;
2758
2759        for (i = 0; i < p->mcast_list_len; i++) {
2760                o->set_one_rule(bp, o, cnt, NULL, BNX2X_MCAST_CMD_DEL);
2761
2762                cnt++;
2763
2764                DP(BNX2X_MSG_SP, "Deleting MAC. %d left\n",
2765                                 p->mcast_list_len - i - 1);
2766        }
2767
2768        *line_idx = cnt;
2769}
2770
2771/**
2772 * bnx2x_mcast_handle_current_cmd - handle the current (non-pending) command
2773 *
2774 * @bp:         device handle
2775 * @p:          multicast ramrod parameters
2776 * @cmd:        command to handle (ADD/DEL/RESTORE)
2777 * @start_cnt:  first line in the ramrod data that may be used
2778 *
2779 * This function is called iff there is enough room for the current command in
2780 * the ramrod data.
2781 * Returns number of lines filled in the ramrod data in total.
2782 */
2783static inline int bnx2x_mcast_handle_current_cmd(struct bnx2x *bp,
2784                        struct bnx2x_mcast_ramrod_params *p,
2785                        enum bnx2x_mcast_cmd cmd,
2786                        int start_cnt)
2787{
2788        struct bnx2x_mcast_obj *o = p->mcast_obj;
2789        int cnt = start_cnt;
2790
2791        DP(BNX2X_MSG_SP, "p->mcast_list_len=%d\n", p->mcast_list_len);
2792
2793        switch (cmd) {
2794        case BNX2X_MCAST_CMD_ADD:
2795                bnx2x_mcast_hdl_add(bp, o, p, &cnt);
2796                break;
2797
2798        case BNX2X_MCAST_CMD_DEL:
2799                bnx2x_mcast_hdl_del(bp, o, p, &cnt);
2800                break;
2801
2802        case BNX2X_MCAST_CMD_RESTORE:
2803                o->hdl_restore(bp, o, 0, &cnt);
2804                break;
2805
2806        default:
2807                BNX2X_ERR("Unknown command: %d\n", cmd);
2808                return -EINVAL;
2809        }
2810
2811        /* The current command has been handled */
2812        p->mcast_list_len = 0;
2813
2814        return cnt;
2815}
2816
2817static int bnx2x_mcast_validate_e2(struct bnx2x *bp,
2818                                   struct bnx2x_mcast_ramrod_params *p,
2819                                   enum bnx2x_mcast_cmd cmd)
2820{
2821        struct bnx2x_mcast_obj *o = p->mcast_obj;
2822        int reg_sz = o->get_registry_size(o);
2823
2824        switch (cmd) {
2825        /* DEL command deletes all currently configured MACs */
2826        case BNX2X_MCAST_CMD_DEL:
2827                o->set_registry_size(o, 0);
2828                /* Don't break - fall through to the RESTORE case */
2829
2830        /* RESTORE command will restore the entire multicast configuration */
2831        case BNX2X_MCAST_CMD_RESTORE:
2832                /* Here we set the approximate amount of work to do, which in
2833                 * fact may turn out to be less, as some MACs in postponed ADD
2834                 * command(s) scheduled before this command may fall into
2835                 * the same bin and the actual number of bins set in the
2836                 * registry would be less than we estimated here. See
2837                 * bnx2x_mcast_set_one_rule_e2() for further details.
2838                 */
2839                p->mcast_list_len = reg_sz;
2840                break;
2841
2842        case BNX2X_MCAST_CMD_ADD:
2843        case BNX2X_MCAST_CMD_CONT:
2844                /* Here we assume that all new MACs will fall into new bins.
2845                 * However we will correct the real registry size after we
2846                 * handle all pending commands.
2847                 */
2848                o->set_registry_size(o, reg_sz + p->mcast_list_len);
2849                break;
2850
2851        default:
2852                BNX2X_ERR("Unknown command: %d\n", cmd);
2853                return -EINVAL;
2854        }
2855
2856        /* Increase the total number of MACs pending to be configured */
2857        o->total_pending_num += p->mcast_list_len;
2858
2859        return 0;
2860}
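
/* Illustrative bookkeeping (editor's sketch): with 5 bins currently in the
 * registry, a DEL zeroes the registry size and falls through to set
 * p->mcast_list_len = 5 (five "clear a bin" lines of work), growing
 * o->total_pending_num by 5; an ADD of 3 MACs optimistically grows both the
 * registry size and o->total_pending_num by 3.
 */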
2861
2862static void bnx2x_mcast_revert_e2(struct bnx2x *bp,
2863                                      struct bnx2x_mcast_ramrod_params *p,
2864                                      int old_num_bins)
2865{
2866        struct bnx2x_mcast_obj *o = p->mcast_obj;
2867
2868        o->set_registry_size(o, old_num_bins);
2869        o->total_pending_num -= p->mcast_list_len;
2870}
2871
2872/**
2873 * bnx2x_mcast_set_rdata_hdr_e2 - sets the header values
2874 *
2875 * @bp:         device handle
2876 * @p:          multicast ramrod parameters
2877 * @len:        number of rules to handle
2878 */
2879static inline void bnx2x_mcast_set_rdata_hdr_e2(struct bnx2x *bp,
2880                                        struct bnx2x_mcast_ramrod_params *p,
2881                                        u8 len)
2882{
2883        struct bnx2x_raw_obj *r = &p->mcast_obj->raw;
2884        struct eth_multicast_rules_ramrod_data *data =
2885                (struct eth_multicast_rules_ramrod_data *)(r->rdata);
2886
2887        data->header.echo = cpu_to_le32((r->cid & BNX2X_SWCID_MASK) |
2888                                        (BNX2X_FILTER_MCAST_PENDING <<
2889                                         BNX2X_SWCID_SHIFT));
2890        data->header.rule_cnt = len;
2891}
2892
2893/**
2894 * bnx2x_mcast_refresh_registry_e2 - recalculate the actual number of set bins
2895 *
2896 * @bp:         device handle
2897 * @o:          multicast object
2898 *
2899 * Recalculate the actual number of set bins in the registry using Brian
2900 * Kernighan's algorithm: its complexity scales with the number of set bins.
2901 *
2902 * returns 0 for compliance with bnx2x_mcast_refresh_registry_e1().
2903 */
2904static inline int bnx2x_mcast_refresh_registry_e2(struct bnx2x *bp,
2905                                                  struct bnx2x_mcast_obj *o)
2906{
2907        int i, cnt = 0;
2908        u64 elem;
2909
2910        for (i = 0; i < BNX2X_MCAST_VEC_SZ; i++) {
2911                elem = o->registry.aprox_match.vec[i];
2912                for (; elem; cnt++)
2913                        elem &= elem - 1;
2914        }
2915
2916        o->set_registry_size(o, cnt);
2917
2918        return 0;
2919}
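
    /* Illustration (a sketch, not driver code): the inner loop above is
     * Brian Kernighan's bit-counting trick: "elem &= elem - 1" clears the
     * lowest set bit, so the loop body runs once per set bit rather than
     * once per bit position. A minimal standalone equivalent:
     *
     *        static int count_set_bits(u64 elem)
     *        {
     *                int cnt = 0;
     *
     *                for (; elem; cnt++)
     *                        elem &= elem - 1;
     *                return cnt;
     *        }
     */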
2920
2921static int bnx2x_mcast_setup_e2(struct bnx2x *bp,
2922                                struct bnx2x_mcast_ramrod_params *p,
2923                                enum bnx2x_mcast_cmd cmd)
2924{
2925        struct bnx2x_raw_obj *raw = &p->mcast_obj->raw;
2926        struct bnx2x_mcast_obj *o = p->mcast_obj;
2927        struct eth_multicast_rules_ramrod_data *data =
2928                (struct eth_multicast_rules_ramrod_data *)(raw->rdata);
2929        int cnt = 0, rc;
2930
2931        /* Reset the ramrod data buffer */
2932        memset(data, 0, sizeof(*data));
2933
2934        cnt = bnx2x_mcast_handle_pending_cmds_e2(bp, p);
2935
2936        /* If there are no more pending commands - clear SCHEDULED state */
2937        if (list_empty(&o->pending_cmds_head))
2938                o->clear_sched(o);
2939
2940        /* The below may be true iff there was enough room in ramrod
2941         * data for all pending commands and for the current
2942         * command. Otherwise the current command would have been added
2943         * to the pending commands and p->mcast_list_len would have been
2944         * zeroed.
2945         */
2946        if (p->mcast_list_len > 0)
2947                cnt = bnx2x_mcast_handle_current_cmd(bp, p, cmd, cnt);
2948
2949        /* We've pulled out some MACs - update the total number of
2950         * outstanding.
2951         */
2952        o->total_pending_num -= cnt;
2953
2954        /* send a ramrod */
2955        WARN_ON(o->total_pending_num < 0);
2956        WARN_ON(cnt > o->max_cmd_len);
2957
2958        bnx2x_mcast_set_rdata_hdr_e2(bp, p, (u8)cnt);
2959
2960        /* Update a registry size if there are no more pending operations.
2961         *
2962         * We don't want to change the value of the registry size if there are
2963         * pending operations because we want it to always be equal to the
2964         * exact or the approximate number (see bnx2x_mcast_validate_e2()) of
2965         * set bins after the last requested operation in order to properly
2966         * evaluate the size of the next DEL/RESTORE operation.
2967         *
2968         * Note that we update the registry itself during command(s) handling
2969         * - see bnx2x_mcast_set_one_rule_e2(). That's because for 57712 we
2970         * aggregate multiple commands (ADD/DEL/RESTORE) into one ramrod but
2971         * with a limited amount of update commands (per MAC/bin) and we don't
2972         * know in this scope what the actual state of bins configuration is
2973         * going to be after this ramrod.
2974         */
2975        if (!o->total_pending_num)
2976                bnx2x_mcast_refresh_registry_e2(bp, o);
2977
2978        /* If CLEAR_ONLY was requested - don't send a ramrod and clear
2979         * RAMROD_PENDING status immediately.
2980         */
2981        if (test_bit(RAMROD_DRV_CLR_ONLY, &p->ramrod_flags)) {
2982                raw->clear_pending(raw);
2983                return 0;
2984        } else {
2985                /* No need for an explicit memory barrier here as long as we
2986                 * ensure the ordering of writing to the SPQ element
2987                 * and updating of the SPQ producer which involves a memory
2988                 * read. If the memory read is removed we will have to put a
2989                 * full memory barrier there (inside bnx2x_sp_post()).
2990                 */
2991
2992                /* Send a ramrod */
2993                rc = bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_MULTICAST_RULES,
2994                                   raw->cid, U64_HI(raw->rdata_mapping),
2995                                   U64_LO(raw->rdata_mapping),
2996                                   ETH_CONNECTION_TYPE);
2997                if (rc)
2998                        return rc;
2999
3000                /* Ramrod completion is pending */
3001                return 1;
3002        }
3003}
3004
3005static int bnx2x_mcast_validate_e1h(struct bnx2x *bp,
3006                                    struct bnx2x_mcast_ramrod_params *p,
3007                                    enum bnx2x_mcast_cmd cmd)
3008{
3009        /* Mark that there is work to do */
3010        if ((cmd == BNX2X_MCAST_CMD_DEL) || (cmd == BNX2X_MCAST_CMD_RESTORE))
3011                p->mcast_list_len = 1;
3012
3013        return 0;
3014}
3015
3016static void bnx2x_mcast_revert_e1h(struct bnx2x *bp,
3017                                       struct bnx2x_mcast_ramrod_params *p,
3018                                       int old_num_bins)
3019{
3020        /* Do nothing */
3021}
3022
3023#define BNX2X_57711_SET_MC_FILTER(filter, bit) \
3024do { \
3025        (filter)[(bit) >> 5] |= (1 << ((bit) & 0x1f)); \
3026} while (0)
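
    /* The macro above selects one of the MC_HASH_SIZE 32-bit filter words
     * (bit >> 5) and the bit position within that word (bit & 0x1f).
     */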
3027
3028static inline void bnx2x_mcast_hdl_add_e1h(struct bnx2x *bp,
3029                                           struct bnx2x_mcast_obj *o,
3030                                           struct bnx2x_mcast_ramrod_params *p,
3031                                           u32 *mc_filter)
3032{
3033        struct bnx2x_mcast_list_elem *mlist_pos;
3034        int bit;
3035
3036        list_for_each_entry(mlist_pos, &p->mcast_list, link) {
3037                bit = bnx2x_mcast_bin_from_mac(mlist_pos->mac);
3038                BNX2X_57711_SET_MC_FILTER(mc_filter, bit);
3039
3040                DP(BNX2X_MSG_SP, "About to configure %pM mcast MAC, bin %d\n",
3041                   mlist_pos->mac, bit);
3042
3043                /* bookkeeping... */
3044                BIT_VEC64_SET_BIT(o->registry.aprox_match.vec,
3045                                  bit);
3046        }
3047}
3048
3049static inline void bnx2x_mcast_hdl_restore_e1h(struct bnx2x *bp,
3050        struct bnx2x_mcast_obj *o, struct bnx2x_mcast_ramrod_params *p,
3051        u32 *mc_filter)
3052{
3053        int bit;
3054
3055        for (bit = bnx2x_mcast_get_next_bin(o, 0);
3056             bit >= 0;
3057             bit = bnx2x_mcast_get_next_bin(o, bit + 1)) {
3058                BNX2X_57711_SET_MC_FILTER(mc_filter, bit);
3059                DP(BNX2X_MSG_SP, "About to set bin %d\n", bit);
3060        }
3061}
3062
3063/* On 57711 we write the multicast MACs' approximate match
3064 * table directly into the TSTORM's internal RAM, so we don't
3065 * need any tricks to make it work.
3066 */
3067static int bnx2x_mcast_setup_e1h(struct bnx2x *bp,
3068                                 struct bnx2x_mcast_ramrod_params *p,
3069                                 enum bnx2x_mcast_cmd cmd)
3070{
3071        int i;
3072        struct bnx2x_mcast_obj *o = p->mcast_obj;
3073        struct bnx2x_raw_obj *r = &o->raw;
3074
3075        /* If CLEAR_ONLY has been requested - only clear the registry and
3076         * the pending bit, without writing the filter to the chip.
3077         */
3078        if (!test_bit(RAMROD_DRV_CLR_ONLY, &p->ramrod_flags)) {
3079                u32 mc_filter[MC_HASH_SIZE] = {0};
3080
3081                /* Set the multicast filter bits before writing it into
3082                 * the internal memory.
3083                 */
3084                switch (cmd) {
3085                case BNX2X_MCAST_CMD_ADD:
3086                        bnx2x_mcast_hdl_add_e1h(bp, o, p, mc_filter);
3087                        break;
3088
3089                case BNX2X_MCAST_CMD_DEL:
3090                        DP(BNX2X_MSG_SP,
3091                           "Invalidating multicast MACs configuration\n");
3092
3093                        /* clear the registry */
3094                        memset(o->registry.aprox_match.vec, 0,
3095                               sizeof(o->registry.aprox_match.vec));
3096                        break;
3097
3098                case BNX2X_MCAST_CMD_RESTORE:
3099                        bnx2x_mcast_hdl_restore_e1h(bp, o, p, mc_filter);
3100                        break;
3101
3102                default:
3103                        BNX2X_ERR("Unknown command: %d\n", cmd);
3104                        return -EINVAL;
3105                }
3106
3107                /* Set the mcast filter in the internal memory */
3108                for (i = 0; i < MC_HASH_SIZE; i++)
3109                        REG_WR(bp, MC_HASH_OFFSET(bp, i), mc_filter[i]);
3110        } else
3111                /* clear the registry */
3112                memset(o->registry.aprox_match.vec, 0,
3113                       sizeof(o->registry.aprox_match.vec));
3114
3115        /* We are done */
3116        r->clear_pending(r);
3117
3118        return 0;
3119}
3120
3121static int bnx2x_mcast_validate_e1(struct bnx2x *bp,
3122                                   struct bnx2x_mcast_ramrod_params *p,
3123                                   enum bnx2x_mcast_cmd cmd)
3124{
3125        struct bnx2x_mcast_obj *o = p->mcast_obj;
3126        int reg_sz = o->get_registry_size(o);
3127
3128        switch (cmd) {
3129        /* DEL command deletes all currently configured MACs */
3130        case BNX2X_MCAST_CMD_DEL:
3131                o->set_registry_size(o, 0);
3132                /* fall through */
3133
3134        /* RESTORE command will restore the entire multicast configuration */
3135        case BNX2X_MCAST_CMD_RESTORE:
3136                p->mcast_list_len = reg_sz;
3137                DP(BNX2X_MSG_SP, "Command %d, p->mcast_list_len=%d\n",
3138                   cmd, p->mcast_list_len);
3139                break;
3140
3141        case BNX2X_MCAST_CMD_ADD:
3142        case BNX2X_MCAST_CMD_CONT:
3143                /* Multicast MACs on 57710 are configured as unicast MACs,
3144                 * and only a limited number of CAM entries is available
3145                 * for them.
3146                 */
3147                if (p->mcast_list_len > o->max_cmd_len) {
3148                        BNX2X_ERR("Can't configure more than %d multicast MACs on 57710\n",
3149                                  o->max_cmd_len);
3150                        return -EINVAL;
3151                }
3152                /* Every configured MAC should be cleared if the DEL command
3153                 * is called. Only the last ADD command is relevant since
3154                 * each ADD command overrides the previous configuration.
3155                 */
3156                DP(BNX2X_MSG_SP, "p->mcast_list_len=%d\n", p->mcast_list_len);
3157                if (p->mcast_list_len > 0)
3158                        o->set_registry_size(o, p->mcast_list_len);
3159
3160                break;
3161
3162        default:
3163                BNX2X_ERR("Unknown command: %d\n", cmd);
3164                return -EINVAL;
3165        }
3166
3167        /* We want to ensure that commands are executed one by one for 57710.
3168         * Therefore each non-empty command will consume o->max_cmd_len.
3169         */
3170        if (p->mcast_list_len)
3171                o->total_pending_num += o->max_cmd_len;
3172
3173        return 0;
3174}
3175
3176static void bnx2x_mcast_revert_e1(struct bnx2x *bp,
3177                                      struct bnx2x_mcast_ramrod_params *p,
3178                                      int old_num_macs)
3179{
3180        struct bnx2x_mcast_obj *o = p->mcast_obj;
3181
3182        o->set_registry_size(o, old_num_macs);
3183
3184        /* If the current command hasn't been handled yet, getting
3185         * here means that it's meant to be dropped and we have to
3186         * update the number of outstanding MACs accordingly.
3187         */
3188        if (p->mcast_list_len)
3189                o->total_pending_num -= o->max_cmd_len;
3190}
3191
3192static void bnx2x_mcast_set_one_rule_e1(struct bnx2x *bp,
3193                                        struct bnx2x_mcast_obj *o, int idx,
3194                                        union bnx2x_mcast_config_data *cfg_data,
3195                                        enum bnx2x_mcast_cmd cmd)
3196{
3197        struct bnx2x_raw_obj *r = &o->raw;
3198        struct mac_configuration_cmd *data =
3199                (struct mac_configuration_cmd *)(r->rdata);
3200
3201        /* copy mac */
3202        if ((cmd == BNX2X_MCAST_CMD_ADD) || (cmd == BNX2X_MCAST_CMD_RESTORE)) {
3203                bnx2x_set_fw_mac_addr(&data->config_table[idx].msb_mac_addr,
3204                                      &data->config_table[idx].middle_mac_addr,
3205                                      &data->config_table[idx].lsb_mac_addr,
3206                                      cfg_data->mac);
3207
3208                data->config_table[idx].vlan_id = 0;
3209                data->config_table[idx].pf_id = r->func_id;
3210                data->config_table[idx].clients_bit_vector =
3211                        cpu_to_le32(1 << r->cl_id);
3212
3213                SET_FLAG(data->config_table[idx].flags,
3214                         MAC_CONFIGURATION_ENTRY_ACTION_TYPE,
3215                         T_ETH_MAC_COMMAND_SET);
3216        }
3217}
3218
3219/**
3220 * bnx2x_mcast_set_rdata_hdr_e1 - set header values in mac_configuration_cmd
3221 *
3222 * @bp:         device handle
3223 * @p:          multicast ramrod parameters
3224 * @len:        number of rules to handle
3225 */
3226static inline void bnx2x_mcast_set_rdata_hdr_e1(struct bnx2x *bp,
3227                                        struct bnx2x_mcast_ramrod_params *p,
3228                                        u8 len)
3229{
3230        struct bnx2x_raw_obj *r = &p->mcast_obj->raw;
3231        struct mac_configuration_cmd *data =
3232                (struct mac_configuration_cmd *)(r->rdata);
3233
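        /* Each function owns its own region of the configuration table,
         * BNX2X_MAX_MULTICAST entries wide (BNX2X_MAX_EMUL_MULTI on
         * emulation); (1 + func_id) below selects that region.
         */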
3234        u8 offset = (CHIP_REV_IS_SLOW(bp) ?
3235                     BNX2X_MAX_EMUL_MULTI*(1 + r->func_id) :
3236                     BNX2X_MAX_MULTICAST*(1 + r->func_id));
3237
3238        data->hdr.offset = offset;
3239        data->hdr.client_id = cpu_to_le16(0xff);
3240        data->hdr.echo = cpu_to_le32((r->cid & BNX2X_SWCID_MASK) |
3241                                     (BNX2X_FILTER_MCAST_PENDING <<
3242                                      BNX2X_SWCID_SHIFT));
3243        data->hdr.length = len;
3244}
3245
3246/**
3247 * bnx2x_mcast_handle_restore_cmd_e1 - restore command for 57710
3248 *
3249 * @bp:         device handle
3250 * @o:          multicast object
3251 * @start_idx:  index in the registry to start from
3252 * @rdata_idx:  index in the ramrod data to start from
3253 *
3254 * The restore command for 57710 is, like all other commands, always a
3255 * stand-alone command - start_idx and rdata_idx will always be 0. This
3256 * function will always succeed.
3257 * Returns -1 to comply with the 57712 variant.
3258 */
3259static inline int bnx2x_mcast_handle_restore_cmd_e1(
3260        struct bnx2x *bp, struct bnx2x_mcast_obj *o, int start_idx,
3261        int *rdata_idx)
3262{
3263        struct bnx2x_mcast_mac_elem *elem;
3264        int i = 0;
3265        union bnx2x_mcast_config_data cfg_data = {NULL};
3266
3267        /* go through the registry and configure the MACs from it. */
3268        list_for_each_entry(elem, &o->registry.exact_match.macs, link) {
3269                cfg_data.mac = &elem->mac[0];
3270                o->set_one_rule(bp, o, i, &cfg_data, BNX2X_MCAST_CMD_RESTORE);
3271
3272                i++;
3273
3274                DP(BNX2X_MSG_SP, "About to configure %pM mcast MAC\n",
3275                   cfg_data.mac);
3276        }
3277
3278        *rdata_idx = i;
3279
3280        return -1;
3281}
3282
3283static inline int bnx2x_mcast_handle_pending_cmds_e1(
3284        struct bnx2x *bp, struct bnx2x_mcast_ramrod_params *p)
3285{
3286        struct bnx2x_pending_mcast_cmd *cmd_pos;
3287        struct bnx2x_mcast_mac_elem *pmac_pos;
3288        struct bnx2x_mcast_obj *o = p->mcast_obj;
3289        union bnx2x_mcast_config_data cfg_data = {NULL};
3290        int cnt = 0;
3291
3292        /* If nothing to be done - return */
3293        if (list_empty(&o->pending_cmds_head))
3294                return 0;
3295
3296        /* Handle the first command */
3297        cmd_pos = list_first_entry(&o->pending_cmds_head,
3298                                   struct bnx2x_pending_mcast_cmd, link);
3299
3300        switch (cmd_pos->type) {
3301        case BNX2X_MCAST_CMD_ADD:
3302                list_for_each_entry(pmac_pos, &cmd_pos->data.macs_head, link) {
3303                        cfg_data.mac = &pmac_pos->mac[0];
3304                        o->set_one_rule(bp, o, cnt, &cfg_data, cmd_pos->type);
3305
3306                        cnt++;
3307
3308                        DP(BNX2X_MSG_SP, "About to configure %pM mcast MAC\n",
3309                           pmac_pos->mac);
3310                }
3311                break;
3312
3313        case BNX2X_MCAST_CMD_DEL:
3314                cnt = cmd_pos->data.macs_num;
3315                DP(BNX2X_MSG_SP, "About to delete %d multicast MACs\n", cnt);
3316                break;
3317
3318        case BNX2X_MCAST_CMD_RESTORE:
3319                o->hdl_restore(bp, o, 0, &cnt);
3320                break;
3321
3322        default:
3323                BNX2X_ERR("Unknown command: %d\n", cmd_pos->type);
3324                return -EINVAL;
3325        }
3326
3327        list_del(&cmd_pos->link);
3328        kfree(cmd_pos);
3329
3330        return cnt;
3331}
3332
3333/**
3334 * bnx2x_get_fw_mac_addr - revert bnx2x_set_fw_mac_addr().
3335 *
3336 * @fw_hi:      upper 16 bits of the MAC address in FW format
3337 * @fw_mid:     middle 16 bits of the MAC address in FW format
3338 * @fw_lo:      lower 16 bits of the MAC address in FW format
3339 * @mac:        6-byte buffer to store the MAC address in
3340 */
3341static inline void bnx2x_get_fw_mac_addr(__le16 *fw_hi, __le16 *fw_mid,
3342                                         __le16 *fw_lo, u8 *mac)
3343{
3344        mac[1] = ((u8 *)fw_hi)[0];
3345        mac[0] = ((u8 *)fw_hi)[1];
3346        mac[3] = ((u8 *)fw_mid)[0];
3347        mac[2] = ((u8 *)fw_mid)[1];
3348        mac[5] = ((u8 *)fw_lo)[0];
3349        mac[4] = ((u8 *)fw_lo)[1];
3350}
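
    /* For example, the MAC 00:11:22:33:44:55 arrives here as the
     * little-endian words fw_hi = 0x0011, fw_mid = 0x2233 and
     * fw_lo = 0x4455; the byte swizzling above restores the canonical
     * byte order in mac[].
     */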
3351
3352/**
3353 * bnx2x_mcast_refresh_registry_e1 - update the exact-match multicast registry
3354 *
3355 * @bp:         device handle
3356 * @o:          multicast object
3357 *
3358 * Check the ramrod data first entry flag to see if it's a DELETE or ADD command
3359 * and update the registry correspondingly: if ADD - allocate memory and add
3360 * the entries to the registry (list), if DELETE - clear the registry and free
3361 * the memory.
3362 */
3363static inline int bnx2x_mcast_refresh_registry_e1(struct bnx2x *bp,
3364                                                  struct bnx2x_mcast_obj *o)
3365{
3366        struct bnx2x_raw_obj *raw = &o->raw;
3367        struct bnx2x_mcast_mac_elem *elem;
3368        struct mac_configuration_cmd *data =
3369                        (struct mac_configuration_cmd *)(raw->rdata);
3370
3371        /* If first entry contains a SET bit - the command was ADD,
3372         * otherwise - DEL_ALL
3373         */
3374        if (GET_FLAG(data->config_table[0].flags,
3375                        MAC_CONFIGURATION_ENTRY_ACTION_TYPE)) {
3376                int i, len = data->hdr.length;
3377
3378                /* Break if it was a RESTORE command */
3379                if (!list_empty(&o->registry.exact_match.macs))
3380                        return 0;
3381
3382                elem = kcalloc(len, sizeof(*elem), GFP_ATOMIC);
3383                if (!elem) {
3384                        BNX2X_ERR("Failed to allocate registry memory\n");
3385                        return -ENOMEM;
3386                }
3387
3388                for (i = 0; i < len; i++, elem++) {
3389                        bnx2x_get_fw_mac_addr(
3390                                &data->config_table[i].msb_mac_addr,
3391                                &data->config_table[i].middle_mac_addr,
3392                                &data->config_table[i].lsb_mac_addr,
3393                                elem->mac);
3394                        DP(BNX2X_MSG_SP, "Adding registry entry for [%pM]\n",
3395                           elem->mac);
3396                        list_add_tail(&elem->link,
3397                                      &o->registry.exact_match.macs);
3398                }
3399        } else {
3400                elem = list_first_entry(&o->registry.exact_match.macs,
3401                                        struct bnx2x_mcast_mac_elem, link);
3402                DP(BNX2X_MSG_SP, "Deleting a registry\n");
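                /* All registry entries were allocated as one kcalloc()
                 * array in the ADD path above, so freeing the first
                 * element releases the whole registry.
                 */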
3403                kfree(elem);
3404                INIT_LIST_HEAD(&o->registry.exact_match.macs);
3405        }
3406
3407        return 0;
3408}
3409
3410static int bnx2x_mcast_setup_e1(struct bnx2x *bp,
3411                                struct bnx2x_mcast_ramrod_params *p,
3412                                enum bnx2x_mcast_cmd cmd)
3413{
3414        struct bnx2x_mcast_obj *o = p->mcast_obj;
3415        struct bnx2x_raw_obj *raw = &o->raw;
3416        struct mac_configuration_cmd *data =
3417                (struct mac_configuration_cmd *)(raw->rdata);
3418        int cnt = 0, i, rc;
3419
3420        /* Reset the ramrod data buffer */
3421        memset(data, 0, sizeof(*data));
3422
3423        /* First set all entries as invalid */
3424        for (i = 0; i < o->max_cmd_len ; i++)
3425                SET_FLAG(data->config_table[i].flags,
3426                         MAC_CONFIGURATION_ENTRY_ACTION_TYPE,
3427                         T_ETH_MAC_COMMAND_INVALIDATE);
3428
3429        /* Handle pending commands first */
3430        cnt = bnx2x_mcast_handle_pending_cmds_e1(bp, p);
3431
3432        /* If there are no more pending commands - clear SCHEDULED state */
3433        if (list_empty(&o->pending_cmds_head))
3434                o->clear_sched(o);
3435
3436        /* The below may be true iff there were no pending commands */
3437        if (!cnt)
3438                cnt = bnx2x_mcast_handle_current_cmd(bp, p, cmd, 0);
3439
3440        /* For 57710 every command has o->max_cmd_len length to ensure that
3441         * commands are done one at a time.
3442         */
3443        o->total_pending_num -= o->max_cmd_len;
3444
3445        /* send a ramrod */
3446
3447        WARN_ON(cnt > o->max_cmd_len);
3448
3449        /* Set ramrod header (in particular, a number of entries to update) */
3450        bnx2x_mcast_set_rdata_hdr_e1(bp, p, (u8)cnt);
3451
3452        /* Update the registry: we need the registry contents to be always
3453         * up to date in order to be able to execute a RESTORE opcode. Here
3454         * we use the fact that for 57710 we send one command at a time,
3455         * hence we may take the registry update out of the command handling
3456         * and do it in a simpler way here.
3457         */
3458        rc = bnx2x_mcast_refresh_registry_e1(bp, o);
3459        if (rc)
3460                return rc;
3461
3462        /* If CLEAR_ONLY was requested - don't send a ramrod and clear
3463         * RAMROD_PENDING status immediately.
3464         */
3465        if (test_bit(RAMROD_DRV_CLR_ONLY, &p->ramrod_flags)) {
3466                raw->clear_pending(raw);
3467                return 0;
3468        } else {
3469                /* No need for an explicit memory barrier here as long as we
3470                 * ensure the ordering of writing to the SPQ element
3471                 * and updating of the SPQ producer which involves a memory
3472                 * read. If the memory read is removed we will have to put a
3473                 * full memory barrier there (inside bnx2x_sp_post()).
3474                 */
3475
3476                /* Send a ramrod */
3477                rc = bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, raw->cid,
3478                                   U64_HI(raw->rdata_mapping),
3479                                   U64_LO(raw->rdata_mapping),
3480                                   ETH_CONNECTION_TYPE);
3481                if (rc)
3482                        return rc;
3483
3484                /* Ramrod completion is pending */
3485                return 1;
3486        }
3487}
3488
3489static int bnx2x_mcast_get_registry_size_exact(struct bnx2x_mcast_obj *o)
3490{
3491        return o->registry.exact_match.num_macs_set;
3492}
3493
3494static int bnx2x_mcast_get_registry_size_aprox(struct bnx2x_mcast_obj *o)
3495{
3496        return o->registry.aprox_match.num_bins_set;
3497}
3498
3499static void bnx2x_mcast_set_registry_size_exact(struct bnx2x_mcast_obj *o,
3500                                                int n)
3501{
3502        o->registry.exact_match.num_macs_set = n;
3503}
3504
3505static void bnx2x_mcast_set_registry_size_aprox(struct bnx2x_mcast_obj *o,
3506                                                int n)
3507{
3508        o->registry.aprox_match.num_bins_set = n;
3509}
3510
3511int bnx2x_config_mcast(struct bnx2x *bp,
3512                       struct bnx2x_mcast_ramrod_params *p,
3513                       enum bnx2x_mcast_cmd cmd)
3514{
3515        struct bnx2x_mcast_obj *o = p->mcast_obj;
3516        struct bnx2x_raw_obj *r = &o->raw;
3517        int rc = 0, old_reg_size;
3518
3519        /* This is needed to recover the number of currently configured
3520         * mcast MACs in case of failure.
3521         */
3522        old_reg_size = o->get_registry_size(o);
3523
3524        /* Do some calculations and checks */
3525        rc = o->validate(bp, p, cmd);
3526        if (rc)
3527                return rc;
3528
3529        /* Return if there is no work to do */
3530        if ((!p->mcast_list_len) && (!o->check_sched(o)))
3531                return 0;
3532
3533        DP(BNX2X_MSG_SP, "o->total_pending_num=%d p->mcast_list_len=%d o->max_cmd_len=%d\n",
3534           o->total_pending_num, p->mcast_list_len, o->max_cmd_len);
3535
3536        /* Enqueue the current command to the pending list if we can't complete
3537         * it in the current iteration
3538         */
3539        if (r->check_pending(r) ||
3540            ((o->max_cmd_len > 0) && (o->total_pending_num > o->max_cmd_len))) {
3541                rc = o->enqueue_cmd(bp, p->mcast_obj, p, cmd);
3542                if (rc < 0)
3543                        goto error_exit1;
3544
3545                /* Since the current command is now on the pending command
3546                 * list, we don't need to handle it separately.
3547                 */
3548                p->mcast_list_len = 0;
3549        }
3550
3551        if (!r->check_pending(r)) {
3552
3553                /* Set 'pending' state */
3554                r->set_pending(r);
3555
3556                /* Configure the new classification in the chip */
3557                rc = o->config_mcast(bp, p, cmd);
3558                if (rc < 0)
3559                        goto error_exit2;
3560
3561                /* Wait for a ramrod completion if was requested */
3562                if (test_bit(RAMROD_COMP_WAIT, &p->ramrod_flags))
3563                        rc = o->wait_comp(bp, o);
3564        }
3565
3566        return rc;
3567
3568error_exit2:
3569        r->clear_pending(r);
3570
3571error_exit1:
3572        o->revert(bp, p, old_reg_size);
3573
3574        return rc;
3575}
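
    /* Typical usage (a sketch, not verbatim driver code): fill the ramrod
     * parameters, request completion waiting and issue an ADD command:
     *
     *        struct bnx2x_mcast_ramrod_params rparam = {NULL};
     *
     *        rparam.mcast_obj = &bp->mcast_obj;
     *        __set_bit(RAMROD_COMP_WAIT, &rparam.ramrod_flags);
     *        ... build rparam.mcast_list / rparam.mcast_list_len ...
     *        rc = bnx2x_config_mcast(bp, &rparam, BNX2X_MCAST_CMD_ADD);
     */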
3576
3577static void bnx2x_mcast_clear_sched(struct bnx2x_mcast_obj *o)
3578{
3579        smp_mb__before_atomic();
3580        clear_bit(o->sched_state, o->raw.pstate);
3581        smp_mb__after_atomic();
3582}
3583
3584static void bnx2x_mcast_set_sched(struct bnx2x_mcast_obj *o)
3585{
3586        smp_mb__before_atomic();
3587        set_bit(o->sched_state, o->raw.pstate);
3588        smp_mb__after_atomic();
3589}
3590
3591static bool bnx2x_mcast_check_sched(struct bnx2x_mcast_obj *o)
3592{
3593        return !!test_bit(o->sched_state, o->raw.pstate);
3594}
3595
3596static bool bnx2x_mcast_check_pending(struct bnx2x_mcast_obj *o)
3597{
3598        return o->raw.check_pending(&o->raw) || o->check_sched(o);
3599}
3600
3601void bnx2x_init_mcast_obj(struct bnx2x *bp,
3602                          struct bnx2x_mcast_obj *mcast_obj,
3603                          u8 mcast_cl_id, u32 mcast_cid, u8 func_id,
3604                          u8 engine_id, void *rdata, dma_addr_t rdata_mapping,
3605                          int state, unsigned long *pstate, bnx2x_obj_type type)
3606{
3607        memset(mcast_obj, 0, sizeof(*mcast_obj));
3608
3609        bnx2x_init_raw_obj(&mcast_obj->raw, mcast_cl_id, mcast_cid, func_id,
3610                           rdata, rdata_mapping, state, pstate, type);
3611
3612        mcast_obj->engine_id = engine_id;
3613
3614        INIT_LIST_HEAD(&mcast_obj->pending_cmds_head);
3615
3616        mcast_obj->sched_state = BNX2X_FILTER_MCAST_SCHED;
3617        mcast_obj->check_sched = bnx2x_mcast_check_sched;
3618        mcast_obj->set_sched = bnx2x_mcast_set_sched;
3619        mcast_obj->clear_sched = bnx2x_mcast_clear_sched;
3620
3621        if (CHIP_IS_E1(bp)) {
3622                mcast_obj->config_mcast      = bnx2x_mcast_setup_e1;
3623                mcast_obj->enqueue_cmd       = bnx2x_mcast_enqueue_cmd;
3624                mcast_obj->hdl_restore       =
3625                        bnx2x_mcast_handle_restore_cmd_e1;
3626                mcast_obj->check_pending     = bnx2x_mcast_check_pending;
3627
3628                if (CHIP_REV_IS_SLOW(bp))
3629                        mcast_obj->max_cmd_len = BNX2X_MAX_EMUL_MULTI;
3630                else
3631                        mcast_obj->max_cmd_len = BNX2X_MAX_MULTICAST;
3632
3633                mcast_obj->wait_comp         = bnx2x_mcast_wait;
3634                mcast_obj->set_one_rule      = bnx2x_mcast_set_one_rule_e1;
3635                mcast_obj->validate          = bnx2x_mcast_validate_e1;
3636                mcast_obj->revert            = bnx2x_mcast_revert_e1;
3637                mcast_obj->get_registry_size =
3638                        bnx2x_mcast_get_registry_size_exact;
3639                mcast_obj->set_registry_size =
3640                        bnx2x_mcast_set_registry_size_exact;
3641
3642                /* 57710 is the only chip that uses the exact match for mcast
3643                 * at the moment.
3644                 */
3645                INIT_LIST_HEAD(&mcast_obj->registry.exact_match.macs);
3646
3647        } else if (CHIP_IS_E1H(bp)) {
3648                mcast_obj->config_mcast  = bnx2x_mcast_setup_e1h;
3649                mcast_obj->enqueue_cmd   = NULL;
3650                mcast_obj->hdl_restore   = NULL;
3651                mcast_obj->check_pending = bnx2x_mcast_check_pending;
3652
3653                /* 57711 doesn't send a ramrod, so it has unlimited credit
3654                 * for one command.
3655                 */
3656                mcast_obj->max_cmd_len       = -1;
3657                mcast_obj->wait_comp         = bnx2x_mcast_wait;
3658                mcast_obj->set_one_rule      = NULL;
3659                mcast_obj->validate          = bnx2x_mcast_validate_e1h;
3660                mcast_obj->revert            = bnx2x_mcast_revert_e1h;
3661                mcast_obj->get_registry_size =
3662                        bnx2x_mcast_get_registry_size_aprox;
3663                mcast_obj->set_registry_size =
3664                        bnx2x_mcast_set_registry_size_aprox;
3665        } else {
3666                mcast_obj->config_mcast      = bnx2x_mcast_setup_e2;
3667                mcast_obj->enqueue_cmd       = bnx2x_mcast_enqueue_cmd;
3668                mcast_obj->hdl_restore       =
3669                        bnx2x_mcast_handle_restore_cmd_e2;
3670                mcast_obj->check_pending     = bnx2x_mcast_check_pending;
3671                /* TODO: There should be a proper HSI define for this number!!!
3672                 */
3673                mcast_obj->max_cmd_len       = 16;
3674                mcast_obj->wait_comp         = bnx2x_mcast_wait;
3675                mcast_obj->set_one_rule      = bnx2x_mcast_set_one_rule_e2;
3676                mcast_obj->validate          = bnx2x_mcast_validate_e2;
3677                mcast_obj->revert            = bnx2x_mcast_revert_e2;
3678                mcast_obj->get_registry_size =
3679                        bnx2x_mcast_get_registry_size_aprox;
3680                mcast_obj->set_registry_size =
3681                        bnx2x_mcast_set_registry_size_aprox;
3682        }
3683}
3684
3685/*************************** Credit handling **********************************/
3686
3687/**
3688 * __atomic_add_ifless - add if the result is less than a given value.
3689 *
3690 * @v:  pointer of type atomic_t
3691 * @a:  the amount to add to v...
3692 * @u:  ...if (v + a) is less than u.
3693 *
3694 * returns true if (v + a) was less than u, and false otherwise.
3695 *
3696 */
3697static inline bool __atomic_add_ifless(atomic_t *v, int a, int u)
3698{
3699        int c, old;
3700
3701        c = atomic_read(v);
3702        for (;;) {
3703                if (unlikely(c + a >= u))
3704                        return false;
3705
3706                old = atomic_cmpxchg((v), c, c + a);
3707                if (likely(old == c))
3708                        break;
3709                c = old;
3710        }
3711
3712        return true;
3713}
3714
3715/**
3716 * __atomic_dec_ifmoe - dec if the result is greater than or equal to a given value.
3717 *
3718 * @v:  pointer of type atomic_t
3719 * @a:  the amount to dec from v...
3720 * @u:  ...if (v - a) is greater than or equal to u.
3721 *
3722 * returns true if (v - a) was greater than or equal to u, and false
3723 * otherwise.
3724 */
3725static inline bool __atomic_dec_ifmoe(atomic_t *v, int a, int u)
3726{
3727        int c, old;
3728
3729        c = atomic_read(v);
3730        for (;;) {
3731                if (unlikely(c - a < u))
3732                        return false;
3733
3734                old = atomic_cmpxchg((v), c, c - a);
3735                if (likely(old == c))
3736                        break;
3737                c = old;
3738        }
3739
3740        return true;
3741}
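
    /* Both helpers above implement the classic lock-free cmpxchg loop:
     * read the counter, check the bound, and let atomic_cmpxchg() commit
     * the new value only if no other CPU modified the counter in between;
     * on a race the loop retries with the freshly observed value.
     */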
3742
3743static bool bnx2x_credit_pool_get(struct bnx2x_credit_pool_obj *o, int cnt)
3744{
3745        bool rc;
3746
3747        smp_mb();
3748        rc = __atomic_dec_ifmoe(&o->credit, cnt, 0);
3749        smp_mb();
3750
3751        return rc;
3752}
3753
3754static bool bnx2x_credit_pool_put(struct bnx2x_credit_pool_obj *o, int cnt)
3755{
3756        bool rc;
3757
3758        smp_mb();
3759
3760        /* Don't allow a refill if credit + cnt > pool_sz */
3761        rc = __atomic_add_ifless(&o->credit, cnt, o->pool_sz + 1);
3762
3763        smp_mb();
3764
3765        return rc;
3766}
3767
3768static int bnx2x_credit_pool_check(struct bnx2x_credit_pool_obj *o)
3769{
3770        int cur_credit;
3771
3772        smp_mb();
3773        cur_credit = atomic_read(&o->credit);
3774
3775        return cur_credit;
3776}
3777
3778static bool bnx2x_credit_pool_always_true(struct bnx2x_credit_pool_obj *o,
3779                                          int cnt)
3780{
3781        return true;
3782}
3783
3784static bool bnx2x_credit_pool_get_entry(
3785        struct bnx2x_credit_pool_obj *o,
3786        int *offset)
3787{
3788        int idx, vec, i;
3789
3790        *offset = -1;
3791
3792        /* Find the "internal cam-offset", then add it to the base for this object... */
3793        for (vec = 0; vec < BNX2X_POOL_VEC_SIZE; vec++) {
3794
3795                /* Skip the current vector if there are no free entries in it */
3796                if (!o->pool_mirror[vec])
3797                        continue;
3798
3799                /* If we've got here we are going to find a free entry */
3800                for (idx = vec * BIT_VEC64_ELEM_SZ, i = 0;
3801                      i < BIT_VEC64_ELEM_SZ; idx++, i++)
3802
3803                        if (BIT_VEC64_TEST_BIT(o->pool_mirror, idx)) {
3804                                /* Got one!! */
3805                                BIT_VEC64_CLEAR_BIT(o->pool_mirror, idx);
3806                                *offset = o->base_pool_offset + idx;
3807                                return true;
3808                        }
3809        }
3810
3811        return false;
3812}
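
    /* pool_mirror is a bitmap with one bit per CAM entry, where a set bit
     * marks a free entry: get_entry above finds and clears the first set
     * bit and returns base_pool_offset + idx; put_entry below sets the bit
     * back after validating the offset against the pool bounds.
     */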
3813
3814static bool bnx2x_credit_pool_put_entry(
3815        struct bnx2x_credit_pool_obj *o,
3816        int offset)
3817{
3818        if (offset < o->base_pool_offset)
3819                return false;
3820
3821        offset -= o->base_pool_offset;
3822
3823        if (offset >= o->pool_sz)
3824                return false;
3825
3826        /* Return the entry to the pool */
3827        BIT_VEC64_SET_BIT(o->pool_mirror, offset);
3828
3829        return true;
3830}
3831
3832static bool bnx2x_credit_pool_put_entry_always_true(
3833        struct bnx2x_credit_pool_obj *o,
3834        int offset)
3835{
3836        return true;
3837}
3838
3839static bool bnx2x_credit_pool_get_entry_always_true(
3840        struct bnx2x_credit_pool_obj *o,
3841        int *offset)
3842{
3843        *offset = -1;
3844        return true;
3845}
3846/**
3847 * bnx2x_init_credit_pool - initialize credit pool internals.
3848 *
3849 * @p:          credit pool object
3850 * @base:       Base entry in the CAM to use.
3851 * @credit:     pool size.
3852 *
3853 * If base is negative no CAM entries handling will be performed.
3854 * If credit is negative pool operations will always succeed (unlimited pool).
3855 *
3856 */
3857static inline void bnx2x_init_credit_pool(struct bnx2x_credit_pool_obj *p,
3858                                          int base, int credit)
3859{
3860        /* Zero the object first */
3861        memset(p, 0, sizeof(*p));
3862
3863        /* Set the table to all 1s */
3864        memset(&p->pool_mirror, 0xff, sizeof(p->pool_mirror));
3865
3866        /* Init the pool as full */
3867        atomic_set(&p->credit, credit);
3868
3869        /* The total pool size */
3870        p->pool_sz = credit;
3871
3872        p->base_pool_offset = base;
3873
3874        /* Commit the change */
3875        smp_mb();
3876
3877        p->check = bnx2x_credit_pool_check;
3878
3879        /* if pool credit is negative - disable the checks */
3880        if (credit >= 0) {
3881                p->put      = bnx2x_credit_pool_put;
3882                p->get      = bnx2x_credit_pool_get;
3883                p->put_entry = bnx2x_credit_pool_put_entry;
3884                p->get_entry = bnx2x_credit_pool_get_entry;
3885        } else {
3886                p->put      = bnx2x_credit_pool_always_true;
3887                p->get      = bnx2x_credit_pool_always_true;
3888                p->put_entry = bnx2x_credit_pool_put_entry_always_true;
3889                p->get_entry = bnx2x_credit_pool_get_entry_always_true;
3890        }
3891
3892        /* If base is negative - disable entries handling */
3893        if (base < 0) {
3894                p->put_entry = bnx2x_credit_pool_put_entry_always_true;
3895                p->get_entry = bnx2x_credit_pool_get_entry_always_true;
3896        }
3897}
3898
3899void bnx2x_init_mac_credit_pool(struct bnx2x *bp,
3900                                struct bnx2x_credit_pool_obj *p, u8 func_id,
3901                                u8 func_num)
3902{
3903/* TODO: this will be defined in consts as well... */
3904#define BNX2X_CAM_SIZE_EMUL 5
3905
3906        int cam_sz;
3907
3908        if (CHIP_IS_E1(bp)) {
3909                /* In E1, multicast MACs are saved in the CAM... */
3910                if (!CHIP_REV_IS_SLOW(bp))
3911                        cam_sz = (MAX_MAC_CREDIT_E1 / 2) - BNX2X_MAX_MULTICAST;
3912                else
3913                        cam_sz = BNX2X_CAM_SIZE_EMUL - BNX2X_MAX_EMUL_MULTI;
3914
3915                bnx2x_init_credit_pool(p, func_id * cam_sz, cam_sz);
3916
3917        } else if (CHIP_IS_E1H(bp)) {
3918                /* CAM credit is equally divided between all active functions
3919                 * on the PORT.
3920                 */
3921                if (func_num > 0) {
3922                        if (!CHIP_REV_IS_SLOW(bp))
3923                                cam_sz = (MAX_MAC_CREDIT_E1H / (2*func_num));
3924                        else
3925                                cam_sz = BNX2X_CAM_SIZE_EMUL;
3926                        bnx2x_init_credit_pool(p, func_id * cam_sz, cam_sz);
3927                } else {
3928                        /* this should never happen! Block MAC operations. */
3929                        bnx2x_init_credit_pool(p, 0, 0);
3930                }
3931
3932        } else {
3933
3934                /* CAM credit is equally divided between all active functions
3935                 * on the PATH.
3936                 */
3937                if (func_num > 0) {
3938                        if (!CHIP_REV_IS_SLOW(bp))
3939                                cam_sz = (MAX_MAC_CREDIT_E2 / func_num);
3940                        else
3941                                cam_sz = BNX2X_CAM_SIZE_EMUL;
3942
3943                        /* No need for CAM entries handling for 57712 and
3944                         * newer.
3945                         */
3946                        bnx2x_init_credit_pool(p, -1, cam_sz);
3947                } else {
3948                        /* this should never happen! Block MAC operations. */
3949                        bnx2x_init_credit_pool(p, 0, 0);
3950                }
3951        }
3952}
3953
3954void bnx2x_init_vlan_credit_pool(struct bnx2x *bp,
3955                                 struct bnx2x_credit_pool_obj *p,
3956                                 u8 func_id,
3957                                 u8 func_num)
3958{
3959        if (CHIP_IS_E1x(bp)) {
3960                /* There is no VLAN credit in HW on 57710 and 57711; only
3961                 * MAC / MAC-VLAN can be set.
3962                 */
3963                bnx2x_init_credit_pool(p, 0, -1);
3964        } else {
3965                /* CAM credit is equally divided between all active functions
3966                 * on the PATH.
3967                 */
3968                if (func_num > 0) {
3969                        int credit = MAX_VLAN_CREDIT_E2 / func_num;
3970                        bnx2x_init_credit_pool(p, func_id * credit, credit);
3971                } else
3972                        /* this should never happen! Block VLAN operations. */
3973                        bnx2x_init_credit_pool(p, 0, 0);
3974        }
3975}
3976
3977/****************** RSS Configuration ******************/
3978/**
3979 * bnx2x_debug_print_ind_table - prints the indirection table configuration.
3980 *
3981 * @bp:         driver handle
3982 * @p:          pointer to rss configuration
3983 *
3984 * Prints it when NETIF_MSG_IFUP debug level is configured.
3985 */
3986static inline void bnx2x_debug_print_ind_table(struct bnx2x *bp,
3987                                        struct bnx2x_config_rss_params *p)
3988{
3989        int i;
3990
3991        DP(BNX2X_MSG_SP, "Setting indirection table to:\n");
3992        DP(BNX2X_MSG_SP, "0x0000: ");
3993        for (i = 0; i < T_ETH_INDIRECTION_TABLE_SIZE; i++) {
3994                DP_CONT(BNX2X_MSG_SP, "0x%02x ", p->ind_table[i]);
3995
3996                /* Print 4 bytes in a line */
3997                if ((i + 1 < T_ETH_INDIRECTION_TABLE_SIZE) &&
3998                    (((i + 1) & 0x3) == 0)) {
3999                        DP_CONT(BNX2X_MSG_SP, "\n");
4000                        DP(BNX2X_MSG_SP, "0x%04x: ", i + 1);
4001                }
4002        }
4003
4004        DP_CONT(BNX2X_MSG_SP, "\n");
4005}
4006
4007/**
4008 * bnx2x_setup_rss - configure RSS
4009 *
4010 * @bp:         device handle
4011 * @p:          rss configuration
4012 *
4013 * Sends an RSS UPDATE ramrod for that purpose.
4014 */
4015static int bnx2x_setup_rss(struct bnx2x *bp,
4016                           struct bnx2x_config_rss_params *p)
4017{
4018        struct bnx2x_rss_config_obj *o = p->rss_obj;
4019        struct bnx2x_raw_obj *r = &o->raw;
4020        struct eth_rss_update_ramrod_data *data =
4021                (struct eth_rss_update_ramrod_data *)(r->rdata);
4022        u16 caps = 0;
4023        u8 rss_mode = 0;
4024        int rc;
4025
4026        memset(data, 0, sizeof(*data));
4027
4028        DP(BNX2X_MSG_SP, "Configuring RSS\n");
4029
4030        /* Set an echo field */
4031        data->echo = cpu_to_le32((r->cid & BNX2X_SWCID_MASK) |
4032                                 (r->state << BNX2X_SWCID_SHIFT));
4033
4034        /* RSS mode */
4035        if (test_bit(BNX2X_RSS_MODE_DISABLED, &p->rss_flags))
4036                rss_mode = ETH_RSS_MODE_DISABLED;
4037        else if (test_bit(BNX2X_RSS_MODE_REGULAR, &p->rss_flags))
4038                rss_mode = ETH_RSS_MODE_REGULAR;
4039
4040        data->rss_mode = rss_mode;
4041
4042        DP(BNX2X_MSG_SP, "rss_mode=%d\n", rss_mode);
4043
4044        /* RSS capabilities */
4045        if (test_bit(BNX2X_RSS_IPV4, &p->rss_flags))
4046                caps |= ETH_RSS_UPDATE_RAMROD_DATA_IPV4_CAPABILITY;
4047
4048        if (test_bit(BNX2X_RSS_IPV4_TCP, &p->rss_flags))
4049                caps |= ETH_RSS_UPDATE_RAMROD_DATA_IPV4_TCP_CAPABILITY;
4050
4051        if (test_bit(BNX2X_RSS_IPV4_UDP, &p->rss_flags))
4052                caps |= ETH_RSS_UPDATE_RAMROD_DATA_IPV4_UDP_CAPABILITY;
4053
4054        if (test_bit(BNX2X_RSS_IPV6, &p->rss_flags))
4055                caps |= ETH_RSS_UPDATE_RAMROD_DATA_IPV6_CAPABILITY;
4056
4057        if (test_bit(BNX2X_RSS_IPV6_TCP, &p->rss_flags))
4058                caps |= ETH_RSS_UPDATE_RAMROD_DATA_IPV6_TCP_CAPABILITY;
4059
4060        if (test_bit(BNX2X_RSS_IPV6_UDP, &p->rss_flags))
4061                caps |= ETH_RSS_UPDATE_RAMROD_DATA_IPV6_UDP_CAPABILITY;
4062
4063        if (test_bit(BNX2X_RSS_GRE_INNER_HDRS, &p->rss_flags))
4064                caps |= ETH_RSS_UPDATE_RAMROD_DATA_GRE_INNER_HDRS_CAPABILITY;
4065
4066        /* RSS keys */
4067        if (test_bit(BNX2X_RSS_SET_SRCH, &p->rss_flags)) {
4068                memcpy(&data->rss_key[0], &p->rss_key[0],
4069                       sizeof(data->rss_key));
4070                caps |= ETH_RSS_UPDATE_RAMROD_DATA_UPDATE_RSS_KEY;
4071        }
4072
4073        data->capabilities = cpu_to_le16(caps);
4074
4075        /* Hashing mask */
4076        data->rss_result_mask = p->rss_result_mask;
4077
4078        /* RSS engine ID */
4079        data->rss_engine_id = o->engine_id;
4080
4081        DP(BNX2X_MSG_SP, "rss_engine_id=%d\n", data->rss_engine_id);
4082
4083        /* Indirection table */
4084        memcpy(data->indirection_table, p->ind_table,
4085                  T_ETH_INDIRECTION_TABLE_SIZE);
4086
4087        /* Remember the last configuration */
4088        memcpy(o->ind_table, p->ind_table, T_ETH_INDIRECTION_TABLE_SIZE);
4089
4090        /* Print the indirection table */
4091        if (netif_msg_ifup(bp))
4092                bnx2x_debug_print_ind_table(bp, p);
4093
4094        /* No need for an explicit memory barrier here as long as we
4095         * ensure the ordering of writing to the SPQ element
4096         * and updating of the SPQ producer which involves a memory
4097         * read. If the memory read is removed we will have to put a
4098         * full memory barrier there (inside bnx2x_sp_post()).
4099         */
4100
4101        /* Send a ramrod */
4102        rc = bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_RSS_UPDATE, r->cid,
4103                           U64_HI(r->rdata_mapping),
4104                           U64_LO(r->rdata_mapping),
4105                           ETH_CONNECTION_TYPE);
4106
4107        if (rc < 0)
4108                return rc;
4109
4110        return 1;
4111}
4112
4113void bnx2x_get_rss_ind_table(struct bnx2x_rss_config_obj *rss_obj,
4114                             u8 *ind_table)
4115{
4116        memcpy(ind_table, rss_obj->ind_table, sizeof(rss_obj->ind_table));
4117}
4118
4119int bnx2x_config_rss(struct bnx2x *bp,
4120                     struct bnx2x_config_rss_params *p)
4121{
4122        int rc;
4123        struct bnx2x_rss_config_obj *o = p->rss_obj;
4124        struct bnx2x_raw_obj *r = &o->raw;
4125
4126        /* Do nothing if only driver cleanup was requested */
4127        if (test_bit(RAMROD_DRV_CLR_ONLY, &p->ramrod_flags)) {
4128                DP(BNX2X_MSG_SP, "Not configuring RSS ramrod_flags=%lx\n",
4129                   p->ramrod_flags);
4130                return 0;
4131        }
4132
4133        r->set_pending(r);
4134
4135        rc = o->config_rss(bp, p);
4136        if (rc < 0) {
4137                r->clear_pending(r);
4138                return rc;
4139        }
4140
4141        if (test_bit(RAMROD_COMP_WAIT, &p->ramrod_flags))
4142                rc = r->wait_comp(bp, r);
4143
4144        return rc;
4145}
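
    /* Typical usage (a sketch, not verbatim driver code): request regular
     * RSS mode with IPv4/TCP hashing and wait for the ramrod completion:
     *
     *        struct bnx2x_config_rss_params params = {NULL};
     *
     *        params.rss_obj = &bp->rss_conf_obj;
     *        __set_bit(RAMROD_COMP_WAIT, &params.ramrod_flags);
     *        __set_bit(BNX2X_RSS_MODE_REGULAR, &params.rss_flags);
     *        __set_bit(BNX2X_RSS_IPV4, &params.rss_flags);
     *        __set_bit(BNX2X_RSS_IPV4_TCP, &params.rss_flags);
     *        ... fill params.ind_table, params.rss_key, params.rss_result_mask ...
     *        rc = bnx2x_config_rss(bp, &params);
     */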
4146
4147void bnx2x_init_rss_config_obj(struct bnx2x *bp,
4148                               struct bnx2x_rss_config_obj *rss_obj,
4149                               u8 cl_id, u32 cid, u8 func_id, u8 engine_id,
4150                               void *rdata, dma_addr_t rdata_mapping,
4151                               int state, unsigned long *pstate,
4152                               bnx2x_obj_type type)
4153{
4154        bnx2x_init_raw_obj(&rss_obj->raw, cl_id, cid, func_id, rdata,
4155                           rdata_mapping, state, pstate, type);
4156
4157        rss_obj->engine_id  = engine_id;
4158        rss_obj->config_rss = bnx2x_setup_rss;
4159}
4160
4161/********************** Queue state object ***********************************/
4162
4163/**
4164 * bnx2x_queue_state_change - perform Queue state change transition
4165 *
4166 * @bp:         device handle
4167 * @params:     parameters to perform the transition
4168 *
4169 * returns 0 in case of a successfully completed transition, negative error
4170 * code in case of failure, positive (EBUSY) value if there is a completion
4171 * that is still pending (possible only if RAMROD_COMP_WAIT is
4172 * not set in params->ramrod_flags for asynchronous commands).
4173 *
4174 */
4175int bnx2x_queue_state_change(struct bnx2x *bp,
4176                             struct bnx2x_queue_state_params *params)
4177{
4178        struct bnx2x_queue_sp_obj *o = params->q_obj;
4179        int rc, pending_bit;
4180        unsigned long *pending = &o->pending;
4181
4182        /* Check that the requested transition is legal */
4183        rc = o->check_transition(bp, o, params);
4184        if (rc) {
4185                BNX2X_ERR("check transition returned an error. rc %d\n", rc);
4186                return -EINVAL;
4187        }
4188
4189        /* Set "pending" bit */
4190        DP(BNX2X_MSG_SP, "pending bit was=%lx\n", o->pending);
4191        pending_bit = o->set_pending(o, params);
4192        DP(BNX2X_MSG_SP, "pending bit now=%lx\n", o->pending);
4193
4194        /* Don't send a command if only driver cleanup was requested */
4195        if (test_bit(RAMROD_DRV_CLR_ONLY, &params->ramrod_flags))
4196                o->complete_cmd(bp, o, pending_bit);
4197        else {
4198                /* Send a ramrod */
4199                rc = o->send_cmd(bp, params);
4200                if (rc) {
4201                        o->next_state = BNX2X_Q_STATE_MAX;
4202                        clear_bit(pending_bit, pending);
4203                        smp_mb__after_atomic();
4204                        return rc;
4205                }
4206
4207                if (test_bit(RAMROD_COMP_WAIT, &params->ramrod_flags)) {
4208                        rc = o->wait_comp(bp, o, pending_bit);
4209                        if (rc)
4210                                return rc;
4211
4212                        return 0;
4213                }
4214        }
4215
4216        return !!test_bit(pending_bit, pending);
4217}
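
    /* Typical usage (a sketch, not verbatim driver code): request a queue
     * state transition and wait for its completion:
     *
     *        struct bnx2x_queue_state_params q_params = {NULL};
     *
     *        q_params.q_obj = &bnx2x_sp_obj(bp, fp).q_obj;
     *        q_params.cmd = BNX2X_Q_CMD_EMPTY;
     *        __set_bit(RAMROD_COMP_WAIT, &q_params.ramrod_flags);
     *        rc = bnx2x_queue_state_change(bp, &q_params);
     */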
4218
4219static int bnx2x_queue_set_pending(struct bnx2x_queue_sp_obj *obj,
4220                                   struct bnx2x_queue_state_params *params)
4221{
4222        enum bnx2x_queue_cmd cmd = params->cmd, bit;
4223
4224        /* ACTIVATE and DEACTIVATE commands are implemented on top of
4225         * the UPDATE command.
4226         */
4227        if ((cmd == BNX2X_Q_CMD_ACTIVATE) ||
4228            (cmd == BNX2X_Q_CMD_DEACTIVATE))
4229                bit = BNX2X_Q_CMD_UPDATE;
4230        else
4231                bit = cmd;
4232
4233        set_bit(bit, &obj->pending);
4234        return bit;
4235}
4236
4237static int bnx2x_queue_wait_comp(struct bnx2x *bp,
4238                                 struct bnx2x_queue_sp_obj *o,
4239                                 enum bnx2x_queue_cmd cmd)
4240{
4241        return bnx2x_state_wait(bp, cmd, &o->pending);
4242}
4243
4244/**
4245 * bnx2x_queue_comp_cmd - complete the state change command.
4246 *
4247 * @bp:         device handle
4248 * @o:          queue state object
4249 * @cmd:        command that has completed
4250 *
4251 * Checks that the arrived completion is expected.
4252 */
4253static int bnx2x_queue_comp_cmd(struct bnx2x *bp,
4254                                struct bnx2x_queue_sp_obj *o,
4255                                enum bnx2x_queue_cmd cmd)
4256{
4257        unsigned long cur_pending = o->pending;
4258
4259        if (!test_and_clear_bit(cmd, &cur_pending)) {
4260                BNX2X_ERR("Bad MC reply %d for queue %d in state %d pending 0x%lx, next_state %d\n",
4261                          cmd, o->cids[BNX2X_PRIMARY_CID_INDEX],
4262                          o->state, cur_pending, o->next_state);
4263                return -EINVAL;
4264        }
4265
4266        if (o->next_tx_only >= o->max_cos)
4267                /* >= because the tx-only count must always be smaller than
4268                 * max_cos since the primary connection supports COS 0
4269                 */
4270                BNX2X_ERR("illegal value for next tx_only: %d. max cos was %d",
4271                           o->next_tx_only, o->max_cos);
4272
4273        DP(BNX2X_MSG_SP,
4274           "Completing command %d for queue %d, setting state to %d\n",
4275           cmd, o->cids[BNX2X_PRIMARY_CID_INDEX], o->next_state);
4276
4277        if (o->next_tx_only)  /* print num tx-only if any exist */
4278                DP(BNX2X_MSG_SP, "primary cid %d: num tx-only cons %d\n",
4279                   o->cids[BNX2X_PRIMARY_CID_INDEX], o->next_tx_only);
4280
4281        o->state = o->next_state;
4282        o->num_tx_only = o->next_tx_only;
4283        o->next_state = BNX2X_Q_STATE_MAX;
4284
4285        /* It's important that o->state and o->next_state are
4286         * updated before o->pending.
4287         */
4288        wmb();
4289
4290        clear_bit(cmd, &o->pending);
4291        smp_mb__after_atomic();
4292
4293        return 0;
4294}
4295
4296static void bnx2x_q_fill_setup_data_e2(struct bnx2x *bp,
4297                                struct bnx2x_queue_state_params *cmd_params,
4298                                struct client_init_ramrod_data *data)
4299{
4300        struct bnx2x_queue_setup_params *params = &cmd_params->params.setup;
4301
4302        /* Rx data */
4303
4304        /* IPv6 TPA supported for E2 and above only */
4305        data->rx.tpa_en |= test_bit(BNX2X_Q_FLG_TPA_IPV6, &params->flags) *
4306                                CLIENT_INIT_RX_DATA_TPA_EN_IPV6;
4307}
4308
4309static void bnx2x_q_fill_init_general_data(struct bnx2x *bp,
4310                                struct bnx2x_queue_sp_obj *o,
4311                                struct bnx2x_general_setup_params *params,
4312                                struct client_init_general_data *gen_data,
4313                                unsigned long *flags)
4314{
4315        gen_data->client_id = o->cl_id;
4316
4317        if (test_bit(BNX2X_Q_FLG_STATS, flags)) {
4318                gen_data->statistics_counter_id =
4319                                        params->stat_id;
4320                gen_data->statistics_en_flg = 1;
4321                gen_data->statistics_zero_flg =
4322                        test_bit(BNX2X_Q_FLG_ZERO_STATS, flags);
4323        } else
4324                gen_data->statistics_counter_id =
4325                                        DISABLE_STATISTIC_COUNTER_ID_VALUE;
4326
4327        gen_data->is_fcoe_flg = test_bit(BNX2X_Q_FLG_FCOE, flags);
4328        gen_data->activate_flg = test_bit(BNX2X_Q_FLG_ACTIVE, flags);
4329        gen_data->sp_client_id = params->spcl_id;
4330        gen_data->mtu = cpu_to_le16(params->mtu);
4331        gen_data->func_id = o->func_id;
4332
4333        gen_data->cos = params->cos;
4334
4335        gen_data->traffic_type =
4336                test_bit(BNX2X_Q_FLG_FCOE, flags) ?
4337                LLFC_TRAFFIC_TYPE_FCOE : LLFC_TRAFFIC_TYPE_NW;
4338
4339        gen_data->fp_hsi_ver = params->fp_hsi;
4340
4341        DP(BNX2X_MSG_SP, "flags: active %d, cos %d, stats en %d\n",
4342           gen_data->activate_flg, gen_data->cos, gen_data->statistics_en_flg);
4343}
4344
4345static void bnx2x_q_fill_init_tx_data(struct bnx2x_queue_sp_obj *o,
4346                                struct bnx2x_txq_setup_params *params,
4347                                struct client_init_tx_data *tx_data,
4348                                unsigned long *flags)
4349{
4350        tx_data->enforce_security_flg =
4351                test_bit(BNX2X_Q_FLG_TX_SEC, flags);
4352        tx_data->default_vlan =
4353                cpu_to_le16(params->default_vlan);
4354        tx_data->default_vlan_flg =
4355                test_bit(BNX2X_Q_FLG_DEF_VLAN, flags);
4356        tx_data->tx_switching_flg =
4357                test_bit(BNX2X_Q_FLG_TX_SWITCH, flags);
4358        tx_data->anti_spoofing_flg =
4359                test_bit(BNX2X_Q_FLG_ANTI_SPOOF, flags);
4360        tx_data->force_default_pri_flg =
4361                test_bit(BNX2X_Q_FLG_FORCE_DEFAULT_PRI, flags);
4362        tx_data->refuse_outband_vlan_flg =
4363                test_bit(BNX2X_Q_FLG_REFUSE_OUTBAND_VLAN, flags);
4364        tx_data->tunnel_lso_inc_ip_id =
4365                test_bit(BNX2X_Q_FLG_TUN_INC_INNER_IP_ID, flags);
4366        tx_data->tunnel_non_lso_pcsum_location =
4367                test_bit(BNX2X_Q_FLG_PCSUM_ON_PKT, flags) ? CSUM_ON_PKT :
4368                                                            CSUM_ON_BD;
4369
4370        tx_data->tx_status_block_id = params->fw_sb_id;
4371        tx_data->tx_sb_index_number = params->sb_cq_index;
4372        tx_data->tss_leading_client_id = params->tss_leading_cl_id;
4373
4374        tx_data->tx_bd_page_base.lo =
4375                cpu_to_le32(U64_LO(params->dscr_map));
4376        tx_data->tx_bd_page_base.hi =
4377                cpu_to_le32(U64_HI(params->dscr_map));
4378
4379        /* Don't configure any Tx switching mode during queue SETUP */
4380        tx_data->state = 0;
4381}
4382
4383static void bnx2x_q_fill_init_pause_data(struct bnx2x_queue_sp_obj *o,
4384                                struct rxq_pause_params *params,
4385                                struct client_init_rx_data *rx_data)
4386{
4387        /* flow control data */
4388        rx_data->cqe_pause_thr_low = cpu_to_le16(params->rcq_th_lo);
4389        rx_data->cqe_pause_thr_high = cpu_to_le16(params->rcq_th_hi);
4390        rx_data->bd_pause_thr_low = cpu_to_le16(params->bd_th_lo);
4391        rx_data->bd_pause_thr_high = cpu_to_le16(params->bd_th_hi);
4392        rx_data->sge_pause_thr_low = cpu_to_le16(params->sge_th_lo);
4393        rx_data->sge_pause_thr_high = cpu_to_le16(params->sge_th_hi);
4394        rx_data->rx_cos_mask = cpu_to_le16(params->pri_map);
4395}
4396
4397static void bnx2x_q_fill_init_rx_data(struct bnx2x_queue_sp_obj *o,
4398                                struct bnx2x_rxq_setup_params *params,
4399                                struct client_init_rx_data *rx_data,
4400                                unsigned long *flags)
4401{
4402        rx_data->tpa_en = test_bit(BNX2X_Q_FLG_TPA, flags) *
4403                                CLIENT_INIT_RX_DATA_TPA_EN_IPV4;
4404        rx_data->tpa_en |= test_bit(BNX2X_Q_FLG_TPA_GRO, flags) *
4405                                CLIENT_INIT_RX_DATA_TPA_MODE;
4406        rx_data->vmqueue_mode_en_flg = 0;
4407
4408        rx_data->cache_line_alignment_log_size =
4409                params->cache_line_log;
4410        rx_data->enable_dynamic_hc =
4411                test_bit(BNX2X_Q_FLG_DHC, flags);
4412        rx_data->max_sges_for_packet = params->max_sges_pkt;
4413        rx_data->client_qzone_id = params->cl_qzone_id;
4414        rx_data->max_agg_size = cpu_to_le16(params->tpa_agg_sz);
4415
4416        /* Always start in DROP_ALL mode */
4417        rx_data->state = cpu_to_le16(CLIENT_INIT_RX_DATA_UCAST_DROP_ALL |
4418                                     CLIENT_INIT_RX_DATA_MCAST_DROP_ALL);
4419
4420        /* We don't set drop flags */
4421        rx_data->drop_ip_cs_err_flg = 0;
4422        rx_data->drop_tcp_cs_err_flg = 0;
4423        rx_data->drop_ttl0_flg = 0;
4424        rx_data->drop_udp_cs_err_flg = 0;
4425        rx_data->inner_vlan_removal_enable_flg =
4426                test_bit(BNX2X_Q_FLG_VLAN, flags);
4427        rx_data->outer_vlan_removal_enable_flg =
4428                test_bit(BNX2X_Q_FLG_OV, flags);
4429        rx_data->status_block_id = params->fw_sb_id;
4430        rx_data->rx_sb_index_number = params->sb_cq_index;
4431        rx_data->max_tpa_queues = params->max_tpa_queues;
4432        rx_data->max_bytes_on_bd = cpu_to_le16(params->buf_sz);
4433        rx_data->sge_buff_size = cpu_to_le16(params->sge_buf_sz);
4434        rx_data->bd_page_base.lo =
4435                cpu_to_le32(U64_LO(params->dscr_map));
4436        rx_data->bd_page_base.hi =
4437                cpu_to_le32(U64_HI(params->dscr_map));
4438        rx_data->sge_page_base.lo =
4439                cpu_to_le32(U64_LO(params->sge_map));
4440        rx_data->sge_page_base.hi =
4441                cpu_to_le32(U64_HI(params->sge_map));
4442        rx_data->cqe_page_base.lo =
4443                cpu_to_le32(U64_LO(params->rcq_map));
4444        rx_data->cqe_page_base.hi =
4445                cpu_to_le32(U64_HI(params->rcq_map));
4446        rx_data->is_leading_rss = test_bit(BNX2X_Q_FLG_LEADING_RSS, flags);
4447
4448        if (test_bit(BNX2X_Q_FLG_MCAST, flags)) {
4449                rx_data->approx_mcast_engine_id = params->mcast_engine_id;
4450                rx_data->is_approx_mcast = 1;
4451        }
4452
4453        rx_data->rss_engine_id = params->rss_engine_id;
4454
4455        /* silent vlan removal */
4456        rx_data->silent_vlan_removal_flg =
4457                test_bit(BNX2X_Q_FLG_SILENT_VLAN_REM, flags);
4458        rx_data->silent_vlan_value =
4459                cpu_to_le16(params->silent_removal_value);
4460        rx_data->silent_vlan_mask =
4461                cpu_to_le16(params->silent_removal_mask);
4462}
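
/* Editorial note: the queue deliberately comes up with both unicast and
 * multicast in DROP_ALL; actual classification is enabled later through
 * the separate rx-mode state machine, so the queue receives nothing
 * between SETUP completion and the first rx-mode ramrod.
 */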
4463
4464/* initialize the general, tx and rx parts of a queue object */
4465static void bnx2x_q_fill_setup_data_cmn(struct bnx2x *bp,
4466                                struct bnx2x_queue_state_params *cmd_params,
4467                                struct client_init_ramrod_data *data)
4468{
4469        bnx2x_q_fill_init_general_data(bp, cmd_params->q_obj,
4470                                       &cmd_params->params.setup.gen_params,
4471                                       &data->general,
4472                                       &cmd_params->params.setup.flags);
4473
4474        bnx2x_q_fill_init_tx_data(cmd_params->q_obj,
4475                                  &cmd_params->params.setup.txq_params,
4476                                  &data->tx,
4477                                  &cmd_params->params.setup.flags);
4478
4479        bnx2x_q_fill_init_rx_data(cmd_params->q_obj,
4480                                  &cmd_params->params.setup.rxq_params,
4481                                  &data->rx,
4482                                  &cmd_params->params.setup.flags);
4483
4484        bnx2x_q_fill_init_pause_data(cmd_params->q_obj,
4485                                     &cmd_params->params.setup.pause_params,
4486                                     &data->rx);
4487}
4488
4489/* initialize the general and tx parts of a tx-only queue object */
4490static void bnx2x_q_fill_setup_tx_only(struct bnx2x *bp,
4491                                struct bnx2x_queue_state_params *cmd_params,
4492                                struct tx_queue_init_ramrod_data *data)
4493{
4494        bnx2x_q_fill_init_general_data(bp, cmd_params->q_obj,
4495                                       &cmd_params->params.tx_only.gen_params,
4496                                       &data->general,
4497                                       &cmd_params->params.tx_only.flags);
4498
4499        bnx2x_q_fill_init_tx_data(cmd_params->q_obj,
4500                                  &cmd_params->params.tx_only.txq_params,
4501                                  &data->tx,
4502                                  &cmd_params->params.tx_only.flags);
4503
4504        DP(BNX2X_MSG_SP, "cid %d, tx bd page lo %x hi %x\n",
4505                         cmd_params->q_obj->cids[0],
4506                         data->tx.tx_bd_page_base.lo,
4507                         data->tx.tx_bd_page_base.hi);
4508}
4509
4510/**
4511 * bnx2x_q_init - init HW/FW queue
4512 *
4513 * @bp:         device handle
4514 * @params:     queue state parameters
4515 *
4516 * HW/FW initial Queue configuration:
4517 *      - HC: Rx and Tx
4518 *      - CDU context validation
4519 *
4520 */
4521static inline int bnx2x_q_init(struct bnx2x *bp,
4522                               struct bnx2x_queue_state_params *params)
4523{
4524        struct bnx2x_queue_sp_obj *o = params->q_obj;
4525        struct bnx2x_queue_init_params *init = &params->params.init;
4526        u16 hc_usec;
4527        u8 cos;
4528
4529        /* Tx HC configuration */
4530        if (test_bit(BNX2X_Q_TYPE_HAS_TX, &o->type) &&
4531            test_bit(BNX2X_Q_FLG_HC, &init->tx.flags)) {
4532                hc_usec = init->tx.hc_rate ? 1000000 / init->tx.hc_rate : 0;
4533
4534                bnx2x_update_coalesce_sb_index(bp, init->tx.fw_sb_id,
4535                        init->tx.sb_cq_index,
4536                        !test_bit(BNX2X_Q_FLG_HC_EN, &init->tx.flags),
4537                        hc_usec);
4538        }
4539
4540        /* Rx HC configuration */
4541        if (test_bit(BNX2X_Q_TYPE_HAS_RX, &o->type) &&
4542            test_bit(BNX2X_Q_FLG_HC, &init->rx.flags)) {
4543                hc_usec = init->rx.hc_rate ? 1000000 / init->rx.hc_rate : 0;
4544
4545                bnx2x_update_coalesce_sb_index(bp, init->rx.fw_sb_id,
4546                        init->rx.sb_cq_index,
4547                        !test_bit(BNX2X_Q_FLG_HC_EN, &init->rx.flags),
4548                        hc_usec);
4549        }
4550
4551        /* Set CDU context validation values */
4552        for (cos = 0; cos < o->max_cos; cos++) {
4553                DP(BNX2X_MSG_SP, "setting context validation. cid %d, cos %d\n",
4554                                 o->cids[cos], cos);
4555                DP(BNX2X_MSG_SP, "context pointer %p\n", init->cxts[cos]);
4556                bnx2x_set_ctx_validation(bp, init->cxts[cos], o->cids[cos]);
4557        }
4558
4559        /* As no ramrod is sent, complete the command immediately */
4560        o->complete_cmd(bp, o, BNX2X_Q_CMD_INIT);
4561
4562        mmiowb();
4563        smp_mb();
4564
4565        return 0;
4566}
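
/* Worked example (illustrative): hc_rate is a coalescing rate in events
 * per second, so the value handed to bnx2x_update_coalesce_sb_index()
 * is its period in microseconds:
 *
 *	hc_rate = 50000 events/sec  ->  hc_usec = 1000000 / 50000 = 20
 *
 * A zero hc_rate keeps hc_usec at 0, i.e. no coalescing period.
 */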
4567
4568static inline int bnx2x_q_send_setup_e1x(struct bnx2x *bp,
4569                                        struct bnx2x_queue_state_params *params)
4570{
4571        struct bnx2x_queue_sp_obj *o = params->q_obj;
4572        struct client_init_ramrod_data *rdata =
4573                (struct client_init_ramrod_data *)o->rdata;
4574        dma_addr_t data_mapping = o->rdata_mapping;
4575        int ramrod = RAMROD_CMD_ID_ETH_CLIENT_SETUP;
4576
4577        /* Clear the ramrod data */
4578        memset(rdata, 0, sizeof(*rdata));
4579
4580        /* Fill the ramrod data */
4581        bnx2x_q_fill_setup_data_cmn(bp, params, rdata);
4582
4583        /* No need for an explicit memory barrier here as long as we
4584         * ensure the ordering of writing to the SPQ element
4585         * and updating of the SPQ producer which involves a memory
4586         * read. If the memory read is removed we will have to put a
4587         * full memory barrier there (inside bnx2x_sp_post()).
4588         */
4589        return bnx2x_sp_post(bp, ramrod, o->cids[BNX2X_PRIMARY_CID_INDEX],
4590                             U64_HI(data_mapping),
4591                             U64_LO(data_mapping), ETH_CONNECTION_TYPE);
4592}
4593
4594static inline int bnx2x_q_send_setup_e2(struct bnx2x *bp,
4595                                        struct bnx2x_queue_state_params *params)
4596{
4597        struct bnx2x_queue_sp_obj *o = params->q_obj;
4598        struct client_init_ramrod_data *rdata =
4599                (struct client_init_ramrod_data *)o->rdata;
4600        dma_addr_t data_mapping = o->rdata_mapping;
4601        int ramrod = RAMROD_CMD_ID_ETH_CLIENT_SETUP;
4602
4603        /* Clear the ramrod data */
4604        memset(rdata, 0, sizeof(*rdata));
4605
4606        /* Fill the ramrod data */
4607        bnx2x_q_fill_setup_data_cmn(bp, params, rdata);
4608        bnx2x_q_fill_setup_data_e2(bp, params, rdata);
4609
4610        /* No need for an explicit memory barrier here as long as we
4611         * ensure the ordering of writing to the SPQ element
4612         * and updating of the SPQ producer which involves a memory
4613         * read. If the memory read is removed we will have to put a
4614         * full memory barrier there (inside bnx2x_sp_post()).
4615         */
4616        return bnx2x_sp_post(bp, ramrod, o->cids[BNX2X_PRIMARY_CID_INDEX],
4617                             U64_HI(data_mapping),
4618                             U64_LO(data_mapping), ETH_CONNECTION_TYPE);
4619}
4620
4621static inline int bnx2x_q_send_setup_tx_only(struct bnx2x *bp,
4622                                  struct bnx2x_queue_state_params *params)
4623{
4624        struct bnx2x_queue_sp_obj *o = params->q_obj;
4625        struct tx_queue_init_ramrod_data *rdata =
4626                (struct tx_queue_init_ramrod_data *)o->rdata;
4627        dma_addr_t data_mapping = o->rdata_mapping;
4628        int ramrod = RAMROD_CMD_ID_ETH_TX_QUEUE_SETUP;
4629        struct bnx2x_queue_setup_tx_only_params *tx_only_params =
4630                &params->params.tx_only;
4631        u8 cid_index = tx_only_params->cid_index;
4632
4633        if (cid_index >= o->max_cos) {
4634                BNX2X_ERR("queue[%d]: cid_index (%d) is out of range\n",
4635                          o->cl_id, cid_index);
4636                return -EINVAL;
4637        }
4638
4639        DP(BNX2X_MSG_SP, "parameters received: cos: %d sp-id: %d\n",
4640                         tx_only_params->gen_params.cos,
4641                         tx_only_params->gen_params.spcl_id);
4642
4643        /* Clear the ramrod data */
4644        memset(rdata, 0, sizeof(*rdata));
4645
4646        /* Fill the ramrod data */
4647        bnx2x_q_fill_setup_tx_only(bp, params, rdata);
4648
4649        DP(BNX2X_MSG_SP, "sending tx-only ramrod: cid %d, client-id %d, sp-client id %d, cos %d\n",
4650                         o->cids[cid_index], rdata->general.client_id,
4651                         rdata->general.sp_client_id, rdata->general.cos);
4652
4653        /* No need for an explicit memory barrier here as long as we
4654         * ensure the ordering of writing to the SPQ element
4655         * and updating of the SPQ producer which involves a memory
4656         * read. If the memory read is removed we will have to put a
4657         * full memory barrier there (inside bnx2x_sp_post()).
4658         */
4659        return bnx2x_sp_post(bp, ramrod, o->cids[cid_index],
4660                             U64_HI(data_mapping),
4661                             U64_LO(data_mapping), ETH_CONNECTION_TYPE);
4662}
4663
4664static void bnx2x_q_fill_update_data(struct bnx2x *bp,
4665                                     struct bnx2x_queue_sp_obj *obj,
4666                                     struct bnx2x_queue_update_params *params,
4667                                     struct client_update_ramrod_data *data)
4668{
4669        /* Client ID of the client to update */
4670        data->client_id = obj->cl_id;
4671
4672        /* Function ID of the client to update */
4673        data->func_id = obj->func_id;
4674
4675        /* Default VLAN value */
4676        data->default_vlan = cpu_to_le16(params->def_vlan);
4677
4678        /* Inner VLAN stripping */
4679        data->inner_vlan_removal_enable_flg =
4680                test_bit(BNX2X_Q_UPDATE_IN_VLAN_REM, &params->update_flags);
4681        data->inner_vlan_removal_change_flg =
4682                test_bit(BNX2X_Q_UPDATE_IN_VLAN_REM_CHNG,
4683                         &params->update_flags);
4684
4685        /* Outer VLAN stripping */
4686        data->outer_vlan_removal_enable_flg =
4687                test_bit(BNX2X_Q_UPDATE_OUT_VLAN_REM, &params->update_flags);
4688        data->outer_vlan_removal_change_flg =
4689                test_bit(BNX2X_Q_UPDATE_OUT_VLAN_REM_CHNG,
4690                         &params->update_flags);
4691
4692        /* Drop packets that have source MAC that doesn't belong to this
4693         * Queue.
4694         */
4695        data->anti_spoofing_enable_flg =
4696                test_bit(BNX2X_Q_UPDATE_ANTI_SPOOF, &params->update_flags);
4697        data->anti_spoofing_change_flg =
4698                test_bit(BNX2X_Q_UPDATE_ANTI_SPOOF_CHNG, &params->update_flags);
4699
4700        /* Activate/Deactivate */
4701        data->activate_flg =
4702                test_bit(BNX2X_Q_UPDATE_ACTIVATE, &params->update_flags);
4703        data->activate_change_flg =
4704                test_bit(BNX2X_Q_UPDATE_ACTIVATE_CHNG, &params->update_flags);
4705
4706        /* Enable default VLAN */
4707        data->default_vlan_enable_flg =
4708                test_bit(BNX2X_Q_UPDATE_DEF_VLAN_EN, &params->update_flags);
4709        data->default_vlan_change_flg =
4710                test_bit(BNX2X_Q_UPDATE_DEF_VLAN_EN_CHNG,
4711                         &params->update_flags);
4712
4713        /* silent vlan removal */
4714        data->silent_vlan_change_flg =
4715                test_bit(BNX2X_Q_UPDATE_SILENT_VLAN_REM_CHNG,
4716                         &params->update_flags);
4717        data->silent_vlan_removal_flg =
4718                test_bit(BNX2X_Q_UPDATE_SILENT_VLAN_REM, &params->update_flags);
4719        data->silent_vlan_value = cpu_to_le16(params->silent_removal_value);
4720        data->silent_vlan_mask = cpu_to_le16(params->silent_removal_mask);
4721
4722        /* tx switching */
4723        data->tx_switching_flg =
4724                test_bit(BNX2X_Q_UPDATE_TX_SWITCHING, &params->update_flags);
4725        data->tx_switching_change_flg =
4726                test_bit(BNX2X_Q_UPDATE_TX_SWITCHING_CHNG,
4727                         &params->update_flags);
4728
4729        /* PTP */
4730        data->handle_ptp_pkts_flg =
4731                test_bit(BNX2X_Q_UPDATE_PTP_PKTS, &params->update_flags);
4732        data->handle_ptp_pkts_change_flg =
4733                test_bit(BNX2X_Q_UPDATE_PTP_PKTS_CHNG, &params->update_flags);
4734}
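
/* Usage sketch (illustrative, not driver code): each updatable property
 * comes as a value bit plus a matching ..._CHNG bit, and only properties
 * whose change bit is set are applied. E.g. to enable the default VLAN
 * while leaving everything else untouched, a caller would set both:
 *
 *	__set_bit(BNX2X_Q_UPDATE_DEF_VLAN_EN, &params->update_flags);
 *	__set_bit(BNX2X_Q_UPDATE_DEF_VLAN_EN_CHNG, &params->update_flags);
 *
 * bnx2x_q_send_activate()/bnx2x_q_send_deactivate() below are concrete
 * in-tree users of this convention.
 */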
4735
4736static inline int bnx2x_q_send_update(struct bnx2x *bp,
4737                                      struct bnx2x_queue_state_params *params)
4738{
4739        struct bnx2x_queue_sp_obj *o = params->q_obj;
4740        struct client_update_ramrod_data *rdata =
4741                (struct client_update_ramrod_data *)o->rdata;
4742        dma_addr_t data_mapping = o->rdata_mapping;
4743        struct bnx2x_queue_update_params *update_params =
4744                &params->params.update;
4745        u8 cid_index = update_params->cid_index;
4746
4747        if (cid_index >= o->max_cos) {
4748                BNX2X_ERR("queue[%d]: cid_index (%d) is out of range\n",
4749                          o->cl_id, cid_index);
4750                return -EINVAL;
4751        }
4752
4753        /* Clear the ramrod data */
4754        memset(rdata, 0, sizeof(*rdata));
4755
4756        /* Fill the ramrod data */
4757        bnx2x_q_fill_update_data(bp, o, update_params, rdata);
4758
4759        /* No need for an explicit memory barrier here as long as we
4760         * ensure the ordering of writing to the SPQ element
4761         * and updating of the SPQ producer which involves a memory
4762         * read. If the memory read is removed we will have to put a
4763         * full memory barrier there (inside bnx2x_sp_post()).
4764         */
4765        return bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_CLIENT_UPDATE,
4766                             o->cids[cid_index], U64_HI(data_mapping),
4767                             U64_LO(data_mapping), ETH_CONNECTION_TYPE);
4768}
4769
4770/**
4771 * bnx2x_q_send_deactivate - send DEACTIVATE command
4772 *
4773 * @bp:         device handle
4774 * @params:     queue state parameters
4775 *
4776 * Implemented using the UPDATE command.
4777 */
4778static inline int bnx2x_q_send_deactivate(struct bnx2x *bp,
4779                                        struct bnx2x_queue_state_params *params)
4780{
4781        struct bnx2x_queue_update_params *update = &params->params.update;
4782
4783        memset(update, 0, sizeof(*update));
4784
4785        __set_bit(BNX2X_Q_UPDATE_ACTIVATE_CHNG, &update->update_flags);
4786
4787        return bnx2x_q_send_update(bp, params);
4788}
4789
4790/**
4791 * bnx2x_q_send_activate - send ACTIVATE command
4792 *
4793 * @bp:         device handle
4794 * @params:     queue state parameters
4795 *
4796 * Implemented using the UPDATE command.
4797 */
4798static inline int bnx2x_q_send_activate(struct bnx2x *bp,
4799                                        struct bnx2x_queue_state_params *params)
4800{
4801        struct bnx2x_queue_update_params *update = &params->params.update;
4802
4803        memset(update, 0, sizeof(*update));
4804
4805        __set_bit(BNX2X_Q_UPDATE_ACTIVATE, &update->update_flags);
4806        __set_bit(BNX2X_Q_UPDATE_ACTIVATE_CHNG, &update->update_flags);
4807
4808        return bnx2x_q_send_update(bp, params);
4809}
4810
4811static void bnx2x_q_fill_update_tpa_data(struct bnx2x *bp,
4812                                struct bnx2x_queue_sp_obj *obj,
4813                                struct bnx2x_queue_update_tpa_params *params,
4814                                struct tpa_update_ramrod_data *data)
4815{
4816        data->client_id = obj->cl_id;
4817        data->complete_on_both_clients = params->complete_on_both_clients;
4818        data->dont_verify_rings_pause_thr_flg =
4819                params->dont_verify_thr;
4820        data->max_agg_size = cpu_to_le16(params->max_agg_sz);
4821        data->max_sges_for_packet = params->max_sges_pkt;
4822        data->max_tpa_queues = params->max_tpa_queues;
4823        data->sge_buff_size = cpu_to_le16(params->sge_buff_sz);
4824        data->sge_page_base_hi = cpu_to_le32(U64_HI(params->sge_map));
4825        data->sge_page_base_lo = cpu_to_le32(U64_LO(params->sge_map));
4826        data->sge_pause_thr_high = cpu_to_le16(params->sge_pause_thr_high);
4827        data->sge_pause_thr_low = cpu_to_le16(params->sge_pause_thr_low);
4828        data->tpa_mode = params->tpa_mode;
4829        data->update_ipv4 = params->update_ipv4;
4830        data->update_ipv6 = params->update_ipv6;
4831}
4832
4833static inline int bnx2x_q_send_update_tpa(struct bnx2x *bp,
4834                                        struct bnx2x_queue_state_params *params)
4835{
4836        struct bnx2x_queue_sp_obj *o = params->q_obj;
4837        struct tpa_update_ramrod_data *rdata =
4838                (struct tpa_update_ramrod_data *)o->rdata;
4839        dma_addr_t data_mapping = o->rdata_mapping;
4840        struct bnx2x_queue_update_tpa_params *update_tpa_params =
4841                &params->params.update_tpa;
4842        u16 type;
4843
4844        /* Clear the ramrod data */
4845        memset(rdata, 0, sizeof(*rdata));
4846
4847        /* Fill the ramrod data */
4848        bnx2x_q_fill_update_tpa_data(bp, o, update_tpa_params, rdata);
4849
4850        /* Add the function id inside the type, so that the sp post function
4851         * doesn't automatically add the PF func-id; this is required for
4852         * operations done by PFs on behalf of their VFs.
4853         */
4854        type = ETH_CONNECTION_TYPE |
4855                ((o->func_id) << SPE_HDR_FUNCTION_ID_SHIFT);
4856
4857        /* No need for an explicit memory barrier here as long as we
4858         * ensure the ordering of writing to the SPQ element
4859         * and updating of the SPQ producer which involves a memory
4860         * read. If the memory read is removed we will have to put a
4861         * full memory barrier there (inside bnx2x_sp_post()).
4862         */
4863        return bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_TPA_UPDATE,
4864                             o->cids[BNX2X_PRIMARY_CID_INDEX],
4865                             U64_HI(data_mapping),
4866                             U64_LO(data_mapping), type);
4867}
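
/* Illustrative example: with o->func_id == 2 the type word passed to
 * bnx2x_sp_post() becomes
 *
 *	type = ETH_CONNECTION_TYPE | (2 << SPE_HDR_FUNCTION_ID_SHIFT);
 *
 * so the SPQ element carries the queue owner's function id instead of
 * having bnx2x_sp_post() fill in the PF's own.
 */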
4868
4869static inline int bnx2x_q_send_halt(struct bnx2x *bp,
4870                                    struct bnx2x_queue_state_params *params)
4871{
4872        struct bnx2x_queue_sp_obj *o = params->q_obj;
4873
4874        return bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_HALT,
4875                             o->cids[BNX2X_PRIMARY_CID_INDEX], 0, o->cl_id,
4876                             ETH_CONNECTION_TYPE);
4877}
4878
4879static inline int bnx2x_q_send_cfc_del(struct bnx2x *bp,
4880                                       struct bnx2x_queue_state_params *params)
4881{
4882        struct bnx2x_queue_sp_obj *o = params->q_obj;
4883        u8 cid_idx = params->params.cfc_del.cid_index;
4884
4885        if (cid_idx >= o->max_cos) {
4886                BNX2X_ERR("queue[%d]: cid_index (%d) is out of range\n",
4887                          o->cl_id, cid_idx);
4888                return -EINVAL;
4889        }
4890
4891        return bnx2x_sp_post(bp, RAMROD_CMD_ID_COMMON_CFC_DEL,
4892                             o->cids[cid_idx], 0, 0, NONE_CONNECTION_TYPE);
4893}
4894
4895static inline int bnx2x_q_send_terminate(struct bnx2x *bp,
4896                                        struct bnx2x_queue_state_params *params)
4897{
4898        struct bnx2x_queue_sp_obj *o = params->q_obj;
4899        u8 cid_index = params->params.terminate.cid_index;
4900
4901        if (cid_index >= o->max_cos) {
4902                BNX2X_ERR("queue[%d]: cid_index (%d) is out of range\n",
4903                          o->cl_id, cid_index);
4904                return -EINVAL;
4905        }
4906
4907        return bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_TERMINATE,
4908                             o->cids[cid_index], 0, 0, ETH_CONNECTION_TYPE);
4909}
4910
4911static inline int bnx2x_q_send_empty(struct bnx2x *bp,
4912                                     struct bnx2x_queue_state_params *params)
4913{
4914        struct bnx2x_queue_sp_obj *o = params->q_obj;
4915
4916        return bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_EMPTY,
4917                             o->cids[BNX2X_PRIMARY_CID_INDEX], 0, 0,
4918                             ETH_CONNECTION_TYPE);
4919}
4920
4921static inline int bnx2x_queue_send_cmd_cmn(struct bnx2x *bp,
4922                                        struct bnx2x_queue_state_params *params)
4923{
4924        switch (params->cmd) {
4925        case BNX2X_Q_CMD_INIT:
4926                return bnx2x_q_init(bp, params);
4927        case BNX2X_Q_CMD_SETUP_TX_ONLY:
4928                return bnx2x_q_send_setup_tx_only(bp, params);
4929        case BNX2X_Q_CMD_DEACTIVATE:
4930                return bnx2x_q_send_deactivate(bp, params);
4931        case BNX2X_Q_CMD_ACTIVATE:
4932                return bnx2x_q_send_activate(bp, params);
4933        case BNX2X_Q_CMD_UPDATE:
4934                return bnx2x_q_send_update(bp, params);
4935        case BNX2X_Q_CMD_UPDATE_TPA:
4936                return bnx2x_q_send_update_tpa(bp, params);
4937        case BNX2X_Q_CMD_HALT:
4938                return bnx2x_q_send_halt(bp, params);
4939        case BNX2X_Q_CMD_CFC_DEL:
4940                return bnx2x_q_send_cfc_del(bp, params);
4941        case BNX2X_Q_CMD_TERMINATE:
4942                return bnx2x_q_send_terminate(bp, params);
4943        case BNX2X_Q_CMD_EMPTY:
4944                return bnx2x_q_send_empty(bp, params);
4945        default:
4946                BNX2X_ERR("Unknown command: %d\n", params->cmd);
4947                return -EINVAL;
4948        }
4949}
4950
4951static int bnx2x_queue_send_cmd_e1x(struct bnx2x *bp,
4952                                    struct bnx2x_queue_state_params *params)
4953{
4954        switch (params->cmd) {
4955        case BNX2X_Q_CMD_SETUP:
4956                return bnx2x_q_send_setup_e1x(bp, params);
4957        case BNX2X_Q_CMD_INIT:
4958        case BNX2X_Q_CMD_SETUP_TX_ONLY:
4959        case BNX2X_Q_CMD_DEACTIVATE:
4960        case BNX2X_Q_CMD_ACTIVATE:
4961        case BNX2X_Q_CMD_UPDATE:
4962        case BNX2X_Q_CMD_UPDATE_TPA:
4963        case BNX2X_Q_CMD_HALT:
4964        case BNX2X_Q_CMD_CFC_DEL:
4965        case BNX2X_Q_CMD_TERMINATE:
4966        case BNX2X_Q_CMD_EMPTY:
4967                return bnx2x_queue_send_cmd_cmn(bp, params);
4968        default:
4969                BNX2X_ERR("Unknown command: %d\n", params->cmd);
4970                return -EINVAL;
4971        }
4972}
4973
4974static int bnx2x_queue_send_cmd_e2(struct bnx2x *bp,
4975                                   struct bnx2x_queue_state_params *params)
4976{
4977        switch (params->cmd) {
4978        case BNX2X_Q_CMD_SETUP:
4979                return bnx2x_q_send_setup_e2(bp, params);
4980        case BNX2X_Q_CMD_INIT:
4981        case BNX2X_Q_CMD_SETUP_TX_ONLY:
4982        case BNX2X_Q_CMD_DEACTIVATE:
4983        case BNX2X_Q_CMD_ACTIVATE:
4984        case BNX2X_Q_CMD_UPDATE:
4985        case BNX2X_Q_CMD_UPDATE_TPA:
4986        case BNX2X_Q_CMD_HALT:
4987        case BNX2X_Q_CMD_CFC_DEL:
4988        case BNX2X_Q_CMD_TERMINATE:
4989        case BNX2X_Q_CMD_EMPTY:
4990                return bnx2x_queue_send_cmd_cmn(bp, params);
4991        default:
4992                BNX2X_ERR("Unknown command: %d\n", params->cmd);
4993                return -EINVAL;
4994        }
4995}
4996
4997/**
4998 * bnx2x_queue_chk_transition - check state machine of a regular Queue
4999 *
5000 * @bp:         device handle
5001 * @o:          queue state object
5002 * @params:     queue state parameters
5003 *
5004 * (regular queues only, i.e. not the Forwarding queue)
5005 * It both checks if the requested command is legal in a current
5006 * state and, if it's legal, sets a `next_state' in the object
5007 * that will be used in the completion flow to set the `state'
5008 * of the object.
5009 *
5010 * returns 0 if a requested command is a legal transition,
5011 *         -EINVAL otherwise.
5012 */
5013static int bnx2x_queue_chk_transition(struct bnx2x *bp,
5014                                      struct bnx2x_queue_sp_obj *o,
5015                                      struct bnx2x_queue_state_params *params)
5016{
5017        enum bnx2x_q_state state = o->state, next_state = BNX2X_Q_STATE_MAX;
5018        enum bnx2x_queue_cmd cmd = params->cmd;
5019        struct bnx2x_queue_update_params *update_params =
5020                 &params->params.update;
5021        u8 next_tx_only = o->num_tx_only;
5022
5023        /* Forget all commands pending completion if a driver-only state
5024         * transition has been requested.
5025         */
5026        if (test_bit(RAMROD_DRV_CLR_ONLY, &params->ramrod_flags)) {
5027                o->pending = 0;
5028                o->next_state = BNX2X_Q_STATE_MAX;
5029        }
5030
5031        /* Don't allow a next state transition if we are in the middle of
5032         * the previous one.
5033         */
5034        if (o->pending) {
5035                BNX2X_ERR("Blocking transition since pending was %lx\n",
5036                          o->pending);
5037                return -EBUSY;
5038        }
5039
5040        switch (state) {
5041        case BNX2X_Q_STATE_RESET:
5042                if (cmd == BNX2X_Q_CMD_INIT)
5043                        next_state = BNX2X_Q_STATE_INITIALIZED;
5044
5045                break;
5046        case BNX2X_Q_STATE_INITIALIZED:
5047                if (cmd == BNX2X_Q_CMD_SETUP) {
5048                        if (test_bit(BNX2X_Q_FLG_ACTIVE,
5049                                     &params->params.setup.flags))
5050                                next_state = BNX2X_Q_STATE_ACTIVE;
5051                        else
5052                                next_state = BNX2X_Q_STATE_INACTIVE;
5053                }
5054
5055                break;
5056        case BNX2X_Q_STATE_ACTIVE:
5057                if (cmd == BNX2X_Q_CMD_DEACTIVATE)
5058                        next_state = BNX2X_Q_STATE_INACTIVE;
5059
5060                else if ((cmd == BNX2X_Q_CMD_EMPTY) ||
5061                         (cmd == BNX2X_Q_CMD_UPDATE_TPA))
5062                        next_state = BNX2X_Q_STATE_ACTIVE;
5063
5064                else if (cmd == BNX2X_Q_CMD_SETUP_TX_ONLY) {
5065                        next_state = BNX2X_Q_STATE_MULTI_COS;
5066                        next_tx_only = 1;
5067                }
5068
5069                else if (cmd == BNX2X_Q_CMD_HALT)
5070                        next_state = BNX2X_Q_STATE_STOPPED;
5071
5072                else if (cmd == BNX2X_Q_CMD_UPDATE) {
5073                        /* If "active" state change is requested, update the
5074         * state accordingly.
5075                         */
5076                        if (test_bit(BNX2X_Q_UPDATE_ACTIVATE_CHNG,
5077                                     &update_params->update_flags) &&
5078                            !test_bit(BNX2X_Q_UPDATE_ACTIVATE,
5079                                      &update_params->update_flags))
5080                                next_state = BNX2X_Q_STATE_INACTIVE;
5081                        else
5082                                next_state = BNX2X_Q_STATE_ACTIVE;
5083                }
5084
5085                break;
5086        case BNX2X_Q_STATE_MULTI_COS:
5087                if (cmd == BNX2X_Q_CMD_TERMINATE)
5088                        next_state = BNX2X_Q_STATE_MCOS_TERMINATED;
5089
5090                else if (cmd == BNX2X_Q_CMD_SETUP_TX_ONLY) {
5091                        next_state = BNX2X_Q_STATE_MULTI_COS;
5092                        next_tx_only = o->num_tx_only + 1;
5093                }
5094
5095                else if ((cmd == BNX2X_Q_CMD_EMPTY) ||
5096                         (cmd == BNX2X_Q_CMD_UPDATE_TPA))
5097                        next_state = BNX2X_Q_STATE_MULTI_COS;
5098
5099                else if (cmd == BNX2X_Q_CMD_UPDATE) {
5100                        /* If "active" state change is requested, update the
5101         * state accordingly.
5102                         */
5103                        if (test_bit(BNX2X_Q_UPDATE_ACTIVATE_CHNG,
5104                                     &update_params->update_flags) &&
5105                            !test_bit(BNX2X_Q_UPDATE_ACTIVATE,
5106                                      &update_params->update_flags))
5107                                next_state = BNX2X_Q_STATE_INACTIVE;
5108                        else
5109                                next_state = BNX2X_Q_STATE_MULTI_COS;
5110                }
5111
5112                break;
5113        case BNX2X_Q_STATE_MCOS_TERMINATED:
5114                if (cmd == BNX2X_Q_CMD_CFC_DEL) {
5115                        next_tx_only = o->num_tx_only - 1;
5116                        if (next_tx_only == 0)
5117                                next_state = BNX2X_Q_STATE_ACTIVE;
5118                        else
5119                                next_state = BNX2X_Q_STATE_MULTI_COS;
5120                }
5121
5122                break;
5123        case BNX2X_Q_STATE_INACTIVE:
5124                if (cmd == BNX2X_Q_CMD_ACTIVATE)
5125                        next_state = BNX2X_Q_STATE_ACTIVE;
5126
5127                else if ((cmd == BNX2X_Q_CMD_EMPTY) ||
5128                         (cmd == BNX2X_Q_CMD_UPDATE_TPA))
5129                        next_state = BNX2X_Q_STATE_INACTIVE;
5130
5131                else if (cmd == BNX2X_Q_CMD_HALT)
5132                        next_state = BNX2X_Q_STATE_STOPPED;
5133
5134                else if (cmd == BNX2X_Q_CMD_UPDATE) {
5135                        /* If "active" state change is requested, update the
5136                         * state accordingly.
5137                         */
5138                        if (test_bit(BNX2X_Q_UPDATE_ACTIVATE_CHNG,
5139                                     &update_params->update_flags) &&
5140                            test_bit(BNX2X_Q_UPDATE_ACTIVATE,
5141                                     &update_params->update_flags)) {
5142                                if (o->num_tx_only == 0)
5143                                        next_state = BNX2X_Q_STATE_ACTIVE;
5144                                else /* tx only queues exist for this queue */
5145                                        next_state = BNX2X_Q_STATE_MULTI_COS;
5146                        } else
5147                                next_state = BNX2X_Q_STATE_INACTIVE;
5148                }
5149
5150                break;
5151        case BNX2X_Q_STATE_STOPPED:
5152                if (cmd == BNX2X_Q_CMD_TERMINATE)
5153                        next_state = BNX2X_Q_STATE_TERMINATED;
5154
5155                break;
5156        case BNX2X_Q_STATE_TERMINATED:
5157                if (cmd == BNX2X_Q_CMD_CFC_DEL)
5158                        next_state = BNX2X_Q_STATE_RESET;
5159
5160                break;
5161        default:
5162                BNX2X_ERR("Illegal state: %d\n", state);
5163        }
5164
5165        /* Transition is legal - set the next state */
5166        if (next_state != BNX2X_Q_STATE_MAX) {
5167                DP(BNX2X_MSG_SP, "Good state transition: %d(%d)->%d\n",
5168                                 state, cmd, next_state);
5169                o->next_state = next_state;
5170                o->next_tx_only = next_tx_only;
5171                return 0;
5172        }
5173
5174        DP(BNX2X_MSG_SP, "Bad state transition request: %d %d\n", state, cmd);
5175
5176        return -EINVAL;
5177}
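
/* Illustrative walk of the legal transitions above for a plain L2 queue
 * (tx-only companions take the MULTI_COS detour instead):
 *
 *	RESET --INIT--> INITIALIZED --SETUP--> ACTIVE
 *	ACTIVE --HALT--> STOPPED --TERMINATE--> TERMINATED --CFC_DEL--> RESET
 *
 * EMPTY and UPDATE_TPA are in-place commands, and UPDATE also stays in
 * place unless an activate/deactivate change is requested.
 */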
5178
5179void bnx2x_init_queue_obj(struct bnx2x *bp,
5180                          struct bnx2x_queue_sp_obj *obj,
5181                          u8 cl_id, u32 *cids, u8 cid_cnt, u8 func_id,
5182                          void *rdata,
5183                          dma_addr_t rdata_mapping, unsigned long type)
5184{
5185        memset(obj, 0, sizeof(*obj));
5186
5187        /* We support at most BNX2X_MULTI_TX_COS Tx CoS values at the moment */
5188        BUG_ON(BNX2X_MULTI_TX_COS < cid_cnt);
5189
5190        memcpy(obj->cids, cids, sizeof(obj->cids[0]) * cid_cnt);
5191        obj->max_cos = cid_cnt;
5192        obj->cl_id = cl_id;
5193        obj->func_id = func_id;
5194        obj->rdata = rdata;
5195        obj->rdata_mapping = rdata_mapping;
5196        obj->type = type;
5197        obj->next_state = BNX2X_Q_STATE_MAX;
5198
5199        if (CHIP_IS_E1x(bp))
5200                obj->send_cmd = bnx2x_queue_send_cmd_e1x;
5201        else
5202                obj->send_cmd = bnx2x_queue_send_cmd_e2;
5203
5204        obj->check_transition = bnx2x_queue_chk_transition;
5205
5206        obj->complete_cmd = bnx2x_queue_comp_cmd;
5207        obj->wait_comp = bnx2x_queue_wait_comp;
5208        obj->set_pending = bnx2x_queue_set_pending;
5209}
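
/* Usage sketch (hypothetical caller, simplified from the pattern used by
 * the driver's fastpath setup): a single-CoS L2 queue object could be
 * initialized roughly as
 *
 *	unsigned long q_type = 0;
 *	u32 cids[1] = { fp_cid };	(fp_cid: hypothetical connection id)
 *
 *	__set_bit(BNX2X_Q_TYPE_HAS_RX, &q_type);
 *	__set_bit(BNX2X_Q_TYPE_HAS_TX, &q_type);
 *	bnx2x_init_queue_obj(bp, &q_obj, cl_id, cids, 1, BP_FUNC(bp),
 *			     rdata_virt, rdata_phys, q_type);
 *
 * where rdata_virt/rdata_phys name a ramrod-data buffer from the
 * slowpath DMA area.
 */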
5210
5211/* Return a queue object's logical state */
5212int bnx2x_get_q_logical_state(struct bnx2x *bp,
5213                               struct bnx2x_queue_sp_obj *obj)
5214{
5215        switch (obj->state) {
5216        case BNX2X_Q_STATE_ACTIVE:
5217        case BNX2X_Q_STATE_MULTI_COS:
5218                return BNX2X_Q_LOGICAL_STATE_ACTIVE;
5219        case BNX2X_Q_STATE_RESET:
5220        case BNX2X_Q_STATE_INITIALIZED:
5221        case BNX2X_Q_STATE_MCOS_TERMINATED:
5222        case BNX2X_Q_STATE_INACTIVE:
5223        case BNX2X_Q_STATE_STOPPED:
5224        case BNX2X_Q_STATE_TERMINATED:
5225        case BNX2X_Q_STATE_FLRED:
5226                return BNX2X_Q_LOGICAL_STATE_STOPPED;
5227        default:
5228                return -EINVAL;
5229        }
5230}
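
/* Illustrative use (a sketch, not driver code): callers that must not
 * touch a torn-down queue can gate on the logical state, which treats
 * ACTIVE and MULTI_COS uniformly:
 *
 *	struct bnx2x_queue_state_params q_params = {NULL};
 *
 *	q_params.q_obj = &q_obj;
 *	q_params.cmd = BNX2X_Q_CMD_UPDATE;
 *	if (bnx2x_get_q_logical_state(bp, &q_obj) ==
 *	    BNX2X_Q_LOGICAL_STATE_ACTIVE)
 *		rc = bnx2x_queue_state_change(bp, &q_params);
 */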
5231
5232/********************** Function state object *********************************/
5233enum bnx2x_func_state bnx2x_func_get_state(struct bnx2x *bp,
5234                                           struct bnx2x_func_sp_obj *o)
5235{
5236        /* In the middle of a transaction - return an INVALID state */
5237        if (o->pending)
5238                return BNX2X_F_STATE_MAX;
5239
5240        /* Ensure the ordering of reading o->pending and o->state:
5241         * o->pending should be read first.
5242         */
5243        rmb();
5244
5245        return o->state;
5246}
5247
5248static int bnx2x_func_wait_comp(struct bnx2x *bp,
5249                                struct bnx2x_func_sp_obj *o,
5250                                enum bnx2x_func_cmd cmd)
5251{
5252        return bnx2x_state_wait(bp, cmd, &o->pending);
5253}
5254
5255/**
5256 * bnx2x_func_state_change_comp - complete the state machine transition
5257 *
5258 * @bp:         device handle
5259 * @o:          function state object
5260 * @cmd:        command that has completed
5261 *
5262 * Called on state change transition. Completes the state
5263 * machine transition only - no HW interaction.
5264 */
5265static inline int bnx2x_func_state_change_comp(struct bnx2x *bp,
5266                                               struct bnx2x_func_sp_obj *o,
5267                                               enum bnx2x_func_cmd cmd)
5268{
5269        unsigned long cur_pending = o->pending;
5270
5271        if (!test_and_clear_bit(cmd, &cur_pending)) {
5272                BNX2X_ERR("Bad MC reply %d for func %d in state %d pending 0x%lx, next_state %d\n",
5273                          cmd, BP_FUNC(bp), o->state,
5274                          cur_pending, o->next_state);
5275                return -EINVAL;
5276        }
5277
5278        DP(BNX2X_MSG_SP,
5279           "Completing command %d for func %d, setting state to %d\n",
5280           cmd, BP_FUNC(bp), o->next_state);
5281
5282        o->state = o->next_state;
5283        o->next_state = BNX2X_F_STATE_MAX;
5284
5285        /* It's important that o->state and o->next_state are
5286         * updated before o->pending.
5287         */
5288        wmb();
5289
5290        clear_bit(cmd, &o->pending);
5291        smp_mb__after_atomic();
5292
5293        return 0;
5294}
5295
5296/**
5297 * bnx2x_func_comp_cmd - complete the state change command
5298 *
5299 * @bp:         device handle
5300 * @o:          function state object
5301 * @cmd:        command to complete
5302 *
5303 * Checks that the arrived completion is expected.
5304 */
5305static int bnx2x_func_comp_cmd(struct bnx2x *bp,
5306                               struct bnx2x_func_sp_obj *o,
5307                               enum bnx2x_func_cmd cmd)
5308{
5309        /* Complete the state machine part first and check that the
5310         * completion is legal.
5311         */
5312        int rc = bnx2x_func_state_change_comp(bp, o, cmd);
5313        return rc;
5314}
5315
5316/**
5317 * bnx2x_func_chk_transition - check the function state machine transition
5318 *
5319 * @bp:         device handle
5320 * @o:          function state object
5321 * @params:     function state parameters
5322 *
5323 * It both checks if the requested command is legal in a current
5324 * state and, if it's legal, sets a `next_state' in the object
5325 * that will be used in the completion flow to set the `state'
5326 * of the object.
5327 *
5328 * returns 0 if a requested command is a legal transition,
5329 *         -EINVAL otherwise.
5330 */
5331static int bnx2x_func_chk_transition(struct bnx2x *bp,
5332                                     struct bnx2x_func_sp_obj *o,
5333                                     struct bnx2x_func_state_params *params)
5334{
5335        enum bnx2x_func_state state = o->state, next_state = BNX2X_F_STATE_MAX;
5336        enum bnx2x_func_cmd cmd = params->cmd;
5337
5338        /* Forget all commands pending completion if a driver-only state
5339         * transition has been requested.
5340         */
5341        if (test_bit(RAMROD_DRV_CLR_ONLY, &params->ramrod_flags)) {
5342                o->pending = 0;
5343                o->next_state = BNX2X_F_STATE_MAX;
5344        }
5345
5346        /* Don't allow a next state transition if we are in the middle of
5347         * the previous one.
5348         */
5349        if (o->pending)
5350                return -EBUSY;
5351
5352        switch (state) {
5353        case BNX2X_F_STATE_RESET:
5354                if (cmd == BNX2X_F_CMD_HW_INIT)
5355                        next_state = BNX2X_F_STATE_INITIALIZED;
5356
5357                break;
5358        case BNX2X_F_STATE_INITIALIZED:
5359                if (cmd == BNX2X_F_CMD_START)
5360                        next_state = BNX2X_F_STATE_STARTED;
5361
5362                else if (cmd == BNX2X_F_CMD_HW_RESET)
5363                        next_state = BNX2X_F_STATE_RESET;
5364
5365                break;
5366        case BNX2X_F_STATE_STARTED:
5367                if (cmd == BNX2X_F_CMD_STOP)
5368                        next_state = BNX2X_F_STATE_INITIALIZED;
5369                /* afex ramrods can be sent only in the started state, and
5370                 * only if a function_stop ramrod completion is not pending;
5371                 * for these events the next state remains STARTED.
5372                 */
5373                else if ((cmd == BNX2X_F_CMD_AFEX_UPDATE) &&
5374                         (!test_bit(BNX2X_F_CMD_STOP, &o->pending)))
5375                        next_state = BNX2X_F_STATE_STARTED;
5376
5377                else if ((cmd == BNX2X_F_CMD_AFEX_VIFLISTS) &&
5378                         (!test_bit(BNX2X_F_CMD_STOP, &o->pending)))
5379                        next_state = BNX2X_F_STATE_STARTED;
5380
5381                /* Switch_update ramrod can be sent in either started or
5382                 * tx_stopped state, and it doesn't change the state.
5383                 */
5384                else if ((cmd == BNX2X_F_CMD_SWITCH_UPDATE) &&
5385                         (!test_bit(BNX2X_F_CMD_STOP, &o->pending)))
5386                        next_state = BNX2X_F_STATE_STARTED;
5387
5388                else if ((cmd == BNX2X_F_CMD_SET_TIMESYNC) &&
5389                         (!test_bit(BNX2X_F_CMD_STOP, &o->pending)))
5390                        next_state = BNX2X_F_STATE_STARTED;
5391
5392                else if (cmd == BNX2X_F_CMD_TX_STOP)
5393                        next_state = BNX2X_F_STATE_TX_STOPPED;
5394
5395                break;
5396        case BNX2X_F_STATE_TX_STOPPED:
5397                if ((cmd == BNX2X_F_CMD_SWITCH_UPDATE) &&
5398                    (!test_bit(BNX2X_F_CMD_STOP, &o->pending)))
5399                        next_state = BNX2X_F_STATE_TX_STOPPED;
5400
5401                else if ((cmd == BNX2X_F_CMD_SET_TIMESYNC) &&
5402                         (!test_bit(BNX2X_F_CMD_STOP, &o->pending)))
5403                        next_state = BNX2X_F_STATE_TX_STOPPED;
5404
5405                else if (cmd == BNX2X_F_CMD_TX_START)
5406                        next_state = BNX2X_F_STATE_STARTED;
5407
5408                break;
5409        default:
5410                BNX2X_ERR("Unknown state: %d\n", state);
5411        }
5412
5413        /* Transition is legal - set the next state */
5414        if (next_state != BNX2X_F_STATE_MAX) {
5415                DP(BNX2X_MSG_SP, "Good function state transition: %d(%d)->%d\n",
5416                                 state, cmd, next_state);
5417                o->next_state = next_state;
5418                return 0;
5419        }
5420
5421        DP(BNX2X_MSG_SP, "Bad function state transition request: %d %d\n",
5422                         state, cmd);
5423
5424        return -EINVAL;
5425}
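
/* Illustrative walk of the legal function transitions above:
 *
 *	RESET --HW_INIT--> INITIALIZED --START--> STARTED
 *	STARTED --TX_STOP--> TX_STOPPED --TX_START--> STARTED
 *	STARTED --STOP--> INITIALIZED --HW_RESET--> RESET
 *
 * AFEX_UPDATE, AFEX_VIFLISTS, SWITCH_UPDATE and SET_TIMESYNC are
 * in-place events: they leave the state unchanged.
 */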
5426
5427/**
5428 * bnx2x_func_init_func - performs HW init at function stage
5429 *
5430 * @bp:         device handle
5431 * @drv:        driver specific callbacks
5432 *
5433 * Init HW when the current phase is
5434 * FW_MSG_CODE_DRV_LOAD_FUNCTION: initialize FUNCTION-only
5435 * HW blocks.
5436 */
5437static inline int bnx2x_func_init_func(struct bnx2x *bp,
5438                                       const struct bnx2x_func_sp_drv_ops *drv)
5439{
5440        return drv->init_hw_func(bp);
5441}
5442
5443/**
5444 * bnx2x_func_init_port - performs HW init at port stage
5445 *
5446 * @bp:         device handle
5447 * @drv:        driver specific callbacks
5448 *
5449 * Init HW when the current phase is
5450 * FW_MSG_CODE_DRV_LOAD_PORT: initialize PORT-only and
5451 * FUNCTION-only HW blocks.
5452 *
5453 */
5454static inline int bnx2x_func_init_port(struct bnx2x *bp,
5455                                       const struct bnx2x_func_sp_drv_ops *drv)
5456{
5457        int rc = drv->init_hw_port(bp);
5458        if (rc)
5459                return rc;
5460
5461        return bnx2x_func_init_func(bp, drv);
5462}
5463
5464/**
5465 * bnx2x_func_init_cmn_chip - performs HW init at chip-common stage
5466 *
5467 * @bp:         device handle
5468 * @drv:        driver specific callbacks
5469 *
5470 * Init HW when the current phase is
5471 * FW_MSG_CODE_DRV_LOAD_COMMON_CHIP: initialize COMMON_CHIP,
5472 * PORT-only and FUNCTION-only HW blocks.
5473 */
5474static inline int bnx2x_func_init_cmn_chip(struct bnx2x *bp,
5475                                        const struct bnx2x_func_sp_drv_ops *drv)
5476{
5477        int rc = drv->init_hw_cmn_chip(bp);
5478        if (rc)
5479                return rc;
5480
5481        return bnx2x_func_init_port(bp, drv);
5482}
5483
5484/**
5485 * bnx2x_func_init_cmn - performs HW init at common stage
5486 *
5487 * @bp:         device handle
5488 * @drv:        driver specific callbacks
5489 *
5490 * Init HW when the current phase is
5491 * FW_MSG_CODE_DRV_LOAD_COMMON: initialize COMMON,
5492 * PORT-only and FUNCTION-only HW blocks.
5493 */
5494static inline int bnx2x_func_init_cmn(struct bnx2x *bp,
5495                                      const struct bnx2x_func_sp_drv_ops *drv)
5496{
5497        int rc = drv->init_hw_cmn(bp);
5498        if (rc)
5499                return rc;
5500
5501        return bnx2x_func_init_port(bp, drv);
5502}
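
/* The three init helpers above form a strict superset chain: cmn_chip and
 * cmn each fall through to port, which falls through to func, so e.g. a
 * COMMON load initializes COMMON, then PORT-only, then FUNCTION-only HW
 * blocks, matching the load_code dispatch in bnx2x_func_hw_init() below.
 */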
5503
5504static int bnx2x_func_hw_init(struct bnx2x *bp,
5505                              struct bnx2x_func_state_params *params)
5506{
5507        u32 load_code = params->params.hw_init.load_phase;
5508        struct bnx2x_func_sp_obj *o = params->f_obj;
5509        const struct bnx2x_func_sp_drv_ops *drv = o->drv;
5510        int rc = 0;
5511
5512        DP(BNX2X_MSG_SP, "function %d  load_code %x\n",
5513                         BP_ABS_FUNC(bp), load_code);
5514
5515        /* Prepare buffers for unzipping the FW */
5516        rc = drv->gunzip_init(bp);
5517        if (rc)
5518                return rc;
5519
5520        /* Prepare FW */
5521        rc = drv->init_fw(bp);
5522        if (rc) {
5523                BNX2X_ERR("Error loading firmware\n");
5524                goto init_err;
5525        }
5526
5527        /* Handle the beginning of the COMMON_XXX phases separately... */
5528        switch (load_code) {
5529        case FW_MSG_CODE_DRV_LOAD_COMMON_CHIP:
5530                rc = bnx2x_func_init_cmn_chip(bp, drv);
5531                if (rc)
5532                        goto init_err;
5533
5534                break;
5535        case FW_MSG_CODE_DRV_LOAD_COMMON:
5536                rc = bnx2x_func_init_cmn(bp, drv);
5537                if (rc)
5538                        goto init_err;
5539
5540                break;
5541        case FW_MSG_CODE_DRV_LOAD_PORT:
5542                rc = bnx2x_func_init_port(bp, drv);
5543                if (rc)
5544                        goto init_err;
5545
5546                break;
5547        case FW_MSG_CODE_DRV_LOAD_FUNCTION:
5548                rc = bnx2x_func_init_func(bp, drv);
5549                if (rc)
5550                        goto init_err;
5551
5552                break;
5553        default:
5554                BNX2X_ERR("Unknown load_code (0x%x) from MCP\n", load_code);
5555                rc = -EINVAL;
5556        }
5557
5558init_err:
5559        drv->gunzip_end(bp);
5560
5561        /* In case of success, complete the command immediately: no ramrods
5562         * have been sent.
5563         */
5564        if (!rc)
5565                o->complete_cmd(bp, o, BNX2X_F_CMD_HW_INIT);
5566
5567        return rc;
5568}
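
/* Usage sketch (simplified from the pattern in bnx2x_main): the load path
 * drives this through the generic state-change entry point, with the load
 * phase returned by the MCP:
 *
 *	struct bnx2x_func_state_params func_params = {NULL};
 *
 *	func_params.f_obj = &bp->func_obj;
 *	func_params.cmd = BNX2X_F_CMD_HW_INIT;
 *	func_params.params.hw_init.load_phase = load_code;
 *	rc = bnx2x_func_state_change(bp, &func_params);
 */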
5569
5570/**
5571 * bnx2x_func_reset_func - reset HW at function stage
5572 *
5573 * @bp:         device handle
5574 * @drv:        driver specific callbacks
5575 *
5576 * Reset HW at FW_MSG_CODE_DRV_UNLOAD_FUNCTION stage: reset only
5577 * FUNCTION-only HW blocks.
5578 */
5579static inline void bnx2x_func_reset_func(struct bnx2x *bp,
5580                                        const struct bnx2x_func_sp_drv_ops *drv)
5581{
5582        drv->reset_hw_func(bp);
5583}
5584
5585/**
5586 * bnx2x_func_reset_port - reset HW at port stage
5587 *
5588 * @bp:         device handle
5589 * @drv:        driver specific callbacks
5590 *
5591 * Reset HW at FW_MSG_CODE_DRV_UNLOAD_PORT stage: reset
5592 * FUNCTION-only and PORT-only HW blocks.
5593 *
5594 *                 !!!IMPORTANT!!!
5595 *
5596 * It's important to call reset_port before reset_func() as the last thing
5597 * reset_func() does is pf_disable(), thus disabling PGLUE_B, which makes
5598 * any DMAE transactions impossible.
5599 */
5600static inline void bnx2x_func_reset_port(struct bnx2x *bp,
5601                                        const struct bnx2x_func_sp_drv_ops *drv)
5602{
5603        drv->reset_hw_port(bp);
5604        bnx2x_func_reset_func(bp, drv);
5605}
5606
5607/**
5608 * bnx2x_func_reset_cmn - reset HW at common stage
5609 *
5610 * @bp:         device handle
5611 * @drv:        driver specific callbacks
5612 *
5613 * Reset HW at FW_MSG_CODE_DRV_UNLOAD_COMMON and
5614 * FW_MSG_CODE_DRV_UNLOAD_COMMON_CHIP stages: reset COMMON,
5615 * COMMON_CHIP, FUNCTION-only and PORT-only HW blocks.
5616 */
5617static inline void bnx2x_func_reset_cmn(struct bnx2x *bp,
5618                                        const struct bnx2x_func_sp_drv_ops *drv)
5619{
5620        bnx2x_func_reset_port(bp, drv);
5621        drv->reset_hw_cmn(bp);
5622}
5623
5624static inline int bnx2x_func_hw_reset(struct bnx2x *bp,
5625                                      struct bnx2x_func_state_params *params)
5626{
5627        u32 reset_phase = params->params.hw_reset.reset_phase;
5628        struct bnx2x_func_sp_obj *o = params->f_obj;
5629        const struct bnx2x_func_sp_drv_ops *drv = o->drv;
5630
5631        DP(BNX2X_MSG_SP, "function %d  reset_phase %x\n", BP_ABS_FUNC(bp),
5632                         reset_phase);
5633
5634        switch (reset_phase) {
5635        case FW_MSG_CODE_DRV_UNLOAD_COMMON:
5636                bnx2x_func_reset_cmn(bp, drv);
5637                break;
5638        case FW_MSG_CODE_DRV_UNLOAD_PORT:
5639                bnx2x_func_reset_port(bp, drv);
5640                break;
5641        case FW_MSG_CODE_DRV_UNLOAD_FUNCTION:
5642                bnx2x_func_reset_func(bp, drv);
5643                break;
5644        default:
5645                BNX2X_ERR("Unknown reset_phase (0x%x) from MCP\n",
5646                           reset_phase);
5647                break;
5648        }
5649
5650        /* Complete the command immediately: no ramrods have been sent. */
5651        o->complete_cmd(bp, o, BNX2X_F_CMD_HW_RESET);
5652
5653        return 0;
5654}
5655
5656static inline int bnx2x_func_send_start(struct bnx2x *bp,
5657                                        struct bnx2x_func_state_params *params)
5658{
5659        struct bnx2x_func_sp_obj *o = params->f_obj;
5660        struct function_start_data *rdata =
5661                (struct function_start_data *)o->rdata;
5662        dma_addr_t data_mapping = o->rdata_mapping;
5663        struct bnx2x_func_start_params *start_params = &params->params.start;
5664
5665        memset(rdata, 0, sizeof(*rdata));
5666
5667        /* Fill the ramrod data with provided parameters */
5668        rdata->function_mode    = (u8)start_params->mf_mode;
5669        rdata->sd_vlan_tag      = cpu_to_le16(start_params->sd_vlan_tag);
5670        rdata->path_id          = BP_PATH(bp);
5671        rdata->network_cos_mode = start_params->network_cos_mode;
5672        rdata->tunnel_mode      = start_params->tunnel_mode;
5673        rdata->gre_tunnel_type  = start_params->gre_tunnel_type;
5674        rdata->inner_gre_rss_en = start_params->inner_gre_rss_en;
5675        rdata->vxlan_dst_port   = cpu_to_le16(4789); /* IANA VXLAN UDP port */
5676        rdata->sd_accept_mf_clss_fail = start_params->class_fail;
5677        if (start_params->class_fail_ethtype) {
5678                rdata->sd_accept_mf_clss_fail_match_ethtype = 1;
5679                rdata->sd_accept_mf_clss_fail_ethtype =
5680                        cpu_to_le16(start_params->class_fail_ethtype);
5681        }
5682
5683        rdata->sd_vlan_force_pri_flg = start_params->sd_vlan_force_pri;
5684        rdata->sd_vlan_force_pri_val = start_params->sd_vlan_force_pri_val;
5685        if (start_params->sd_vlan_eth_type)
5686                rdata->sd_vlan_eth_type =
5687                        cpu_to_le16(start_params->sd_vlan_eth_type);
5688        else
5689                rdata->sd_vlan_eth_type =
5690                        cpu_to_le16(0x8100);
5691
5692        rdata->no_added_tags = start_params->no_added_tags;
5693        /* No need for an explicit memory barrier here as long as we
5694         * ensure the ordering of writing to the SPQ element
5695         * and updating of the SPQ producer which involves a memory
5696         * read. If the memory read is removed we will have to put a
5697         * full memory barrier there (inside bnx2x_sp_post()).
5698         */
5699
5700        return bnx2x_sp_post(bp, RAMROD_CMD_ID_COMMON_FUNCTION_START, 0,
5701                             U64_HI(data_mapping),
5702                             U64_LO(data_mapping), NONE_CONNECTION_TYPE);
5703}
5704
5705static inline int bnx2x_func_send_switch_update(struct bnx2x *bp,
5706                                        struct bnx2x_func_state_params *params)
5707{
5708        struct bnx2x_func_sp_obj *o = params->f_obj;
5709        struct function_update_data *rdata =
5710                (struct function_update_data *)o->rdata;
5711        dma_addr_t data_mapping = o->rdata_mapping;
5712        struct bnx2x_func_switch_update_params *switch_update_params =
5713                &params->params.switch_update;
5714
5715        memset(rdata, 0, sizeof(*rdata));
5716
5717        /* Fill the ramrod data with provided parameters */
5718        if (test_bit(BNX2X_F_UPDATE_TX_SWITCH_SUSPEND_CHNG,
5719                     &switch_update_params->changes)) {
5720                rdata->tx_switch_suspend_change_flg = 1;
5721                rdata->tx_switch_suspend =
5722                        test_bit(BNX2X_F_UPDATE_TX_SWITCH_SUSPEND,
5723                                 &switch_update_params->changes);
5724        }
5725
5726        if (test_bit(BNX2X_F_UPDATE_SD_VLAN_TAG_CHNG,
5727                     &switch_update_params->changes)) {
5728                rdata->sd_vlan_tag_change_flg = 1;
5729                rdata->sd_vlan_tag =
5730                        cpu_to_le16(switch_update_params->vlan);
5731        }
5732
5733        if (test_bit(BNX2X_F_UPDATE_SD_VLAN_ETH_TYPE_CHNG,
5734                     &switch_update_params->changes)) {
5735                rdata->sd_vlan_eth_type_change_flg = 1;
5736                rdata->sd_vlan_eth_type =
5737                        cpu_to_le16(switch_update_params->vlan_eth_type);
5738        }
5739
5740        if (test_bit(BNX2X_F_UPDATE_VLAN_FORCE_PRIO_CHNG,
5741                     &switch_update_params->changes)) {
5742                rdata->sd_vlan_force_pri_change_flg = 1;
5743                if (test_bit(BNX2X_F_UPDATE_VLAN_FORCE_PRIO_FLAG,
5744                             &switch_update_params->changes))
5745                        rdata->sd_vlan_force_pri_flg = 1;
5746                rdata->sd_vlan_force_pri_val =
5747                        switch_update_params->vlan_force_prio;
5748        }
5749
5750        if (test_bit(BNX2X_F_UPDATE_TUNNEL_CFG_CHNG,
5751                     &switch_update_params->changes)) {
5752                rdata->update_tunn_cfg_flg = 1;
5753                if (test_bit(BNX2X_F_UPDATE_TUNNEL_CLSS_EN,
5754                             &switch_update_params->changes))
5755                        rdata->tunn_clss_en = 1;
5756                if (test_bit(BNX2X_F_UPDATE_TUNNEL_INNER_GRE_RSS_EN,
5757                             &switch_update_params->changes))
5758                        rdata->inner_gre_rss_en = 1;
5759                rdata->tunnel_mode = switch_update_params->tunnel_mode;
5760                rdata->gre_tunnel_type = switch_update_params->gre_tunnel_type;
5761                rdata->vxlan_dst_port = cpu_to_le16(4789);
5762        }
5763
5764        rdata->echo = SWITCH_UPDATE;
5765
5766        /* No need for an explicit memory barrier here as long as we
5767         * ensure the ordering of writing to the SPQ element
5768         * and updating of the SPQ producer which involves a memory
5769         * read. If the memory read is removed we will have to put a
5770         * full memory barrier there (inside bnx2x_sp_post()).
5771         */
5772        return bnx2x_sp_post(bp, RAMROD_CMD_ID_COMMON_FUNCTION_UPDATE, 0,
5773                             U64_HI(data_mapping),
5774                             U64_LO(data_mapping), NONE_CONNECTION_TYPE);
5775}
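
/* Usage sketch (illustrative only): every updatable field is guarded
 * by a pair of bits in the "changes" mask, a *_CHNG bit that arms the
 * corresponding *_change_flg above plus a value bit carrying the new
 * setting. E.g. to suspend tx switching:
 *
 *      struct bnx2x_func_state_params func_params = {NULL};
 *      struct bnx2x_func_switch_update_params *sw =
 *              &func_params.params.switch_update;
 *
 *      func_params.f_obj = &bp->func_obj;
 *      func_params.cmd = BNX2X_F_CMD_SWITCH_UPDATE;
 *      __set_bit(BNX2X_F_UPDATE_TX_SWITCH_SUSPEND_CHNG, &sw->changes);
 *      __set_bit(BNX2X_F_UPDATE_TX_SWITCH_SUSPEND, &sw->changes);
 *
 *      rc = bnx2x_func_state_change(bp, &func_params);
 */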
5776
5777static inline int bnx2x_func_send_afex_update(struct bnx2x *bp,
5778                                         struct bnx2x_func_state_params *params)
5779{
5780        struct bnx2x_func_sp_obj *o = params->f_obj;
5781        struct function_update_data *rdata =
5782                (struct function_update_data *)o->afex_rdata;
5783        dma_addr_t data_mapping = o->afex_rdata_mapping;
5784        struct bnx2x_func_afex_update_params *afex_update_params =
5785                &params->params.afex_update;
5786
5787        memset(rdata, 0, sizeof(*rdata));
5788
5789        /* Fill the ramrod data with provided parameters */
5790        rdata->vif_id_change_flg = 1;
5791        rdata->vif_id = cpu_to_le16(afex_update_params->vif_id);
5792        rdata->afex_default_vlan_change_flg = 1;
5793        rdata->afex_default_vlan =
5794                cpu_to_le16(afex_update_params->afex_default_vlan);
5795        rdata->allowed_priorities_change_flg = 1;
5796        rdata->allowed_priorities = afex_update_params->allowed_priorities;
5797        rdata->echo = AFEX_UPDATE;
5798
5799        /* No need for an explicit memory barrier here as long as we
5800         * ensure the ordering of writing to the SPQ element
5801         * and updating of the SPQ producer which involves a memory
5802         * read. If the memory read is removed we will have to put a
5803         * full memory barrier there (inside bnx2x_sp_post()).
5804         */
5805        DP(BNX2X_MSG_SP,
5806           "afex: sending func_update vif_id 0x%x dvlan 0x%x prio 0x%x\n",
5807           rdata->vif_id,
5808           rdata->afex_default_vlan, rdata->allowed_priorities);
5809
5810        return bnx2x_sp_post(bp, RAMROD_CMD_ID_COMMON_FUNCTION_UPDATE, 0,
5811                             U64_HI(data_mapping),
5812                             U64_LO(data_mapping), NONE_CONNECTION_TYPE);
5813}
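
/* Note: switch and afex updates are posted with the same ramrod ID
 * (RAMROD_CMD_ID_COMMON_FUNCTION_UPDATE); the echo field written above
 * (AFEX_UPDATE vs. SWITCH_UPDATE) is what lets the completion path tell
 * the two apart. A hypothetical demux on the completion side:
 *
 *      switch (echo) {
 *      case SWITCH_UPDATE:
 *              cmd = BNX2X_F_CMD_SWITCH_UPDATE;
 *              break;
 *      case AFEX_UPDATE:
 *              cmd = BNX2X_F_CMD_AFEX_UPDATE;
 *              break;
 *      }
 */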
5814
5815static inline
5816int bnx2x_func_send_afex_viflists(struct bnx2x *bp,
5817                                         struct bnx2x_func_state_params *params)
5818{
5819        struct bnx2x_func_sp_obj *o = params->f_obj;
5820        struct afex_vif_list_ramrod_data *rdata =
5821                (struct afex_vif_list_ramrod_data *)o->afex_rdata;
5822        struct bnx2x_func_afex_viflists_params *afex_vif_params =
5823                &params->params.afex_viflists;
5824        u64 *p_rdata = (u64 *)rdata;
5825
5826        memset(rdata, 0, sizeof(*rdata));
5827
5828        /* Fill the ramrod data with provided parameters */
5829        rdata->vif_list_index = cpu_to_le16(afex_vif_params->vif_list_index);
5830        rdata->func_bit_map          = afex_vif_params->func_bit_map;
5831        rdata->afex_vif_list_command = afex_vif_params->afex_vif_list_command;
5832        rdata->func_to_clear         = afex_vif_params->func_to_clear;
5833
5834        /* Send the sub-command type in the echo field */
5835        rdata->echo = afex_vif_params->afex_vif_list_command;
5836
5837        /* No need for an explicit memory barrier here as long as we
5838         * ensure the ordering of writing to the SPQ element
5839         * and updating of the SPQ producer which involves a memory
5840         * read. If the memory read is removed we will have to put a
5841         * full memory barrier there (inside bnx2x_sp_post()).
5842         */
5843
5844        DP(BNX2X_MSG_SP, "afex: ramrod lists, cmd 0x%x index 0x%x func_bit_map 0x%x func_to_clr 0x%x\n",
5845           rdata->afex_vif_list_command, rdata->vif_list_index,
5846           rdata->func_bit_map, rdata->func_to_clear);
5847
5848        /* this ramrod sends data directly and not through DMA mapping */
5849        return bnx2x_sp_post(bp, RAMROD_CMD_ID_COMMON_AFEX_VIF_LISTS, 0,
5850                             U64_HI(*p_rdata), U64_LO(*p_rdata),
5851                             NONE_CONNECTION_TYPE);
5852}
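
/* Design note: unlike the other senders, the VIF-list payload travels
 * inside the SPQ element itself (the *p_rdata cast above) rather than
 * behind a DMA address, so the structure must fit into the 64 bits
 * normally holding that address. A compile-time guard one could add
 * (illustrative only):
 *
 *      BUILD_BUG_ON(sizeof(struct afex_vif_list_ramrod_data) >
 *                   sizeof(u64));
 */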
5853
5854static inline int bnx2x_func_send_stop(struct bnx2x *bp,
5855                                       struct bnx2x_func_state_params *params)
5856{
5857        return bnx2x_sp_post(bp, RAMROD_CMD_ID_COMMON_FUNCTION_STOP, 0, 0, 0,
5858                             NONE_CONNECTION_TYPE);
5859}
5860
5861static inline int bnx2x_func_send_tx_stop(struct bnx2x *bp,
5862                                       struct bnx2x_func_state_params *params)
5863{
5864        return bnx2x_sp_post(bp, RAMROD_CMD_ID_COMMON_STOP_TRAFFIC, 0, 0, 0,
5865                             NONE_CONNECTION_TYPE);
5866}

5867static inline int bnx2x_func_send_tx_start(struct bnx2x *bp,
5868                                       struct bnx2x_func_state_params *params)
5869{
5870        struct bnx2x_func_sp_obj *o = params->f_obj;
5871        struct flow_control_configuration *rdata =
5872                (struct flow_control_configuration *)o->rdata;
5873        dma_addr_t data_mapping = o->rdata_mapping;
5874        struct bnx2x_func_tx_start_params *tx_start_params =
5875                &params->params.tx_start;
5876        int i;
5877
5878        memset(rdata, 0, sizeof(*rdata));
5879
5880        rdata->dcb_enabled = tx_start_params->dcb_enabled;
5881        rdata->dcb_version = tx_start_params->dcb_version;
5882        rdata->dont_add_pri_0_en = tx_start_params->dont_add_pri_0_en;
5883
5884        for (i = 0; i < ARRAY_SIZE(rdata->traffic_type_to_priority_cos); i++)
5885                rdata->traffic_type_to_priority_cos[i] =
5886                        tx_start_params->traffic_type_to_priority_cos[i];
5887
5888        /* No need for an explicit memory barrier here as long as we
5889         * ensure the ordering of writing to the SPQ element
5890         * and updating of the SPQ producer which involves a memory
5891         * read. If the memory read is removed we will have to put a
5892         * full memory barrier there (inside bnx2x_sp_post()).
5893         */
5894        return bnx2x_sp_post(bp, RAMROD_CMD_ID_COMMON_START_TRAFFIC, 0,
5895                             U64_HI(data_mapping),
5896                             U64_LO(data_mapping), NONE_CONNECTION_TYPE);
5897}
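
/* Usage sketch (illustrative only): the DCB code pauses traffic with
 * TX_STOP, then re-enables it with a new priority-to-CoS mapping.
 * dcb_version and cos_map are placeholders.
 *
 *      struct bnx2x_func_state_params func_params = {NULL};
 *      struct bnx2x_func_tx_start_params *tx =
 *              &func_params.params.tx_start;
 *      int i;
 *
 *      func_params.f_obj = &bp->func_obj;
 *      func_params.cmd = BNX2X_F_CMD_TX_STOP;
 *      rc = bnx2x_func_state_change(bp, &func_params);
 *
 *      func_params.cmd = BNX2X_F_CMD_TX_START;
 *      tx->dcb_enabled = 1;
 *      tx->dcb_version = dcb_version;
 *      for (i = 0; i < ARRAY_SIZE(tx->traffic_type_to_priority_cos); i++)
 *              tx->traffic_type_to_priority_cos[i] = cos_map[i];
 *      rc = bnx2x_func_state_change(bp, &func_params);
 */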
5898
5899static inline
5900int bnx2x_func_send_set_timesync(struct bnx2x *bp,
5901                                 struct bnx2x_func_state_params *params)
5902{
5903        struct bnx2x_func_sp_obj *o = params->f_obj;
5904        struct set_timesync_ramrod_data *rdata =
5905                (struct set_timesync_ramrod_data *)o->rdata;
5906        dma_addr_t data_mapping = o->rdata_mapping;
5907        struct bnx2x_func_set_timesync_params *set_timesync_params =
5908                &params->params.set_timesync;
5909
5910        memset(rdata, 0, sizeof(*rdata));
5911
5912        /* Fill the ramrod data with provided parameters */
5913        rdata->drift_adjust_cmd = set_timesync_params->drift_adjust_cmd;
5914        rdata->offset_cmd = set_timesync_params->offset_cmd;
5915        rdata->add_sub_drift_adjust_value =
5916                set_timesync_params->add_sub_drift_adjust_value;
5917        rdata->drift_adjust_value = set_timesync_params->drift_adjust_value;
5918        rdata->drift_adjust_period = set_timesync_params->drift_adjust_period;
5919        rdata->offset_delta.lo =
5920                cpu_to_le32(U64_LO(set_timesync_params->offset_delta));
5921        rdata->offset_delta.hi =
5922                cpu_to_le32(U64_HI(set_timesync_params->offset_delta));
5923
5924        DP(BNX2X_MSG_SP, "Set timesync command params: drift_cmd = %d, offset_cmd = %d, add_sub_drift = %d, drift_val = %d, drift_period = %d, offset_lo = %d, offset_hi = %d\n",
5925           rdata->drift_adjust_cmd, rdata->offset_cmd,
5926           rdata->add_sub_drift_adjust_value, rdata->drift_adjust_value,
5927           rdata->drift_adjust_period, rdata->offset_delta.lo,
5928           rdata->offset_delta.hi);
5929
5930        return bnx2x_sp_post(bp, RAMROD_CMD_ID_COMMON_SET_TIMESYNC, 0,
5931                             U64_HI(data_mapping),
5932                             U64_LO(data_mapping), NONE_CONNECTION_TYPE);
5933}
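
/* Usage sketch (illustrative only): the PTP code uses this command to
 * program clock drift and offset corrections. The encodings accepted
 * by drift_adjust_cmd/offset_cmd come from the FW HSI; drift_cmd,
 * offset_cmd and delta_ns below are placeholders.
 *
 *      struct bnx2x_func_state_params func_params = {NULL};
 *      struct bnx2x_func_set_timesync_params *ts =
 *              &func_params.params.set_timesync;
 *
 *      func_params.f_obj = &bp->func_obj;
 *      func_params.cmd = BNX2X_F_CMD_SET_TIMESYNC;
 *      ts->drift_adjust_cmd = drift_cmd;
 *      ts->offset_cmd = offset_cmd;
 *      ts->offset_delta = delta_ns;
 *
 *      rc = bnx2x_func_state_change(bp, &func_params);
 */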
5934
5935static int bnx2x_func_send_cmd(struct bnx2x *bp,
5936                               struct bnx2x_func_state_params *params)
5937{
5938        switch (params->cmd) {
5939        case BNX2X_F_CMD_HW_INIT:
5940                return bnx2x_func_hw_init(bp, params);
5941        case BNX2X_F_CMD_START:
5942                return bnx2x_func_send_start(bp, params);
5943        case BNX2X_F_CMD_STOP:
5944                return bnx2x_func_send_stop(bp, params);
5945        case BNX2X_F_CMD_HW_RESET:
5946                return bnx2x_func_hw_reset(bp, params);
5947        case BNX2X_F_CMD_AFEX_UPDATE:
5948                return bnx2x_func_send_afex_update(bp, params);
5949        case BNX2X_F_CMD_AFEX_VIFLISTS:
5950                return bnx2x_func_send_afex_viflists(bp, params);
5951        case BNX2X_F_CMD_TX_STOP:
5952                return bnx2x_func_send_tx_stop(bp, params);
5953        case BNX2X_F_CMD_TX_START:
5954                return bnx2x_func_send_tx_start(bp, params);
5955        case BNX2X_F_CMD_SWITCH_UPDATE:
5956                return bnx2x_func_send_switch_update(bp, params);
5957        case BNX2X_F_CMD_SET_TIMESYNC:
5958                return bnx2x_func_send_set_timesync(bp, params);
5959        default:
5960                BNX2X_ERR("Unknown command: %d\n", params->cmd);
5961                return -EINVAL;
5962        }
5963}
5964
5965void bnx2x_init_func_obj(struct bnx2x *bp,
5966                         struct bnx2x_func_sp_obj *obj,
5967                         void *rdata, dma_addr_t rdata_mapping,
5968                         void *afex_rdata, dma_addr_t afex_rdata_mapping,
5969                         struct bnx2x_func_sp_drv_ops *drv_iface)
5970{
5971        memset(obj, 0, sizeof(*obj));
5972
5973        mutex_init(&obj->one_pending_mutex);
5974
5975        obj->rdata = rdata;
5976        obj->rdata_mapping = rdata_mapping;
5977        obj->afex_rdata = afex_rdata;
5978        obj->afex_rdata_mapping = afex_rdata_mapping;
5979        obj->send_cmd = bnx2x_func_send_cmd;
5980        obj->check_transition = bnx2x_func_chk_transition;
5981        obj->complete_cmd = bnx2x_func_comp_cmd;
5982        obj->wait_comp = bnx2x_func_wait_comp;
5983
5984        obj->drv = drv_iface;
5985}
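
/* Usage sketch (illustrative only): the main driver calls this once at
 * setup time, pointing rdata/afex_rdata at DMA-coherent slow-path
 * buffers. The bnx2x_sp()/bnx2x_sp_mapping() accessors and the
 * bnx2x_func_sp_drv ops table are assumed as defined elsewhere in the
 * driver.
 *
 *      bnx2x_init_func_obj(bp, &bp->func_obj,
 *                          bnx2x_sp(bp, func_rdata),
 *                          bnx2x_sp_mapping(bp, func_rdata),
 *                          bnx2x_sp(bp, func_afex_rdata),
 *                          bnx2x_sp_mapping(bp, func_afex_rdata),
 *                          &bnx2x_func_sp_drv);
 */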
5986
5987/**
5988 * bnx2x_func_state_change - perform Function state change transition
5989 *
5990 * @bp:         device handle
5991 * @params:     parameters to perform the transaction
5992 *
5993 * returns 0 in case of a successfully completed transition,
5994 *         a negative error code in case of failure, or a positive
5995 *         (EBUSY) value if a completion is still pending (possible
5996 *         only if RAMROD_COMP_WAIT is not set in
5997 *         params->ramrod_flags, which makes the command
5998 *         asynchronous).
5999 */
6000int bnx2x_func_state_change(struct bnx2x *bp,
6001                            struct bnx2x_func_state_params *params)
6002{
6003        struct bnx2x_func_sp_obj *o = params->f_obj;
6004        int rc, cnt = 300;
6005        enum bnx2x_func_cmd cmd = params->cmd;
6006        unsigned long *pending = &o->pending;
6007
6008        mutex_lock(&o->one_pending_mutex);
6009
6010        /* Check that the requested transition is legal */
6011        rc = o->check_transition(bp, o, params);
6012        if ((rc == -EBUSY) &&
6013            (test_bit(RAMROD_RETRY, &params->ramrod_flags))) {
6014                while ((rc == -EBUSY) && (--cnt > 0)) {
6015                        mutex_unlock(&o->one_pending_mutex);
6016                        msleep(10);
6017                        mutex_lock(&o->one_pending_mutex);
6018                        rc = o->check_transition(bp, o, params);
6019                }
6020                if (rc == -EBUSY) {
6021                        mutex_unlock(&o->one_pending_mutex);
6022                        BNX2X_ERR("timeout waiting for previous ramrod completion\n");
6023                        return rc;
6024                }
6025        } else if (rc) {
6026                mutex_unlock(&o->one_pending_mutex);
6027                return rc;
6028        }
6029
6030        /* Set "pending" bit */
6031        set_bit(cmd, pending);
6032
6033        /* Don't send a command if only driver cleanup was requested */
6034        if (test_bit(RAMROD_DRV_CLR_ONLY, &params->ramrod_flags)) {
6035                bnx2x_func_state_change_comp(bp, o, cmd);
6036                mutex_unlock(&o->one_pending_mutex);
6037        } else {
6038                /* Send a ramrod */
6039                rc = o->send_cmd(bp, params);
6040
6041                mutex_unlock(&o->one_pending_mutex);
6042
6043                if (rc) {
6044                        o->next_state = BNX2X_F_STATE_MAX;
6045                        clear_bit(cmd, pending);
6046                        smp_mb__after_atomic();
6047                        return rc;
6048                }
6049
6050                if (test_bit(RAMROD_COMP_WAIT, &params->ramrod_flags)) {
6051                        rc = o->wait_comp(bp, o, cmd);
6052                        if (rc)
6053                                return rc;
6054
6055                        return 0;
6056                }
6057        }
6058
6059        return !!test_bit(cmd, pending);
6060}
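
/* Usage sketch (illustrative only): a synchronous invocation sets
 * RAMROD_COMP_WAIT so the call sleeps until the ramrod completes;
 * adding RAMROD_RETRY also retries the -EBUSY case above (up to
 * ~3 seconds in 10 ms steps).
 *
 *      struct bnx2x_func_state_params func_params = {NULL};
 *
 *      func_params.f_obj = &bp->func_obj;
 *      func_params.cmd = BNX2X_F_CMD_TX_STOP;
 *      __set_bit(RAMROD_COMP_WAIT, &func_params.ramrod_flags);
 *      __set_bit(RAMROD_RETRY, &func_params.ramrod_flags);
 *
 *      rc = bnx2x_func_state_change(bp, &func_params);
 *      if (rc < 0)
 *              goto error;
 */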
6061