linux/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h
/* bnx2x_cmn.h: Broadcom Everest network driver.
 *
 * Copyright (c) 2007-2011 Broadcom Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation.
 *
 * Maintained by: Eilon Greenstein <eilong@broadcom.com>
 * Written by: Eliezer Tamir
 * Based on code from Michael Chan's bnx2 driver
 * UDP CSUM errata workaround by Arik Gendelman
 * Slowpath and fastpath rework by Vladislav Zolotarov
 * Statistics and Link management by Yitchak Gertner
 *
 */
#ifndef BNX2X_CMN_H
#define BNX2X_CMN_H

#include <linux/types.h>
#include <linux/pci.h>
#include <linux/netdevice.h>

#include "bnx2x.h"

/* This is used as a replacement for an MCP if it's not present */
extern int load_count[2][3]; /* per-path: 0-common, 1-port0, 2-port1 */

extern int num_queues;

/************************ Macros ********************************/
#define BNX2X_PCI_FREE(x, y, size) \
        do { \
                if (x) { \
                        dma_free_coherent(&bp->pdev->dev, size, (void *)x, y); \
                        x = NULL; \
                        y = 0; \
                } \
        } while (0)

#define BNX2X_FREE(x) \
        do { \
                if (x) { \
                        kfree((void *)x); \
                        x = NULL; \
                } \
        } while (0)

#define BNX2X_PCI_ALLOC(x, y, size) \
        do { \
                x = dma_alloc_coherent(&bp->pdev->dev, size, y, GFP_KERNEL); \
                if (x == NULL) \
                        goto alloc_mem_err; \
                memset((void *)x, 0, size); \
        } while (0)

#define BNX2X_ALLOC(x, size) \
        do { \
                x = kzalloc(size, GFP_KERNEL); \
                if (x == NULL) \
                        goto alloc_mem_err; \
        } while (0)

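/*
 * Illustrative usage sketch (not part of the driver): the two allocation
 * macros above jump to an alloc_mem_err label that the caller must provide,
 * and both free macros are NULL-safe, so a typical caller unwinds with them,
 * e.g.:
 *
 *        BNX2X_PCI_ALLOC(ring, &mapping, RING_SIZE);
 *        BNX2X_ALLOC(sw_ring, sizeof(*sw_ring) * count);
 *        return 0;
 *
 * alloc_mem_err:
 *        BNX2X_PCI_FREE(ring, mapping, RING_SIZE);
 *        BNX2X_FREE(sw_ring);
 *        return -ENOMEM;
 *
 * "ring", "mapping", "sw_ring", "RING_SIZE" and "count" are hypothetical
 * names used only for illustration.
 */
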
/*********************** Interfaces ****************************
 *  Functions that need to be implemented by each driver version
 */
/* Init */

/**
 * bnx2x_send_unload_req - request unload mode from the MCP.
 *
 * @bp:                 driver handle
 * @unload_mode:        requested function's unload mode
 *
 * Return unload mode returned by the MCP: COMMON, PORT or FUNC.
 */
u32 bnx2x_send_unload_req(struct bnx2x *bp, int unload_mode);

/**
 * bnx2x_send_unload_done - send UNLOAD_DONE command to the MCP.
 *
 * @bp:         driver handle
 */
void bnx2x_send_unload_done(struct bnx2x *bp);

/**
 * bnx2x_config_rss_pf - configure RSS parameters.
 *
 * @bp:                 driver handle
 * @ind_table:          indirection table to configure
 * @config_hash:        true to re-configure the RSS hash keys
 */
int bnx2x_config_rss_pf(struct bnx2x *bp, u8 *ind_table, bool config_hash);

/**
 * bnx2x__init_func_obj - init function object
 *
 * @bp:                 driver handle
 *
 * Initializes the Function Object with the appropriate
 * parameters which include a function slow path driver
 * interface.
 */
void bnx2x__init_func_obj(struct bnx2x *bp);

/**
 * bnx2x_setup_queue - setup eth queue.
 *
 * @bp:         driver handle
 * @fp:         pointer to the fastpath structure
 * @leading:    true if this is the leading queue
 *
 */
int bnx2x_setup_queue(struct bnx2x *bp, struct bnx2x_fastpath *fp,
                       bool leading);

/**
 * bnx2x_setup_leading - bring up a leading eth queue.
 *
 * @bp:         driver handle
 */
int bnx2x_setup_leading(struct bnx2x *bp);

/**
 * bnx2x_fw_command - send the MCP a request
 *
 * @bp:         driver handle
 * @command:    request
 * @param:      request's parameter
 *
 * Blocks until there is a reply.
 */
u32 bnx2x_fw_command(struct bnx2x *bp, u32 command, u32 param);

/**
 * bnx2x_initial_phy_init - initialize link parameters structure variables.
 *
 * @bp:         driver handle
 * @load_mode:  current mode
 */
u8 bnx2x_initial_phy_init(struct bnx2x *bp, int load_mode);

/**
 * bnx2x_link_set - configure hw according to link parameters structure.
 *
 * @bp:         driver handle
 */
void bnx2x_link_set(struct bnx2x *bp);

/**
 * bnx2x_link_test - query link status.
 *
 * @bp:         driver handle
 * @is_serdes:  true to test the SerDes link
 *
 * Returns 0 if link is UP.
 */
u8 bnx2x_link_test(struct bnx2x *bp, u8 is_serdes);

/**
 * bnx2x_drv_pulse - write driver pulse to shmem
 *
 * @bp:         driver handle
 *
 * writes the value in bp->fw_drv_pulse_wr_seq to drv_pulse mbox
 * in the shmem.
 */
void bnx2x_drv_pulse(struct bnx2x *bp);

/**
 * bnx2x_igu_ack_sb - update IGU with current SB value
 *
 * @bp:         driver handle
 * @igu_sb_id:  SB id
 * @segment:    SB segment
 * @index:      SB index
 * @op:         SB operation
 * @update:     is HW update required
 */
void bnx2x_igu_ack_sb(struct bnx2x *bp, u8 igu_sb_id, u8 segment,
                      u16 index, u8 op, u8 update);

/* Disable transactions from chip to host */
void bnx2x_pf_disable(struct bnx2x *bp);

/**
 * bnx2x__link_status_update - handles link status change.
 *
 * @bp:         driver handle
 */
void bnx2x__link_status_update(struct bnx2x *bp);

/**
 * bnx2x_link_report - report link status to upper layer.
 *
 * @bp:         driver handle
 */
void bnx2x_link_report(struct bnx2x *bp);

/* Non-atomic version of bnx2x_link_report() */
void __bnx2x_link_report(struct bnx2x *bp);

/**
 * bnx2x_get_mf_speed - calculate MF speed.
 *
 * @bp:         driver handle
 *
 * Takes into account current linespeed and MF configuration.
 */
u16 bnx2x_get_mf_speed(struct bnx2x *bp);

/**
 * bnx2x_msix_sp_int - MSI-X slowpath interrupt handler
 *
 * @irq:                irq number
 * @dev_instance:       private instance
 */
irqreturn_t bnx2x_msix_sp_int(int irq, void *dev_instance);

/**
 * bnx2x_interrupt - non MSI-X interrupt handler
 *
 * @irq:                irq number
 * @dev_instance:       private instance
 */
irqreturn_t bnx2x_interrupt(int irq, void *dev_instance);
#ifdef BCM_CNIC

/**
 * bnx2x_cnic_notify - send command to cnic driver
 *
 * @bp:         driver handle
 * @cmd:        command
 */
int bnx2x_cnic_notify(struct bnx2x *bp, int cmd);

/**
 * bnx2x_setup_cnic_irq_info - provides cnic with IRQ information
 *
 * @bp:         driver handle
 */
void bnx2x_setup_cnic_irq_info(struct bnx2x *bp);
#endif

/**
 * bnx2x_int_enable - enable HW interrupts.
 *
 * @bp:         driver handle
 */
void bnx2x_int_enable(struct bnx2x *bp);

/**
 * bnx2x_int_disable_sync - disable interrupts.
 *
 * @bp:         driver handle
 * @disable_hw: true to also disable HW interrupts
 *
 * This function ensures that no ISRs or SP DPCs (sp_task)
 * are running when it returns.
 */
void bnx2x_int_disable_sync(struct bnx2x *bp, int disable_hw);

/**
 * bnx2x_nic_init - init driver internals.
 *
 * @bp:         driver handle
 * @load_code:  COMMON, PORT or FUNCTION
 *
 * Initializes:
 *  - rings
 *  - status blocks
 *  - etc.
 */
void bnx2x_nic_init(struct bnx2x *bp, u32 load_code);

/**
 * bnx2x_alloc_mem - allocate driver's memory.
 *
 * @bp:         driver handle
 */
int bnx2x_alloc_mem(struct bnx2x *bp);

/**
 * bnx2x_free_mem - release driver's memory.
 *
 * @bp:         driver handle
 */
void bnx2x_free_mem(struct bnx2x *bp);

/**
 * bnx2x_set_num_queues - set number of queues according to mode.
 *
 * @bp:         driver handle
 */
void bnx2x_set_num_queues(struct bnx2x *bp);

/**
 * bnx2x_chip_cleanup - cleanup chip internals.
 *
 * @bp:                 driver handle
 * @unload_mode:        COMMON, PORT, FUNCTION
 *
 * - Cleans up MAC configuration.
 * - Closes clients.
 * - etc.
 */
void bnx2x_chip_cleanup(struct bnx2x *bp, int unload_mode);

/**
 * bnx2x_acquire_hw_lock - acquire HW lock.
 *
 * @bp:         driver handle
 * @resource:   resource bit which was locked
 */
int bnx2x_acquire_hw_lock(struct bnx2x *bp, u32 resource);

/**
 * bnx2x_release_hw_lock - release HW lock.
 *
 * @bp:         driver handle
 * @resource:   resource bit which was locked
 */
int bnx2x_release_hw_lock(struct bnx2x *bp, u32 resource);

/**
 * bnx2x_release_leader_lock - release recovery leader lock
 *
 * @bp:         driver handle
 */
int bnx2x_release_leader_lock(struct bnx2x *bp);

/**
 * bnx2x_set_eth_mac - configure eth MAC address in the HW
 *
 * @bp:         driver handle
 * @set:        set or clear
 *
 * Configures according to the value in netdev->dev_addr.
 */
int bnx2x_set_eth_mac(struct bnx2x *bp, bool set);

/**
 * bnx2x_set_rx_mode - set MAC filtering configurations.
 *
 * @dev:        netdevice
 *
 * Called with netif_tx_lock from dev_mcast.c.
 * If bp->state is OPEN, should be called with
 * netif_addr_lock_bh().
 */
void bnx2x_set_rx_mode(struct net_device *dev);

/**
 * bnx2x_set_storm_rx_mode - configure MAC filtering rules in the FW.
 *
 * @bp:         driver handle
 *
 * If bp->state is OPEN, should be called with
 * netif_addr_lock_bh().
 */
void bnx2x_set_storm_rx_mode(struct bnx2x *bp);

/**
 * bnx2x_set_q_rx_mode - configures rx_mode for a single queue.
 *
 * @bp:                 driver handle
 * @cl_id:              client id
 * @rx_mode_flags:      rx mode configuration
 * @rx_accept_flags:    rx accept configuration
 * @tx_accept_flags:    tx accept configuration (tx switch)
 * @ramrod_flags:       ramrod configuration
 */
void bnx2x_set_q_rx_mode(struct bnx2x *bp, u8 cl_id,
                         unsigned long rx_mode_flags,
                         unsigned long rx_accept_flags,
                         unsigned long tx_accept_flags,
                         unsigned long ramrod_flags);

/* Parity errors related */
void bnx2x_inc_load_cnt(struct bnx2x *bp);
u32 bnx2x_dec_load_cnt(struct bnx2x *bp);
bool bnx2x_chk_parity_attn(struct bnx2x *bp, bool *global, bool print);
bool bnx2x_reset_is_done(struct bnx2x *bp, int engine);
void bnx2x_set_reset_in_progress(struct bnx2x *bp);
void bnx2x_set_reset_global(struct bnx2x *bp);
void bnx2x_disable_close_the_gate(struct bnx2x *bp);

/**
 * bnx2x_sp_event - handle ramrod completions.
 *
 * @fp:         fastpath handle for the event
 * @rr_cqe:     eth_rx_cqe
 */
void bnx2x_sp_event(struct bnx2x_fastpath *fp, union eth_rx_cqe *rr_cqe);

/**
 * bnx2x_ilt_set_info - prepare ILT configurations.
 *
 * @bp:         driver handle
 */
void bnx2x_ilt_set_info(struct bnx2x *bp);

/**
 * bnx2x_dcbx_init - initialize dcbx protocol.
 *
 * @bp:         driver handle
 */
void bnx2x_dcbx_init(struct bnx2x *bp);

/**
 * bnx2x_set_power_state - set power state to the requested value.
 *
 * @bp:         driver handle
 * @state:      required state D0 or D3hot
 *
 * Currently only D0 and D3hot are supported.
 */
int bnx2x_set_power_state(struct bnx2x *bp, pci_power_t state);

/**
 * bnx2x_update_max_mf_config - update MAX part of MF configuration in HW.
 *
 * @bp:         driver handle
 * @value:      new value
 */
void bnx2x_update_max_mf_config(struct bnx2x *bp, u32 value);

/* Error handling */
void bnx2x_panic_dump(struct bnx2x *bp);

void bnx2x_fw_dump_lvl(struct bnx2x *bp, const char *lvl);

/* dev_close main block */
int bnx2x_nic_unload(struct bnx2x *bp, int unload_mode);

/* dev_open main block */
int bnx2x_nic_load(struct bnx2x *bp, int load_mode);

/* hard_xmit callback */
netdev_tx_t bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev);

/* setup_tc callback */
int bnx2x_setup_tc(struct net_device *dev, u8 num_tc);

/* select_queue callback */
u16 bnx2x_select_queue(struct net_device *dev, struct sk_buff *skb);

/* reload helper */
int bnx2x_reload_if_running(struct net_device *dev);

int bnx2x_change_mac_addr(struct net_device *dev, void *p);

/* NAPI poll Rx part */
int bnx2x_rx_int(struct bnx2x_fastpath *fp, int budget);

void bnx2x_update_rx_prod(struct bnx2x *bp, struct bnx2x_fastpath *fp,
                        u16 bd_prod, u16 rx_comp_prod, u16 rx_sge_prod);

/* NAPI poll Tx part */
int bnx2x_tx_int(struct bnx2x *bp, struct bnx2x_fp_txdata *txdata);

/* suspend/resume callbacks */
int bnx2x_suspend(struct pci_dev *pdev, pm_message_t state);
int bnx2x_resume(struct pci_dev *pdev);

/* Release IRQ vectors */
void bnx2x_free_irq(struct bnx2x *bp);

void bnx2x_free_fp_mem(struct bnx2x *bp);
int bnx2x_alloc_fp_mem(struct bnx2x *bp);
void bnx2x_init_rx_rings(struct bnx2x *bp);
void bnx2x_free_skbs(struct bnx2x *bp);
void bnx2x_netif_stop(struct bnx2x *bp, int disable_hw);
void bnx2x_netif_start(struct bnx2x *bp);

/**
 * bnx2x_enable_msix - set msix configuration.
 *
 * @bp:         driver handle
 *
 * fills msix_table, requests vectors, updates num_queues
 * according to number of available vectors.
 */
int bnx2x_enable_msix(struct bnx2x *bp);

/**
 * bnx2x_enable_msi - request MSI mode from the OS, update internals accordingly
 *
 * @bp:         driver handle
 */
int bnx2x_enable_msi(struct bnx2x *bp);

/**
 * bnx2x_poll - NAPI callback
 *
 * @napi:       napi structure
 * @budget:     maximum amount of work to do
 *
 */
int bnx2x_poll(struct napi_struct *napi, int budget);

/**
 * bnx2x_alloc_mem_bp - allocate memory outside the main driver structure
 *
 * @bp:         driver handle
 */
int __devinit bnx2x_alloc_mem_bp(struct bnx2x *bp);

/**
 * bnx2x_free_mem_bp - release memory outside the main driver structure
 *
 * @bp:         driver handle
 */
void bnx2x_free_mem_bp(struct bnx2x *bp);

/**
 * bnx2x_change_mtu - change mtu netdev callback
 *
 * @dev:        net device
 * @new_mtu:    requested mtu
 *
 */
int bnx2x_change_mtu(struct net_device *dev, int new_mtu);

#if defined(NETDEV_FCOE_WWNN) && defined(BCM_CNIC)
/**
 * bnx2x_fcoe_get_wwn - return the requested WWN value for this port
 *
 * @dev:        net_device
 * @wwn:        output buffer
 * @type:       WWN type: NETDEV_FCOE_WWNN (node) or NETDEV_FCOE_WWPN (port)
 *
 */
int bnx2x_fcoe_get_wwn(struct net_device *dev, u64 *wwn, int type);
#endif
u32 bnx2x_fix_features(struct net_device *dev, u32 features);
int bnx2x_set_features(struct net_device *dev, u32 features);

/**
 * bnx2x_tx_timeout - tx timeout netdev callback
 *
 * @dev:        net device
 */
void bnx2x_tx_timeout(struct net_device *dev);

/*********************** Inlines **********************************/
/*********************** Fast path ********************************/
static inline void bnx2x_update_fpsb_idx(struct bnx2x_fastpath *fp)
{
        barrier(); /* status block is written to by the chip */
        fp->fp_hc_idx = fp->sb_running_index[SM_RX_ID];
}

static inline void bnx2x_update_rx_prod_gen(struct bnx2x *bp,
                        struct bnx2x_fastpath *fp, u16 bd_prod,
                        u16 rx_comp_prod, u16 rx_sge_prod, u32 start)
{
        struct ustorm_eth_rx_producers rx_prods = {0};
        u32 i;

        /* Update producers */
        rx_prods.bd_prod = bd_prod;
        rx_prods.cqe_prod = rx_comp_prod;
        rx_prods.sge_prod = rx_sge_prod;

        /*
         * Make sure that the BD and SGE data is updated before updating the
         * producers since FW might read the BD/SGE right after the producer
         * is updated.
         * This is only applicable for weak-ordered memory model archs such
         * as IA-64. The following barrier is also mandatory since the FW
         * assumes that BDs always have buffers attached.
         */
        wmb();

        for (i = 0; i < sizeof(rx_prods)/4; i++)
                REG_WR(bp, start + i*4, ((u32 *)&rx_prods)[i]);

        mmiowb(); /* keep prod updates ordered */

        DP(NETIF_MSG_RX_STATUS,
           "queue[%d]:  wrote  bd_prod %u  cqe_prod %u  sge_prod %u\n",
           fp->index, bd_prod, rx_comp_prod, rx_sge_prod);
}
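
/*
 * Illustrative sketch (not part of the driver): the Rx path refreshes the
 * producers through the per-queue shortcut computed by
 * bnx2x_rx_ustorm_prods_offset() further below, roughly:
 *
 *        bnx2x_update_rx_prod_gen(bp, fp, bd_prod, rx_comp_prod, rx_sge_prod,
 *                                 fp->ustorm_rx_prods_offset);
 *
 * The structure is written dword by dword because REG_WR() transfers one
 * 32-bit word per call.
 */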

static inline void bnx2x_igu_ack_sb_gen(struct bnx2x *bp, u8 igu_sb_id,
                                        u8 segment, u16 index, u8 op,
                                        u8 update, u32 igu_addr)
{
        struct igu_regular cmd_data = {0};

        cmd_data.sb_id_and_flags =
                        ((index << IGU_REGULAR_SB_INDEX_SHIFT) |
                         (segment << IGU_REGULAR_SEGMENT_ACCESS_SHIFT) |
                         (update << IGU_REGULAR_BUPDATE_SHIFT) |
                         (op << IGU_REGULAR_ENABLE_INT_SHIFT));

        DP(NETIF_MSG_HW, "write 0x%08x to IGU addr 0x%x\n",
           cmd_data.sb_id_and_flags, igu_addr);
        REG_WR(bp, igu_addr, cmd_data.sb_id_and_flags);

        /* Make sure that ACK is written */
        mmiowb();
        barrier();
}

static inline void bnx2x_igu_clear_sb_gen(struct bnx2x *bp, u8 func,
                                          u8 idu_sb_id, bool is_Pf)
{
        u32 data, ctl, cnt = 100;
        u32 igu_addr_data = IGU_REG_COMMAND_REG_32LSB_DATA;
        u32 igu_addr_ctl = IGU_REG_COMMAND_REG_CTRL;
        u32 igu_addr_ack = IGU_REG_CSTORM_TYPE_0_SB_CLEANUP + (idu_sb_id/32)*4;
        u32 sb_bit = 1 << (idu_sb_id%32);
        u32 func_encode = func |
                        ((is_Pf ? 1 : 0) << IGU_FID_ENCODE_IS_PF_SHIFT);
        u32 addr_encode = IGU_CMD_E2_PROD_UPD_BASE + idu_sb_id;

        /* Not supported in BC mode */
        if (CHIP_INT_MODE_IS_BC(bp))
                return;

        data = (IGU_USE_REGISTER_cstorm_type_0_sb_cleanup
                        << IGU_REGULAR_CLEANUP_TYPE_SHIFT)      |
                IGU_REGULAR_CLEANUP_SET                         |
                IGU_REGULAR_BCLEANUP;

        ctl = addr_encode << IGU_CTRL_REG_ADDRESS_SHIFT         |
              func_encode << IGU_CTRL_REG_FID_SHIFT             |
              IGU_CTRL_CMD_TYPE_WR << IGU_CTRL_REG_TYPE_SHIFT;

        DP(NETIF_MSG_HW, "write 0x%08x to IGU(via GRC) addr 0x%x\n",
                         data, igu_addr_data);
        REG_WR(bp, igu_addr_data, data);
        mmiowb();
        barrier();
        DP(NETIF_MSG_HW, "write 0x%08x to IGU(via GRC) addr 0x%x\n",
                         ctl, igu_addr_ctl);
        REG_WR(bp, igu_addr_ctl, ctl);
        mmiowb();
        barrier();

        /* wait for clean up to finish */
        while (!(REG_RD(bp, igu_addr_ack) & sb_bit) && --cnt)
                msleep(20);

        if (!(REG_RD(bp, igu_addr_ack) & sb_bit)) {
                DP(NETIF_MSG_HW, "Unable to finish IGU cleanup: "
                          "idu_sb_id %d offset %d bit %d (cnt %d)\n",
                          idu_sb_id, idu_sb_id/32, idu_sb_id%32, cnt);
        }
}

static inline void bnx2x_hc_ack_sb(struct bnx2x *bp, u8 sb_id,
                                   u8 storm, u16 index, u8 op, u8 update)
{
        u32 hc_addr = (HC_REG_COMMAND_REG + BP_PORT(bp)*32 +
                       COMMAND_REG_INT_ACK);
        struct igu_ack_register igu_ack;

        igu_ack.status_block_index = index;
        igu_ack.sb_id_and_flags =
                        ((sb_id << IGU_ACK_REGISTER_STATUS_BLOCK_ID_SHIFT) |
                         (storm << IGU_ACK_REGISTER_STORM_ID_SHIFT) |
                         (update << IGU_ACK_REGISTER_UPDATE_INDEX_SHIFT) |
                         (op << IGU_ACK_REGISTER_INTERRUPT_MODE_SHIFT));

        DP(BNX2X_MSG_OFF, "write 0x%08x to HC addr 0x%x\n",
           (*(u32 *)&igu_ack), hc_addr);
        REG_WR(bp, hc_addr, (*(u32 *)&igu_ack));

        /* Make sure that ACK is written */
        mmiowb();
        barrier();
}

static inline void bnx2x_ack_sb(struct bnx2x *bp, u8 igu_sb_id, u8 storm,
                                u16 index, u8 op, u8 update)
{
        if (bp->common.int_block == INT_BLOCK_HC)
                bnx2x_hc_ack_sb(bp, igu_sb_id, storm, index, op, update);
        else {
                u8 segment;

                if (CHIP_INT_MODE_IS_BC(bp))
                        segment = storm;
                else if (igu_sb_id != bp->igu_dsb_id)
                        segment = IGU_SEG_ACCESS_DEF;
                else if (storm == ATTENTION_ID)
                        segment = IGU_SEG_ACCESS_ATTN;
                else
                        segment = IGU_SEG_ACCESS_DEF;
                bnx2x_igu_ack_sb(bp, igu_sb_id, segment, index, op, update);
        }
}
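
/*
 * Illustrative sketch (not part of the driver): a typical fastpath ACK,
 * e.g. when a NAPI poll is done, reports the last processed status-block
 * index and re-enables the interrupt:
 *
 *        bnx2x_ack_sb(bp, fp->igu_sb_id, USTORM_ID,
 *                     le16_to_cpu(fp->fp_hc_idx), IGU_INT_ENABLE, 1);
 *
 * bnx2x_ack_sb() routes the write to either the HC or the IGU according to
 * bp->common.int_block.
 */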

static inline u16 bnx2x_hc_ack_int(struct bnx2x *bp)
{
        u32 hc_addr = (HC_REG_COMMAND_REG + BP_PORT(bp)*32 +
                       COMMAND_REG_SIMD_MASK);
        u32 result = REG_RD(bp, hc_addr);

        DP(BNX2X_MSG_OFF, "read 0x%08x from HC addr 0x%x\n",
           result, hc_addr);

        barrier();
        return result;
}

static inline u16 bnx2x_igu_ack_int(struct bnx2x *bp)
{
        u32 igu_addr = (BAR_IGU_INTMEM + IGU_REG_SISR_MDPC_WMASK_LSB_UPPER*8);
        u32 result = REG_RD(bp, igu_addr);

        DP(NETIF_MSG_HW, "read 0x%08x from IGU addr 0x%x\n",
           result, igu_addr);

        barrier();
        return result;
}

static inline u16 bnx2x_ack_int(struct bnx2x *bp)
{
        barrier();
        if (bp->common.int_block == INT_BLOCK_HC)
                return bnx2x_hc_ack_int(bp);
        else
                return bnx2x_igu_ack_int(bp);
}

static inline int bnx2x_has_tx_work_unload(struct bnx2x_fp_txdata *txdata)
{
        /* Tell compiler that consumer and producer can change */
        barrier();
        return txdata->tx_pkt_prod != txdata->tx_pkt_cons;
}

static inline u16 bnx2x_tx_avail(struct bnx2x *bp,
                                 struct bnx2x_fp_txdata *txdata)
{
        s16 used;
        u16 prod;
        u16 cons;

        prod = txdata->tx_bd_prod;
        cons = txdata->tx_bd_cons;

        /*
         * NUM_TX_RINGS = number of "next-page" entries.
         * It will be used as a threshold.
         */
        used = SUB_S16(prod, cons) + (s16)NUM_TX_RINGS;

#ifdef BNX2X_STOP_ON_ERROR
        WARN_ON(used < 0);
        WARN_ON(used > bp->tx_ring_size);
        WARN_ON((bp->tx_ring_size - used) > MAX_TX_AVAIL);
#endif

        return (s16)(bp->tx_ring_size) - used;
}
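
/*
 * Worked example for the arithmetic above: on an empty ring prod == cons,
 * so "used" equals NUM_TX_RINGS and the function reports
 * tx_ring_size - NUM_TX_RINGS available BDs. In other words, one BD per
 * "next-page" entry is permanently reserved and never reported as free,
 * which is the threshold the comment refers to.
 */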

static inline int bnx2x_tx_queue_has_work(struct bnx2x_fp_txdata *txdata)
{
        u16 hw_cons;

        /* Tell compiler that status block fields can change */
        barrier();
        hw_cons = le16_to_cpu(*txdata->tx_cons_sb);
        return hw_cons != txdata->tx_pkt_cons;
}

static inline bool bnx2x_has_tx_work(struct bnx2x_fastpath *fp)
{
        u8 cos;
        for_each_cos_in_tx_queue(fp, cos)
                if (bnx2x_tx_queue_has_work(&fp->txdata[cos]))
                        return true;
        return false;
}

static inline int bnx2x_has_rx_work(struct bnx2x_fastpath *fp)
{
        u16 rx_cons_sb;

        /* Tell compiler that status block fields can change */
        barrier();
        rx_cons_sb = le16_to_cpu(*fp->rx_cons_sb);
        if ((rx_cons_sb & MAX_RCQ_DESC_CNT) == MAX_RCQ_DESC_CNT)
                rx_cons_sb++;
        return (fp->rx_comp_cons != rx_cons_sb);
}

/**
 * bnx2x_tx_disable - disables tx from the stack's point of view
 *
 * @bp:         driver handle
 */
static inline void bnx2x_tx_disable(struct bnx2x *bp)
{
        netif_tx_disable(bp->dev);
        netif_carrier_off(bp->dev);
}

static inline void bnx2x_free_rx_sge(struct bnx2x *bp,
                                     struct bnx2x_fastpath *fp, u16 index)
{
        struct sw_rx_page *sw_buf = &fp->rx_page_ring[index];
        struct page *page = sw_buf->page;
        struct eth_rx_sge *sge = &fp->rx_sge_ring[index];

        /* Skip "next page" elements */
        if (!page)
                return;

        dma_unmap_page(&bp->pdev->dev, dma_unmap_addr(sw_buf, mapping),
                       SGE_PAGE_SIZE*PAGES_PER_SGE, DMA_FROM_DEVICE);
        __free_pages(page, PAGES_PER_SGE_SHIFT);

        sw_buf->page = NULL;
        sge->addr_hi = 0;
        sge->addr_lo = 0;
}

static inline void bnx2x_add_all_napi(struct bnx2x *bp)
{
        int i;

        /* Add NAPI objects */
        for_each_rx_queue(bp, i)
                netif_napi_add(bp->dev, &bnx2x_fp(bp, i, napi),
                               bnx2x_poll, BNX2X_NAPI_WEIGHT);
}

static inline void bnx2x_del_all_napi(struct bnx2x *bp)
{
        int i;

        for_each_rx_queue(bp, i)
                netif_napi_del(&bnx2x_fp(bp, i, napi));
}

static inline void bnx2x_disable_msi(struct bnx2x *bp)
{
        if (bp->flags & USING_MSIX_FLAG) {
                pci_disable_msix(bp->pdev);
                bp->flags &= ~USING_MSIX_FLAG;
        } else if (bp->flags & USING_MSI_FLAG) {
                pci_disable_msi(bp->pdev);
                bp->flags &= ~USING_MSI_FLAG;
        }
}

static inline int bnx2x_calc_num_queues(struct bnx2x *bp)
{
        return num_queues ?
                min_t(int, num_queues, BNX2X_MAX_QUEUES(bp)) :
                min_t(int, num_online_cpus(), BNX2X_MAX_QUEUES(bp));
}

static inline void bnx2x_clear_sge_mask_next_elems(struct bnx2x_fastpath *fp)
{
        int i, j;

        for (i = 1; i <= NUM_RX_SGE_PAGES; i++) {
                int idx = RX_SGE_CNT * i - 1;

                for (j = 0; j < 2; j++) {
                        BIT_VEC64_CLEAR_BIT(fp->sge_mask, idx);
                        idx--;
                }
        }
}

static inline void bnx2x_init_sge_ring_bit_mask(struct bnx2x_fastpath *fp)
{
        /* Set the mask to all 1-s: it's faster to compare to 0 than to 0xf-s */
        memset(fp->sge_mask, 0xff,
               (NUM_RX_SGE >> BIT_VEC64_ELEM_SHIFT)*sizeof(u64));

        /* Clear the last two indices in each page:
         * these are the indices that correspond to the "next" element,
         * hence will never be indicated and should be removed from
         * the calculations.
         */
        bnx2x_clear_sge_mask_next_elems(fp);
}

static inline int bnx2x_alloc_rx_sge(struct bnx2x *bp,
                                     struct bnx2x_fastpath *fp, u16 index)
{
        struct page *page = alloc_pages(GFP_ATOMIC, PAGES_PER_SGE_SHIFT);
        struct sw_rx_page *sw_buf = &fp->rx_page_ring[index];
        struct eth_rx_sge *sge = &fp->rx_sge_ring[index];
        dma_addr_t mapping;

        if (unlikely(page == NULL))
                return -ENOMEM;

        mapping = dma_map_page(&bp->pdev->dev, page, 0,
                               SGE_PAGE_SIZE*PAGES_PER_SGE, DMA_FROM_DEVICE);
        if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
                __free_pages(page, PAGES_PER_SGE_SHIFT);
                return -ENOMEM;
        }

        sw_buf->page = page;
        dma_unmap_addr_set(sw_buf, mapping, mapping);

        sge->addr_hi = cpu_to_le32(U64_HI(mapping));
        sge->addr_lo = cpu_to_le32(U64_LO(mapping));

        return 0;
}

static inline int bnx2x_alloc_rx_skb(struct bnx2x *bp,
                                     struct bnx2x_fastpath *fp, u16 index)
{
        struct sk_buff *skb;
        struct sw_rx_bd *rx_buf = &fp->rx_buf_ring[index];
        struct eth_rx_bd *rx_bd = &fp->rx_desc_ring[index];
        dma_addr_t mapping;

        skb = netdev_alloc_skb(bp->dev, fp->rx_buf_size);
        if (unlikely(skb == NULL))
                return -ENOMEM;

        mapping = dma_map_single(&bp->pdev->dev, skb->data, fp->rx_buf_size,
                                 DMA_FROM_DEVICE);
        if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
                dev_kfree_skb_any(skb);
                return -ENOMEM;
        }

        rx_buf->skb = skb;
        dma_unmap_addr_set(rx_buf, mapping, mapping);

        rx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
        rx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));

        return 0;
}

/*
 * Note that we are not allocating a new skb, we are just moving one from
 * cons to prod. We are not creating a new mapping, so there is no need
 * to check for dma_mapping_error().
 */
static inline void bnx2x_reuse_rx_skb(struct bnx2x_fastpath *fp,
                                      u16 cons, u16 prod)
{
        struct sw_rx_bd *cons_rx_buf = &fp->rx_buf_ring[cons];
        struct sw_rx_bd *prod_rx_buf = &fp->rx_buf_ring[prod];
        struct eth_rx_bd *cons_bd = &fp->rx_desc_ring[cons];
        struct eth_rx_bd *prod_bd = &fp->rx_desc_ring[prod];

        dma_unmap_addr_set(prod_rx_buf, mapping,
                           dma_unmap_addr(cons_rx_buf, mapping));
        prod_rx_buf->skb = cons_rx_buf->skb;
        *prod_bd = *cons_bd;
}

/************************* Init ******************************************/

/**
 * bnx2x_func_start - init function
 *
 * @bp:         driver handle
 *
 * Must be called before sending CLIENT_SETUP for the first client.
 */
static inline int bnx2x_func_start(struct bnx2x *bp)
{
        struct bnx2x_func_state_params func_params = {0};
        struct bnx2x_func_start_params *start_params =
                &func_params.params.start;

        /* Prepare parameters for function state transitions */
        __set_bit(RAMROD_COMP_WAIT, &func_params.ramrod_flags);

        func_params.f_obj = &bp->func_obj;
        func_params.cmd = BNX2X_F_CMD_START;

        /* Function parameters */
        start_params->mf_mode = bp->mf_mode;
        start_params->sd_vlan_tag = bp->mf_ov;
        if (CHIP_IS_E1x(bp))
                start_params->network_cos_mode = OVERRIDE_COS;
        else
                start_params->network_cos_mode = STATIC_COS;

        return bnx2x_func_state_change(bp, &func_params);
}

/**
 * bnx2x_set_fw_mac_addr - fill in a MAC address in FW format
 *
 * @fw_hi:      pointer to upper part
 * @fw_mid:     pointer to middle part
 * @fw_lo:      pointer to lower part
 * @mac:        pointer to MAC address
 */
static inline void bnx2x_set_fw_mac_addr(u16 *fw_hi, u16 *fw_mid, u16 *fw_lo,
                                         u8 *mac)
{
        ((u8 *)fw_hi)[0]  = mac[1];
        ((u8 *)fw_hi)[1]  = mac[0];
        ((u8 *)fw_mid)[0] = mac[3];
        ((u8 *)fw_mid)[1] = mac[2];
        ((u8 *)fw_lo)[0]  = mac[5];
        ((u8 *)fw_lo)[1]  = mac[4];
}
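
/*
 * Illustrative example: each u16 holds a byte-swapped pair of MAC bytes.
 * For mac = 00:11:22:33:44:55 the resulting in-memory byte layout is:
 *
 *        fw_hi  bytes: { 0x11, 0x00 }
 *        fw_mid bytes: { 0x33, 0x22 }
 *        fw_lo  bytes: { 0x55, 0x44 }
 *
 * so on a little-endian CPU the three values read back as 0x0011, 0x2233
 * and 0x4455.
 */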

static inline void bnx2x_free_rx_sge_range(struct bnx2x *bp,
                                           struct bnx2x_fastpath *fp, int last)
{
        int i;

        if (fp->disable_tpa)
                return;

        for (i = 0; i < last; i++)
                bnx2x_free_rx_sge(bp, fp, i);
}

static inline void bnx2x_free_tpa_pool(struct bnx2x *bp,
                                       struct bnx2x_fastpath *fp, int last)
{
        int i;

        for (i = 0; i < last; i++) {
                struct bnx2x_agg_info *tpa_info = &fp->tpa_info[i];
                struct sw_rx_bd *first_buf = &tpa_info->first_buf;
                struct sk_buff *skb = first_buf->skb;

                if (skb == NULL) {
                        DP(NETIF_MSG_IFDOWN, "tpa bin %d empty on free\n", i);
                        continue;
                }
                if (tpa_info->tpa_state == BNX2X_TPA_START)
                        dma_unmap_single(&bp->pdev->dev,
                                         dma_unmap_addr(first_buf, mapping),
                                         fp->rx_buf_size, DMA_FROM_DEVICE);
                dev_kfree_skb(skb);
                first_buf->skb = NULL;
        }
}

static inline void bnx2x_init_tx_ring_one(struct bnx2x_fp_txdata *txdata)
{
        int i;

        for (i = 1; i <= NUM_TX_RINGS; i++) {
                struct eth_tx_next_bd *tx_next_bd =
                        &txdata->tx_desc_ring[TX_DESC_CNT * i - 1].next_bd;

                tx_next_bd->addr_hi =
                        cpu_to_le32(U64_HI(txdata->tx_desc_mapping +
                                    BCM_PAGE_SIZE*(i % NUM_TX_RINGS)));
                tx_next_bd->addr_lo =
                        cpu_to_le32(U64_LO(txdata->tx_desc_mapping +
                                    BCM_PAGE_SIZE*(i % NUM_TX_RINGS)));
        }

        SET_FLAG(txdata->tx_db.data.header.header, DOORBELL_HDR_DB_TYPE, 1);
        txdata->tx_db.data.zero_fill1 = 0;
        txdata->tx_db.data.prod = 0;

        txdata->tx_pkt_prod = 0;
        txdata->tx_pkt_cons = 0;
        txdata->tx_bd_prod = 0;
        txdata->tx_bd_cons = 0;
        txdata->tx_pkt = 0;
}

static inline void bnx2x_init_tx_rings(struct bnx2x *bp)
{
        int i;
        u8 cos;

        for_each_tx_queue(bp, i)
                for_each_cos_in_tx_queue(&bp->fp[i], cos)
                        bnx2x_init_tx_ring_one(&bp->fp[i].txdata[cos]);
}

static inline void bnx2x_set_next_page_rx_bd(struct bnx2x_fastpath *fp)
{
        int i;

        for (i = 1; i <= NUM_RX_RINGS; i++) {
                struct eth_rx_bd *rx_bd;

                rx_bd = &fp->rx_desc_ring[RX_DESC_CNT * i - 2];
                rx_bd->addr_hi =
                        cpu_to_le32(U64_HI(fp->rx_desc_mapping +
                                    BCM_PAGE_SIZE*(i % NUM_RX_RINGS)));
                rx_bd->addr_lo =
                        cpu_to_le32(U64_LO(fp->rx_desc_mapping +
                                    BCM_PAGE_SIZE*(i % NUM_RX_RINGS)));
        }
}

static inline void bnx2x_set_next_page_sgl(struct bnx2x_fastpath *fp)
{
        int i;

        for (i = 1; i <= NUM_RX_SGE_PAGES; i++) {
                struct eth_rx_sge *sge;

                sge = &fp->rx_sge_ring[RX_SGE_CNT * i - 2];
                sge->addr_hi =
                        cpu_to_le32(U64_HI(fp->rx_sge_mapping +
                        BCM_PAGE_SIZE*(i % NUM_RX_SGE_PAGES)));

                sge->addr_lo =
                        cpu_to_le32(U64_LO(fp->rx_sge_mapping +
                        BCM_PAGE_SIZE*(i % NUM_RX_SGE_PAGES)));
        }
}

static inline void bnx2x_set_next_page_rx_cq(struct bnx2x_fastpath *fp)
{
        int i;

        for (i = 1; i <= NUM_RCQ_RINGS; i++) {
                struct eth_rx_cqe_next_page *nextpg;

                nextpg = (struct eth_rx_cqe_next_page *)
                        &fp->rx_comp_ring[RCQ_DESC_CNT * i - 1];
                nextpg->addr_hi =
                        cpu_to_le32(U64_HI(fp->rx_comp_mapping +
                                   BCM_PAGE_SIZE*(i % NUM_RCQ_RINGS)));
                nextpg->addr_lo =
                        cpu_to_le32(U64_LO(fp->rx_comp_mapping +
                                   BCM_PAGE_SIZE*(i % NUM_RCQ_RINGS)));
        }
}

/* Returns the number of actually allocated BDs */
static inline int bnx2x_alloc_rx_bds(struct bnx2x_fastpath *fp,
                                      int rx_ring_size)
{
        struct bnx2x *bp = fp->bp;
        u16 ring_prod, cqe_ring_prod;
        int i;

        fp->rx_comp_cons = 0;
        cqe_ring_prod = ring_prod = 0;

        /* This routine is called only during fp init, so
         * fp->eth_q_stats.rx_skb_alloc_failed = 0
         */
        for (i = 0; i < rx_ring_size; i++) {
                if (bnx2x_alloc_rx_skb(bp, fp, ring_prod) < 0) {
                        fp->eth_q_stats.rx_skb_alloc_failed++;
                        continue;
                }
                ring_prod = NEXT_RX_IDX(ring_prod);
                cqe_ring_prod = NEXT_RCQ_IDX(cqe_ring_prod);
                WARN_ON(ring_prod <= (i - fp->eth_q_stats.rx_skb_alloc_failed));
        }

        if (fp->eth_q_stats.rx_skb_alloc_failed)
                BNX2X_ERR("was only able to allocate "
                          "%d rx skbs on queue[%d]\n",
                          (i - fp->eth_q_stats.rx_skb_alloc_failed), fp->index);

        fp->rx_bd_prod = ring_prod;
        /* Limit the CQE producer by the CQE ring size */
        fp->rx_comp_prod = min_t(u16, NUM_RCQ_RINGS*RCQ_DESC_CNT,
                                 cqe_ring_prod);
        fp->rx_pkt = fp->rx_calls = 0;

        return i - fp->eth_q_stats.rx_skb_alloc_failed;
}

/* Statistics IDs are global per chip/path, while Client IDs for E1x are per
 * port.
 */
static inline u8 bnx2x_stats_id(struct bnx2x_fastpath *fp)
{
        if (!CHIP_IS_E1x(fp->bp))
                return fp->cl_id;
        else
                return fp->cl_id + BP_PORT(fp->bp) * FP_SB_MAX_E1x;
}

static inline void bnx2x_init_vlan_mac_fp_objs(struct bnx2x_fastpath *fp,
                                               bnx2x_obj_type obj_type)
{
        struct bnx2x *bp = fp->bp;

        /* Configure classification DBs */
        bnx2x_init_mac_obj(bp, &fp->mac_obj, fp->cl_id, fp->cid,
                           BP_FUNC(bp), bnx2x_sp(bp, mac_rdata),
                           bnx2x_sp_mapping(bp, mac_rdata),
                           BNX2X_FILTER_MAC_PENDING,
                           &bp->sp_state, obj_type,
                           &bp->macs_pool);
}

/**
 * bnx2x_get_path_func_num - get number of active functions
 *
 * @bp:         driver handle
 *
 * Calculates the number of active (not hidden) functions on the
 * current path.
 */
static inline u8 bnx2x_get_path_func_num(struct bnx2x *bp)
{
        u8 func_num = 0, i;

        /* 57710 has only one function per-port */
        if (CHIP_IS_E1(bp))
                return 1;

        /* Calculate a number of functions enabled on the current
         * PATH/PORT.
         */
        if (CHIP_REV_IS_SLOW(bp)) {
                if (IS_MF(bp))
                        func_num = 4;
                else
                        func_num = 2;
        } else {
                for (i = 0; i < E1H_FUNC_MAX / 2; i++) {
                        u32 func_config =
                                MF_CFG_RD(bp,
                                          func_mf_config[BP_PORT(bp) + 2 * i].
                                          config);
                        func_num +=
                                ((func_config & FUNC_MF_CFG_FUNC_HIDE) ? 0 : 1);
                }
        }

        WARN_ON(!func_num);

        return func_num;
}

static inline void bnx2x_init_bp_objs(struct bnx2x *bp)
{
        /* RX_MODE controlling object */
        bnx2x_init_rx_mode_obj(bp, &bp->rx_mode_obj);

        /* multicast configuration controlling object */
        bnx2x_init_mcast_obj(bp, &bp->mcast_obj, bp->fp->cl_id, bp->fp->cid,
                             BP_FUNC(bp), BP_FUNC(bp),
                             bnx2x_sp(bp, mcast_rdata),
                             bnx2x_sp_mapping(bp, mcast_rdata),
                             BNX2X_FILTER_MCAST_PENDING, &bp->sp_state,
                             BNX2X_OBJ_TYPE_RX);

        /* Setup CAM credit pools */
        bnx2x_init_mac_credit_pool(bp, &bp->macs_pool, BP_FUNC(bp),
                                   bnx2x_get_path_func_num(bp));

        /* RSS configuration object */
        bnx2x_init_rss_config_obj(bp, &bp->rss_conf_obj, bp->fp->cl_id,
                                  bp->fp->cid, BP_FUNC(bp), BP_FUNC(bp),
                                  bnx2x_sp(bp, rss_rdata),
                                  bnx2x_sp_mapping(bp, rss_rdata),
                                  BNX2X_FILTER_RSS_CONF_PENDING, &bp->sp_state,
                                  BNX2X_OBJ_TYPE_RX);
}

static inline u8 bnx2x_fp_qzone_id(struct bnx2x_fastpath *fp)
{
        if (CHIP_IS_E1x(fp->bp))
                return fp->cl_id + BP_PORT(fp->bp) * ETH_MAX_RX_CLIENTS_E1H;
        else
                return fp->cl_id;
}

static inline u32 bnx2x_rx_ustorm_prods_offset(struct bnx2x_fastpath *fp)
{
        struct bnx2x *bp = fp->bp;

        if (!CHIP_IS_E1x(bp))
                return USTORM_RX_PRODS_E2_OFFSET(fp->cl_qzone_id);
        else
                return USTORM_RX_PRODS_E1X_OFFSET(BP_PORT(bp), fp->cl_id);
}

static inline void bnx2x_init_txdata(struct bnx2x *bp,
        struct bnx2x_fp_txdata *txdata, u32 cid, int txq_index,
        __le16 *tx_cons_sb)
{
        txdata->cid = cid;
        txdata->txq_index = txq_index;
        txdata->tx_cons_sb = tx_cons_sb;

        DP(BNX2X_MSG_SP, "created tx data cid %d, txq %d\n",
           txdata->cid, txdata->txq_index);
}

#ifdef BCM_CNIC
static inline u8 bnx2x_cnic_eth_cl_id(struct bnx2x *bp, u8 cl_idx)
{
        return bp->cnic_base_cl_id + cl_idx +
                (bp->pf_num >> 1) * BNX2X_MAX_CNIC_ETH_CL_ID_IDX;
}

static inline u8 bnx2x_cnic_fw_sb_id(struct bnx2x *bp)
{
        /* the 'first' id is allocated for the cnic */
        return bp->base_fw_ndsb;
}

static inline u8 bnx2x_cnic_igu_sb_id(struct bnx2x *bp)
{
        return bp->igu_base_sb;
}

static inline void bnx2x_init_fcoe_fp(struct bnx2x *bp)
{
        struct bnx2x_fastpath *fp = bnx2x_fcoe_fp(bp);
        unsigned long q_type = 0;

        bnx2x_fcoe(bp, cl_id) = bnx2x_cnic_eth_cl_id(bp,
                                                     BNX2X_FCOE_ETH_CL_ID_IDX);
        /* The current BNX2X_FCOE_ETH_CID definition implies no more than
         * 16 ETH clients per function when CNIC is enabled!
         *
         * Fix it ASAP!!!
         */
        bnx2x_fcoe(bp, cid) = BNX2X_FCOE_ETH_CID;
        bnx2x_fcoe(bp, fw_sb_id) = DEF_SB_ID;
        bnx2x_fcoe(bp, igu_sb_id) = bp->igu_dsb_id;
        bnx2x_fcoe(bp, rx_cons_sb) = BNX2X_FCOE_L2_RX_INDEX;

        bnx2x_init_txdata(bp, &bnx2x_fcoe(bp, txdata[0]),
                          fp->cid, FCOE_TXQ_IDX(bp), BNX2X_FCOE_L2_TX_INDEX);

        DP(BNX2X_MSG_SP, "created fcoe tx data (fp index %d)\n", fp->index);

        /* qZone id equals to FW (per path) client id */
        bnx2x_fcoe(bp, cl_qzone_id) = bnx2x_fp_qzone_id(fp);
        /* init shortcut */
        bnx2x_fcoe(bp, ustorm_rx_prods_offset) =
                bnx2x_rx_ustorm_prods_offset(fp);

        /* Configure Queue State object */
        __set_bit(BNX2X_Q_TYPE_HAS_RX, &q_type);
        __set_bit(BNX2X_Q_TYPE_HAS_TX, &q_type);

        /* No multi-CoS for FCoE L2 client */
        BUG_ON(fp->max_cos != 1);

        bnx2x_init_queue_obj(bp, &fp->q_obj, fp->cl_id, &fp->cid, 1,
                             BP_FUNC(bp), bnx2x_sp(bp, q_rdata),
                             bnx2x_sp_mapping(bp, q_rdata), q_type);

        DP(NETIF_MSG_IFUP, "queue[%d]: bnx2x_init_sb(%p,%p) cl_id %d fw_sb %d "
                           "igu_sb %d\n",
           fp->index, bp, fp->status_blk.e2_sb, fp->cl_id, fp->fw_sb_id,
           fp->igu_sb_id);
}
#endif

static inline int bnx2x_clean_tx_queue(struct bnx2x *bp,
                                       struct bnx2x_fp_txdata *txdata)
{
        int cnt = 1000;

        while (bnx2x_has_tx_work_unload(txdata)) {
                if (!cnt) {
                        BNX2X_ERR("timeout waiting for queue[%d]: "
                                  "txdata->tx_pkt_prod(%d) != txdata->tx_pkt_cons(%d)\n",
                                  txdata->txq_index, txdata->tx_pkt_prod,
                                  txdata->tx_pkt_cons);
#ifdef BNX2X_STOP_ON_ERROR
                        bnx2x_panic();
                        return -EBUSY;
#else
                        break;
#endif
                }
                cnt--;
                usleep_range(1000, 1000);
        }

        return 0;
}
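
/*
 * Illustrative sketch (not part of the driver): on unload each Tx queue is
 * typically drained per CoS before the rings are freed, along the lines of:
 *
 *        for_each_tx_queue(bp, i)
 *                for_each_cos_in_tx_queue(&bp->fp[i], cos)
 *                        rc = bnx2x_clean_tx_queue(bp, &bp->fp[i].txdata[cos]);
 *
 * The loop above sleeps ~1ms per iteration, so the wait is bounded to
 * roughly one second per queue.
 */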

int bnx2x_get_link_cfg_idx(struct bnx2x *bp);

static inline void __storm_memset_struct(struct bnx2x *bp,
                                         u32 addr, size_t size, u32 *data)
{
        int i;

        for (i = 0; i < size/4; i++)
                REG_WR(bp, addr + (i * 4), data[i]);
}

static inline void storm_memset_func_cfg(struct bnx2x *bp,
                                struct tstorm_eth_function_common_config *tcfg,
                                u16 abs_fid)
{
        size_t size = sizeof(struct tstorm_eth_function_common_config);

        u32 addr = BAR_TSTRORM_INTMEM +
                        TSTORM_FUNCTION_COMMON_CONFIG_OFFSET(abs_fid);

        __storm_memset_struct(bp, addr, size, (u32 *)tcfg);
}

static inline void storm_memset_cmng(struct bnx2x *bp,
                                struct cmng_struct_per_port *cmng,
                                u8 port)
{
        size_t size = sizeof(struct cmng_struct_per_port);

        u32 addr = BAR_XSTRORM_INTMEM +
                        XSTORM_CMNG_PER_PORT_VARS_OFFSET(port);

        __storm_memset_struct(bp, addr, size, (u32 *)cmng);
}

/**
 * bnx2x_wait_sp_comp - wait for the outstanding SP commands.
 *
 * @bp:         driver handle
 * @mask:       bits that need to be cleared
 */
static inline bool bnx2x_wait_sp_comp(struct bnx2x *bp, unsigned long mask)
{
        int tout = 5000; /* Wait for 5 secs tops */

        while (tout--) {
                smp_mb();
                netif_addr_lock_bh(bp->dev);
                if (!(bp->sp_state & mask)) {
                        netif_addr_unlock_bh(bp->dev);
                        return true;
                }
                netif_addr_unlock_bh(bp->dev);

                usleep_range(1000, 1000);
        }

        smp_mb();

        netif_addr_lock_bh(bp->dev);
        if (bp->sp_state & mask) {
                BNX2X_ERR("Filtering completion timed out. sp_state 0x%lx, "
                          "mask 0x%lx\n", bp->sp_state, mask);
                netif_addr_unlock_bh(bp->dev);
                return false;
        }
        netif_addr_unlock_bh(bp->dev);

        return true;
}
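
/*
 * Illustrative sketch (not part of the driver): a caller that must be sure
 * that no slow path filtering command is still pending can wait on all
 * sp_state bits at once, e.g.:
 *
 *        if (!bnx2x_wait_sp_comp(bp, ~0x0UL))
 *                BNX2X_ERR("Filtering completion timed out!\n");
 *
 * The mask selects which bp->sp_state bits must be clear for the wait to
 * succeed.
 */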

/**
 * bnx2x_set_ctx_validation - set CDU context validation values
 *
 * @bp:         driver handle
 * @cxt:        context of the connection on the host memory
 * @cid:        SW CID of the connection to be configured
 */
void bnx2x_set_ctx_validation(struct bnx2x *bp, struct eth_context *cxt,
                              u32 cid);

void bnx2x_update_coalesce_sb_index(struct bnx2x *bp, u8 fw_sb_id,
                                    u8 sb_index, u8 disable, u16 usec);
void bnx2x_acquire_phy_lock(struct bnx2x *bp);
void bnx2x_release_phy_lock(struct bnx2x *bp);

/**
 * bnx2x_extract_max_cfg - extract MAX BW part from MF configuration.
 *
 * @bp:         driver handle
 * @mf_cfg:     MF configuration
 *
 */
static inline u16 bnx2x_extract_max_cfg(struct bnx2x *bp, u32 mf_cfg)
{
        u16 max_cfg = (mf_cfg & FUNC_MF_CFG_MAX_BW_MASK) >>
                      FUNC_MF_CFG_MAX_BW_SHIFT;

        if (!max_cfg) {
                DP(NETIF_MSG_LINK,
                   "Max BW configured to 0 - using 100 instead\n");
                max_cfg = 100;
        }

        return max_cfg;
}
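
/*
 * Illustrative example (an assumption about the caller, not taken from this
 * file): the raw MF configuration dword would typically come from the shmem
 * function configuration, e.g.:
 *
 *        u16 max_bw = bnx2x_extract_max_cfg(bp,
 *                MF_CFG_RD(bp, func_mf_config[BP_ABS_FUNC(bp)].config));
 *
 * A value of 0 in the MAX BW field is treated as "not limited" and mapped
 * to 100.
 */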

#endif /* BNX2X_CMN_H */