linux/drivers/net/ethernet/brocade/bna/bna.h
/*
 * Linux network driver for Brocade Converged Network Adapter.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License (GPL) Version 2 as
 * published by the Free Software Foundation
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 */
/*
 * Copyright (c) 2005-2011 Brocade Communications Systems, Inc.
 * All rights reserved
 * www.brocade.com
 */
#ifndef __BNA_H__
#define __BNA_H__

#include "bfa_defs.h"
#include "bfa_ioc.h"
#include "bfi_enet.h"
#include "bna_types.h"

extern const u32 bna_napi_dim_vector[][BNA_BIAS_T_MAX];

/*  Macros and constants  */

#define BNA_IOC_TIMER_FREQ              200

/* Log string size */
#define BNA_MESSAGE_SIZE                256

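/*
 * RxQ ids are handed out in pairs; the odd id of a pair denotes the
 * small-buffer (or header) queue.
 */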
#define bna_is_small_rxq(_id) ((_id) & 0x1)

#define BNA_MAC_IS_EQUAL(_mac1, _mac2)                                  \
        (!memcmp((_mac1), (_mac2), sizeof(mac_t)))

#define BNA_POWER_OF_2(x) (((x) & ((x) - 1)) == 0)

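/*
 * Round x down to the nearest power of two (e.g. 100 becomes 64);
 * a value that is already a power of two is left unchanged.
 */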
#define BNA_TO_POWER_OF_2(x)                                            \
do {                                                                    \
        int _shift = 0;                                                 \
        while ((x) && (x) != 1) {                                       \
                (x) >>= 1;                                              \
                _shift++;                                               \
        }                                                               \
        (x) <<= _shift;                                                 \
} while (0)

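/*
 * Round x up to the nearest power of two (e.g. 100 becomes 128);
 * a value that is already a power of two is left unchanged.
 */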
#define BNA_TO_POWER_OF_2_HIGH(x)                                       \
do {                                                                    \
        int n = 1;                                                      \
        while (n < (x))                                                 \
                n <<= 1;                                                \
        (x) = n;                                                        \
} while (0)

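/*
 * The hardware consumes DMA addresses as a big-endian pair of 32-bit
 * words ({msb, lsb} in struct bna_dma_addr); the two macros below
 * convert between that layout and a host-order u64.
 */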
/*
 * input : _addr-> os dma addr in host endian format,
 * output : _bna_dma_addr-> pointer to hw dma addr
 */
#define BNA_SET_DMA_ADDR(_addr, _bna_dma_addr)                          \
do {                                                                    \
        u64 tmp_addr = cpu_to_be64((u64)(_addr));                       \
        (_bna_dma_addr)->msb = ((struct bna_dma_addr *)&tmp_addr)->msb; \
        (_bna_dma_addr)->lsb = ((struct bna_dma_addr *)&tmp_addr)->lsb; \
} while (0)

/*
 * input : _bna_dma_addr-> pointer to hw dma addr
 * output : _addr-> os dma addr in host endian format
 */
#define BNA_GET_DMA_ADDR(_bna_dma_addr, _addr)                          \
do {                                                                    \
        (_addr) = ((((u64)ntohl((_bna_dma_addr)->msb))) << 32)          \
                | ((ntohl((_bna_dma_addr)->lsb) & 0xffffffff));         \
} while (0)

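/*
 * Local equivalent of container_of(): recover a pointer to the enclosing
 * structure from a pointer to one of its members.
 */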
#define containing_rec(addr, type, field)                               \
        ((type *)((unsigned char *)(addr) -                             \
        (unsigned char *)(&((type *)0)->field)))

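/*
 * Number of 64-byte Tx work items needed for a frame with _vectors DMA
 * vectors, i.e. ceil(_vectors / 4): each work item carries up to four
 * vectors.
 */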
#define BNA_TXQ_WI_NEEDED(_vectors)     (((_vectors) + 3) >> 2)

/* TxQ element is 64 bytes */
#define BNA_TXQ_PAGE_INDEX_MAX          (PAGE_SIZE >> 6)
#define BNA_TXQ_PAGE_INDEX_MAX_SHIFT    (PAGE_SHIFT - 6)

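/*
 * Here and in the RxQ/CQ variants below: given element index _qe_idx and
 * the queue page table _qpt_ptr, return in _qe_ptr the address of that
 * element and in _qe_ptr_range the number of elements left on its page.
 */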
#define BNA_TXQ_QPGE_PTR_GET(_qe_idx, _qpt_ptr, _qe_ptr, _qe_ptr_range) \
{                                                                       \
        unsigned int page_index;        /* index within a page */       \
        void *page_addr;                                                \
        page_index = (_qe_idx) & (BNA_TXQ_PAGE_INDEX_MAX - 1);          \
        (_qe_ptr_range) = (BNA_TXQ_PAGE_INDEX_MAX - page_index);        \
        page_addr = (_qpt_ptr)[((_qe_idx) >> BNA_TXQ_PAGE_INDEX_MAX_SHIFT)]; \
        (_qe_ptr) = &((struct bna_txq_entry *)(page_addr))[page_index]; \
}

/* RxQ element is 8 bytes */
#define BNA_RXQ_PAGE_INDEX_MAX          (PAGE_SIZE >> 3)
#define BNA_RXQ_PAGE_INDEX_MAX_SHIFT    (PAGE_SHIFT - 3)

#define BNA_RXQ_QPGE_PTR_GET(_qe_idx, _qpt_ptr, _qe_ptr, _qe_ptr_range) \
{                                                                       \
        unsigned int page_index;        /* index within a page */       \
        void *page_addr;                                                \
        page_index = (_qe_idx) & (BNA_RXQ_PAGE_INDEX_MAX - 1);          \
        (_qe_ptr_range) = (BNA_RXQ_PAGE_INDEX_MAX - page_index);        \
        page_addr = (_qpt_ptr)[((_qe_idx) >>                            \
                                BNA_RXQ_PAGE_INDEX_MAX_SHIFT)];         \
        (_qe_ptr) = &((struct bna_rxq_entry *)(page_addr))[page_index]; \
}

/* CQ element is 16 bytes */
#define BNA_CQ_PAGE_INDEX_MAX           (PAGE_SIZE >> 4)
#define BNA_CQ_PAGE_INDEX_MAX_SHIFT     (PAGE_SHIFT - 4)

#define BNA_CQ_QPGE_PTR_GET(_qe_idx, _qpt_ptr, _qe_ptr, _qe_ptr_range)  \
{                                                                       \
        unsigned int page_index;        /* index within a page */       \
        void *page_addr;                                                \
                                                                        \
        page_index = (_qe_idx) & (BNA_CQ_PAGE_INDEX_MAX - 1);           \
        (_qe_ptr_range) = (BNA_CQ_PAGE_INDEX_MAX - page_index);         \
        page_addr = (_qpt_ptr)[((_qe_idx) >>                            \
                                BNA_CQ_PAGE_INDEX_MAX_SHIFT)];          \
        (_qe_ptr) = &((struct bna_cq_entry *)(page_addr))[page_index];  \
}

#define BNA_QE_INDX_2_PTR(_cast, _qe_idx, _q_base)                      \
        (&((_cast *)(_q_base))[(_qe_idx)])

#define BNA_QE_INDX_RANGE(_qe_idx, _q_depth) ((_q_depth) - (_qe_idx))

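/*
 * The index arithmetic below (and the PI/CI helpers further down) wraps
 * with a "depth - 1" mask, so q_depth must be a power of two.
 */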
#define BNA_QE_INDX_ADD(_qe_idx, _qe_num, _q_depth)                     \
        ((_qe_idx) = ((_qe_idx) + (_qe_num)) & ((_q_depth) - 1))

#define BNA_QE_INDX_INC(_idx, _q_depth) BNA_QE_INDX_ADD(_idx, 1, _q_depth)

#define BNA_Q_INDEX_CHANGE(_old_idx, _updated_idx, _q_depth)            \
        (((_updated_idx) - (_old_idx)) & ((_q_depth) - 1))

#define BNA_QE_FREE_CNT(_q_ptr, _q_depth)                               \
        (((_q_ptr)->consumer_index - (_q_ptr)->producer_index - 1) &    \
         ((_q_depth) - 1))

#define BNA_QE_IN_USE_CNT(_q_ptr, _q_depth)                             \
        ((((_q_ptr)->producer_index - (_q_ptr)->consumer_index)) &      \
         ((_q_depth) - 1))

#define BNA_Q_GET_CI(_q_ptr)            ((_q_ptr)->q.consumer_index)

#define BNA_Q_GET_PI(_q_ptr)            ((_q_ptr)->q.producer_index)

#define BNA_Q_PI_ADD(_q_ptr, _num)                                      \
        (_q_ptr)->q.producer_index =                                    \
                (((_q_ptr)->q.producer_index + (_num)) &                \
                ((_q_ptr)->q.q_depth - 1))

#define BNA_Q_CI_ADD(_q_ptr, _num)                                      \
        (_q_ptr)->q.consumer_index =                                    \
                (((_q_ptr)->q.consumer_index + (_num))                  \
                & ((_q_ptr)->q.q_depth - 1))

#define BNA_Q_FREE_COUNT(_q_ptr)                                        \
        (BNA_QE_FREE_CNT(&((_q_ptr)->q), (_q_ptr)->q.q_depth))

#define BNA_Q_IN_USE_COUNT(_q_ptr)                                      \
        (BNA_QE_IN_USE_CNT(&(_q_ptr)->q, (_q_ptr)->q.q_depth))

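/*
 * Packets longer than BNA_LARGE_PKT_SIZE bytes are counted as "large" in
 * the packet-rate statistics that feed the Rx dynamic interrupt
 * moderation logic (bna_rx_dim_update()).
 */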
#define BNA_LARGE_PKT_SIZE              1000

#define BNA_UPDATE_PKT_CNT(_pkt, _len)                                  \
do {                                                                    \
        if ((_len) > BNA_LARGE_PKT_SIZE) {                              \
                (_pkt)->large_pkt_cnt++;                                \
        } else {                                                        \
                (_pkt)->small_pkt_cnt++;                                \
        }                                                               \
} while (0)

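/*
 * The call_rxf_*_cbfn() macros fire a completion callback that was
 * registered with an RxF operation, and clear it so it runs only once.
 */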
#define call_rxf_stop_cbfn(rxf)                                         \
do {                                                                    \
        if ((rxf)->stop_cbfn) {                                         \
                void (*cbfn)(struct bna_rx *);                          \
                struct bna_rx *cbarg;                                   \
                cbfn = (rxf)->stop_cbfn;                                \
                cbarg = (rxf)->stop_cbarg;                              \
                (rxf)->stop_cbfn = NULL;                                \
                (rxf)->stop_cbarg = NULL;                               \
                cbfn(cbarg);                                            \
        }                                                               \
} while (0)

#define call_rxf_start_cbfn(rxf)                                        \
do {                                                                    \
        if ((rxf)->start_cbfn) {                                        \
                void (*cbfn)(struct bna_rx *);                          \
                struct bna_rx *cbarg;                                   \
                cbfn = (rxf)->start_cbfn;                               \
                cbarg = (rxf)->start_cbarg;                             \
                (rxf)->start_cbfn = NULL;                               \
                (rxf)->start_cbarg = NULL;                              \
                cbfn(cbarg);                                            \
        }                                                               \
} while (0)

#define call_rxf_cam_fltr_cbfn(rxf)                                     \
do {                                                                    \
        if ((rxf)->cam_fltr_cbfn) {                                     \
                void (*cbfn)(struct bnad *, struct bna_rx *);           \
                struct bnad *cbarg;                                     \
                cbfn = (rxf)->cam_fltr_cbfn;                            \
                cbarg = (rxf)->cam_fltr_cbarg;                          \
                (rxf)->cam_fltr_cbfn = NULL;                            \
                (rxf)->cam_fltr_cbarg = NULL;                           \
                cbfn(cbarg, rxf->rx);                                   \
        }                                                               \
} while (0)

#define call_rxf_pause_cbfn(rxf)                                        \
do {                                                                    \
        if ((rxf)->oper_state_cbfn) {                                   \
                void (*cbfn)(struct bnad *, struct bna_rx *);           \
                struct bnad *cbarg;                                     \
                cbfn = (rxf)->oper_state_cbfn;                          \
                cbarg = (rxf)->oper_state_cbarg;                        \
                (rxf)->oper_state_cbfn = NULL;                          \
                (rxf)->oper_state_cbarg = NULL;                         \
                cbfn(cbarg, rxf->rx);                                   \
        }                                                               \
} while (0)

#define call_rxf_resume_cbfn(rxf) call_rxf_pause_cbfn(rxf)

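/*
 * Rx mode helpers: "mode" holds the requested BNA_RXMODE_* bits and
 * "bitmask" marks which bits are being acted on.  A bit set in both means
 * enable, set in bitmask but clear in mode means disable, clear in both
 * means inactive.  The promisc/default/allmulti wrappers below
 * instantiate these helpers for the individual BNA_RXMODE_* flags.
 */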
#define is_xxx_enable(mode, bitmask, xxx) ((bitmask & xxx) && (mode & xxx))

#define is_xxx_disable(mode, bitmask, xxx) ((bitmask & xxx) && !(mode & xxx))

#define xxx_enable(mode, bitmask, xxx)                                  \
do {                                                                    \
        bitmask |= xxx;                                                 \
        mode |= xxx;                                                    \
} while (0)

#define xxx_disable(mode, bitmask, xxx)                                 \
do {                                                                    \
        bitmask |= xxx;                                                 \
        mode &= ~xxx;                                                   \
} while (0)

#define xxx_inactive(mode, bitmask, xxx)                                \
do {                                                                    \
        bitmask &= ~xxx;                                                \
        mode &= ~xxx;                                                   \
} while (0)

#define is_promisc_enable(mode, bitmask)                                \
        is_xxx_enable(mode, bitmask, BNA_RXMODE_PROMISC)

#define is_promisc_disable(mode, bitmask)                               \
        is_xxx_disable(mode, bitmask, BNA_RXMODE_PROMISC)

#define promisc_enable(mode, bitmask)                                   \
        xxx_enable(mode, bitmask, BNA_RXMODE_PROMISC)

#define promisc_disable(mode, bitmask)                                  \
        xxx_disable(mode, bitmask, BNA_RXMODE_PROMISC)

#define promisc_inactive(mode, bitmask)                                 \
        xxx_inactive(mode, bitmask, BNA_RXMODE_PROMISC)

#define is_default_enable(mode, bitmask)                                \
        is_xxx_enable(mode, bitmask, BNA_RXMODE_DEFAULT)

#define is_default_disable(mode, bitmask)                               \
        is_xxx_disable(mode, bitmask, BNA_RXMODE_DEFAULT)

#define default_enable(mode, bitmask)                                   \
        xxx_enable(mode, bitmask, BNA_RXMODE_DEFAULT)

#define default_disable(mode, bitmask)                                  \
        xxx_disable(mode, bitmask, BNA_RXMODE_DEFAULT)

#define default_inactive(mode, bitmask)                                 \
        xxx_inactive(mode, bitmask, BNA_RXMODE_DEFAULT)

#define is_allmulti_enable(mode, bitmask)                               \
        is_xxx_enable(mode, bitmask, BNA_RXMODE_ALLMULTI)

#define is_allmulti_disable(mode, bitmask)                              \
        is_xxx_disable(mode, bitmask, BNA_RXMODE_ALLMULTI)

#define allmulti_enable(mode, bitmask)                                  \
        xxx_enable(mode, bitmask, BNA_RXMODE_ALLMULTI)

#define allmulti_disable(mode, bitmask)                                 \
        xxx_disable(mode, bitmask, BNA_RXMODE_ALLMULTI)

#define allmulti_inactive(mode, bitmask)                                \
        xxx_inactive(mode, bitmask, BNA_RXMODE_ALLMULTI)

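/*
 * Pull the one or two RxQs behind an Rx path: q0 is the only/large/data
 * queue, q1 the small/header queue (NULL for a BNA_RXP_SINGLE path).
 */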
#define GET_RXQS(rxp, q0, q1)   do {                                    \
        switch ((rxp)->type) {                                          \
        case BNA_RXP_SINGLE:                                            \
                (q0) = rxp->rxq.single.only;                            \
                (q1) = NULL;                                            \
                break;                                                  \
        case BNA_RXP_SLR:                                               \
                (q0) = rxp->rxq.slr.large;                              \
                (q1) = rxp->rxq.slr.small;                              \
                break;                                                  \
        case BNA_RXP_HDS:                                               \
                (q0) = rxp->rxq.hds.data;                               \
                (q1) = rxp->rxq.hds.hdr;                                \
                break;                                                  \
        }                                                               \
} while (0)

#define bna_tx_rid_mask(_bna) ((_bna)->tx_mod.rid_mask)

#define bna_rx_rid_mask(_bna) ((_bna)->rx_mod.rid_mask)

#define bna_tx_from_rid(_bna, _rid, _tx)                                \
do {                                                                    \
        struct bna_tx_mod *__tx_mod = &(_bna)->tx_mod;                  \
        struct bna_tx *__tx;                                            \
        struct list_head *qe;                                           \
        _tx = NULL;                                                     \
        list_for_each(qe, &__tx_mod->tx_active_q) {                     \
                __tx = (struct bna_tx *)qe;                             \
                if (__tx->rid == (_rid)) {                              \
                        (_tx) = __tx;                                   \
                        break;                                          \
                }                                                       \
        }                                                               \
} while (0)

#define bna_rx_from_rid(_bna, _rid, _rx)                                \
do {                                                                    \
        struct bna_rx_mod *__rx_mod = &(_bna)->rx_mod;                  \
        struct bna_rx *__rx;                                            \
        struct list_head *qe;                                           \
        _rx = NULL;                                                     \
        list_for_each(qe, &__rx_mod->rx_active_q) {                     \
                __rx = (struct bna_rx *)qe;                             \
                if (__rx->rid == (_rid)) {                              \
                        (_rx) = __rx;                                   \
                        break;                                          \
                }                                                       \
        }                                                               \
} while (0)

#define bna_mcam_mod_free_q(_bna) (&(_bna)->mcam_mod.free_q)

#define bna_mcam_mod_del_q(_bna) (&(_bna)->mcam_mod.del_q)

#define bna_ucam_mod_free_q(_bna) (&(_bna)->ucam_mod.free_q)

#define bna_ucam_mod_del_q(_bna) (&(_bna)->ucam_mod.del_q)

/*  Inline functions  */

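/*
 * Linear search of a MAC address list (e.g. the ucam/mcam lists);
 * returns NULL if addr is not on the list.
 */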
static inline struct bna_mac *bna_mac_find(struct list_head *q, u8 *addr)
{
        struct bna_mac *mac = NULL;
        struct list_head *qe;
        list_for_each(qe, q) {
                if (BNA_MAC_IS_EQUAL(((struct bna_mac *)qe)->addr, addr)) {
                        mac = (struct bna_mac *)qe;
                        break;
                }
        }
        return mac;
}

#define bna_attr(_bna) (&(_bna)->ioceth.attr)

/* Function prototypes */

/* BNA */

/* FW response handlers */
void bna_bfi_stats_clr_rsp(struct bna *bna, struct bfi_msgq_mhdr *msghdr);

/* APIs for BNAD */
void bna_res_req(struct bna_res_info *res_info);
void bna_mod_res_req(struct bna *bna, struct bna_res_info *res_info);
void bna_init(struct bna *bna, struct bnad *bnad,
              struct bfa_pcidev *pcidev,
              struct bna_res_info *res_info);
void bna_mod_init(struct bna *bna, struct bna_res_info *res_info);
void bna_uninit(struct bna *bna);
int bna_num_txq_set(struct bna *bna, int num_txq);
int bna_num_rxp_set(struct bna *bna, int num_rxp);
void bna_hw_stats_get(struct bna *bna);

/* APIs for RxF */
struct bna_mac *bna_cam_mod_mac_get(struct list_head *head);
void bna_cam_mod_mac_put(struct list_head *tail, struct bna_mac *mac);
struct bna_mcam_handle *bna_mcam_mod_handle_get(struct bna_mcam_mod *mod);
void bna_mcam_mod_handle_put(struct bna_mcam_mod *mcam_mod,
                             struct bna_mcam_handle *handle);

/* MBOX */

/* API for BNAD */
void bna_mbox_handler(struct bna *bna, u32 intr_status);

/* ETHPORT */

/* Callbacks for RX */
void bna_ethport_cb_rx_started(struct bna_ethport *ethport);
void bna_ethport_cb_rx_stopped(struct bna_ethport *ethport);

/* TX MODULE AND TX */

/* FW response handlers */
void bna_bfi_tx_enet_start_rsp(struct bna_tx *tx,
                               struct bfi_msgq_mhdr *msghdr);
void bna_bfi_tx_enet_stop_rsp(struct bna_tx *tx,
                              struct bfi_msgq_mhdr *msghdr);
void bna_bfi_bw_update_aen(struct bna_tx_mod *tx_mod);

/* APIs for BNA */
void bna_tx_mod_init(struct bna_tx_mod *tx_mod, struct bna *bna,
                     struct bna_res_info *res_info);
void bna_tx_mod_uninit(struct bna_tx_mod *tx_mod);

/* APIs for ENET */
void bna_tx_mod_start(struct bna_tx_mod *tx_mod, enum bna_tx_type type);
void bna_tx_mod_stop(struct bna_tx_mod *tx_mod, enum bna_tx_type type);
void bna_tx_mod_fail(struct bna_tx_mod *tx_mod);

/* APIs for BNAD */
void bna_tx_res_req(int num_txq, int txq_depth,
                    struct bna_res_info *res_info);
struct bna_tx *bna_tx_create(struct bna *bna, struct bnad *bnad,
                             struct bna_tx_config *tx_cfg,
                             const struct bna_tx_event_cbfn *tx_cbfn,
                             struct bna_res_info *res_info, void *priv);
void bna_tx_destroy(struct bna_tx *tx);
void bna_tx_enable(struct bna_tx *tx);
void bna_tx_disable(struct bna_tx *tx, enum bna_cleanup_type type,
                    void (*cbfn)(void *, struct bna_tx *));
void bna_tx_cleanup_complete(struct bna_tx *tx);
void bna_tx_coalescing_timeo_set(struct bna_tx *tx, int coalescing_timeo);

/* RX MODULE, RX, RXF */

/* FW response handlers */
void bna_bfi_rx_enet_start_rsp(struct bna_rx *rx,
                               struct bfi_msgq_mhdr *msghdr);
void bna_bfi_rx_enet_stop_rsp(struct bna_rx *rx,
                              struct bfi_msgq_mhdr *msghdr);
void bna_bfi_rxf_cfg_rsp(struct bna_rxf *rxf, struct bfi_msgq_mhdr *msghdr);
void bna_bfi_rxf_mcast_add_rsp(struct bna_rxf *rxf,
                               struct bfi_msgq_mhdr *msghdr);
void bna_bfi_rxf_ucast_set_rsp(struct bna_rxf *rxf,
                               struct bfi_msgq_mhdr *msghdr);

/* APIs for BNA */
void bna_rx_mod_init(struct bna_rx_mod *rx_mod, struct bna *bna,
                     struct bna_res_info *res_info);
void bna_rx_mod_uninit(struct bna_rx_mod *rx_mod);

/* APIs for ENET */
void bna_rx_mod_start(struct bna_rx_mod *rx_mod, enum bna_rx_type type);
void bna_rx_mod_stop(struct bna_rx_mod *rx_mod, enum bna_rx_type type);
void bna_rx_mod_fail(struct bna_rx_mod *rx_mod);

/* APIs for BNAD */
void bna_rx_res_req(struct bna_rx_config *rx_config,
                    struct bna_res_info *res_info);
struct bna_rx *bna_rx_create(struct bna *bna, struct bnad *bnad,
                             struct bna_rx_config *rx_cfg,
                             const struct bna_rx_event_cbfn *rx_cbfn,
                             struct bna_res_info *res_info, void *priv);
void bna_rx_destroy(struct bna_rx *rx);
void bna_rx_enable(struct bna_rx *rx);
void bna_rx_disable(struct bna_rx *rx, enum bna_cleanup_type type,
                    void (*cbfn)(void *, struct bna_rx *));
void bna_rx_cleanup_complete(struct bna_rx *rx);
void bna_rx_coalescing_timeo_set(struct bna_rx *rx, int coalescing_timeo);
void bna_rx_dim_reconfig(struct bna *bna, const u32 vector[][BNA_BIAS_T_MAX]);
void bna_rx_dim_update(struct bna_ccb *ccb);
enum bna_cb_status
bna_rx_ucast_set(struct bna_rx *rx, u8 *ucmac,
                 void (*cbfn)(struct bnad *, struct bna_rx *));
enum bna_cb_status
bna_rx_ucast_add(struct bna_rx *rx, u8 *ucmac,
                 void (*cbfn)(struct bnad *, struct bna_rx *));
enum bna_cb_status
bna_rx_ucast_del(struct bna_rx *rx, u8 *ucmac,
                 void (*cbfn)(struct bnad *, struct bna_rx *));
enum bna_cb_status
bna_rx_ucast_listset(struct bna_rx *rx, int count, u8 *uclist,
                     void (*cbfn)(struct bnad *, struct bna_rx *));
enum bna_cb_status
bna_rx_mcast_add(struct bna_rx *rx, u8 *mcmac,
                 void (*cbfn)(struct bnad *, struct bna_rx *));
enum bna_cb_status
bna_rx_mcast_listset(struct bna_rx *rx, int count, u8 *mcmac,
                     void (*cbfn)(struct bnad *, struct bna_rx *));
void
bna_rx_mcast_delall(struct bna_rx *rx,
                    void (*cbfn)(struct bnad *, struct bna_rx *));
enum bna_cb_status
bna_rx_mode_set(struct bna_rx *rx, enum bna_rxmode rxmode,
                enum bna_rxmode bitmask,
                void (*cbfn)(struct bnad *, struct bna_rx *));
void bna_rx_vlan_add(struct bna_rx *rx, int vlan_id);
void bna_rx_vlan_del(struct bna_rx *rx, int vlan_id);
void bna_rx_vlanfilter_enable(struct bna_rx *rx);
void bna_rx_vlan_strip_enable(struct bna_rx *rx);
void bna_rx_vlan_strip_disable(struct bna_rx *rx);

/* ENET */

/* API for RX */
int bna_enet_mtu_get(struct bna_enet *enet);

/* Callbacks for TX, RX */
void bna_enet_cb_tx_stopped(struct bna_enet *enet);
void bna_enet_cb_rx_stopped(struct bna_enet *enet);

/* API for BNAD */
void bna_enet_enable(struct bna_enet *enet);
void bna_enet_disable(struct bna_enet *enet, enum bna_cleanup_type type,
                      void (*cbfn)(void *));
void bna_enet_pause_config(struct bna_enet *enet,
                           struct bna_pause_config *pause_config,
                           void (*cbfn)(struct bnad *));
void bna_enet_mtu_set(struct bna_enet *enet, int mtu,
                      void (*cbfn)(struct bnad *));
void bna_enet_perm_mac_get(struct bna_enet *enet, mac_t *mac);

/* IOCETH */

/* APIs for BNAD */
void bna_ioceth_enable(struct bna_ioceth *ioceth);
void bna_ioceth_disable(struct bna_ioceth *ioceth,
                        enum bna_cleanup_type type);

/* BNAD */

/* Callbacks for ENET */
void bnad_cb_ethport_link_status(struct bnad *bnad,
                                 enum bna_link_status status);

/* Callbacks for IOCETH */
void bnad_cb_ioceth_ready(struct bnad *bnad);
void bnad_cb_ioceth_failed(struct bnad *bnad);
void bnad_cb_ioceth_disabled(struct bnad *bnad);
void bnad_cb_mbox_intr_enable(struct bnad *bnad);
void bnad_cb_mbox_intr_disable(struct bnad *bnad);

/* Callbacks for BNA */
void bnad_cb_stats_get(struct bnad *bnad, enum bna_cb_status status,
                       struct bna_stats *stats);

#endif  /* __BNA_H__ */