linux/drivers/net/ethernet/emulex/benet/be.h
/*
 * Copyright (C) 2005 - 2011 Emulex
 * All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version 2
 * as published by the Free Software Foundation.  The full GNU General
 * Public License is included in this distribution in the file called COPYING.
 *
 * Contact Information:
 * linux-drivers@emulex.com
 *
 * Emulex
 * 3333 Susan Street
 * Costa Mesa, CA 92626
 */

#ifndef BE_H
#define BE_H

#include <linux/pci.h>
#include <linux/etherdevice.h>
#include <linux/delay.h>
#include <net/tcp.h>
#include <net/ip.h>
#include <net/ipv6.h>
#include <linux/if_vlan.h>
#include <linux/workqueue.h>
#include <linux/interrupt.h>
#include <linux/firmware.h>
#include <linux/slab.h>
#include <linux/u64_stats_sync.h>

#include "be_hw.h"
#include "be_roce.h"

#define DRV_VER                 "4.4.31.0u"
#define DRV_NAME                "be2net"
#define BE_NAME                 "ServerEngines BladeEngine2 10Gbps NIC"
#define BE3_NAME                "ServerEngines BladeEngine3 10Gbps NIC"
#define OC_NAME                 "Emulex OneConnect 10Gbps NIC"
#define OC_NAME_BE              OC_NAME "(be3)"
#define OC_NAME_LANCER          OC_NAME "(Lancer)"
#define OC_NAME_SH              OC_NAME "(Skyhawk)"
#define DRV_DESC                "ServerEngines BladeEngine 10Gbps NIC Driver"

#define BE_VENDOR_ID            0x19a2
#define EMULEX_VENDOR_ID        0x10df
#define BE_DEVICE_ID1           0x211
#define BE_DEVICE_ID2           0x221
#define OC_DEVICE_ID1           0x700   /* Device Id for BE2 cards */
#define OC_DEVICE_ID2           0x710   /* Device Id for BE3 cards */
#define OC_DEVICE_ID3           0xe220  /* Device Id for Lancer cards */
#define OC_DEVICE_ID4           0xe228  /* Device Id for VF in Lancer */
#define OC_DEVICE_ID5           0x720   /* Device Id for Skyhawk cards */
#define OC_SUBSYS_DEVICE_ID1    0xE602
#define OC_SUBSYS_DEVICE_ID2    0xE642
#define OC_SUBSYS_DEVICE_ID3    0xE612
#define OC_SUBSYS_DEVICE_ID4    0xE652
static inline char *nic_name(struct pci_dev *pdev)
{
        switch (pdev->device) {
        case OC_DEVICE_ID1:
                return OC_NAME;
        case OC_DEVICE_ID2:
                return OC_NAME_BE;
        case OC_DEVICE_ID3:
        case OC_DEVICE_ID4:
                return OC_NAME_LANCER;
        case BE_DEVICE_ID2:
                return BE3_NAME;
        case OC_DEVICE_ID5:
                return OC_NAME_SH;
        default:
                return BE_NAME;
        }
}

/* Number of bytes of an RX frame that are copied to skb->data */
#define BE_HDR_LEN              ((u16) 64)
/* allocate extra space to allow tunneling decapsulation without head reallocation */
#define BE_RX_SKB_ALLOC_SIZE (BE_HDR_LEN + 64)

#define BE_MAX_JUMBO_FRAME_SIZE 9018
#define BE_MIN_MTU              256

#define BE_NUM_VLANS_SUPPORTED  64
#define BE_MAX_EQD              96u
#define BE_MAX_TX_FRAG_COUNT    30

#define EVNT_Q_LEN              1024
#define TX_Q_LEN                2048
#define TX_CQ_LEN               1024
#define RX_Q_LEN                1024    /* Does not support any other value */
#define RX_CQ_LEN               1024
#define MCC_Q_LEN               128     /* total size not to exceed 8 pages */
#define MCC_CQ_LEN              256

#define BE3_MAX_RSS_QS          8
#define BE2_MAX_RSS_QS          4
#define MAX_RSS_QS              BE3_MAX_RSS_QS
#define MAX_RX_QS               (MAX_RSS_QS + 1) /* RSS qs + 1 def Rx */

#define MAX_TX_QS               8
#define MAX_ROCE_EQS            5
#define MAX_MSIX_VECTORS        (MAX_RSS_QS + MAX_ROCE_EQS) /* RSS qs + RoCE */
#define BE_TX_BUDGET            256
#define BE_NAPI_WEIGHT          64
#define MAX_RX_POST             BE_NAPI_WEIGHT /* Frags posted at a time */
#define RX_FRAGS_REFILL_WM      (RX_Q_LEN - MAX_RX_POST)

#define MAX_VFS                 30 /* Max VFs supported by BE3 FW */
#define FW_VER_LEN              32

struct be_dma_mem {
        void *va;
        dma_addr_t dma;
        u32 size;
};

struct be_queue_info {
        struct be_dma_mem dma_mem;
        u16 len;
        u16 entry_size; /* Size of an element in the queue */
        u16 id;
        u16 tail, head;
        bool created;
        atomic_t used;  /* Number of valid elements in the queue */
};

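/* MODULO() relies on 'limit' being a power of two so a ring index can wrap
 * with a mask instead of a divide; the BUG_ON below enforces this.
 * Illustrative example: MODULO(1025, 1024) == 1.
 */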
static inline u32 MODULO(u16 val, u16 limit)
{
        BUG_ON(limit & (limit - 1));
        return val & (limit - 1);
}

static inline void index_adv(u16 *index, u16 val, u16 limit)
{
        *index = MODULO((*index + val), limit);
}

static inline void index_inc(u16 *index, u16 limit)
{
        *index = MODULO((*index + 1), limit);
}

static inline void *queue_head_node(struct be_queue_info *q)
{
        return q->dma_mem.va + q->head * q->entry_size;
}

static inline void *queue_tail_node(struct be_queue_info *q)
{
        return q->dma_mem.va + q->tail * q->entry_size;
}

static inline void *queue_index_node(struct be_queue_info *q, u16 index)
{
        return q->dma_mem.va + index * q->entry_size;
}

static inline void queue_head_inc(struct be_queue_info *q)
{
        index_inc(&q->head, q->len);
}

static inline void index_dec(u16 *index, u16 limit)
{
        *index = MODULO((*index - 1), limit);
}

static inline void queue_tail_inc(struct be_queue_info *q)
{
        index_inc(&q->tail, q->len);
}
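
/* Typical ring usage (illustrative sketch only, not driver code): the
 * producer fills the head slot and advances head, the consumer drains the
 * tail slot and advances tail, with 'used' tracking occupancy:
 *
 *      wrb = queue_head_node(txq);
 *      ... fill wrb ...
 *      queue_head_inc(txq);
 *      atomic_inc(&txq->used);
 */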

struct be_eq_obj {
        struct be_queue_info q;
        char desc[32];

        /* Adaptive interrupt coalescing (AIC) info */
        bool enable_aic;
        u32 min_eqd;            /* in usecs */
        u32 max_eqd;            /* in usecs */
        u32 eqd;                /* configured val when aic is off */
        u32 cur_eqd;            /* in usecs */

        u8 idx;                 /* array index */
        u16 tx_budget;
        struct napi_struct napi;
        struct be_adapter *adapter;
} ____cacheline_aligned_in_smp;

struct be_mcc_obj {
        struct be_queue_info q;
        struct be_queue_info cq;
        bool rearm_cq;
};

struct be_tx_stats {
        u64 tx_bytes;
        u64 tx_pkts;
        u64 tx_reqs;
        u64 tx_wrbs;
        u64 tx_compl;
        ulong tx_jiffies;
        u32 tx_stops;
        struct u64_stats_sync sync;
        struct u64_stats_sync sync_compl;
};

struct be_tx_obj {
        struct be_queue_info q;
        struct be_queue_info cq;
        /* Remember the skbs that were transmitted */
        struct sk_buff *sent_skb_list[TX_Q_LEN];
        struct be_tx_stats stats;
} ____cacheline_aligned_in_smp;

/* Struct to remember the pages posted for rx frags */
struct be_rx_page_info {
        struct page *page;
        DEFINE_DMA_UNMAP_ADDR(bus);
        u16 page_offset;
        bool last_page_user;
};

struct be_rx_stats {
        u64 rx_bytes;
        u64 rx_pkts;
        u64 rx_pkts_prev;
        ulong rx_jiffies;
        u32 rx_drops_no_skbs;   /* skb allocation errors */
        u32 rx_drops_no_frags;  /* HW has no fetched frags */
        u32 rx_post_fail;       /* page post alloc failures */
        u32 rx_compl;
        u32 rx_mcast_pkts;
        u32 rx_compl_err;       /* completions with err set */
        u32 rx_pps;             /* pkts per second */
        struct u64_stats_sync sync;
};

struct be_rx_compl_info {
        u32 rss_hash;
        u16 vlan_tag;
        u16 pkt_size;
        u16 rxq_idx;
        u16 port;
        u8 vlanf;
        u8 num_rcvd;
        u8 err;
        u8 ipf;
        u8 tcpf;
        u8 udpf;
        u8 ip_csum;
        u8 l4_csum;
        u8 ipv6;
        u8 vtm;
        u8 pkt_type;
};

struct be_rx_obj {
        struct be_adapter *adapter;
        struct be_queue_info q;
        struct be_queue_info cq;
        struct be_rx_compl_info rxcp;
        struct be_rx_page_info page_info_tbl[RX_Q_LEN];
        struct be_rx_stats stats;
        u8 rss_id;
        bool rx_post_starved;   /* Zero rx frags have been posted to BE */
} ____cacheline_aligned_in_smp;

struct be_drv_stats {
        u32 be_on_die_temperature;
        u32 eth_red_drops;
        u32 rx_drops_no_pbuf;
        u32 rx_drops_no_txpb;
        u32 rx_drops_no_erx_descr;
        u32 rx_drops_no_tpre_descr;
        u32 rx_drops_too_many_frags;
        u32 forwarded_packets;
        u32 rx_drops_mtu;
        u32 rx_crc_errors;
        u32 rx_alignment_symbol_errors;
        u32 rx_pause_frames;
        u32 rx_priority_pause_frames;
        u32 rx_control_frames;
        u32 rx_in_range_errors;
        u32 rx_out_range_errors;
        u32 rx_frame_too_long;
        u32 rx_address_mismatch_drops;
        u32 rx_dropped_too_small;
        u32 rx_dropped_too_short;
        u32 rx_dropped_header_too_small;
        u32 rx_dropped_tcp_length;
        u32 rx_dropped_runt;
        u32 rx_ip_checksum_errs;
        u32 rx_tcp_checksum_errs;
        u32 rx_udp_checksum_errs;
        u32 tx_pauseframes;
        u32 tx_priority_pauseframes;
        u32 tx_controlframes;
        u32 rxpp_fifo_overflow_drop;
        u32 rx_input_fifo_overflow_drop;
        u32 pmem_fifo_overflow_drop;
        u32 jabber_events;
};

struct be_vf_cfg {
        unsigned char mac_addr[ETH_ALEN];
        int if_handle;
        int pmac_id;
        u16 def_vid;
        u16 vlan_tag;
        u32 tx_rate;
};

enum vf_state {
        ENABLED = 0,
        ASSIGNED = 1
};

#define BE_FLAGS_LINK_STATUS_INIT               1
#define BE_FLAGS_WORKER_SCHEDULED               (1 << 3)
#define BE_UC_PMAC_COUNT                30
#define BE_VF_UC_PMAC_COUNT             2

struct phy_info {
        u8 transceiver;
        u8 autoneg;
        u8 fc_autoneg;
        u8 port_type;
        u16 phy_type;
        u16 interface_type;
        u32 misc_params;
        u16 auto_speeds_supported;
        u16 fixed_speeds_supported;
        int link_speed;
        u32 dac_cable_len;
        u32 advertising;
        u32 supported;
};

struct be_adapter {
        struct pci_dev *pdev;
        struct net_device *netdev;

        u8 __iomem *csr;
        u8 __iomem *db;         /* Door Bell */

        struct mutex mbox_lock; /* For serializing mbox cmds to BE card */
        struct be_dma_mem mbox_mem;
        /* Mbox mem is adjusted to align to 16 bytes. The allocated addr
         * is stored for freeing purpose */
        struct be_dma_mem mbox_mem_alloced;

        struct be_mcc_obj mcc_obj;
        spinlock_t mcc_lock;    /* For serializing mcc cmds to BE card */
        spinlock_t mcc_cq_lock;

        u32 num_msix_vec;
        u32 num_evt_qs;
        struct be_eq_obj eq_obj[MAX_MSIX_VECTORS];
        struct msix_entry msix_entries[MAX_MSIX_VECTORS];
        bool isr_registered;

        /* TX Rings */
        u32 num_tx_qs;
        struct be_tx_obj tx_obj[MAX_TX_QS];

        /* Rx rings */
        u32 num_rx_qs;
        struct be_rx_obj rx_obj[MAX_RX_QS];
        u32 big_page_size;      /* Compounded page size shared by rx wrbs */

        u8 eq_next_idx;
        struct be_drv_stats drv_stats;

        u16 vlans_added;
        u16 max_vlans;  /* Number of vlans supported */
        u8 vlan_tag[VLAN_N_VID];
        u8 vlan_prio_bmap;      /* Available Priority BitMap */
        u16 recommended_prio;   /* Recommended Priority */
        struct be_dma_mem rx_filter; /* Cmd DMA mem for rx-filter */

        struct be_dma_mem stats_cmd;
        /* Work queue used to perform periodic tasks like getting statistics */
        struct delayed_work work;
        u16 work_counter;

        struct delayed_work func_recovery_work;
        u32 flags;
        /* Ethtool knobs and info */
        char fw_ver[FW_VER_LEN];
        int if_handle;          /* Used to configure filtering */
        u32 *pmac_id;           /* MAC addr handle used by BE card */
        u32 beacon_state;       /* for set_phys_id */

        bool eeh_error;
        bool fw_timeout;
        bool hw_error;

        u32 port_num;
        bool promiscuous;
        u32 function_mode;
        u32 function_caps;
        u32 rx_fc;              /* Rx flow control */
        u32 tx_fc;              /* Tx flow control */
        bool stats_cmd_sent;
        u8 generation;          /* BladeEngine ASIC generation */
        u32 if_type;
        struct {
                u8 __iomem *base;       /* Door Bell */
                u32 size;
                u32 total_size;
                u64 io_addr;
        } roce_db;
        u32 num_msix_roce_vec;
        struct ocrdma_dev *ocrdma_dev;
        struct list_head entry;

        u32 flash_status;
        struct completion flash_compl;

        u32 num_vfs;            /* Number of VFs provisioned by PF driver */
        u32 dev_num_vfs;        /* Number of VFs supported by HW */
        u8 virtfn;
        struct be_vf_cfg *vf_cfg;
        bool be3_native;
        u32 sli_family;
        u8 hba_port_num;
        u16 pvid;
        struct phy_info phy;
        u8 wol_cap;
        bool wol;
        u32 max_pmac_cnt;       /* Max secondary UC MACs programmable */
        u32 uc_macs;            /* Count of secondary UC MAC programmed */
        u32 msg_enable;
        int be_get_temp_freq;
};

#define be_physfn(adapter)              (!adapter->virtfn)
#define sriov_enabled(adapter)          (adapter->num_vfs > 0)
#define sriov_want(adapter)             (adapter->dev_num_vfs && num_vfs && \
                                         be_physfn(adapter))
#define for_all_vfs(adapter, vf_cfg, i)                                 \
        for (i = 0, vf_cfg = &adapter->vf_cfg[i]; i < adapter->num_vfs; \
                i++, vf_cfg++)

/* BladeEngine Generation numbers */
#define BE_GEN2 2
#define BE_GEN3 3

#define ON                              1
#define OFF                             0
#define lancer_chip(adapter)    ((adapter->pdev->device == OC_DEVICE_ID3) || \
                                 (adapter->pdev->device == OC_DEVICE_ID4))

#define skyhawk_chip(adapter)   (adapter->pdev->device == OC_DEVICE_ID5)

#define be_roce_supported(adapter) ((adapter->if_type == SLI_INTF_TYPE_3 || \
                                adapter->sli_family == SKYHAWK_SLI_FAMILY) && \
                                (adapter->function_mode & RDMA_ENABLED))

extern const struct ethtool_ops be_ethtool_ops;

#define msix_enabled(adapter)           (adapter->num_msix_vec > 0)
#define num_irqs(adapter)               (msix_enabled(adapter) ?        \
                                                adapter->num_msix_vec : 1)
#define tx_stats(txo)                   (&(txo)->stats)
#define rx_stats(rxo)                   (&(rxo)->stats)

/* The default RXQ is the last RXQ */
#define default_rxo(adpt)               (&adpt->rx_obj[adpt->num_rx_qs - 1])

#define for_all_rx_queues(adapter, rxo, i)                              \
        for (i = 0, rxo = &adapter->rx_obj[i]; i < adapter->num_rx_qs;  \
                i++, rxo++)

/* Skip the default non-rss queue (last one) */
#define for_all_rss_queues(adapter, rxo, i)                             \
        for (i = 0, rxo = &adapter->rx_obj[i]; i < (adapter->num_rx_qs - 1);\
                i++, rxo++)

#define for_all_tx_queues(adapter, txo, i)                              \
        for (i = 0, txo = &adapter->tx_obj[i]; i < adapter->num_tx_qs;  \
                i++, txo++)

#define for_all_evt_queues(adapter, eqo, i)                             \
        for (i = 0, eqo = &adapter->eq_obj[i]; i < adapter->num_evt_qs; \
                i++, eqo++)
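
/* Illustrative use of the iterator macros (sketch only; be_post_rx_frags()
 * is assumed to be the RXQ refill helper in be_main.c):
 *
 *      struct be_rx_obj *rxo;
 *      int i;
 *
 *      for_all_rx_queues(adapter, rxo, i)
 *              be_post_rx_frags(rxo, GFP_KERNEL);
 */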

#define is_mcc_eqo(eqo)                 (eqo->idx == 0)
#define mcc_eqo(adapter)                (&adapter->eq_obj[0])

#define PAGE_SHIFT_4K           12
#define PAGE_SIZE_4K            (1 << PAGE_SHIFT_4K)

/* Returns number of pages spanned by the data starting at the given addr */
#define PAGES_4K_SPANNED(_address, size)                                \
                ((u32)((((size_t)(_address) & (PAGE_SIZE_4K - 1)) +     \
                        (size) + (PAGE_SIZE_4K - 1)) >> PAGE_SHIFT_4K))
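
/* Worked example: an 8192-byte buffer starting exactly on a 4K boundary
 * spans (0 + 8192 + 4095) >> 12 = 2 pages; the same buffer starting one
 * byte past the boundary spans (1 + 8192 + 4095) >> 12 = 3 pages.
 */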

/* Returns bit offset within a DWORD of a bitfield */
#define AMAP_BIT_OFFSET(_struct, field)                                 \
                (((size_t)&(((_struct *)0)->field))%32)

/* Returns the bit mask of the field that is NOT shifted into location. */
static inline u32 amap_mask(u32 bitsize)
{
        return (bitsize == 32 ? 0xFFFFFFFF : (1 << bitsize) - 1);
}

static inline void
amap_set(void *ptr, u32 dw_offset, u32 mask, u32 offset, u32 value)
{
        u32 *dw = (u32 *) ptr + dw_offset;
        *dw &= ~(mask << offset);
        *dw |= (mask & value) << offset;
}

#define AMAP_SET_BITS(_struct, field, ptr, val)                         \
                amap_set(ptr,                                           \
                        offsetof(_struct, field)/32,                    \
                        amap_mask(sizeof(((_struct *)0)->field)),       \
                        AMAP_BIT_OFFSET(_struct, field),                \
                        val)

static inline u32 amap_get(void *ptr, u32 dw_offset, u32 mask, u32 offset)
{
        u32 *dw = (u32 *) ptr;
        return mask & (*(dw + dw_offset) >> offset);
}

#define AMAP_GET_BITS(_struct, field, ptr)                              \
                amap_get(ptr,                                           \
                        offsetof(_struct, field)/32,                    \
                        amap_mask(sizeof(((_struct *)0)->field)),       \
                        AMAP_BIT_OFFSET(_struct, field))
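
/* The amap helpers treat the _amap layouts (one u8 member per bit) as a
 * stream of 32-bit words: offsetof()/32 selects the DWORD, AMAP_BIT_OFFSET()
 * the bit position within it, and amap_mask() the field width.
 * Illustrative sketch (struct/field names assumed to come from be_hw.h):
 *
 *      AMAP_SET_BITS(struct amap_eth_hdr_wrb, num_wrb, hdr, wrb_cnt);
 *      num = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, numfrags, compl);
 */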

#define be_dws_cpu_to_le(wrb, len)      swap_dws(wrb, len)
#define be_dws_le_to_cpu(wrb, len)      swap_dws(wrb, len)
static inline void swap_dws(void *wrb, int len)
{
#ifdef __BIG_ENDIAN
        u32 *dw = wrb;
        BUG_ON(len % 4);
        do {
                *dw = cpu_to_le32(*dw);
                dw++;
                len -= 4;
        } while (len);
#endif                          /* __BIG_ENDIAN */
}
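
/* swap_dws() byte-swaps a buffer of DWORDs in place; on little-endian hosts
 * it compiles to nothing, which is why the cpu_to_le and le_to_cpu wrappers
 * above map to the same routine.
 */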

static inline u8 is_tcp_pkt(struct sk_buff *skb)
{
        u8 val = 0;

        if (ip_hdr(skb)->version == 4)
                val = (ip_hdr(skb)->protocol == IPPROTO_TCP);
        else if (ip_hdr(skb)->version == 6)
                val = (ipv6_hdr(skb)->nexthdr == NEXTHDR_TCP);

        return val;
}

static inline u8 is_udp_pkt(struct sk_buff *skb)
{
        u8 val = 0;

        if (ip_hdr(skb)->version == 4)
                val = (ip_hdr(skb)->protocol == IPPROTO_UDP);
        else if (ip_hdr(skb)->version == 6)
                val = (ipv6_hdr(skb)->nexthdr == NEXTHDR_UDP);

        return val;
}

static inline bool is_ipv4_pkt(struct sk_buff *skb)
{
        return skb->protocol == htons(ETH_P_IP) && ip_hdr(skb)->version == 4;
}

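/* Derive a VF MAC address from the PF MAC: keep the vendor OUI (top three
 * bytes) and fill the low three bytes from a jhash of the PF address.
 */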
static inline void be_vf_eth_addr_generate(struct be_adapter *adapter, u8 *mac)
{
        u32 addr;

        addr = jhash(adapter->netdev->dev_addr, ETH_ALEN, 0);

        mac[5] = (u8)(addr & 0xFF);
        mac[4] = (u8)((addr >> 8) & 0xFF);
        mac[3] = (u8)((addr >> 16) & 0xFF);
        /* Use the OUI from the current MAC address */
        memcpy(mac, adapter->netdev->dev_addr, 3);
}

static inline bool be_multi_rxq(const struct be_adapter *adapter)
{
        return adapter->num_rx_qs > 1;
}

static inline bool be_error(struct be_adapter *adapter)
{
        return adapter->eeh_error || adapter->hw_error || adapter->fw_timeout;
}

static inline bool be_crit_error(struct be_adapter *adapter)
{
        return adapter->eeh_error || adapter->hw_error;
}

static inline void be_clear_all_error(struct be_adapter *adapter)
{
        adapter->eeh_error = false;
        adapter->hw_error = false;
        adapter->fw_timeout = false;
}

static inline bool be_is_wol_excluded(struct be_adapter *adapter)
{
        struct pci_dev *pdev = adapter->pdev;

        if (!be_physfn(adapter))
                return true;

        switch (pdev->subsystem_device) {
        case OC_SUBSYS_DEVICE_ID1:
        case OC_SUBSYS_DEVICE_ID2:
        case OC_SUBSYS_DEVICE_ID3:
        case OC_SUBSYS_DEVICE_ID4:
                return true;
        default:
                return false;
        }
}

static inline bool be_type_2_3(struct be_adapter *adapter)
{
        return (adapter->if_type == SLI_INTF_TYPE_2 ||
                adapter->if_type == SLI_INTF_TYPE_3);
}

extern void be_cq_notify(struct be_adapter *adapter, u16 qid, bool arm,
                u16 num_popped);
extern void be_link_status_update(struct be_adapter *adapter, u8 link_status);
extern void be_parse_stats(struct be_adapter *adapter);
extern int be_load_fw(struct be_adapter *adapter, u8 *func);
extern bool be_is_wol_supported(struct be_adapter *adapter);
extern bool be_pause_supported(struct be_adapter *adapter);
extern u32 be_get_fw_log_level(struct be_adapter *adapter);

/*
 * Internal functions to initialize and clean up the RoCE device.
 */
extern void be_roce_dev_add(struct be_adapter *);
extern void be_roce_dev_remove(struct be_adapter *);

/*
 * Internal functions to open and close the RoCE device during ifup/ifdown.
 */
extern void be_roce_dev_open(struct be_adapter *);
extern void be_roce_dev_close(struct be_adapter *);

#endif                          /* BE_H */