linux/drivers/net/ethernet/qlogic/qede/qede.h
/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause) */
/* QLogic qede NIC Driver
 * Copyright (c) 2015-2017  QLogic Corporation
 * Copyright (c) 2019-2020 Marvell International Ltd.
 */

#ifndef _QEDE_H_
#define _QEDE_H_
#include <linux/compiler.h>
#include <linux/version.h>
#include <linux/workqueue.h>
#include <linux/netdevice.h>
#include <linux/interrupt.h>
#include <linux/bitmap.h>
#include <linux/kernel.h>
#include <linux/mutex.h>
#include <linux/bpf.h>
#include <net/xdp.h>
#include <linux/qed/qede_rdma.h>
#include <linux/io.h>
#ifdef CONFIG_RFS_ACCEL
#include <linux/cpu_rmap.h>
#endif
#include <linux/qed/common_hsi.h>
#include <linux/qed/eth_common.h>
#include <linux/qed/qed_if.h>
#include <linux/qed/qed_chain.h>
#include <linux/qed/qed_eth_if.h>

#include <net/pkt_cls.h>
#include <net/tc_act/tc_gact.h>

#define DRV_MODULE_SYM          qede

struct qede_stats_common {
        u64 no_buff_discards;
        u64 packet_too_big_discard;
        u64 ttl0_discard;
        u64 rx_ucast_bytes;
        u64 rx_mcast_bytes;
        u64 rx_bcast_bytes;
        u64 rx_ucast_pkts;
        u64 rx_mcast_pkts;
        u64 rx_bcast_pkts;
        u64 mftag_filter_discards;
        u64 mac_filter_discards;
        u64 gft_filter_drop;
        u64 tx_ucast_bytes;
        u64 tx_mcast_bytes;
        u64 tx_bcast_bytes;
        u64 tx_ucast_pkts;
        u64 tx_mcast_pkts;
        u64 tx_bcast_pkts;
        u64 tx_err_drop_pkts;
        u64 coalesced_pkts;
        u64 coalesced_events;
        u64 coalesced_aborts_num;
        u64 non_coalesced_pkts;
        u64 coalesced_bytes;
        u64 link_change_count;
        u64 ptp_skip_txts;

        /* port */
        u64 rx_64_byte_packets;
        u64 rx_65_to_127_byte_packets;
        u64 rx_128_to_255_byte_packets;
        u64 rx_256_to_511_byte_packets;
        u64 rx_512_to_1023_byte_packets;
        u64 rx_1024_to_1518_byte_packets;
        u64 rx_crc_errors;
        u64 rx_mac_crtl_frames;
        u64 rx_pause_frames;
        u64 rx_pfc_frames;
        u64 rx_align_errors;
        u64 rx_carrier_errors;
        u64 rx_oversize_packets;
        u64 rx_jabbers;
        u64 rx_undersize_packets;
        u64 rx_fragments;
        u64 tx_64_byte_packets;
        u64 tx_65_to_127_byte_packets;
        u64 tx_128_to_255_byte_packets;
        u64 tx_256_to_511_byte_packets;
        u64 tx_512_to_1023_byte_packets;
        u64 tx_1024_to_1518_byte_packets;
        u64 tx_pause_frames;
        u64 tx_pfc_frames;
        u64 brb_truncates;
        u64 brb_discards;
        u64 tx_mac_ctrl_frames;
};

struct qede_stats_bb {
        u64 rx_1519_to_1522_byte_packets;
        u64 rx_1519_to_2047_byte_packets;
        u64 rx_2048_to_4095_byte_packets;
        u64 rx_4096_to_9216_byte_packets;
        u64 rx_9217_to_16383_byte_packets;
        u64 tx_1519_to_2047_byte_packets;
        u64 tx_2048_to_4095_byte_packets;
        u64 tx_4096_to_9216_byte_packets;
        u64 tx_9217_to_16383_byte_packets;
        u64 tx_lpi_entry_count;
        u64 tx_total_collisions;
};

struct qede_stats_ah {
        u64 rx_1519_to_max_byte_packets;
        u64 tx_1519_to_max_byte_packets;
};

struct qede_stats {
        struct qede_stats_common common;

        union {
                struct qede_stats_bb bb;
                struct qede_stats_ah ah;
        };
};
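
/* Only the union member matching the device family is valid; an
 * illustrative (non-driver) access pattern, using the QEDE_IS_BB()/
 * QEDE_IS_AH() helpers defined further below:
 *
 *	u64 big_rx = QEDE_IS_BB(edev) ?
 *		     edev->stats.bb.rx_1519_to_2047_byte_packets :
 *		     edev->stats.ah.rx_1519_to_max_byte_packets;
 */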

struct qede_vlan {
        struct list_head list;
        u16 vid;
        bool configured;
};

struct qede_rdma_dev {
        struct qedr_dev *qedr_dev;
        struct list_head entry;
        struct list_head rdma_event_list;
        struct workqueue_struct *rdma_wq;
        struct kref refcnt;
        struct completion event_comp;
        bool exp_recovery;
};

struct qede_ptp;

#define QEDE_RFS_MAX_FLTR       256

enum qede_flags_bit {
        QEDE_FLAGS_IS_VF = 0,
        QEDE_FLAGS_LINK_REQUESTED,
        QEDE_FLAGS_PTP_TX_IN_PRORGESS,
        QEDE_FLAGS_TX_TIMESTAMPING_EN
};

#define QEDE_DUMP_MAX_ARGS 4
enum qede_dump_cmd {
        QEDE_DUMP_CMD_NONE = 0,
        QEDE_DUMP_CMD_NVM_CFG,
        QEDE_DUMP_CMD_GRCDUMP,
        QEDE_DUMP_CMD_MAX
};

struct qede_dump_info {
        enum qede_dump_cmd cmd;
        u8 num_args;
        u32 args[QEDE_DUMP_MAX_ARGS];
};

struct qede_coalesce {
        bool isvalid;
        u16 rxc;
        u16 txc;
};

struct qede_dev {
        struct qed_dev                  *cdev;
        struct net_device               *ndev;
        struct pci_dev                  *pdev;
        struct devlink                  *devlink;

        u32                             dp_module;
        u8                              dp_level;

        unsigned long                   flags;
#define IS_VF(edev)                     test_bit(QEDE_FLAGS_IS_VF, \
                                                 &(edev)->flags)

        const struct qed_eth_ops        *ops;
        struct qede_ptp                 *ptp;
        u64                             ptp_skip_txts;

        struct qed_dev_eth_info         dev_info;
#define QEDE_MAX_RSS_CNT(edev)          ((edev)->dev_info.num_queues)
#define QEDE_MAX_TSS_CNT(edev)          ((edev)->dev_info.num_queues)
#define QEDE_IS_BB(edev) \
        ((edev)->dev_info.common.dev_type == QED_DEV_TYPE_BB)
#define QEDE_IS_AH(edev) \
        ((edev)->dev_info.common.dev_type == QED_DEV_TYPE_AH)

        struct qede_fastpath            *fp_array;
        struct qede_coalesce            *coal_entry;
        u8                              req_num_tx;
        u8                              fp_num_tx;
        u8                              req_num_rx;
        u8                              fp_num_rx;
        u16                             req_queues;
        u16                             num_queues;
        u16                             total_xdp_queues;

#define QEDE_QUEUE_CNT(edev)            ((edev)->num_queues)
#define QEDE_RSS_COUNT(edev)            ((edev)->num_queues - (edev)->fp_num_tx)
#define QEDE_RX_QUEUE_IDX(edev, i)      (i)
#define QEDE_TSS_COUNT(edev)            ((edev)->num_queues - (edev)->fp_num_rx)
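
        /* Fastpaths may be shared (RX + TX) or dedicated; with
         * hypothetical numbers num_queues = 8, fp_num_tx = 2 TX-only
         * and fp_num_rx = 2 RX-only fastpaths, QEDE_RSS_COUNT() is
         * 8 - 2 = 6 RX-capable queues and QEDE_TSS_COUNT() is
         * 8 - 2 = 6 TX-capable queues.
         */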

        struct qed_int_info             int_info;

        /* Smaller private variant of the RTNL lock */
        struct mutex                    qede_lock;
        u32                             state; /* Protected by qede_lock */
        u16                             rx_buf_size;
        u32                             rx_copybreak;

        /* L2 header size + 2*VLANs (8 bytes) + LLC SNAP (8 bytes) */
#define ETH_OVERHEAD                    (ETH_HLEN + 8 + 8)
        /* Max supported alignment is 256 (shift of 8); a minimal
         * alignment shift of 6 is optimal for 57xxx HW performance.
         */
#define QEDE_RX_ALIGN_SHIFT             max(6, min(8, L1_CACHE_SHIFT))
        /* We assume build_skb() uses sizeof(struct skb_shared_info) bytes
         * at the end of skb->data, to avoid wasting a full cache line.
         * This reduces memory use (skb->truesize).
         */
#define QEDE_FW_RX_ALIGN_END                                    \
        max_t(u64, 1UL << QEDE_RX_ALIGN_SHIFT,                  \
              SKB_DATA_ALIGN(sizeof(struct skb_shared_info)))
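
        /* Illustrative arithmetic, assuming a typical x86_64 build where
         * L1_CACHE_SHIFT = 6: QEDE_RX_ALIGN_SHIFT = max(6, min(8, 6)) = 6,
         * so 1UL << QEDE_RX_ALIGN_SHIFT = 64 bytes, while
         * SKB_DATA_ALIGN(sizeof(struct skb_shared_info)) is several cache
         * lines (roughly 320 bytes on common configs, version-dependent);
         * the shared-info term then dominates QEDE_FW_RX_ALIGN_END.
         */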

        struct qede_stats               stats;

        /* Bitfield to track initialized RSS params */
        u32                             rss_params_inited;
#define QEDE_RSS_INDIR_INITED           BIT(0)
#define QEDE_RSS_KEY_INITED             BIT(1)
#define QEDE_RSS_CAPS_INITED            BIT(2)

        u16                             rss_ind_table[128];
        u32                             rss_key[10];
        u8                              rss_caps;

        /* Both must be a power of two */
        u16                             q_num_rx_buffers;
        u16                             q_num_tx_buffers;

        bool                            gro_disable;

        struct list_head                vlan_list;
        u16                             configured_vlans;
        u16                             non_configured_vlans;
        bool                            accept_any_vlan;

        struct delayed_work             sp_task;
        unsigned long                   sp_flags;
        u16                             vxlan_dst_port;
        u16                             geneve_dst_port;

        struct qede_arfs                *arfs;
        bool                            wol_enabled;

        struct qede_rdma_dev            rdma_info;

        struct bpf_prog                 *xdp_prog;

        enum qed_hw_err_type            last_err_type;
        unsigned long                   err_flags;
#define QEDE_ERR_IS_HANDLED             31
#define QEDE_ERR_ATTN_CLR_EN            0
#define QEDE_ERR_GET_DBG_INFO           1
#define QEDE_ERR_IS_RECOVERABLE         2
#define QEDE_ERR_WARN                   3

        struct qede_dump_info           dump_info;
};

enum QEDE_STATE {
        QEDE_STATE_CLOSED,
        QEDE_STATE_OPEN,
        QEDE_STATE_RECOVERY,
};

#define HILO_U64(hi, lo)                ((((u64)(hi)) << 32) + (lo))
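
/* E.g. HILO_U64(0x1, 0x2) == 0x100000002ULL; used below to rebuild
 * 64-bit DMA addresses from the hi/lo halves stored in a BD.
 */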

#define MAX_NUM_TC      8
#define MAX_NUM_PRI     8

/* The driver supports the new build_skb() API:
 * the RX ring buffer contains a pointer to kmalloc() data only,
 * and SKBs are built only after the frame has been DMA-ed.
 */
struct sw_rx_data {
        struct page *data;
        dma_addr_t mapping;
        unsigned int page_offset;
};

enum qede_agg_state {
        QEDE_AGG_STATE_NONE  = 0,
        QEDE_AGG_STATE_START = 1,
        QEDE_AGG_STATE_ERROR = 2
};

struct qede_agg_info {
        /* rx_buf is a data buffer that can be placed on / consumed from the
         * rx bd chain. It has two purposes: we will preallocate the data
         * buffer for each aggregation when we open the interface and place
         * this buffer on the rx-bd-ring when we receive TPA_START. We don't
         * want to be in a state where allocation fails, as we can't reuse
         * the consumer buffer in the rx-chain since FW may still be writing
         * to it (the header needs to be modified for TPA).
         * The second purpose is to keep a pointer to the bd buffer during
         * aggregation.
         */
        struct sw_rx_data buffer;
        struct sk_buff *skb;

        /* Fields we need to keep from the TPA start cookie until termination */
        u16 vlan_tag;

        bool tpa_start_fail;
        u8 state;
        u8 frag_id;

        u8 tunnel_type;
};

struct qede_rx_queue {
        __le16 *hw_cons_ptr;
        void __iomem *hw_rxq_prod_addr;

        /* Required for the allocation of replacement buffers */
        struct device *dev;

        struct bpf_prog *xdp_prog;

        u16 sw_rx_cons;
        u16 sw_rx_prod;

        u16 filled_buffers;
        u8 data_direction;
        u8 rxq_id;

        /* Used once per NAPI run */
        u16 num_rx_buffers;

        u16 rx_headroom;

        u32 rx_buf_size;
        u32 rx_buf_seg_size;

        struct sw_rx_data *sw_rx_ring;
        struct qed_chain rx_bd_ring;
        struct qed_chain rx_comp_ring ____cacheline_aligned;

        /* GRO */
        struct qede_agg_info tpa_info[ETH_TPA_MAX_AGGS_NUM];

        /* Used once per NAPI run */
        u64 rcv_pkts;

        u64 rx_hw_errors;
        u64 rx_alloc_errors;
        u64 rx_ip_frags;

        u64 xdp_no_pass;

        void *handle;
        struct xdp_rxq_info xdp_rxq;
};

union db_prod {
        struct eth_db_data data;
        u32             raw;
};

struct sw_tx_bd {
        struct sk_buff *skb;
        u8 flags;
/* Set on the first BD descriptor when there is a split BD */
#define QEDE_TSO_SPLIT_BD               BIT(0)
};

struct sw_tx_xdp {
        struct page                     *page;
        struct xdp_frame                *xdpf;
        dma_addr_t                      mapping;
};

struct qede_tx_queue {
        u8                              is_xdp;
        bool                            is_legacy;
        u16                             sw_tx_cons;
        u16                             sw_tx_prod;
        u16                             num_tx_buffers; /* Slowpath only */

        u64                             xmit_pkts;
        u64                             stopped_cnt;
        u64                             tx_mem_alloc_err;

        __le16                          *hw_cons_ptr;

        /* Needed for the mapping of packets */
        struct device                   *dev;

        void __iomem                    *doorbell_addr;
        union db_prod                   tx_db;

        /* Spinlock for XDP queues in case of XDP_REDIRECT */
        spinlock_t                      xdp_tx_lock;

        int                             index; /* Slowpath only */
#define QEDE_TXQ_XDP_TO_IDX(edev, txq)  ((txq)->index - \
                                         QEDE_MAX_TSS_CNT(edev))
#define QEDE_TXQ_IDX_TO_XDP(edev, idx)  ((idx) + QEDE_MAX_TSS_CNT(edev))
#define QEDE_NDEV_TXQ_ID_TO_FP_ID(edev, idx)    ((edev)->fp_num_rx + \
                                                 ((idx) % QEDE_TSS_COUNT(edev)))
#define QEDE_NDEV_TXQ_ID_TO_TXQ_COS(edev, idx)  ((idx) / QEDE_TSS_COUNT(edev))
#define QEDE_TXQ_TO_NDEV_TXQ_ID(edev, txq)      ((QEDE_TSS_COUNT(edev) * \
                                                 (txq)->cos) + (txq)->index)
#define QEDE_NDEV_TXQ_ID_TO_TXQ(edev, idx)      \
        (&((edev)->fp_array[QEDE_NDEV_TXQ_ID_TO_FP_ID(edev, idx)].txq \
        [QEDE_NDEV_TXQ_ID_TO_TXQ_COS(edev, idx)]))
#define QEDE_FP_TC0_TXQ(fp)             (&((fp)->txq[0]))
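
        /* Worked example (hypothetical numbers): with QEDE_TSS_COUNT() = 4
         * and fp_num_rx = 2, ndev TX queue id 6 maps to CoS 6 / 4 = 1 on
         * fastpath 2 + (6 % 4) = 4; the reverse mapping for that queue
         * (index 2, cos 1) yields 4 * 1 + 2 = 6 again.
         */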

        /* Regular Tx requires an skb + metadata for release purposes,
         * while XDP requires the pages and the mapped address.
         */
        union {
                struct sw_tx_bd         *skbs;
                struct sw_tx_xdp        *xdp;
        }                               sw_tx_ring;

        struct qed_chain                tx_pbl;

        /* Slowpath; should be kept at the end [unless it fills missing padding] */
        void                            *handle;
        u16                             cos;
        u16                             ndev_txq_id;
};

#define BD_UNMAP_ADDR(bd)               HILO_U64(le32_to_cpu((bd)->addr.hi), \
                                                 le32_to_cpu((bd)->addr.lo))
#define BD_SET_UNMAP_ADDR_LEN(bd, maddr, len)                           \
        do {                                                            \
                (bd)->addr.hi = cpu_to_le32(upper_32_bits(maddr));      \
                (bd)->addr.lo = cpu_to_le32(lower_32_bits(maddr));      \
                (bd)->nbytes = cpu_to_le16(len);                        \
        } while (0)
#define BD_UNMAP_LEN(bd)                (le16_to_cpu((bd)->nbytes))
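
/* Illustrative sketch (not driver code) of how these macros pair up,
 * assuming hypothetical locals "txq", "skb" and "first_bd":
 *
 *	dma_addr_t mapping = dma_map_single(txq->dev, skb->data,
 *					    skb_headlen(skb), DMA_TO_DEVICE);
 *	if (unlikely(dma_mapping_error(txq->dev, mapping)))
 *		return -ENOMEM;
 *	BD_SET_UNMAP_ADDR_LEN(first_bd, mapping, skb_headlen(skb));
 *	...
 * and on completion, unmapping with the values read back from the BD:
 *
 *	dma_unmap_single(txq->dev, BD_UNMAP_ADDR(first_bd),
 *			 BD_UNMAP_LEN(first_bd), DMA_TO_DEVICE);
 */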

struct qede_fastpath {
        struct qede_dev                 *edev;

        u8                              type;
#define QEDE_FASTPATH_TX                BIT(0)
#define QEDE_FASTPATH_RX                BIT(1)
#define QEDE_FASTPATH_XDP               BIT(2)
#define QEDE_FASTPATH_COMBINED          (QEDE_FASTPATH_TX | QEDE_FASTPATH_RX)

        u8                              id;

        u8                              xdp_xmit;
#define QEDE_XDP_TX                     BIT(0)
#define QEDE_XDP_REDIRECT               BIT(1)

        struct napi_struct              napi;
        struct qed_sb_info              *sb_info;
        struct qede_rx_queue            *rxq;
        struct qede_tx_queue            *txq;
        struct qede_tx_queue            *xdp_tx;

        char                            name[IFNAMSIZ + 8];
};
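
/* A fastpath may host any combination of RX/TX/XDP rings, so datapath
 * code is expected to test fp->type before touching a ring; e.g.
 * (illustrative, using qede_has_rx_work() declared below):
 *
 *	if ((fp->type & QEDE_FASTPATH_RX) && qede_has_rx_work(fp->rxq))
 *		... poll the RX completion ring ...
 */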

/* Debug print definitions */
#define DP_NAME(edev)                   netdev_name((edev)->ndev)

#define XMIT_PLAIN                      0
#define XMIT_L4_CSUM                    BIT(0)
#define XMIT_LSO                        BIT(1)
#define XMIT_ENC                        BIT(2)
#define XMIT_ENC_GSO_L4_CSUM            BIT(3)

#define QEDE_CSUM_ERROR                 BIT(0)
#define QEDE_CSUM_UNNECESSARY           BIT(1)
#define QEDE_TUNN_CSUM_UNNECESSARY      BIT(2)

#define QEDE_SP_RECOVERY                0
#define QEDE_SP_RX_MODE                 1
#define QEDE_SP_RSVD1                   2
#define QEDE_SP_RSVD2                   3
#define QEDE_SP_HW_ERR                  4
#define QEDE_SP_ARFS_CONFIG             5
#define QEDE_SP_AER                     7
#define QEDE_SP_DISABLE                 8

#ifdef CONFIG_RFS_ACCEL
int qede_rx_flow_steer(struct net_device *dev, const struct sk_buff *skb,
                       u16 rxq_index, u32 flow_id);
#define QEDE_SP_TASK_POLL_DELAY (5 * HZ)
#endif

void qede_process_arfs_filters(struct qede_dev *edev, bool free_fltr);
void qede_poll_for_freeing_arfs_filters(struct qede_dev *edev);
void qede_arfs_filter_op(void *dev, void *filter, u8 fw_rc);
void qede_free_arfs(struct qede_dev *edev);
int qede_alloc_arfs(struct qede_dev *edev);
int qede_add_cls_rule(struct qede_dev *edev, struct ethtool_rxnfc *info);
int qede_delete_flow_filter(struct qede_dev *edev, u64 cookie);
int qede_get_cls_rule_entry(struct qede_dev *edev, struct ethtool_rxnfc *cmd);
int qede_get_cls_rule_all(struct qede_dev *edev, struct ethtool_rxnfc *info,
                          u32 *rule_locs);
int qede_get_arfs_filter_count(struct qede_dev *edev);

struct qede_reload_args {
        void (*func)(struct qede_dev *edev, struct qede_reload_args *args);
        union {
                netdev_features_t features;
                struct bpf_prog *new_prog;
                u16 mtu;
        } u;
};
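
/* Minimal sketch of the reload pattern, assuming a hypothetical callback
 * qede_update_mtu_example(); qede_reload() (declared below) is expected
 * to tear the datapath down, invoke args->func, and bring it back up:
 *
 *	static void qede_update_mtu_example(struct qede_dev *edev,
 *					    struct qede_reload_args *args)
 *	{
 *		edev->ndev->mtu = args->u.mtu;
 *	}
 *
 *	struct qede_reload_args args = {
 *		.func = qede_update_mtu_example,
 *		.u.mtu = new_mtu,
 *	};
 *	qede_reload(edev, &args, false);
 */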

/* Datapath function declarations */
netdev_tx_t qede_start_xmit(struct sk_buff *skb, struct net_device *ndev);
int qede_xdp_transmit(struct net_device *dev, int n_frames,
                      struct xdp_frame **frames, u32 flags);
u16 qede_select_queue(struct net_device *dev, struct sk_buff *skb,
                      struct net_device *sb_dev);
netdev_features_t qede_features_check(struct sk_buff *skb,
                                      struct net_device *dev,
                                      netdev_features_t features);
int qede_alloc_rx_buffer(struct qede_rx_queue *rxq, bool allow_lazy);
int qede_free_tx_pkt(struct qede_dev *edev,
                     struct qede_tx_queue *txq, int *len);
int qede_poll(struct napi_struct *napi, int budget);
irqreturn_t qede_msix_fp_int(int irq, void *fp_cookie);

/* Filtering function declarations */
void qede_force_mac(void *dev, u8 *mac, bool forced);
void qede_udp_ports_update(void *dev, u16 vxlan_port, u16 geneve_port);
int qede_set_mac_addr(struct net_device *ndev, void *p);

int qede_vlan_rx_add_vid(struct net_device *dev, __be16 proto, u16 vid);
int qede_vlan_rx_kill_vid(struct net_device *dev, __be16 proto, u16 vid);
void qede_vlan_mark_nonconfigured(struct qede_dev *edev);
int qede_configure_vlan_filters(struct qede_dev *edev);

netdev_features_t qede_fix_features(struct net_device *dev,
                                    netdev_features_t features);
int qede_set_features(struct net_device *dev, netdev_features_t features);
void qede_set_rx_mode(struct net_device *ndev);
void qede_config_rx_mode(struct net_device *ndev);
void qede_fill_rss_params(struct qede_dev *edev,
                          struct qed_update_vport_rss_params *rss, u8 *update);

void qede_udp_tunnel_add(struct net_device *dev, struct udp_tunnel_info *ti);
void qede_udp_tunnel_del(struct net_device *dev, struct udp_tunnel_info *ti);

int qede_xdp(struct net_device *dev, struct netdev_bpf *xdp);

#ifdef CONFIG_DCB
void qede_set_dcbnl_ops(struct net_device *ndev);
#endif

void qede_config_debug(uint debug, u32 *p_dp_module, u8 *p_dp_level);
void qede_set_ethtool_ops(struct net_device *netdev);
void qede_set_udp_tunnels(struct qede_dev *edev);
void qede_reload(struct qede_dev *edev,
                 struct qede_reload_args *args, bool is_locked);
int qede_change_mtu(struct net_device *dev, int new_mtu);
void qede_fill_by_demand_stats(struct qede_dev *edev);
void __qede_lock(struct qede_dev *edev);
void __qede_unlock(struct qede_dev *edev);
bool qede_has_rx_work(struct qede_rx_queue *rxq);
int qede_txq_has_work(struct qede_tx_queue *txq);
void qede_recycle_rx_bd_ring(struct qede_rx_queue *rxq, u8 count);
void qede_update_rx_prod(struct qede_dev *edev, struct qede_rx_queue *rxq);
int qede_add_tc_flower_fltr(struct qede_dev *edev, __be16 proto,
                            struct flow_cls_offload *f);

void qede_forced_speed_maps_init(void);
int qede_set_coalesce(struct net_device *dev, struct ethtool_coalesce *coal,
                      struct kernel_ethtool_coalesce *kernel_coal,
                      struct netlink_ext_ack *extack);
int qede_set_per_coalesce(struct net_device *dev, u32 queue,
                          struct ethtool_coalesce *coal);

#define RX_RING_SIZE_POW        13
#define RX_RING_SIZE            ((u16)BIT(RX_RING_SIZE_POW))
#define NUM_RX_BDS_MAX          (RX_RING_SIZE - 1)
#define NUM_RX_BDS_MIN          128
#define NUM_RX_BDS_KDUMP_MIN    63
#define NUM_RX_BDS_DEF          ((u16)BIT(10) - 1)

#define TX_RING_SIZE_POW        13
#define TX_RING_SIZE            ((u16)BIT(TX_RING_SIZE_POW))
#define NUM_TX_BDS_MAX          (TX_RING_SIZE - 1)
#define NUM_TX_BDS_MIN          128
#define NUM_TX_BDS_KDUMP_MIN    63
#define NUM_TX_BDS_DEF          NUM_TX_BDS_MAX
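
/* With RX_RING_SIZE_POW/TX_RING_SIZE_POW = 13, each ring holds at most
 * BIT(13) = 8192 BDs; RX defaults to NUM_RX_BDS_DEF = BIT(10) - 1 = 1023
 * BDs, while TX defaults to the 8191-BD maximum.
 */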

#define QEDE_MIN_PKT_LEN                64
#define QEDE_RX_HDR_SIZE                256
#define QEDE_MAX_JUMBO_PACKET_SIZE      9600
#define for_each_queue(i) for (i = 0; i < edev->num_queues; i++)
#define for_each_cos_in_txq(edev, var) \
        for ((var) = 0; (var) < (edev)->dev_info.num_tc; (var)++)

#endif /* _QEDE_H_ */