/* linux/drivers/net/ethernet/mellanox/mlx5/core/en.h */
   1/*
   2 * Copyright (c) 2015-2016, Mellanox Technologies. All rights reserved.
   3 *
   4 * This software is available to you under a choice of one of two
   5 * licenses.  You may choose to be licensed under the terms of the GNU
   6 * General Public License (GPL) Version 2, available from the file
   7 * COPYING in the main directory of this source tree, or the
   8 * OpenIB.org BSD license below:
   9 *
  10 *     Redistribution and use in source and binary forms, with or
  11 *     without modification, are permitted provided that the following
  12 *     conditions are met:
  13 *
  14 *      - Redistributions of source code must retain the above
  15 *        copyright notice, this list of conditions and the following
  16 *        disclaimer.
  17 *
  18 *      - Redistributions in binary form must reproduce the above
  19 *        copyright notice, this list of conditions and the following
  20 *        disclaimer in the documentation and/or other materials
  21 *        provided with the distribution.
  22 *
  23 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
  24 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
  25 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
  26 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
  27 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
  28 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
  29 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
  30 * SOFTWARE.
  31 */
  32#ifndef __MLX5_EN_H__
  33#define __MLX5_EN_H__
  34
  35#include <linux/if_vlan.h>
  36#include <linux/etherdevice.h>
  37#include <linux/timecounter.h>
  38#include <linux/net_tstamp.h>
  39#include <linux/ptp_clock_kernel.h>
  40#include <linux/crash_dump.h>
  41#include <linux/mlx5/driver.h>
  42#include <linux/mlx5/qp.h>
  43#include <linux/mlx5/cq.h>
  44#include <linux/mlx5/port.h>
  45#include <linux/mlx5/vport.h>
  46#include <linux/mlx5/transobj.h>
  47#include <linux/mlx5/fs.h>
  48#include <linux/rhashtable.h>
  49#include <net/switchdev.h>
  50#include <net/xdp.h>
  51#include <linux/net_dim.h>
  52#include "wq.h"
  53#include "mlx5_core.h"
  54#include "en_stats.h"
  55
/* Shorthand for setting fields in a create_flow_group_in command blob */
#define MLX5_SET_CFG(p, f, v) MLX5_SET(create_flow_group_in, p, f, v)

/* Fixed per-packet HW overhead: Ethernet header + one VLAN tag + FCS */
#define MLX5E_ETH_HARD_MTU (ETH_HLEN + VLAN_HLEN + ETH_FCS_LEN)

/* Convert between the MTU seen by software and the HW MTU, which
 * additionally accounts for params->hard_mtu bytes of overhead.
 */
#define MLX5E_HW2SW_MTU(params, hwmtu) ((hwmtu) - ((params)->hard_mtu))
#define MLX5E_SW2HW_MTU(params, swmtu) ((swmtu) + ((params)->hard_mtu))

#define MLX5E_MAX_DSCP          64
#define MLX5E_MAX_NUM_TC        8

#define MLX5_RX_HEADROOM NET_SKB_PAD
/* Total buffer size for a linear skb fragment of payload 'len',
 * including the skb_shared_info tail.
 */
#define MLX5_SKB_FRAG_SZ(len)	(SKB_DATA_ALIGN(len) +	\
				 SKB_DATA_ALIGN(sizeof(struct skb_shared_info)))

/* Multi-packet WQE (striding RQ) stride geometry, in log2 bytes.
 * HW requires at least 64B strides (128B on 128B-cache-line devices).
 */
#define MLX5_MPWRQ_MIN_LOG_STRIDE_SZ(mdev) \
	(6 + MLX5_CAP_GEN(mdev, cache_line_128byte)) /* HW restriction */
#define MLX5_MPWRQ_LOG_STRIDE_SZ(mdev, req) \
	max_t(u32, MLX5_MPWRQ_MIN_LOG_STRIDE_SZ(mdev), req)
#define MLX5_MPWRQ_DEF_LOG_STRIDE_SZ(mdev)       MLX5_MPWRQ_LOG_STRIDE_SZ(mdev, 6)
#define MLX5_MPWRQ_CQE_CMPRS_LOG_STRIDE_SZ(mdev) MLX5_MPWRQ_LOG_STRIDE_SZ(mdev, 8)
/* Pick the stride size depending on whether CQE compression is enabled */
#define MLX5E_MPWQE_STRIDE_SZ(mdev, cqe_cmprs) \
	(cqe_cmprs ? MLX5_MPWRQ_CQE_CMPRS_LOG_STRIDE_SZ(mdev) : \
	MLX5_MPWRQ_DEF_LOG_STRIDE_SZ(mdev))

/* Each multi-packet WQE spans 2^18 bytes, i.e. multiple pages */
#define MLX5_MPWRQ_LOG_WQE_SZ			18
#define MLX5_MPWRQ_WQE_PAGE_ORDER  (MLX5_MPWRQ_LOG_WQE_SZ - PAGE_SHIFT > 0 ? \
				    MLX5_MPWRQ_LOG_WQE_SZ - PAGE_SHIFT : 0)
#define MLX5_MPWRQ_PAGES_PER_WQE		BIT(MLX5_MPWRQ_WQE_PAGE_ORDER)

/* MTT entries are programmed in octwords: 8 MTTs aligned, 2 per octword */
#define MLX5_MTT_OCTW(npages) (ALIGN(npages, 8) / 2)
#define MLX5E_REQUIRED_WQE_MTTS		(ALIGN(MLX5_MPWRQ_PAGES_PER_WQE, 8))
#define MLX5E_LOG_ALIGNED_MPWQE_PPW	(ilog2(MLX5E_REQUIRED_WQE_MTTS))
  88#define MLX5E_REQUIRED_MTTS(wqes)       (wqes * MLX5E_REQUIRED_WQE_MTTS)
/* Bound the RQ size so that the MTT octword count still fits in 16 bits */
#define MLX5E_MAX_RQ_NUM_MTTS	\
	((1 << 16) * 2) /* So that MLX5_MTT_OCTW(num_mtts) fits into u16 */
#define MLX5E_ORDER2_MAX_PACKET_MTU (order_base_2(10 * 1024))
#define MLX5E_PARAMS_MAXIMUM_LOG_RQ_SIZE_MPW	\
		(ilog2(MLX5E_MAX_RQ_NUM_MTTS / MLX5E_REQUIRED_WQE_MTTS))
#define MLX5E_LOG_MAX_RQ_NUM_PACKETS_MPW \
	(MLX5E_PARAMS_MAXIMUM_LOG_RQ_SIZE_MPW + \
	 (MLX5_MPWRQ_LOG_WQE_SZ - MLX5E_ORDER2_MAX_PACKET_MTU))

/* SQ/RQ ring size limits (log2 of the number of entries) */
#define MLX5E_PARAMS_MINIMUM_LOG_SQ_SIZE                0x6
#define MLX5E_PARAMS_DEFAULT_LOG_SQ_SIZE                0xa
#define MLX5E_PARAMS_MAXIMUM_LOG_SQ_SIZE                0xd

#define MLX5E_PARAMS_MINIMUM_LOG_RQ_SIZE                0x1
#define MLX5E_PARAMS_DEFAULT_LOG_RQ_SIZE                0xa
#define MLX5E_PARAMS_MAXIMUM_LOG_RQ_SIZE min_t(u8, 0xd, \
					       MLX5E_LOG_MAX_RQ_NUM_PACKETS_MPW)

#define MLX5E_PARAMS_MINIMUM_LOG_RQ_SIZE_MPW            0x2

#define MLX5_MPWRQ_SMALL_PACKET_THRESHOLD	(256)

/* LRO defaults */
#define MLX5E_PARAMS_DEFAULT_LRO_WQE_SZ                 (64 * 1024)
#define MLX5E_DEFAULT_LRO_TIMEOUT                       32
#define MLX5E_LRO_TIMEOUT_ARR_SIZE                      4

/* Default CQ interrupt moderation settings */
#define MLX5E_PARAMS_DEFAULT_RX_CQ_MODERATION_USEC      0x10
#define MLX5E_PARAMS_DEFAULT_RX_CQ_MODERATION_USEC_FROM_CQE 0x3
#define MLX5E_PARAMS_DEFAULT_RX_CQ_MODERATION_PKTS      0x20
#define MLX5E_PARAMS_DEFAULT_TX_CQ_MODERATION_USEC      0x10
#define MLX5E_PARAMS_DEFAULT_TX_CQ_MODERATION_USEC_FROM_CQE 0x10
#define MLX5E_PARAMS_DEFAULT_TX_CQ_MODERATION_PKTS      0x20
#define MLX5E_PARAMS_DEFAULT_MIN_RX_WQES                0x80
#define MLX5E_PARAMS_DEFAULT_MIN_RX_WQES_MPW            0x2

/* RSS indirection table size, and channel limits derived from it */
#define MLX5E_LOG_INDIR_RQT_SIZE       0x7
#define MLX5E_INDIR_RQT_SIZE           BIT(MLX5E_LOG_INDIR_RQT_SIZE)
#define MLX5E_MIN_NUM_CHANNELS         0x1
#define MLX5E_MAX_NUM_CHANNELS         (MLX5E_INDIR_RQT_SIZE >> 1)
#define MLX5E_MAX_NUM_SQS              (MLX5E_MAX_NUM_CHANNELS * MLX5E_MAX_NUM_TC)
#define MLX5E_TX_CQ_POLL_BUDGET        128
#define MLX5E_UPDATE_STATS_INTERVAL    200 /* msecs */
#define MLX5E_SQ_RECOVER_MIN_INTERVAL  500 /* msecs */

/* Size (bytes) and WQEBB count of a UMR WQE with its inline MTTs */
#define MLX5E_UMR_WQE_INLINE_SZ \
	(sizeof(struct mlx5e_umr_wqe) + \
	 ALIGN(MLX5_MPWRQ_PAGES_PER_WQE * sizeof(struct mlx5_mtt), \
	       MLX5_UMR_MTT_ALIGNMENT))
#define MLX5E_UMR_WQEBBS \
	(DIV_ROUND_UP(MLX5E_UMR_WQE_INLINE_SZ, MLX5_SEND_WQE_BB))
#define MLX5E_ICOSQ_MAX_WQEBBS MLX5E_UMR_WQEBBS

/* Minimum bytes inlined into an XDP TX WQE, and its data-segment count */
#define MLX5E_XDP_MIN_INLINE (ETH_HLEN + VLAN_HLEN)
#define MLX5E_XDP_TX_DS_COUNT \
	((sizeof(struct mlx5e_tx_wqe) / MLX5_SEND_WQE_DS) + 1 /* SG DS */)

#define MLX5E_NUM_MAIN_GROUPS 9

/* Default netif message level mask (see mlx5e_dbg()) */
#define MLX5E_MSG_LEVEL			NETIF_MSG_LINK
 148
 149#define mlx5e_dbg(mlevel, priv, format, ...)                    \
 150do {                                                            \
 151        if (NETIF_MSG_##mlevel & (priv)->msglevel)              \
 152                netdev_warn(priv->netdev, format,               \
 153                            ##__VA_ARGS__);                     \
 154} while (0)
 155
 156
 157static inline u16 mlx5_min_rx_wqes(int wq_type, u32 wq_size)
 158{
 159        switch (wq_type) {
 160        case MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ:
 161                return min_t(u16, MLX5E_PARAMS_DEFAULT_MIN_RX_WQES_MPW,
 162                             wq_size / 2);
 163        default:
 164                return min_t(u16, MLX5E_PARAMS_DEFAULT_MIN_RX_WQES,
 165                             wq_size / 2);
 166        }
 167}
 168
 169static inline int mlx5e_get_max_num_channels(struct mlx5_core_dev *mdev)
 170{
 171        return is_kdump_kernel() ?
 172                MLX5E_MIN_NUM_CHANNELS :
 173                min_t(int, mdev->priv.eq_table.num_comp_vectors,
 174                      MLX5E_MAX_NUM_CHANNELS);
 175}
 176
/* Basic TX WQE: control segment followed by an Ethernet segment */
struct mlx5e_tx_wqe {
	struct mlx5_wqe_ctrl_seg ctrl;
	struct mlx5_wqe_eth_seg  eth;
};

/* RX WQE for the linked-list RQ: next pointer plus one data segment */
struct mlx5e_rx_wqe {
	struct mlx5_wqe_srq_next_seg  next;
	struct mlx5_wqe_data_seg      data;
};
 186
 187struct mlx5e_umr_wqe {
 188        struct mlx5_wqe_ctrl_seg       ctrl;
 189        struct mlx5_wqe_umr_ctrl_seg   uctrl;
 190        struct mlx5_mkey_seg           mkc;
 191        struct mlx5_mtt                inline_mtts[0];
 192};
 193
extern const char mlx5e_self_tests[][ETH_GSTRING_LEN];

/* Names reported to ethtool for the private flags; array order must
 * match the MLX5E_PFLAG_* bit positions below.
 */
static const char mlx5e_priv_flags[][ETH_GSTRING_LEN] = {
	"rx_cqe_moder",
	"tx_cqe_moder",
	"rx_cqe_compress",
	"rx_striding_rq",
};

/* Bit values stored in mlx5e_params.pflags */
enum mlx5e_priv_flag {
	MLX5E_PFLAG_RX_CQE_BASED_MODER = (1 << 0),
	MLX5E_PFLAG_TX_CQE_BASED_MODER = (1 << 1),
	MLX5E_PFLAG_RX_CQE_COMPRESS = (1 << 2),
	MLX5E_PFLAG_RX_STRIDING_RQ = (1 << 3),
};
 209
/* Set or clear a MLX5E_PFLAG_* bit in params->pflags */
#define MLX5E_SET_PFLAG(params, pflag, enable)                  \
	do {                                                    \
		if (enable)                                     \
			(params)->pflags |= (pflag);            \
		else                                            \
			(params)->pflags &= ~(pflag);           \
	} while (0)

/* Read a MLX5E_PFLAG_* bit as 0/1 */
#define MLX5E_GET_PFLAG(params, pflag) (!!((params)->pflags & (pflag)))

#ifdef CONFIG_MLX5_CORE_EN_DCB
#define MLX5E_MAX_BW_ALLOC 100 /* Max percentage of BW allocation */
#endif
 223
/* Software configuration of the netdev; a copy is embedded in each
 * mlx5e_channels set so an open set keeps the params it was created
 * with.
 */
struct mlx5e_params {
	u8  log_sq_size;
	u8  rq_wq_type;		/* striding vs. linked-list RQ */
	u8  log_rq_mtu_frames;
	u16 num_channels;
	u8  num_tc;
	bool rx_cqe_compress_def;	/* default for the rx_cqe_compress pflag */
	struct net_dim_cq_moder rx_cq_moderation;
	struct net_dim_cq_moder tx_cq_moderation;
	bool lro_en;
	u32 lro_wqe_sz;
	u8  tx_min_inline_mode;
	u8  rss_hfunc;
	u8  toeplitz_hash_key[40];
	u32 indirection_rqt[MLX5E_INDIR_RQT_SIZE];
	bool vlan_strip_disable;
	bool scatter_fcs_en;
	bool rx_dim_enabled;
	u32 lro_timeout;
	u32 pflags;		/* MLX5E_PFLAG_* bits */
	struct bpf_prog *xdp_prog;
	unsigned int sw_mtu;
	int hard_mtu;		/* HW overhead, see MLX5E_SW2HW_MTU() */
};
 248
#ifdef CONFIG_MLX5_CORE_EN_DCB
/* CEE DCBX configuration: priority-group bandwidth and PFC settings */
struct mlx5e_cee_config {
	/* bw pct for priority group */
	u8                         pg_bw_pct[CEE_DCBX_MAX_PGS];
	u8                         prio_to_pg_map[CEE_DCBX_MAX_PRIO];
	bool                       pfc_setting[CEE_DCBX_MAX_PRIO];
	bool                       pfc_enable;
};

/* DCB change results: whether the applied change requires a reset */
enum {
	MLX5_DCB_CHG_RESET,
	MLX5_DCB_NO_CHG,
	MLX5_DCB_CHG_NO_RESET,
};

/* Control-path DCBX state */
struct mlx5e_dcbx {
	enum mlx5_dcbx_oper_mode   mode;
	struct mlx5e_cee_config    cee_cfg; /* pending configuration */
	u8                         dscp_app_cnt;

	/* The only setting that cannot be read from FW */
	u8                         tc_tsa[IEEE_8021QAZ_MAX_TCS];
	u8                         cap;
};

/* DCB state consulted on the data path (kept apart from mlx5e_dcbx) */
struct mlx5e_dcbx_dp {
	u8                         dscp2prio[MLX5E_MAX_DSCP];
	u8                         trust_state;
};
#endif
 279
/* Bits for mlx5e_rq.state */
enum {
	MLX5E_RQ_STATE_ENABLED,
	MLX5E_RQ_STATE_AM,	/* adaptive moderation active (net_dim) - TODO confirm */
};
 284
 285#define MLX5E_TEST_BIT(state, nr) (state & BIT(nr))
 286
/* Completion queue shared by the RQ/SQ flavours.  Fields are ordered by
 * access pattern: per-CQE first, then per-napi-poll, then the CQE
 * decompression scratch state, then control-path-only fields.
 */
struct mlx5e_cq {
	/* data path - accessed per cqe */
	struct mlx5_cqwq           wq;

	/* data path - accessed per napi poll */
	u16                        event_ctr;
	struct napi_struct        *napi;
	struct mlx5_core_cq        mcq;
	struct mlx5e_channel      *channel;

	/* cqe decompression */
	struct mlx5_cqe64          title;	/* last title (full) CQE */
	struct mlx5_mini_cqe8      mini_arr[MLX5_MINI_CQE_ARRAY_SIZE];
	u8                         mini_arr_idx;
	u16                        decmprs_left;	/* mini CQEs still to expand */
	u16                        decmprs_wqe_counter;

	/* control */
	struct mlx5_core_dev      *mdev;
	struct mlx5_frag_wq_ctrl   wq_ctrl;
} ____cacheline_aligned_in_smp;
 308
/* Per-WQE bookkeeping for the TX completion path */
struct mlx5e_tx_wqe_info {
	struct sk_buff *skb;
	u32 num_bytes;
	u8  num_wqebbs;	/* WQE basic blocks consumed */
	u8  num_dma;	/* entries used in the DMA fifo */
};

/* How a DMA fifo entry was mapped, so it can be unmapped correctly */
enum mlx5e_dma_map_type {
	MLX5E_DMA_MAP_SINGLE,
	MLX5E_DMA_MAP_PAGE
};

/* One entry of the SQ DMA unmap fifo */
struct mlx5e_sq_dma {
	dma_addr_t              addr;
	u32                     size;
	enum mlx5e_dma_map_type type;
};

/* Bits for the SQ 'state' words */
enum {
	MLX5E_SQ_STATE_ENABLED,
	MLX5E_SQ_STATE_RECOVERING,
	MLX5E_SQ_STATE_IPSEC,
};

/* Per-WQE bookkeeping for the internal control SQ (ICOSQ) */
struct mlx5e_sq_wqe_info {
	u8  opcode;
};
 336
/* TXQ send queue.  Fields are grouped by access pattern: completion-path
 * fields first, then xmit-path fields in a separate cacheline, then
 * read-mostly fields, then control-path-only state.
 */
struct mlx5e_txqsq {
	/* data path */

	/* dirtied @completion */
	u16                        cc;		/* consumer counter */
	u32                        dma_fifo_cc;

	/* dirtied @xmit */
	u16                        pc ____cacheline_aligned_in_smp;	/* producer counter */
	u32                        dma_fifo_pc;
	struct mlx5e_sq_stats      stats;

	struct mlx5e_cq            cq;

	/* write@xmit, read@completion */
	struct {
		struct mlx5e_sq_dma       *dma_fifo;
		struct mlx5e_tx_wqe_info  *wqe_info;
	} db;

	/* read only */
	struct mlx5_wq_cyc         wq;
	u32                        dma_fifo_mask;
	void __iomem              *uar_map;
	struct netdev_queue       *txq;
	u32                        sqn;
	u8                         min_inline_mode;
	u16                        edge;
	struct device             *pdev;
	__be32                     mkey_be;
	unsigned long              state;	/* MLX5E_SQ_STATE_* bits */
	struct hwtstamp_config    *tstamp;
	struct mlx5_clock         *clock;

	/* control path */
	struct mlx5_wq_ctrl        wq_ctrl;
	struct mlx5e_channel      *channel;
	int                        txq_ix;
	u32                        rate_limit;
	struct mlx5e_txqsq_recover {
		struct work_struct         recover_work;
		u64                        last_recover;	/* presumably jiffies; cf. MLX5E_SQ_RECOVER_MIN_INTERVAL */
	} recover;
} ____cacheline_aligned_in_smp;
 381
/* XDP transmit queue (XDP_TX), driven from the RX completion path */
struct mlx5e_xdpsq {
	/* data path */

	/* dirtied @rx completion */
	u16                        cc;		/* consumer counter */
	u16                        pc;		/* producer counter */

	struct mlx5e_cq            cq;

	/* write@xmit, read@completion */
	struct {
		struct mlx5e_dma_info     *di;
		bool                       doorbell;	/* doorbell ring pending */
	} db;

	/* read only */
	struct mlx5_wq_cyc         wq;
	void __iomem              *uar_map;
	u32                        sqn;
	struct device             *pdev;
	__be32                     mkey_be;
	u8                         min_inline_mode;
	unsigned long              state;	/* MLX5E_SQ_STATE_* bits */

	/* control path */
	struct mlx5_wq_ctrl        wq_ctrl;
	struct mlx5e_channel      *channel;
} ____cacheline_aligned_in_smp;
 410
/* Internal control operations SQ; sized to carry UMR WQEs
 * (MLX5E_ICOSQ_MAX_WQEBBS == MLX5E_UMR_WQEBBS).
 */
struct mlx5e_icosq {
	/* data path */

	/* dirtied @xmit */
	u16                        pc ____cacheline_aligned_in_smp;	/* producer counter */

	struct mlx5e_cq            cq;

	/* write@xmit, read@completion */
	struct {
		struct mlx5e_sq_wqe_info *ico_wqe;
	} db;

	/* read only */
	struct mlx5_wq_cyc         wq;
	void __iomem              *uar_map;
	u32                        sqn;
	u16                        edge;
	unsigned long              state;	/* MLX5E_SQ_STATE_* bits */

	/* control path */
	struct mlx5_wq_ctrl        wq_ctrl;
	struct mlx5e_channel      *channel;
} ____cacheline_aligned_in_smp;
 435
 436static inline bool
 437mlx5e_wqc_has_room_for(struct mlx5_wq_cyc *wq, u16 cc, u16 pc, u16 n)
 438{
 439        return (((wq->sz_m1 & (cc - pc)) >= n) || (cc == pc));
 440}
 441
/* A DMA-mapped page */
struct mlx5e_dma_info {
	struct page	*page;
	dma_addr_t	addr;
};

/* RX fragment: a mapped page plus the fragment's offset within it */
struct mlx5e_wqe_frag_info {
	struct mlx5e_dma_info di;
	u32 offset;
};

/* The set of pages mapped by one UMR WQE (one multi-packet WQE) */
struct mlx5e_umr_dma_info {
	struct mlx5e_dma_info  dma_info[MLX5_MPWRQ_PAGES_PER_WQE];
};

/* Per-multi-packet-WQE RX state */
struct mlx5e_mpw_info {
	struct mlx5e_umr_dma_info umr;
	u16 consumed_strides;
	DECLARE_BITMAP(xdp_xmit_bitmap, MLX5_MPWRQ_PAGES_PER_WQE);
};

/* a single cache unit is capable to serve one napi call (for non-striding rq)
 * or a MPWQE (for striding rq).
 */
#define MLX5E_CACHE_UNIT	(MLX5_MPWRQ_PAGES_PER_WQE > NAPI_POLL_WEIGHT ? \
				 MLX5_MPWRQ_PAGES_PER_WQE : NAPI_POLL_WEIGHT)
#define MLX5E_CACHE_SIZE	(4 * roundup_pow_of_two(MLX5E_CACHE_UNIT))
/* Small per-RQ page recycling cache */
struct mlx5e_page_cache {
	u32 head;
	u32 tail;
	struct mlx5e_dma_info page_cache[MLX5E_CACHE_SIZE];
};
 473
struct mlx5e_rq;
/* Datapath callbacks that vary with the RQ type */
typedef void (*mlx5e_fp_handle_rx_cqe)(struct mlx5e_rq*, struct mlx5_cqe64*);
typedef struct sk_buff *
(*mlx5e_fp_skb_from_cqe_mpwrq)(struct mlx5e_rq *rq, struct mlx5e_mpw_info *wi,
			       u16 cqe_bcnt, u32 head_offset, u32 page_idx);
typedef bool (*mlx5e_fp_post_rx_wqes)(struct mlx5e_rq *rq);
typedef void (*mlx5e_fp_dealloc_wqe)(struct mlx5e_rq*, u16);

/* Bits for mlx5e_rq.flags */
enum mlx5e_rq_flag {
	MLX5E_RQ_FLAG_XDP_XMIT = BIT(0),
};
 485
/* Receive queue.  The wqe/mpwqe union carries the state specific to the
 * two RQ flavours (selected by wq_type / params->rq_wq_type).
 */
struct mlx5e_rq {
	/* data path */
	struct mlx5_wq_ll      wq;

	union {
		struct {	/* legacy (non-striding) RQ */
			struct mlx5e_wqe_frag_info *frag_info;
			u32 frag_sz;	/* max possible skb frag_sz */
			union {
				bool page_reuse;
			};
		} wqe;
		struct {	/* striding RQ (multi-packet WQE) */
			struct mlx5e_umr_wqe   umr_wqe;
			struct mlx5e_mpw_info *info;
			mlx5e_fp_skb_from_cqe_mpwrq skb_from_cqe_mpwrq;
			u16                    num_strides;
			u8                     log_stride_sz;
			bool                   umr_in_progress;
		} mpwqe;
	};
	struct {
		u16            headroom;
		u8             page_order;
		u8             map_dir;   /* dma map direction */
	} buff;

	struct mlx5e_channel  *channel;
	struct device         *pdev;
	struct net_device     *netdev;
	struct mlx5e_rq_stats  stats;
	struct mlx5e_cq        cq;
	struct mlx5e_page_cache page_cache;
	struct hwtstamp_config *tstamp;
	struct mlx5_clock      *clock;

	/* RQ-type-specific datapath handlers */
	mlx5e_fp_handle_rx_cqe handle_rx_cqe;
	mlx5e_fp_post_rx_wqes  post_wqes;
	mlx5e_fp_dealloc_wqe   dealloc_wqe;

	unsigned long          state;	/* MLX5E_RQ_STATE_* bits */
	int                    ix;	/* channel index */

	struct net_dim         dim; /* Dynamic Interrupt Moderation */

	/* XDP */
	struct bpf_prog       *xdp_prog;
	unsigned int           hw_mtu;
	struct mlx5e_xdpsq     xdpsq;
	DECLARE_BITMAP(flags, 8);	/* enum mlx5e_rq_flag */

	/* control */
	struct mlx5_wq_ctrl    wq_ctrl;
	__be32                 mkey_be;
	u8                     wq_type;
	u32                    rqn;	/* HW RQ number */
	struct mlx5_core_dev  *mdev;
	struct mlx5_core_mkey  umr_mkey;

	/* XDP read-mostly */
	struct xdp_rxq_info    xdp_rxq;
} ____cacheline_aligned_in_smp;
 548
/* One channel: an RQ, one TXQ SQ per TC and an ICOSQ, all sharing a
 * single napi context.
 */
struct mlx5e_channel {
	/* data path */
	struct mlx5e_rq            rq;
	struct mlx5e_txqsq         sq[MLX5E_MAX_NUM_TC];
	struct mlx5e_icosq         icosq;   /* internal control operations */
	bool                       xdp;
	struct napi_struct         napi;
	struct device             *pdev;
	struct net_device         *netdev;
	__be32                     mkey_be;
	u8                         num_tc;

	/* data path - accessed per napi poll */
	struct irq_desc *irq_desc;
	struct mlx5e_ch_stats      stats;

	/* control */
	struct mlx5e_priv         *priv;
	struct mlx5_core_dev      *mdev;
	struct hwtstamp_config    *tstamp;
	int                        ix;	/* channel index */
	int                        cpu;
};

/* The active set of channels, together with the params they were
 * opened with.
 */
struct mlx5e_channels {
	struct mlx5e_channel **c;
	unsigned int           num;
	struct mlx5e_params    params;
};
 578
/* L3/L4 traffic types used for RSS spreading.  All entries before
 * MLX5E_TT_ANY have an indirect TIR (MLX5E_NUM_INDIR_TIRS == MLX5E_TT_ANY).
 */
enum mlx5e_traffic_types {
	MLX5E_TT_IPV4_TCP,
	MLX5E_TT_IPV6_TCP,
	MLX5E_TT_IPV4_UDP,
	MLX5E_TT_IPV6_UDP,
	MLX5E_TT_IPV4_IPSEC_AH,
	MLX5E_TT_IPV6_IPSEC_AH,
	MLX5E_TT_IPV4_IPSEC_ESP,
	MLX5E_TT_IPV6_IPSEC_ESP,
	MLX5E_TT_IPV4,
	MLX5E_TT_IPV6,
	MLX5E_TT_ANY,
	MLX5E_NUM_TT,
	MLX5E_NUM_INDIR_TIRS = MLX5E_TT_ANY,
};

/* Tunnel traffic types with dedicated steering rules */
enum mlx5e_tunnel_types {
	MLX5E_TT_IPV4_GRE,
	MLX5E_TT_IPV6_GRE,
	MLX5E_NUM_TUNNEL_TT,
};

/* Bits for mlx5e_priv.state */
enum {
	MLX5E_STATE_ASYNC_EVENTS_ENABLED,
	MLX5E_STATE_OPENED,
	MLX5E_STATE_DESTROYING,
};
 606
/* VXLAN offload database (radix tree, presumably keyed by UDP port —
 * TODO confirm against the vxlan code).
 */
struct mlx5e_vxlan_db {
	spinlock_t                      lock; /* protect vxlan table */
	struct radix_tree_root          tree;
};

/* One L2 (MAC address) steering rule */
struct mlx5e_l2_rule {
	u8  addr[ETH_ALEN + 2];
	struct mlx5_flow_handle *rule;
};

/* A flow table together with its flow groups */
struct mlx5e_flow_table {
	int num_groups;
	struct mlx5_flow_table *t;
	struct mlx5_flow_group **g;
};

#define MLX5E_L2_ADDR_HASH_SIZE BIT(BITS_PER_BYTE)

/* TC offload state: offloaded flows in an rhashtable, plus mod-hdr and
 * hairpin lookup tables.
 */
struct mlx5e_tc_table {
	struct mlx5_flow_table          *t;

	struct rhashtable_params        ht_params;
	struct rhashtable               ht;

	DECLARE_HASHTABLE(mod_hdr_tbl, 8);
	DECLARE_HASHTABLE(hairpin_tbl, 8);
};
 634
/* VLAN steering: per-VID rules plus catch-all untagged/any-VLAN rules */
struct mlx5e_vlan_table {
	struct mlx5e_flow_table         ft;
	DECLARE_BITMAP(active_cvlans, VLAN_N_VID);
	DECLARE_BITMAP(active_svlans, VLAN_N_VID);
	struct mlx5_flow_handle *active_cvlans_rule[VLAN_N_VID];
	struct mlx5_flow_handle *active_svlans_rule[VLAN_N_VID];
	struct mlx5_flow_handle *untagged_rule;
	struct mlx5_flow_handle *any_cvlan_rule;
	struct mlx5_flow_handle *any_svlan_rule;
	bool                    cvlan_filter_disabled;
};

/* L2 (MAC) steering: hashed netdev UC/MC addresses plus the
 * broadcast/allmulti/promisc catch-all rules.
 */
struct mlx5e_l2_table {
	struct mlx5e_flow_table    ft;
	struct hlist_head          netdev_uc[MLX5E_L2_ADDR_HASH_SIZE];
	struct hlist_head          netdev_mc[MLX5E_L2_ADDR_HASH_SIZE];
	struct mlx5e_l2_rule       broadcast;
	struct mlx5e_l2_rule       allmulti;
	struct mlx5e_l2_rule       promisc;
	bool                       broadcast_enabled;
	bool                       allmulti_enabled;
	bool                       promisc_enabled;
};

/* L3/L4 traffic type classifier */
struct mlx5e_ttc_table {
	struct mlx5e_flow_table  ft;
	struct mlx5_flow_handle  *rules[MLX5E_NUM_TT];
	struct mlx5_flow_handle  *tunnel_rules[MLX5E_NUM_TUNNEL_TT];
};
 665
/* Accelerated RFS (aRFS) steering */
#define ARFS_HASH_SHIFT BITS_PER_BYTE
#define ARFS_HASH_SIZE BIT(BITS_PER_BYTE)
/* One aRFS flow table (per L3/L4 protocol combination) */
struct arfs_table {
	struct mlx5e_flow_table  ft;
	struct mlx5_flow_handle  *default_rule;
	struct hlist_head        rules_hash[ARFS_HASH_SIZE];
};

enum  arfs_type {
	ARFS_IPV4_TCP,
	ARFS_IPV6_TCP,
	ARFS_IPV4_UDP,
	ARFS_IPV6_UDP,
	ARFS_NUM_TYPES,
};

/* All aRFS state: one table per arfs_type plus the rule list */
struct mlx5e_arfs_tables {
	struct arfs_table arfs_tables[ARFS_NUM_TYPES];
	/* Protect aRFS rules list */
	spinlock_t                     arfs_lock;
	struct list_head               rules;
	int                            last_filter_id;
	struct workqueue_struct        *wq;
};
 690
/* NIC prio FTS */
/* Flow-table levels within the NIC priority */
enum {
	MLX5E_VLAN_FT_LEVEL = 0,
	MLX5E_L2_FT_LEVEL,
	MLX5E_TTC_FT_LEVEL,
	MLX5E_INNER_TTC_FT_LEVEL,
	MLX5E_ARFS_FT_LEVEL
};

/* Flow-table levels within the TC priority */
enum {
	MLX5E_TC_FT_LEVEL = 0,
	MLX5E_TC_TTC_FT_LEVEL,
};

/* One ethtool-created flow table and its rule count */
struct mlx5e_ethtool_table {
	struct mlx5_flow_table *ft;
	int                    num_rules;
};

#define ETHTOOL_NUM_L3_L4_FTS 7
#define ETHTOOL_NUM_L2_FTS 4

/* ethtool flow steering (ethtool -N/-U) state */
struct mlx5e_ethtool_steering {
	struct mlx5e_ethtool_table      l3_l4_ft[ETHTOOL_NUM_L3_L4_FTS];
	struct mlx5e_ethtool_table      l2_ft[ETHTOOL_NUM_L2_FTS];
	struct list_head                rules;
	int                             tot_num_rules;
};
 719
/* All flow steering state of one netdev */
struct mlx5e_flow_steering {
	struct mlx5_flow_namespace      *ns;
	struct mlx5e_ethtool_steering   ethtool;
	struct mlx5e_tc_table           tc;
	struct mlx5e_vlan_table         vlan;
	struct mlx5e_l2_table           l2;
	struct mlx5e_ttc_table          ttc;
	struct mlx5e_ttc_table          inner_ttc;
	struct mlx5e_arfs_tables        arfs;
};

/* An RQ table (RQT) HW object */
struct mlx5e_rqt {
	u32              rqtn;
	bool             enabled;
};

/* A TIR HW object together with the RQT it points at */
struct mlx5e_tir {
	u32               tirn;
	struct mlx5e_rqt  rqt;
	struct list_head  list;
};

/* Priorities within the NIC flow namespace */
enum {
	MLX5E_TC_PRIO = 0,
	MLX5E_NIC_PRIO
};
 746
/* Per-netdev driver state.  Datapath-touched fields are kept at the top
 * of the structure; the rest is control path, guarded by state_lock.
 */
struct mlx5e_priv {
	/* priv data path fields - start */
	struct mlx5e_txqsq *txq2sq[MLX5E_MAX_NUM_CHANNELS * MLX5E_MAX_NUM_TC];
	int channel_tc2txq[MLX5E_MAX_NUM_CHANNELS][MLX5E_MAX_NUM_TC];
#ifdef CONFIG_MLX5_CORE_EN_DCB
	struct mlx5e_dcbx_dp       dcbx_dp;
#endif
	/* priv data path fields - end */

	u32                        msglevel;	/* NETIF_MSG_* mask, see mlx5e_dbg() */
	unsigned long              state;	/* MLX5E_STATE_* bits */
	struct mutex               state_lock; /* Protects Interface state */
	struct mlx5e_rq            drop_rq;

	struct mlx5e_channels      channels;
	u32                        tisn[MLX5E_MAX_NUM_TC];
	struct mlx5e_rqt           indir_rqt;
	struct mlx5e_tir           indir_tir[MLX5E_NUM_INDIR_TIRS];
	struct mlx5e_tir           inner_indir_tir[MLX5E_NUM_INDIR_TIRS];
	struct mlx5e_tir           direct_tir[MLX5E_MAX_NUM_CHANNELS];
	u32                        tx_rates[MLX5E_MAX_NUM_SQS];

	struct mlx5e_flow_steering fs;
	struct mlx5e_vxlan_db      vxlan;

	/* deferred work, all queued on 'wq' or the system workqueues */
	struct workqueue_struct    *wq;
	struct work_struct         update_carrier_work;
	struct work_struct         set_rx_mode_work;
	struct work_struct         tx_timeout_work;
	struct delayed_work        update_stats_work;

	struct mlx5_core_dev      *mdev;
	struct net_device         *netdev;
	struct mlx5e_stats         stats;
	struct hwtstamp_config     tstamp;
	u16                        q_counter;
	u16                        drop_rq_q_counter;
#ifdef CONFIG_MLX5_CORE_EN_DCB
	struct mlx5e_dcbx          dcbx;
#endif

	const struct mlx5e_profile *profile;
	void                      *ppriv;	/* profile-private data */
#ifdef CONFIG_MLX5_EN_IPSEC
	struct mlx5e_ipsec        *ipsec;
#endif
};
 794
/* Set of lifecycle callbacks describing one netdev flavour; invoked by
 * the common mlx5e code at the corresponding stages.
 */
struct mlx5e_profile {
	void    (*init)(struct mlx5_core_dev *mdev,
			struct net_device *netdev,
			const struct mlx5e_profile *profile, void *ppriv);
	void    (*cleanup)(struct mlx5e_priv *priv);
	int     (*init_rx)(struct mlx5e_priv *priv);
	void    (*cleanup_rx)(struct mlx5e_priv *priv);
	int     (*init_tx)(struct mlx5e_priv *priv);
	void    (*cleanup_tx)(struct mlx5e_priv *priv);
	void    (*enable)(struct mlx5e_priv *priv);
	void    (*disable)(struct mlx5e_priv *priv);
	void    (*update_stats)(struct mlx5e_priv *priv);
	void    (*update_carrier)(struct mlx5e_priv *priv);
	int     (*max_nch)(struct mlx5_core_dev *mdev);	/* max number of channels */
	/* RX CQE handlers for the two RQ flavours */
	struct {
		mlx5e_fp_handle_rx_cqe handle_rx_cqe;
		mlx5e_fp_handle_rx_cqe handle_rx_cqe_mpwqe;
	} rx_handlers;
	void    (*netdev_registered_init)(struct mlx5e_priv *priv);
	void    (*netdev_registered_remove)(struct mlx5e_priv *priv);
	int     max_tc;
};
 817
void mlx5e_build_ptys2ethtool_map(void);

/* TX datapath */
u16 mlx5e_select_queue(struct net_device *dev, struct sk_buff *skb,
		       void *accel_priv, select_queue_fallback_t fallback);
netdev_tx_t mlx5e_xmit(struct sk_buff *skb, struct net_device *dev);

/* NAPI / completion processing */
void mlx5e_completion_event(struct mlx5_core_cq *mcq);
void mlx5e_cq_error_event(struct mlx5_core_cq *mcq, enum mlx5_event event);
int mlx5e_napi_poll(struct napi_struct *napi, int budget);
bool mlx5e_poll_tx_cq(struct mlx5e_cq *cq, int napi_budget);
int mlx5e_poll_rx_cq(struct mlx5e_cq *cq, int budget);
bool mlx5e_poll_xdpsq_cq(struct mlx5e_cq *cq);
void mlx5e_free_txqsq_descs(struct mlx5e_txqsq *sq);
void mlx5e_free_xdpsq_descs(struct mlx5e_xdpsq *sq);

/* Striding RQ capability checks */
bool mlx5e_check_fragmented_striding_rq_cap(struct mlx5_core_dev *mdev);
bool mlx5e_striding_rq_possible(struct mlx5_core_dev *mdev,
				struct mlx5e_params *params);

/* RX datapath */
void mlx5e_page_release(struct mlx5e_rq *rq, struct mlx5e_dma_info *dma_info,
			bool recycle);
void mlx5e_handle_rx_cqe(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe);
void mlx5e_handle_rx_cqe_mpwrq(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe);
bool mlx5e_post_rx_wqes(struct mlx5e_rq *rq);
bool mlx5e_post_rx_mpwqes(struct mlx5e_rq *rq);
void mlx5e_dealloc_rx_wqe(struct mlx5e_rq *rq, u16 ix);
void mlx5e_dealloc_rx_mpwqe(struct mlx5e_rq *rq, u16 ix);
void mlx5e_free_rx_mpwqe(struct mlx5e_rq *rq, struct mlx5e_mpw_info *wi);
struct sk_buff *
mlx5e_skb_from_cqe_mpwrq_linear(struct mlx5e_rq *rq, struct mlx5e_mpw_info *wi,
				u16 cqe_bcnt, u32 head_offset, u32 page_idx);
struct sk_buff *
mlx5e_skb_from_cqe_mpwrq_nonlinear(struct mlx5e_rq *rq, struct mlx5e_mpw_info *wi,
				   u16 cqe_bcnt, u32 head_offset, u32 page_idx);

void mlx5e_update_stats(struct mlx5e_priv *priv);

/* Flow steering, self test and ethtool steering */
int mlx5e_create_flow_steering(struct mlx5e_priv *priv);
void mlx5e_destroy_flow_steering(struct mlx5e_priv *priv);
void mlx5e_init_l2_addr(struct mlx5e_priv *priv);
void mlx5e_destroy_flow_table(struct mlx5e_flow_table *ft);
int mlx5e_self_test_num(struct mlx5e_priv *priv);
void mlx5e_self_test(struct net_device *ndev, struct ethtool_test *etest,
		     u64 *buf);
int mlx5e_ethtool_get_flow(struct mlx5e_priv *priv, struct ethtool_rxnfc *info,
			   int location);
int mlx5e_ethtool_get_all_flows(struct mlx5e_priv *priv,
				struct ethtool_rxnfc *info, u32 *rule_locs);
int mlx5e_ethtool_flow_replace(struct mlx5e_priv *priv,
			       struct ethtool_rx_flow_spec *fs);
int mlx5e_ethtool_flow_remove(struct mlx5e_priv *priv,
			      int location);
void mlx5e_ethtool_init_steering(struct mlx5e_priv *priv);
void mlx5e_ethtool_cleanup_steering(struct mlx5e_priv *priv);
void mlx5e_set_rx_mode_work(struct work_struct *work);

/* HW timestamping and CQE compression configuration */
int mlx5e_hwstamp_set(struct mlx5e_priv *priv, struct ifreq *ifr);
int mlx5e_hwstamp_get(struct mlx5e_priv *priv, struct ifreq *ifr);
int mlx5e_modify_rx_cqe_compression_locked(struct mlx5e_priv *priv, bool val);

/* VLAN filter management (ndo_vlan_rx_add/kill_vid) */
int mlx5e_vlan_rx_add_vid(struct net_device *dev, __always_unused __be16 proto,
			  u16 vid);
int mlx5e_vlan_rx_kill_vid(struct net_device *dev, __always_unused __be16 proto,
			   u16 vid);
 882void mlx5e_enable_cvlan_filter(struct mlx5e_priv *priv);
 883void mlx5e_disable_cvlan_filter(struct mlx5e_priv *priv);
 884void mlx5e_timestamp_init(struct mlx5e_priv *priv);
 885
/* Argument bundle for mlx5e_redirect_rqt(): selects whether the RQT is
 * repointed at a single direct RQ or spread across channels via RSS.
 */
struct mlx5e_redirect_rqt_param {
	bool is_rss;		/* true: use @rss below; false: use @rqn */
	union {
		u32 rqn; /* Direct RQN (Non-RSS) */
		struct {
			u8 hfunc;	/* RSS hash function selector — presumably an ETH_RSS_HASH_* value; confirm at callers */
			struct mlx5e_channels *channels;	/* channels whose RQs the RQT is spread over */
		} rss; /* RSS data */
	};
};
 896
 897int mlx5e_redirect_rqt(struct mlx5e_priv *priv, u32 rqtn, int sz,
 898                       struct mlx5e_redirect_rqt_param rrp);
 899void mlx5e_build_indir_tir_ctx_hash(struct mlx5e_params *params,
 900                                    enum mlx5e_traffic_types tt,
 901                                    void *tirc, bool inner);
 902
 903int mlx5e_open_locked(struct net_device *netdev);
 904int mlx5e_close_locked(struct net_device *netdev);
 905
 906int mlx5e_open_channels(struct mlx5e_priv *priv,
 907                        struct mlx5e_channels *chs);
 908void mlx5e_close_channels(struct mlx5e_channels *chs);
 909
/* Function pointer to be used to modify HW settings while
 * switching channels
 */
 913typedef int (*mlx5e_fp_hw_modify)(struct mlx5e_priv *priv);
 914void mlx5e_switch_priv_channels(struct mlx5e_priv *priv,
 915                                struct mlx5e_channels *new_chs,
 916                                mlx5e_fp_hw_modify hw_modify);
 917void mlx5e_activate_priv_channels(struct mlx5e_priv *priv);
 918void mlx5e_deactivate_priv_channels(struct mlx5e_priv *priv);
 919
 920void mlx5e_build_default_indir_rqt(u32 *indirection_rqt, int len,
 921                                   int num_channels);
 922int mlx5e_get_max_linkspeed(struct mlx5_core_dev *mdev, u32 *speed);
 923
 924void mlx5e_set_tx_cq_mode_params(struct mlx5e_params *params,
 925                                 u8 cq_period_mode);
 926void mlx5e_set_rx_cq_mode_params(struct mlx5e_params *params,
 927                                 u8 cq_period_mode);
 928void mlx5e_set_rq_type(struct mlx5_core_dev *mdev, struct mlx5e_params *params);
 929void mlx5e_init_rq_type_params(struct mlx5_core_dev *mdev,
 930                               struct mlx5e_params *params);
 931
 932static inline bool mlx5e_tunnel_inner_ft_supported(struct mlx5_core_dev *mdev)
 933{
 934        return (MLX5_CAP_ETH(mdev, tunnel_stateless_gre) &&
 935                MLX5_CAP_FLOWTABLE_NIC_RX(mdev, ft_field_support.inner_ip_version));
 936}
 937
 938static inline
 939struct mlx5e_tx_wqe *mlx5e_post_nop(struct mlx5_wq_cyc *wq, u32 sqn, u16 *pc)
 940{
 941        u16                         pi   = *pc & wq->sz_m1;
 942        struct mlx5e_tx_wqe        *wqe  = mlx5_wq_cyc_get_wqe(wq, pi);
 943        struct mlx5_wqe_ctrl_seg   *cseg = &wqe->ctrl;
 944
 945        memset(cseg, 0, sizeof(*cseg));
 946
 947        cseg->opmod_idx_opcode = cpu_to_be32((*pc << 8) | MLX5_OPCODE_NOP);
 948        cseg->qpn_ds           = cpu_to_be32((sqn << 8) | 0x01);
 949
 950        (*pc)++;
 951
 952        return wqe;
 953}
 954
/* Publish posted WQEs to the device: update the doorbell record with the
 * producer counter @pc, then ring the doorbell by writing the control
 * segment @ctrl to the UAR page @uar_map.
 *
 * The two barriers are ordering-critical and must not be reordered or
 * removed: dma_wmb() makes the WQE contents visible before the doorbell
 * record update, and wmb() makes the record update visible before the
 * doorbell write itself.
 */
static inline
void mlx5e_notify_hw(struct mlx5_wq_cyc *wq, u16 pc,
		     void __iomem *uar_map,
		     struct mlx5_wqe_ctrl_seg *ctrl)
{
	/* request a completion (CQE) for this WQE */
	ctrl->fm_ce_se = MLX5_WQE_CTRL_CQ_UPDATE;
	/* ensure wqe is visible to device before updating doorbell record */
	dma_wmb();

	*wq->db = cpu_to_be32(pc);

	/* ensure doorbell record is visible to device before ringing the
	 * doorbell
	 */
	wmb();

	mlx5_write64((__be32 *)ctrl, uar_map, NULL);
}
 973
 974static inline void mlx5e_cq_arm(struct mlx5e_cq *cq)
 975{
 976        struct mlx5_core_cq *mcq;
 977
 978        mcq = &cq->mcq;
 979        mlx5_cq_arm(mcq, MLX5_CQ_DB_REQ_NOT, mcq->uar->map, cq->wq.cc);
 980}
 981
 982extern const struct ethtool_ops mlx5e_ethtool_ops;
 983#ifdef CONFIG_MLX5_CORE_EN_DCB
 984extern const struct dcbnl_rtnl_ops mlx5e_dcbnl_ops;
 985int mlx5e_dcbnl_ieee_setets_core(struct mlx5e_priv *priv, struct ieee_ets *ets);
 986void mlx5e_dcbnl_initialize(struct mlx5e_priv *priv);
 987void mlx5e_dcbnl_init_app(struct mlx5e_priv *priv);
 988void mlx5e_dcbnl_delete_app(struct mlx5e_priv *priv);
 989#endif
 990
 991#ifndef CONFIG_RFS_ACCEL
/* aRFS stub for CONFIG_RFS_ACCEL=n: no tables to create, report success
 * so the flow-steering setup path proceeds unchanged.
 */
static inline int mlx5e_arfs_create_tables(struct mlx5e_priv *priv)
{
	return 0;
}
 996
 997static inline void mlx5e_arfs_destroy_tables(struct mlx5e_priv *priv) {}
 998
/* aRFS stub for CONFIG_RFS_ACCEL=n: enabling is not supported, so an
 * explicit error (unlike the create/destroy no-ops) tells the caller
 * the feature is unavailable.
 */
static inline int mlx5e_arfs_enable(struct mlx5e_priv *priv)
{
	return -EOPNOTSUPP;
}
1003
/* aRFS stub for CONFIG_RFS_ACCEL=n: disabling is not supported; mirrors
 * the error returned by the enable stub.
 */
static inline int mlx5e_arfs_disable(struct mlx5e_priv *priv)
{
	return -EOPNOTSUPP;
}
1008#else
1009int mlx5e_arfs_create_tables(struct mlx5e_priv *priv);
1010void mlx5e_arfs_destroy_tables(struct mlx5e_priv *priv);
1011int mlx5e_arfs_enable(struct mlx5e_priv *priv);
1012int mlx5e_arfs_disable(struct mlx5e_priv *priv);
1013int mlx5e_rx_flow_steer(struct net_device *dev, const struct sk_buff *skb,
1014                        u16 rxq_index, u32 flow_id);
1015#endif
1016
1017int mlx5e_create_tir(struct mlx5_core_dev *mdev,
1018                     struct mlx5e_tir *tir, u32 *in, int inlen);
1019void mlx5e_destroy_tir(struct mlx5_core_dev *mdev,
1020                       struct mlx5e_tir *tir);
1021int mlx5e_create_mdev_resources(struct mlx5_core_dev *mdev);
1022void mlx5e_destroy_mdev_resources(struct mlx5_core_dev *mdev);
1023int mlx5e_refresh_tirs(struct mlx5e_priv *priv, bool enable_uc_lb);
1024
1025/* common netdev helpers */
1026int mlx5e_create_indirect_rqt(struct mlx5e_priv *priv);
1027
1028int mlx5e_create_indirect_tirs(struct mlx5e_priv *priv);
1029void mlx5e_destroy_indirect_tirs(struct mlx5e_priv *priv);
1030
1031int mlx5e_create_direct_rqts(struct mlx5e_priv *priv);
1032void mlx5e_destroy_direct_rqts(struct mlx5e_priv *priv);
1033int mlx5e_create_direct_tirs(struct mlx5e_priv *priv);
1034void mlx5e_destroy_direct_tirs(struct mlx5e_priv *priv);
1035void mlx5e_destroy_rqt(struct mlx5e_priv *priv, struct mlx5e_rqt *rqt);
1036
/* Parameters for creating a TTC (traffic type classifier) flow table,
 * consumed by mlx5e_create_ttc_table() / mlx5e_create_inner_ttc_table()
 * and filled in by the mlx5e_set_*ttc*_params() helpers.
 */
struct ttc_params {
	struct mlx5_flow_table_attr ft_attr;	/* attributes of the flow table to create */
	u32 any_tt_tirn;	/* TIRN for the catch-all ("any") traffic type */
	u32 indir_tirn[MLX5E_NUM_INDIR_TIRS];	/* per-traffic-type indirect TIRNs */
	struct mlx5e_ttc_table *inner_ttc;	/* inner TTC table — presumably for tunneled traffic; confirm at callers */
};
1043
1044void mlx5e_set_ttc_basic_params(struct mlx5e_priv *priv, struct ttc_params *ttc_params);
1045void mlx5e_set_ttc_ft_params(struct ttc_params *ttc_params);
1046void mlx5e_set_inner_ttc_ft_params(struct ttc_params *ttc_params);
1047
1048int mlx5e_create_ttc_table(struct mlx5e_priv *priv, struct ttc_params *params,
1049                           struct mlx5e_ttc_table *ttc);
1050void mlx5e_destroy_ttc_table(struct mlx5e_priv *priv,
1051                             struct mlx5e_ttc_table *ttc);
1052
1053int mlx5e_create_inner_ttc_table(struct mlx5e_priv *priv, struct ttc_params *params,
1054                                 struct mlx5e_ttc_table *ttc);
1055void mlx5e_destroy_inner_ttc_table(struct mlx5e_priv *priv,
1056                                   struct mlx5e_ttc_table *ttc);
1057
1058int mlx5e_create_tis(struct mlx5_core_dev *mdev, int tc,
1059                     u32 underlay_qpn, u32 *tisn);
1060void mlx5e_destroy_tis(struct mlx5_core_dev *mdev, u32 tisn);
1061
1062int mlx5e_create_tises(struct mlx5e_priv *priv);
1063void mlx5e_cleanup_nic_tx(struct mlx5e_priv *priv);
1064int mlx5e_close(struct net_device *netdev);
1065int mlx5e_open(struct net_device *netdev);
1066void mlx5e_update_stats_work(struct work_struct *work);
1067
1068int mlx5e_bits_invert(unsigned long a, int size);
1069
1070/* ethtool helpers */
1071void mlx5e_ethtool_get_drvinfo(struct mlx5e_priv *priv,
1072                               struct ethtool_drvinfo *drvinfo);
1073void mlx5e_ethtool_get_strings(struct mlx5e_priv *priv,
1074                               uint32_t stringset, uint8_t *data);
1075int mlx5e_ethtool_get_sset_count(struct mlx5e_priv *priv, int sset);
1076void mlx5e_ethtool_get_ethtool_stats(struct mlx5e_priv *priv,
1077                                     struct ethtool_stats *stats, u64 *data);
1078void mlx5e_ethtool_get_ringparam(struct mlx5e_priv *priv,
1079                                 struct ethtool_ringparam *param);
1080int mlx5e_ethtool_set_ringparam(struct mlx5e_priv *priv,
1081                                struct ethtool_ringparam *param);
1082void mlx5e_ethtool_get_channels(struct mlx5e_priv *priv,
1083                                struct ethtool_channels *ch);
1084int mlx5e_ethtool_set_channels(struct mlx5e_priv *priv,
1085                               struct ethtool_channels *ch);
1086int mlx5e_ethtool_get_coalesce(struct mlx5e_priv *priv,
1087                               struct ethtool_coalesce *coal);
1088int mlx5e_ethtool_set_coalesce(struct mlx5e_priv *priv,
1089                               struct ethtool_coalesce *coal);
1090int mlx5e_ethtool_get_ts_info(struct mlx5e_priv *priv,
1091                              struct ethtool_ts_info *info);
1092int mlx5e_ethtool_flash_device(struct mlx5e_priv *priv,
1093                               struct ethtool_flash *flash);
1094
1095int mlx5e_setup_tc_block_cb(enum tc_setup_type type, void *type_data,
1096                            void *cb_priv);
1097
1098/* mlx5e generic netdev management API */
1099struct net_device*
1100mlx5e_create_netdev(struct mlx5_core_dev *mdev, const struct mlx5e_profile *profile,
1101                    void *ppriv);
1102int mlx5e_attach_netdev(struct mlx5e_priv *priv);
1103void mlx5e_detach_netdev(struct mlx5e_priv *priv);
1104void mlx5e_destroy_netdev(struct mlx5e_priv *priv);
1105void mlx5e_build_nic_params(struct mlx5_core_dev *mdev,
1106                            struct mlx5e_params *params,
1107                            u16 max_channels, u16 mtu);
1108u8 mlx5e_params_calculate_tx_min_inline(struct mlx5_core_dev *mdev);
1109void mlx5e_rx_dim_work(struct work_struct *work);
1110#endif /* __MLX5_EN_H__ */
1111