/* linux/drivers/net/ethernet/mellanox/mlx5/core/en.h */
/*
 * Copyright (c) 2015-2016, Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#ifndef __MLX5_EN_H__
#define __MLX5_EN_H__

#include <linux/if_vlan.h>
#include <linux/etherdevice.h>
#include <linux/timecounter.h>
#include <linux/net_tstamp.h>
#include <linux/ptp_clock_kernel.h>
#include <linux/crash_dump.h>
#include <linux/mlx5/driver.h>
#include <linux/mlx5/qp.h>
#include <linux/mlx5/cq.h>
#include <linux/mlx5/port.h>
#include <linux/mlx5/vport.h>
#include <linux/mlx5/transobj.h>
#include <linux/mlx5/fs.h>
#include <linux/rhashtable.h>
#include <net/switchdev.h>
#include <net/xdp.h>
#include <linux/dim.h>
#include <linux/bits.h>
#include "wq.h"
#include "mlx5_core.h"
#include "en_stats.h"
#include "en/fs.h"

  58extern const struct net_device_ops mlx5e_netdev_ops;
  59struct page_pool;
  60
  61#define MLX5E_METADATA_ETHER_TYPE (0x8CE4)
  62#define MLX5E_METADATA_ETHER_LEN 8
  63
  64#define MLX5_SET_CFG(p, f, v) MLX5_SET(create_flow_group_in, p, f, v)
  65
  66#define MLX5E_ETH_HARD_MTU (ETH_HLEN + VLAN_HLEN + ETH_FCS_LEN)
  67
  68#define MLX5E_HW2SW_MTU(params, hwmtu) ((hwmtu) - ((params)->hard_mtu))
  69#define MLX5E_SW2HW_MTU(params, swmtu) ((swmtu) + ((params)->hard_mtu))
  70
  71#define MLX5E_MAX_PRIORITY      8
  72#define MLX5E_MAX_DSCP          64
  73#define MLX5E_MAX_NUM_TC        8
  74
  75#define MLX5_RX_HEADROOM NET_SKB_PAD
  76#define MLX5_SKB_FRAG_SZ(len)   (SKB_DATA_ALIGN(len) +  \
  77                                 SKB_DATA_ALIGN(sizeof(struct skb_shared_info)))
  78
  79#define MLX5E_RX_MAX_HEAD (256)
  80
  81#define MLX5_MPWRQ_MIN_LOG_STRIDE_SZ(mdev) \
  82        (6 + MLX5_CAP_GEN(mdev, cache_line_128byte)) /* HW restriction */
  83#define MLX5_MPWRQ_LOG_STRIDE_SZ(mdev, req) \
  84        max_t(u32, MLX5_MPWRQ_MIN_LOG_STRIDE_SZ(mdev), req)
  85#define MLX5_MPWRQ_DEF_LOG_STRIDE_SZ(mdev) \
  86        MLX5_MPWRQ_LOG_STRIDE_SZ(mdev, order_base_2(MLX5E_RX_MAX_HEAD))
  87
  88#define MLX5_MPWRQ_LOG_WQE_SZ                   18
  89#define MLX5_MPWRQ_WQE_PAGE_ORDER  (MLX5_MPWRQ_LOG_WQE_SZ - PAGE_SHIFT > 0 ? \
  90                                    MLX5_MPWRQ_LOG_WQE_SZ - PAGE_SHIFT : 0)
  91#define MLX5_MPWRQ_PAGES_PER_WQE                BIT(MLX5_MPWRQ_WQE_PAGE_ORDER)
  92
  93#define MLX5_MTT_OCTW(npages) (ALIGN(npages, 8) / 2)
  94#define MLX5E_REQUIRED_WQE_MTTS         (ALIGN(MLX5_MPWRQ_PAGES_PER_WQE, 8))
  95#define MLX5E_LOG_ALIGNED_MPWQE_PPW     (ilog2(MLX5E_REQUIRED_WQE_MTTS))
  96#define MLX5E_REQUIRED_MTTS(wqes)       (wqes * MLX5E_REQUIRED_WQE_MTTS)
  97#define MLX5E_MAX_RQ_NUM_MTTS   \
  98        ((1 << 16) * 2) /* So that MLX5_MTT_OCTW(num_mtts) fits into u16 */
  99#define MLX5E_ORDER2_MAX_PACKET_MTU (order_base_2(10 * 1024))
 100#define MLX5E_PARAMS_MAXIMUM_LOG_RQ_SIZE_MPW    \
 101                (ilog2(MLX5E_MAX_RQ_NUM_MTTS / MLX5E_REQUIRED_WQE_MTTS))
 102#define MLX5E_LOG_MAX_RQ_NUM_PACKETS_MPW \
 103        (MLX5E_PARAMS_MAXIMUM_LOG_RQ_SIZE_MPW + \
 104         (MLX5_MPWRQ_LOG_WQE_SZ - MLX5E_ORDER2_MAX_PACKET_MTU))
 105
 106#define MLX5E_MIN_SKB_FRAG_SZ           (MLX5_SKB_FRAG_SZ(MLX5_RX_HEADROOM))
 107#define MLX5E_LOG_MAX_RX_WQE_BULK       \
 108        (ilog2(PAGE_SIZE / roundup_pow_of_two(MLX5E_MIN_SKB_FRAG_SZ)))
 109
 110#define MLX5E_PARAMS_MINIMUM_LOG_SQ_SIZE                0x6
 111#define MLX5E_PARAMS_DEFAULT_LOG_SQ_SIZE                0xa
 112#define MLX5E_PARAMS_MAXIMUM_LOG_SQ_SIZE                0xd
 113
 114#define MLX5E_PARAMS_MINIMUM_LOG_RQ_SIZE (1 + MLX5E_LOG_MAX_RX_WQE_BULK)
 115#define MLX5E_PARAMS_DEFAULT_LOG_RQ_SIZE                0xa
 116#define MLX5E_PARAMS_MAXIMUM_LOG_RQ_SIZE min_t(u8, 0xd, \
 117                                               MLX5E_LOG_MAX_RQ_NUM_PACKETS_MPW)
 118
 119#define MLX5E_PARAMS_MINIMUM_LOG_RQ_SIZE_MPW            0x2
 120
 121#define MLX5E_PARAMS_DEFAULT_LRO_WQE_SZ                 (64 * 1024)
 122#define MLX5E_DEFAULT_LRO_TIMEOUT                       32
 123#define MLX5E_LRO_TIMEOUT_ARR_SIZE                      4
 124
 125#define MLX5E_PARAMS_DEFAULT_RX_CQ_MODERATION_USEC      0x10
 126#define MLX5E_PARAMS_DEFAULT_RX_CQ_MODERATION_USEC_FROM_CQE 0x3
 127#define MLX5E_PARAMS_DEFAULT_RX_CQ_MODERATION_PKTS      0x20
 128#define MLX5E_PARAMS_DEFAULT_TX_CQ_MODERATION_USEC      0x10
 129#define MLX5E_PARAMS_DEFAULT_TX_CQ_MODERATION_USEC_FROM_CQE 0x10
 130#define MLX5E_PARAMS_DEFAULT_TX_CQ_MODERATION_PKTS      0x20
 131#define MLX5E_PARAMS_DEFAULT_MIN_RX_WQES                0x80
 132#define MLX5E_PARAMS_DEFAULT_MIN_RX_WQES_MPW            0x2
 133
 134#define MLX5E_LOG_INDIR_RQT_SIZE       0x7
 135#define MLX5E_INDIR_RQT_SIZE           BIT(MLX5E_LOG_INDIR_RQT_SIZE)
 136#define MLX5E_MIN_NUM_CHANNELS         0x1
 137#define MLX5E_MAX_NUM_CHANNELS         (MLX5E_INDIR_RQT_SIZE >> 1)
 138#define MLX5E_MAX_NUM_SQS              (MLX5E_MAX_NUM_CHANNELS * MLX5E_MAX_NUM_TC)
 139#define MLX5E_TX_CQ_POLL_BUDGET        128
 140#define MLX5E_TX_XSK_POLL_BUDGET       64
 141#define MLX5E_SQ_RECOVER_MIN_INTERVAL  500 /* msecs */
 142
 143#define MLX5E_UMR_WQE_INLINE_SZ \
 144        (sizeof(struct mlx5e_umr_wqe) + \
 145         ALIGN(MLX5_MPWRQ_PAGES_PER_WQE * sizeof(struct mlx5_mtt), \
 146               MLX5_UMR_MTT_ALIGNMENT))
 147#define MLX5E_UMR_WQEBBS \
 148        (DIV_ROUND_UP(MLX5E_UMR_WQE_INLINE_SZ, MLX5_SEND_WQE_BB))
 149
 150#define MLX5E_MSG_LEVEL                 NETIF_MSG_LINK
 151
 152#define mlx5e_dbg(mlevel, priv, format, ...)                    \
 153do {                                                            \
 154        if (NETIF_MSG_##mlevel & (priv)->msglevel)              \
 155                netdev_warn(priv->netdev, format,               \
 156                            ##__VA_ARGS__);                     \
 157} while (0)
 158
 159enum mlx5e_rq_group {
 160        MLX5E_RQ_GROUP_REGULAR,
 161        MLX5E_RQ_GROUP_XSK,
 162#define MLX5E_NUM_RQ_GROUPS(g) (1 + MLX5E_RQ_GROUP_##g)
 163};
 164
 165static inline u16 mlx5_min_rx_wqes(int wq_type, u32 wq_size)
 166{
 167        switch (wq_type) {
 168        case MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ:
 169                return min_t(u16, MLX5E_PARAMS_DEFAULT_MIN_RX_WQES_MPW,
 170                             wq_size / 2);
 171        default:
 172                return min_t(u16, MLX5E_PARAMS_DEFAULT_MIN_RX_WQES,
 173                             wq_size / 2);
 174        }
 175}
 176
 177/* Use this function to get max num channels (rxqs/txqs) only to create netdev */
 178static inline int mlx5e_get_max_num_channels(struct mlx5_core_dev *mdev)
 179{
 180        return is_kdump_kernel() ?
 181                MLX5E_MIN_NUM_CHANNELS :
 182                min_t(int, mlx5_comp_vectors_count(mdev), MLX5E_MAX_NUM_CHANNELS);
 183}
 184
 185struct mlx5e_tx_wqe {
 186        struct mlx5_wqe_ctrl_seg ctrl;
 187        union {
 188                struct {
 189                        struct mlx5_wqe_eth_seg  eth;
 190                        struct mlx5_wqe_data_seg data[0];
 191                };
 192                u8 tls_progress_params_ctx[0];
 193        };
 194};
 195
 196struct mlx5e_rx_wqe_ll {
 197        struct mlx5_wqe_srq_next_seg  next;
 198        struct mlx5_wqe_data_seg      data[0];
 199};
 200
 201struct mlx5e_rx_wqe_cyc {
 202        struct mlx5_wqe_data_seg      data[0];
 203};
 204
 205struct mlx5e_umr_wqe {
 206        struct mlx5_wqe_ctrl_seg       ctrl;
 207        struct mlx5_wqe_umr_ctrl_seg   uctrl;
 208        struct mlx5_mkey_seg           mkc;
 209        union {
 210                struct mlx5_mtt        inline_mtts[0];
 211                u8                     tls_static_params_ctx[0];
 212        };
 213};
 214
 215extern const char mlx5e_self_tests[][ETH_GSTRING_LEN];
 216
 217enum mlx5e_priv_flag {
 218        MLX5E_PFLAG_RX_CQE_BASED_MODER,
 219        MLX5E_PFLAG_TX_CQE_BASED_MODER,
 220        MLX5E_PFLAG_RX_CQE_COMPRESS,
 221        MLX5E_PFLAG_RX_STRIDING_RQ,
 222        MLX5E_PFLAG_RX_NO_CSUM_COMPLETE,
 223        MLX5E_PFLAG_XDP_TX_MPWQE,
 224        MLX5E_NUM_PFLAGS, /* Keep last */
 225};
 226
 227#define MLX5E_SET_PFLAG(params, pflag, enable)                  \
 228        do {                                                    \
 229                if (enable)                                     \
 230                        (params)->pflags |= BIT(pflag);         \
 231                else                                            \
 232                        (params)->pflags &= ~(BIT(pflag));      \
 233        } while (0)
 234
 235#define MLX5E_GET_PFLAG(params, pflag) (!!((params)->pflags & (BIT(pflag))))
 236
 237#ifdef CONFIG_MLX5_CORE_EN_DCB
 238#define MLX5E_MAX_BW_ALLOC 100 /* Max percentage of BW allocation */
 239#endif
 240
 241struct mlx5e_params {
 242        u8  log_sq_size;
 243        u8  rq_wq_type;
 244        u8  log_rq_mtu_frames;
 245        u16 num_channels;
 246        u8  num_tc;
 247        bool rx_cqe_compress_def;
 248        bool tunneled_offload_en;
 249        struct dim_cq_moder rx_cq_moderation;
 250        struct dim_cq_moder tx_cq_moderation;
 251        bool lro_en;
 252        u8  tx_min_inline_mode;
 253        bool vlan_strip_disable;
 254        bool scatter_fcs_en;
 255        bool rx_dim_enabled;
 256        bool tx_dim_enabled;
 257        u32 lro_timeout;
 258        u32 pflags;
 259        struct bpf_prog *xdp_prog;
 260        struct mlx5e_xsk *xsk;
 261        unsigned int sw_mtu;
 262        int hard_mtu;
 263};
 264
 265#ifdef CONFIG_MLX5_CORE_EN_DCB
 266struct mlx5e_cee_config {
 267        /* bw pct for priority group */
 268        u8                         pg_bw_pct[CEE_DCBX_MAX_PGS];
 269        u8                         prio_to_pg_map[CEE_DCBX_MAX_PRIO];
 270        bool                       pfc_setting[CEE_DCBX_MAX_PRIO];
 271        bool                       pfc_enable;
 272};
 273
 274enum {
 275        MLX5_DCB_CHG_RESET,
 276        MLX5_DCB_NO_CHG,
 277        MLX5_DCB_CHG_NO_RESET,
 278};
 279
 280struct mlx5e_dcbx {
 281        enum mlx5_dcbx_oper_mode   mode;
 282        struct mlx5e_cee_config    cee_cfg; /* pending configuration */
 283        u8                         dscp_app_cnt;
 284
 285        /* The only setting that cannot be read from FW */
 286        u8                         tc_tsa[IEEE_8021QAZ_MAX_TCS];
 287        u8                         cap;
 288
 289        /* Buffer configuration */
 290        bool                       manual_buffer;
 291        u32                        cable_len;
 292        u32                        xoff;
 293};
 294
 295struct mlx5e_dcbx_dp {
 296        u8                         dscp2prio[MLX5E_MAX_DSCP];
 297        u8                         trust_state;
 298};
 299#endif
 300
 301enum {
 302        MLX5E_RQ_STATE_ENABLED,
 303        MLX5E_RQ_STATE_AM,
 304        MLX5E_RQ_STATE_NO_CSUM_COMPLETE,
 305        MLX5E_RQ_STATE_CSUM_FULL, /* cqe_csum_full hw bit is set */
 306};
 307
 308struct mlx5e_cq {
 309        /* data path - accessed per cqe */
 310        struct mlx5_cqwq           wq;
 311
 312        /* data path - accessed per napi poll */
 313        u16                        event_ctr;
 314        struct napi_struct        *napi;
 315        struct mlx5_core_cq        mcq;
 316        struct mlx5e_channel      *channel;
 317
 318        /* control */
 319        struct mlx5_core_dev      *mdev;
 320        struct mlx5_wq_ctrl        wq_ctrl;
 321} ____cacheline_aligned_in_smp;
 322
 323struct mlx5e_cq_decomp {
 324        /* cqe decompression */
 325        struct mlx5_cqe64          title;
 326        struct mlx5_mini_cqe8      mini_arr[MLX5_MINI_CQE_ARRAY_SIZE];
 327        u8                         mini_arr_idx;
 328        u16                        left;
 329        u16                        wqe_counter;
 330} ____cacheline_aligned_in_smp;
 331
 332struct mlx5e_tx_wqe_info {
 333        struct sk_buff *skb;
 334        u32 num_bytes;
 335        u8  num_wqebbs;
 336        u8  num_dma;
 337#ifdef CONFIG_MLX5_EN_TLS
 338        skb_frag_t *resync_dump_frag;
 339#endif
 340};
 341
 342enum mlx5e_dma_map_type {
 343        MLX5E_DMA_MAP_SINGLE,
 344        MLX5E_DMA_MAP_PAGE
 345};
 346
 347struct mlx5e_sq_dma {
 348        dma_addr_t              addr;
 349        u32                     size;
 350        enum mlx5e_dma_map_type type;
 351};
 352
 353enum {
 354        MLX5E_SQ_STATE_ENABLED,
 355        MLX5E_SQ_STATE_RECOVERING,
 356        MLX5E_SQ_STATE_IPSEC,
 357        MLX5E_SQ_STATE_AM,
 358        MLX5E_SQ_STATE_TLS,
 359};
 360
 361struct mlx5e_sq_wqe_info {
 362        u8  opcode;
 363
 364        /* Auxiliary data for different opcodes. */
 365        union {
 366                struct {
 367                        struct mlx5e_rq *rq;
 368                } umr;
 369        };
 370};
 371
 372struct mlx5e_txqsq {
 373        /* data path */
 374
 375        /* dirtied @completion */
 376        u16                        cc;
 377        u32                        dma_fifo_cc;
 378        struct dim                 dim; /* Adaptive Moderation */
 379
 380        /* dirtied @xmit */
 381        u16                        pc ____cacheline_aligned_in_smp;
 382        u32                        dma_fifo_pc;
 383
 384        struct mlx5e_cq            cq;
 385
 386        /* read only */
 387        struct mlx5_wq_cyc         wq;
 388        u32                        dma_fifo_mask;
 389        struct mlx5e_sq_stats     *stats;
 390        struct {
 391                struct mlx5e_sq_dma       *dma_fifo;
 392                struct mlx5e_tx_wqe_info  *wqe_info;
 393        } db;
 394        void __iomem              *uar_map;
 395        struct netdev_queue       *txq;
 396        u32                        sqn;
 397        u16                        stop_room;
 398        u8                         min_inline_mode;
 399        struct device             *pdev;
 400        __be32                     mkey_be;
 401        unsigned long              state;
 402        struct hwtstamp_config    *tstamp;
 403        struct mlx5_clock         *clock;
 404
 405        /* control path */
 406        struct mlx5_wq_ctrl        wq_ctrl;
 407        struct mlx5e_channel      *channel;
 408        int                        ch_ix;
 409        int                        txq_ix;
 410        u32                        rate_limit;
 411        struct work_struct         recover_work;
 412} ____cacheline_aligned_in_smp;
 413
 414struct mlx5e_dma_info {
 415        dma_addr_t addr;
 416        union {
 417                struct page *page;
 418                struct {
 419                        u64 handle;
 420                        void *data;
 421                } xsk;
 422        };
 423};
 424
 425/* XDP packets can be transmitted in different ways. On completion, we need to
 426 * distinguish between them to clean up things in a proper way.
 427 */
 428enum mlx5e_xdp_xmit_mode {
 429        /* An xdp_frame was transmitted due to either XDP_REDIRECT from another
 430         * device or XDP_TX from an XSK RQ. The frame has to be unmapped and
 431         * returned.
 432         */
 433        MLX5E_XDP_XMIT_MODE_FRAME,
 434
 435        /* The xdp_frame was created in place as a result of XDP_TX from a
 436         * regular RQ. No DMA remapping happened, and the page belongs to us.
 437         */
 438        MLX5E_XDP_XMIT_MODE_PAGE,
 439
 440        /* No xdp_frame was created at all, the transmit happened from a UMEM
 441         * page. The UMEM Completion Ring producer pointer has to be increased.
 442         */
 443        MLX5E_XDP_XMIT_MODE_XSK,
 444};
 445
 446struct mlx5e_xdp_info {
 447        enum mlx5e_xdp_xmit_mode mode;
 448        union {
 449                struct {
 450                        struct xdp_frame *xdpf;
 451                        dma_addr_t dma_addr;
 452                } frame;
 453                struct {
 454                        struct mlx5e_rq *rq;
 455                        struct mlx5e_dma_info di;
 456                } page;
 457        };
 458};
 459
 460struct mlx5e_xdp_xmit_data {
 461        dma_addr_t  dma_addr;
 462        void       *data;
 463        u32         len;
 464};
 465
 466struct mlx5e_xdp_info_fifo {
 467        struct mlx5e_xdp_info *xi;
 468        u32 *cc;
 469        u32 *pc;
 470        u32 mask;
 471};
 472
 473struct mlx5e_xdp_wqe_info {
 474        u8 num_wqebbs;
 475        u8 num_pkts;
 476};
 477
 478struct mlx5e_xdp_mpwqe {
 479        /* Current MPWQE session */
 480        struct mlx5e_tx_wqe *wqe;
 481        u8                   ds_count;
 482        u8                   pkt_count;
 483        u8                   max_ds_count;
 484        u8                   complete;
 485        u8                   inline_on;
 486};
 487
 488struct mlx5e_xdpsq;
 489typedef int (*mlx5e_fp_xmit_xdp_frame_check)(struct mlx5e_xdpsq *);
 490typedef bool (*mlx5e_fp_xmit_xdp_frame)(struct mlx5e_xdpsq *,
 491                                        struct mlx5e_xdp_xmit_data *,
 492                                        struct mlx5e_xdp_info *,
 493                                        int);
 494
 495struct mlx5e_xdpsq {
 496        /* data path */
 497
 498        /* dirtied @completion */
 499        u32                        xdpi_fifo_cc;
 500        u16                        cc;
 501
 502        /* dirtied @xmit */
 503        u32                        xdpi_fifo_pc ____cacheline_aligned_in_smp;
 504        u16                        pc;
 505        struct mlx5_wqe_ctrl_seg   *doorbell_cseg;
 506        struct mlx5e_xdp_mpwqe     mpwqe;
 507
 508        struct mlx5e_cq            cq;
 509
 510        /* read only */
 511        struct xdp_umem           *umem;
 512        struct mlx5_wq_cyc         wq;
 513        struct mlx5e_xdpsq_stats  *stats;
 514        mlx5e_fp_xmit_xdp_frame_check xmit_xdp_frame_check;
 515        mlx5e_fp_xmit_xdp_frame    xmit_xdp_frame;
 516        struct {
 517                struct mlx5e_xdp_wqe_info *wqe_info;
 518                struct mlx5e_xdp_info_fifo xdpi_fifo;
 519        } db;
 520        void __iomem              *uar_map;
 521        u32                        sqn;
 522        struct device             *pdev;
 523        __be32                     mkey_be;
 524        u8                         min_inline_mode;
 525        unsigned long              state;
 526        unsigned int               hw_mtu;
 527
 528        /* control path */
 529        struct mlx5_wq_ctrl        wq_ctrl;
 530        struct mlx5e_channel      *channel;
 531} ____cacheline_aligned_in_smp;
 532
 533struct mlx5e_icosq {
 534        /* data path */
 535        u16                        cc;
 536        u16                        pc;
 537
 538        struct mlx5_wqe_ctrl_seg  *doorbell_cseg;
 539        struct mlx5e_cq            cq;
 540
 541        /* write@xmit, read@completion */
 542        struct {
 543                struct mlx5e_sq_wqe_info *ico_wqe;
 544        } db;
 545
 546        /* read only */
 547        struct mlx5_wq_cyc         wq;
 548        void __iomem              *uar_map;
 549        u32                        sqn;
 550        unsigned long              state;
 551
 552        /* control path */
 553        struct mlx5_wq_ctrl        wq_ctrl;
 554        struct mlx5e_channel      *channel;
 555} ____cacheline_aligned_in_smp;
 556
 557struct mlx5e_wqe_frag_info {
 558        struct mlx5e_dma_info *di;
 559        u32 offset;
 560        bool last_in_page;
 561};
 562
 563struct mlx5e_umr_dma_info {
 564        struct mlx5e_dma_info  dma_info[MLX5_MPWRQ_PAGES_PER_WQE];
 565};
 566
 567struct mlx5e_mpw_info {
 568        struct mlx5e_umr_dma_info umr;
 569        u16 consumed_strides;
 570        DECLARE_BITMAP(xdp_xmit_bitmap, MLX5_MPWRQ_PAGES_PER_WQE);
 571};
 572
 573#define MLX5E_MAX_RX_FRAGS 4
 574
 575/* a single cache unit is capable to serve one napi call (for non-striding rq)
 576 * or a MPWQE (for striding rq).
 577 */
 578#define MLX5E_CACHE_UNIT        (MLX5_MPWRQ_PAGES_PER_WQE > NAPI_POLL_WEIGHT ? \
 579                                 MLX5_MPWRQ_PAGES_PER_WQE : NAPI_POLL_WEIGHT)
 580#define MLX5E_CACHE_SIZE        (4 * roundup_pow_of_two(MLX5E_CACHE_UNIT))
 581struct mlx5e_page_cache {
 582        u32 head;
 583        u32 tail;
 584        struct mlx5e_dma_info page_cache[MLX5E_CACHE_SIZE];
 585};
 586
 587struct mlx5e_rq;
 588typedef void (*mlx5e_fp_handle_rx_cqe)(struct mlx5e_rq*, struct mlx5_cqe64*);
 589typedef struct sk_buff *
 590(*mlx5e_fp_skb_from_cqe_mpwrq)(struct mlx5e_rq *rq, struct mlx5e_mpw_info *wi,
 591                               u16 cqe_bcnt, u32 head_offset, u32 page_idx);
 592typedef struct sk_buff *
 593(*mlx5e_fp_skb_from_cqe)(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe,
 594                         struct mlx5e_wqe_frag_info *wi, u32 cqe_bcnt);
 595typedef bool (*mlx5e_fp_post_rx_wqes)(struct mlx5e_rq *rq);
 596typedef void (*mlx5e_fp_dealloc_wqe)(struct mlx5e_rq*, u16);
 597
 598enum mlx5e_rq_flag {
 599        MLX5E_RQ_FLAG_XDP_XMIT,
 600        MLX5E_RQ_FLAG_XDP_REDIRECT,
 601};
 602
 603struct mlx5e_rq_frag_info {
 604        int frag_size;
 605        int frag_stride;
 606};
 607
 608struct mlx5e_rq_frags_info {
 609        struct mlx5e_rq_frag_info arr[MLX5E_MAX_RX_FRAGS];
 610        u8 num_frags;
 611        u8 log_num_frags;
 612        u8 wqe_bulk;
 613};
 614
 615struct mlx5e_rq {
 616        /* data path */
 617        union {
 618                struct {
 619                        struct mlx5_wq_cyc          wq;
 620                        struct mlx5e_wqe_frag_info *frags;
 621                        struct mlx5e_dma_info      *di;
 622                        struct mlx5e_rq_frags_info  info;
 623                        mlx5e_fp_skb_from_cqe       skb_from_cqe;
 624                } wqe;
 625                struct {
 626                        struct mlx5_wq_ll      wq;
 627                        struct mlx5e_umr_wqe   umr_wqe;
 628                        struct mlx5e_mpw_info *info;
 629                        mlx5e_fp_skb_from_cqe_mpwrq skb_from_cqe_mpwrq;
 630                        u16                    num_strides;
 631                        u16                    actual_wq_head;
 632                        u8                     log_stride_sz;
 633                        u8                     umr_in_progress;
 634                        u8                     umr_last_bulk;
 635                        u8                     umr_completed;
 636                } mpwqe;
 637        };
 638        struct {
 639                u16            umem_headroom;
 640                u16            headroom;
 641                u8             map_dir;   /* dma map direction */
 642        } buff;
 643
 644        struct mlx5e_channel  *channel;
 645        struct device         *pdev;
 646        struct net_device     *netdev;
 647        struct mlx5e_rq_stats *stats;
 648        struct mlx5e_cq        cq;
 649        struct mlx5e_cq_decomp cqd;
 650        struct mlx5e_page_cache page_cache;
 651        struct hwtstamp_config *tstamp;
 652        struct mlx5_clock      *clock;
 653
 654        mlx5e_fp_handle_rx_cqe handle_rx_cqe;
 655        mlx5e_fp_post_rx_wqes  post_wqes;
 656        mlx5e_fp_dealloc_wqe   dealloc_wqe;
 657
 658        unsigned long          state;
 659        int                    ix;
 660        unsigned int           hw_mtu;
 661
 662        struct dim         dim; /* Dynamic Interrupt Moderation */
 663
 664        /* XDP */
 665        struct bpf_prog       *xdp_prog;
 666        struct mlx5e_xdpsq    *xdpsq;
 667        DECLARE_BITMAP(flags, 8);
 668        struct page_pool      *page_pool;
 669
 670        /* AF_XDP zero-copy */
 671        struct zero_copy_allocator zca;
 672        struct xdp_umem       *umem;
 673
 674        /* control */
 675        struct mlx5_wq_ctrl    wq_ctrl;
 676        __be32                 mkey_be;
 677        u8                     wq_type;
 678        u32                    rqn;
 679        struct mlx5_core_dev  *mdev;
 680        struct mlx5_core_mkey  umr_mkey;
 681
 682        /* XDP read-mostly */
 683        struct xdp_rxq_info    xdp_rxq;
 684} ____cacheline_aligned_in_smp;
 685
 686enum mlx5e_channel_state {
 687        MLX5E_CHANNEL_STATE_XSK,
 688        MLX5E_CHANNEL_NUM_STATES
 689};
 690
 691struct mlx5e_channel {
 692        /* data path */
 693        struct mlx5e_rq            rq;
 694        struct mlx5e_xdpsq         rq_xdpsq;
 695        struct mlx5e_txqsq         sq[MLX5E_MAX_NUM_TC];
 696        struct mlx5e_icosq         icosq;   /* internal control operations */
 697        bool                       xdp;
 698        struct napi_struct         napi;
 699        struct device             *pdev;
 700        struct net_device         *netdev;
 701        __be32                     mkey_be;
 702        u8                         num_tc;
 703
 704        /* XDP_REDIRECT */
 705        struct mlx5e_xdpsq         xdpsq;
 706
 707        /* AF_XDP zero-copy */
 708        struct mlx5e_rq            xskrq;
 709        struct mlx5e_xdpsq         xsksq;
 710        struct mlx5e_icosq         xskicosq;
 711        /* xskicosq can be accessed from any CPU - the spinlock protects it. */
 712        spinlock_t                 xskicosq_lock;
 713
 714        /* data path - accessed per napi poll */
 715        struct irq_desc *irq_desc;
 716        struct mlx5e_ch_stats     *stats;
 717
 718        /* control */
 719        struct mlx5e_priv         *priv;
 720        struct mlx5_core_dev      *mdev;
 721        struct hwtstamp_config    *tstamp;
 722        DECLARE_BITMAP(state, MLX5E_CHANNEL_NUM_STATES);
 723        int                        ix;
 724        int                        cpu;
 725        cpumask_var_t              xps_cpumask;
 726};
 727
 728struct mlx5e_channels {
 729        struct mlx5e_channel **c;
 730        unsigned int           num;
 731        struct mlx5e_params    params;
 732};
 733
 734struct mlx5e_channel_stats {
 735        struct mlx5e_ch_stats ch;
 736        struct mlx5e_sq_stats sq[MLX5E_MAX_NUM_TC];
 737        struct mlx5e_rq_stats rq;
 738        struct mlx5e_rq_stats xskrq;
 739        struct mlx5e_xdpsq_stats rq_xdpsq;
 740        struct mlx5e_xdpsq_stats xdpsq;
 741        struct mlx5e_xdpsq_stats xsksq;
 742} ____cacheline_aligned_in_smp;
 743
 744enum {
 745        MLX5E_STATE_OPENED,
 746        MLX5E_STATE_DESTROYING,
 747        MLX5E_STATE_XDP_TX_ENABLED,
 748        MLX5E_STATE_XDP_OPEN,
 749};
 750
 751struct mlx5e_rqt {
 752        u32              rqtn;
 753        bool             enabled;
 754};
 755
 756struct mlx5e_tir {
 757        u32               tirn;
 758        struct mlx5e_rqt  rqt;
 759        struct list_head  list;
 760};
 761
 762enum {
 763        MLX5E_TC_PRIO = 0,
 764        MLX5E_NIC_PRIO
 765};
 766
 767struct mlx5e_rss_params {
 768        u32     indirection_rqt[MLX5E_INDIR_RQT_SIZE];
 769        u32     rx_hash_fields[MLX5E_NUM_INDIR_TIRS];
 770        u8      toeplitz_hash_key[40];
 771        u8      hfunc;
 772};
 773
 774struct mlx5e_modify_sq_param {
 775        int curr_state;
 776        int next_state;
 777        int rl_update;
 778        int rl_index;
 779};
 780
 781struct mlx5e_xsk {
 782        /* UMEMs are stored separately from channels, because we don't want to
 783         * lose them when channels are recreated. The kernel also stores UMEMs,
 784         * but it doesn't distinguish between zero-copy and non-zero-copy UMEMs,
 785         * so rely on our mechanism.
 786         */
 787        struct xdp_umem **umems;
 788        u16 refcnt;
 789        bool ever_used;
 790};
 791
 792struct mlx5e_priv {
 793        /* priv data path fields - start */
 794        struct mlx5e_txqsq *txq2sq[MLX5E_MAX_NUM_CHANNELS * MLX5E_MAX_NUM_TC];
 795        int channel_tc2txq[MLX5E_MAX_NUM_CHANNELS][MLX5E_MAX_NUM_TC];
 796#ifdef CONFIG_MLX5_CORE_EN_DCB
 797        struct mlx5e_dcbx_dp       dcbx_dp;
 798#endif
 799        /* priv data path fields - end */
 800
 801        u32                        msglevel;
 802        unsigned long              state;
 803        struct mutex               state_lock; /* Protects Interface state */
 804        struct mlx5e_rq            drop_rq;
 805
 806        struct mlx5e_channels      channels;
 807        u32                        tisn[MLX5E_MAX_NUM_TC];
 808        struct mlx5e_rqt           indir_rqt;
 809        struct mlx5e_tir           indir_tir[MLX5E_NUM_INDIR_TIRS];
 810        struct mlx5e_tir           inner_indir_tir[MLX5E_NUM_INDIR_TIRS];
 811        struct mlx5e_tir           direct_tir[MLX5E_MAX_NUM_CHANNELS];
 812        struct mlx5e_tir           xsk_tir[MLX5E_MAX_NUM_CHANNELS];
 813        struct mlx5e_rss_params    rss_params;
 814        u32                        tx_rates[MLX5E_MAX_NUM_SQS];
 815
 816        struct mlx5e_flow_steering fs;
 817
 818        struct workqueue_struct    *wq;
 819        struct work_struct         update_carrier_work;
 820        struct work_struct         set_rx_mode_work;
 821        struct work_struct         tx_timeout_work;
 822        struct work_struct         update_stats_work;
 823        struct work_struct         monitor_counters_work;
 824        struct mlx5_nb             monitor_counters_nb;
 825
 826        struct mlx5_core_dev      *mdev;
 827        struct net_device         *netdev;
 828        struct mlx5e_stats         stats;
 829        struct mlx5e_channel_stats channel_stats[MLX5E_MAX_NUM_CHANNELS];
 830        u16                        max_nch;
 831        u8                         max_opened_tc;
 832        struct hwtstamp_config     tstamp;
 833        u16                        q_counter;
 834        u16                        drop_rq_q_counter;
 835        struct notifier_block      events_nb;
 836
 837#ifdef CONFIG_MLX5_CORE_EN_DCB
 838        struct mlx5e_dcbx          dcbx;
 839#endif
 840
 841        const struct mlx5e_profile *profile;
 842        void                      *ppriv;
 843#ifdef CONFIG_MLX5_EN_IPSEC
 844        struct mlx5e_ipsec        *ipsec;
 845#endif
 846#ifdef CONFIG_MLX5_EN_TLS
 847        struct mlx5e_tls          *tls;
 848#endif
 849        struct devlink_health_reporter *tx_reporter;
 850        struct mlx5e_xsk           xsk;
 851};
 852
/* Per-flavor netdev operations (NIC, representor, ...): lifecycle hooks
 * driven by the generic mlx5e netdev management code, plus the profile's
 * RX completion handlers and TC/RQ-group limits.
 */
struct mlx5e_profile {
	/* Construct/destroy the private context for this netdev flavor. */
	int	(*init)(struct mlx5_core_dev *mdev,
			struct net_device *netdev,
			const struct mlx5e_profile *profile, void *ppriv);
	void	(*cleanup)(struct mlx5e_priv *priv);
	/* Per-direction resource setup/teardown. */
	int	(*init_rx)(struct mlx5e_priv *priv);
	void	(*cleanup_rx)(struct mlx5e_priv *priv);
	int	(*init_tx)(struct mlx5e_priv *priv);
	void	(*cleanup_tx)(struct mlx5e_priv *priv);
	void	(*enable)(struct mlx5e_priv *priv);
	void	(*disable)(struct mlx5e_priv *priv);
	int	(*update_rx)(struct mlx5e_priv *priv);
	void	(*update_stats)(struct mlx5e_priv *priv);
	void	(*update_carrier)(struct mlx5e_priv *priv);
	/* CQE handlers for regular and multi-packet WQE (MPWQE) RQs. */
	struct {
		mlx5e_fp_handle_rx_cqe handle_rx_cqe;
		mlx5e_fp_handle_rx_cqe handle_rx_cqe_mpwqe;
	} rx_handlers;
	int	max_tc;		/* max number of traffic classes for this profile */
	u8	rq_groups;	/* number of RQ groups the profile uses */
};
 874
void mlx5e_build_ptys2ethtool_map(void);

/* TX datapath entry points */
u16 mlx5e_select_queue(struct net_device *dev, struct sk_buff *skb,
		       struct net_device *sb_dev);
netdev_tx_t mlx5e_xmit(struct sk_buff *skb, struct net_device *dev);
netdev_tx_t mlx5e_sq_xmit(struct mlx5e_txqsq *sq, struct sk_buff *skb,
			  struct mlx5e_tx_wqe *wqe, u16 pi, bool xmit_more);

/* NAPI polling and CQ event handling */
void mlx5e_trigger_irq(struct mlx5e_icosq *sq);
void mlx5e_completion_event(struct mlx5_core_cq *mcq, struct mlx5_eqe *eqe);
void mlx5e_cq_error_event(struct mlx5_core_cq *mcq, enum mlx5_event event);
int mlx5e_napi_poll(struct napi_struct *napi, int budget);
bool mlx5e_poll_tx_cq(struct mlx5e_cq *cq, int napi_budget);
int mlx5e_poll_rx_cq(struct mlx5e_cq *cq, int budget);
void mlx5e_free_txqsq_descs(struct mlx5e_txqsq *sq);

/* Striding-RQ capability/selection helpers */
bool mlx5e_check_fragmented_striding_rq_cap(struct mlx5_core_dev *mdev);
bool mlx5e_striding_rq_possible(struct mlx5_core_dev *mdev,
				struct mlx5e_params *params);
 894
/* RX datapath: page handling, WQE posting/deallocation, CQE handlers */
void mlx5e_page_dma_unmap(struct mlx5e_rq *rq, struct mlx5e_dma_info *dma_info);
void mlx5e_page_release_dynamic(struct mlx5e_rq *rq,
				struct mlx5e_dma_info *dma_info,
				bool recycle);
void mlx5e_handle_rx_cqe(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe);
void mlx5e_handle_rx_cqe_mpwrq(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe);
bool mlx5e_post_rx_wqes(struct mlx5e_rq *rq);
void mlx5e_poll_ico_cq(struct mlx5e_cq *cq);
bool mlx5e_post_rx_mpwqes(struct mlx5e_rq *rq);
void mlx5e_dealloc_rx_wqe(struct mlx5e_rq *rq, u16 ix);
void mlx5e_dealloc_rx_mpwqe(struct mlx5e_rq *rq, u16 ix);
/* SKB construction from completed RX WQEs (linear vs non-linear layouts,
 * legacy vs multi-packet WQE).
 */
struct sk_buff *
mlx5e_skb_from_cqe_mpwrq_linear(struct mlx5e_rq *rq, struct mlx5e_mpw_info *wi,
				u16 cqe_bcnt, u32 head_offset, u32 page_idx);
struct sk_buff *
mlx5e_skb_from_cqe_mpwrq_nonlinear(struct mlx5e_rq *rq, struct mlx5e_mpw_info *wi,
				   u16 cqe_bcnt, u32 head_offset, u32 page_idx);
struct sk_buff *
mlx5e_skb_from_cqe_linear(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe,
			  struct mlx5e_wqe_frag_info *wi, u32 cqe_bcnt);
struct sk_buff *
mlx5e_skb_from_cqe_nonlinear(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe,
			     struct mlx5e_wqe_frag_info *wi, u32 cqe_bcnt);

/* statistics */
void mlx5e_update_stats(struct mlx5e_priv *priv);
void mlx5e_get_stats(struct net_device *dev, struct rtnl_link_stats64 *stats);
void mlx5e_fold_sw_stats64(struct mlx5e_priv *priv, struct rtnl_link_stats64 *s);
 922
void mlx5e_init_l2_addr(struct mlx5e_priv *priv);
int mlx5e_self_test_num(struct mlx5e_priv *priv);
void mlx5e_self_test(struct net_device *ndev, struct ethtool_test *etest,
		     u64 *buf);
void mlx5e_set_rx_mode_work(struct work_struct *work);

/* HW timestamping get/set; takes the user's struct ifreq */
int mlx5e_hwstamp_set(struct mlx5e_priv *priv, struct ifreq *ifr);
int mlx5e_hwstamp_get(struct mlx5e_priv *priv, struct ifreq *ifr);
int mlx5e_modify_rx_cqe_compression_locked(struct mlx5e_priv *priv, bool val);

/* VLAN filter add/kill ndo callbacks */
int mlx5e_vlan_rx_add_vid(struct net_device *dev, __always_unused __be16 proto,
			  u16 vid);
int mlx5e_vlan_rx_kill_vid(struct net_device *dev, __always_unused __be16 proto,
			   u16 vid);
void mlx5e_timestamp_init(struct mlx5e_priv *priv);
 938
/* Target description for an RQ table (RQT) redirect: either a single
 * direct RQN, or an RSS spread over a set of channels.
 */
struct mlx5e_redirect_rqt_param {
	bool is_rss;		/* discriminator: selects which union member is valid */
	union {
		u32 rqn; /* Direct RQN (Non-RSS) */
		struct {
			u8 hfunc;	/* RSS hash function selector */
			struct mlx5e_channels *channels;
		} rss; /* RSS data */
	};
};
 949
/* RQT redirect and TIR RSS-hash configuration */
int mlx5e_redirect_rqt(struct mlx5e_priv *priv, u32 rqtn, int sz,
		       struct mlx5e_redirect_rqt_param rrp);
void mlx5e_build_indir_tir_ctx_hash(struct mlx5e_rss_params *rss_params,
				    const struct mlx5e_tirc_config *ttconfig,
				    void *tirc, bool inner);
void mlx5e_modify_tirs_hash(struct mlx5e_priv *priv, void *in, int inlen);
struct mlx5e_tirc_config mlx5e_tirc_get_default_config(enum mlx5e_traffic_types tt);
 957
struct mlx5e_xsk_param;

/* RQ lifecycle */
struct mlx5e_rq_param;
int mlx5e_open_rq(struct mlx5e_channel *c, struct mlx5e_params *params,
		  struct mlx5e_rq_param *param, struct mlx5e_xsk_param *xsk,
		  struct xdp_umem *umem, struct mlx5e_rq *rq);
int mlx5e_wait_for_min_rx_wqes(struct mlx5e_rq *rq, int wait_time);
void mlx5e_deactivate_rq(struct mlx5e_rq *rq);
void mlx5e_close_rq(struct mlx5e_rq *rq);

/* ICO SQ and XDP SQ lifecycle */
struct mlx5e_sq_param;
int mlx5e_open_icosq(struct mlx5e_channel *c, struct mlx5e_params *params,
		     struct mlx5e_sq_param *param, struct mlx5e_icosq *sq);
void mlx5e_close_icosq(struct mlx5e_icosq *sq);
int mlx5e_open_xdpsq(struct mlx5e_channel *c, struct mlx5e_params *params,
		     struct mlx5e_sq_param *param, struct xdp_umem *umem,
		     struct mlx5e_xdpsq *sq, bool is_redirect);
void mlx5e_close_xdpsq(struct mlx5e_xdpsq *sq);

/* CQ lifecycle */
struct mlx5e_cq_param;
int mlx5e_open_cq(struct mlx5e_channel *c, struct dim_cq_moder moder,
		  struct mlx5e_cq_param *param, struct mlx5e_cq *cq);
void mlx5e_close_cq(struct mlx5e_cq *cq);

int mlx5e_open_locked(struct net_device *netdev);
int mlx5e_close_locked(struct net_device *netdev);

int mlx5e_open_channels(struct mlx5e_priv *priv,
			struct mlx5e_channels *chs);
void mlx5e_close_channels(struct mlx5e_channels *chs);
 988
/* Function pointer to be used to modify HW settings while
 * switching channels
 */
typedef int (*mlx5e_fp_hw_modify)(struct mlx5e_priv *priv);
int mlx5e_safe_reopen_channels(struct mlx5e_priv *priv);
int mlx5e_safe_switch_channels(struct mlx5e_priv *priv,
			       struct mlx5e_channels *new_chs,
			       mlx5e_fp_hw_modify hw_modify);
void mlx5e_activate_priv_channels(struct mlx5e_priv *priv);
void mlx5e_deactivate_priv_channels(struct mlx5e_priv *priv);
 999
/* Parameter calculation and defaults */
void mlx5e_build_default_indir_rqt(u32 *indirection_rqt, int len,
				   int num_channels);
void mlx5e_set_tx_cq_mode_params(struct mlx5e_params *params,
				 u8 cq_period_mode);
void mlx5e_set_rx_cq_mode_params(struct mlx5e_params *params,
				 u8 cq_period_mode);
void mlx5e_set_rq_type(struct mlx5_core_dev *mdev, struct mlx5e_params *params);
void mlx5e_init_rq_type_params(struct mlx5_core_dev *mdev,
			       struct mlx5e_params *params);

/* SQ state modification and TXQ activation/quiescing */
int mlx5e_modify_sq(struct mlx5_core_dev *mdev, u32 sqn,
		    struct mlx5e_modify_sq_param *p);
void mlx5e_activate_txqsq(struct mlx5e_txqsq *sq);
void mlx5e_tx_disable_queue(struct netdev_queue *txq);
1014
1015static inline bool mlx5e_tunnel_inner_ft_supported(struct mlx5_core_dev *mdev)
1016{
1017        return (MLX5_CAP_ETH(mdev, tunnel_stateless_gre) &&
1018                MLX5_CAP_FLOWTABLE_NIC_RX(mdev, ft_field_support.inner_ip_version));
1019}
1020
1021static inline bool mlx5_tx_swp_supported(struct mlx5_core_dev *mdev)
1022{
1023        return MLX5_CAP_ETH(mdev, swp) &&
1024                MLX5_CAP_ETH(mdev, swp_csum) && MLX5_CAP_ETH(mdev, swp_lso);
1025}
1026
extern const struct ethtool_ops mlx5e_ethtool_ops;
#ifdef CONFIG_MLX5_CORE_EN_DCB
/* DCB (data center bridging) support, only with CONFIG_MLX5_CORE_EN_DCB */
extern const struct dcbnl_rtnl_ops mlx5e_dcbnl_ops;
int mlx5e_dcbnl_ieee_setets_core(struct mlx5e_priv *priv, struct ieee_ets *ets);
void mlx5e_dcbnl_initialize(struct mlx5e_priv *priv);
void mlx5e_dcbnl_init_app(struct mlx5e_priv *priv);
void mlx5e_dcbnl_delete_app(struct mlx5e_priv *priv);
#endif
1035
/* TIR and per-mdev resource helpers */
int mlx5e_create_tir(struct mlx5_core_dev *mdev,
		     struct mlx5e_tir *tir, u32 *in, int inlen);
void mlx5e_destroy_tir(struct mlx5_core_dev *mdev,
		       struct mlx5e_tir *tir);
int mlx5e_create_mdev_resources(struct mlx5_core_dev *mdev);
void mlx5e_destroy_mdev_resources(struct mlx5_core_dev *mdev);
int mlx5e_refresh_tirs(struct mlx5e_priv *priv, bool enable_uc_lb);

/* common netdev helpers */
void mlx5e_create_q_counters(struct mlx5e_priv *priv);
void mlx5e_destroy_q_counters(struct mlx5e_priv *priv);
int mlx5e_open_drop_rq(struct mlx5e_priv *priv,
		       struct mlx5e_rq *drop_rq);
void mlx5e_close_drop_rq(struct mlx5e_rq *drop_rq);

int mlx5e_create_indirect_rqt(struct mlx5e_priv *priv);

int mlx5e_create_indirect_tirs(struct mlx5e_priv *priv, bool inner_ttc);
void mlx5e_destroy_indirect_tirs(struct mlx5e_priv *priv, bool inner_ttc);

int mlx5e_create_direct_rqts(struct mlx5e_priv *priv, struct mlx5e_tir *tirs);
void mlx5e_destroy_direct_rqts(struct mlx5e_priv *priv, struct mlx5e_tir *tirs);
int mlx5e_create_direct_tirs(struct mlx5e_priv *priv, struct mlx5e_tir *tirs);
void mlx5e_destroy_direct_tirs(struct mlx5e_priv *priv, struct mlx5e_tir *tirs);
void mlx5e_destroy_rqt(struct mlx5e_priv *priv, struct mlx5e_rqt *rqt);

int mlx5e_create_tis(struct mlx5_core_dev *mdev, void *in, u32 *tisn);
void mlx5e_destroy_tis(struct mlx5_core_dev *mdev, u32 tisn);

int mlx5e_create_tises(struct mlx5e_priv *priv);
int mlx5e_update_nic_rx(struct mlx5e_priv *priv);
void mlx5e_update_carrier(struct mlx5e_priv *priv);
int mlx5e_close(struct net_device *netdev);
int mlx5e_open(struct net_device *netdev);
void mlx5e_update_ndo_stats(struct mlx5e_priv *priv);

void mlx5e_queue_update_stats(struct mlx5e_priv *priv);
int mlx5e_bits_invert(unsigned long a, int size);

/* MTU change plumbing; NOTE(review): set_mtu_cb presumably programs the
 * new MTU into HW for the caller's flavor — confirm against callers.
 */
typedef int (*change_hw_mtu_cb)(struct mlx5e_priv *priv);
int mlx5e_set_dev_port_mtu(struct mlx5e_priv *priv);
int mlx5e_change_mtu(struct net_device *netdev, int new_mtu,
		     change_hw_mtu_cb set_mtu_cb);
1079
/* ethtool helpers: implementations backing mlx5e_ethtool_ops entries */
void mlx5e_ethtool_get_drvinfo(struct mlx5e_priv *priv,
			       struct ethtool_drvinfo *drvinfo);
void mlx5e_ethtool_get_strings(struct mlx5e_priv *priv,
			       uint32_t stringset, uint8_t *data);
int mlx5e_ethtool_get_sset_count(struct mlx5e_priv *priv, int sset);
void mlx5e_ethtool_get_ethtool_stats(struct mlx5e_priv *priv,
				     struct ethtool_stats *stats, u64 *data);
void mlx5e_ethtool_get_ringparam(struct mlx5e_priv *priv,
				 struct ethtool_ringparam *param);
int mlx5e_ethtool_set_ringparam(struct mlx5e_priv *priv,
				struct ethtool_ringparam *param);
void mlx5e_ethtool_get_channels(struct mlx5e_priv *priv,
				struct ethtool_channels *ch);
int mlx5e_ethtool_set_channels(struct mlx5e_priv *priv,
			       struct ethtool_channels *ch);
int mlx5e_ethtool_get_coalesce(struct mlx5e_priv *priv,
			       struct ethtool_coalesce *coal);
int mlx5e_ethtool_set_coalesce(struct mlx5e_priv *priv,
			       struct ethtool_coalesce *coal);
int mlx5e_ethtool_get_link_ksettings(struct mlx5e_priv *priv,
				     struct ethtool_link_ksettings *link_ksettings);
int mlx5e_ethtool_set_link_ksettings(struct mlx5e_priv *priv,
				     const struct ethtool_link_ksettings *link_ksettings);
u32 mlx5e_ethtool_get_rxfh_key_size(struct mlx5e_priv *priv);
u32 mlx5e_ethtool_get_rxfh_indir_size(struct mlx5e_priv *priv);
int mlx5e_ethtool_get_ts_info(struct mlx5e_priv *priv,
			      struct ethtool_ts_info *info);
int mlx5e_ethtool_flash_device(struct mlx5e_priv *priv,
			       struct ethtool_flash *flash);
void mlx5e_ethtool_get_pauseparam(struct mlx5e_priv *priv,
				  struct ethtool_pauseparam *pauseparam);
int mlx5e_ethtool_set_pauseparam(struct mlx5e_priv *priv,
				 struct ethtool_pauseparam *pauseparam);
1114
/* mlx5e generic netdev management API */
int mlx5e_netdev_init(struct net_device *netdev,
		      struct mlx5e_priv *priv,
		      struct mlx5_core_dev *mdev,
		      const struct mlx5e_profile *profile,
		      void *ppriv);
void mlx5e_netdev_cleanup(struct net_device *netdev, struct mlx5e_priv *priv);
struct net_device*
mlx5e_create_netdev(struct mlx5_core_dev *mdev, const struct mlx5e_profile *profile,
		    int nch, void *ppriv);
int mlx5e_attach_netdev(struct mlx5e_priv *priv);
void mlx5e_detach_netdev(struct mlx5e_priv *priv);
void mlx5e_destroy_netdev(struct mlx5e_priv *priv);
void mlx5e_set_netdev_mtu_boundaries(struct mlx5e_priv *priv);
/* Default parameter construction for a NIC netdev */
void mlx5e_build_nic_params(struct mlx5_core_dev *mdev,
			    struct mlx5e_xsk *xsk,
			    struct mlx5e_rss_params *rss_params,
			    struct mlx5e_params *params,
			    u16 max_channels, u16 mtu);
void mlx5e_build_rq_params(struct mlx5_core_dev *mdev,
			   struct mlx5e_params *params);
void mlx5e_build_rss_params(struct mlx5e_rss_params *rss_params,
			    u16 num_channels);
u8 mlx5e_params_calculate_tx_min_inline(struct mlx5_core_dev *mdev);
/* DIM (dynamic interrupt moderation) deferred work handlers */
void mlx5e_rx_dim_work(struct work_struct *work);
void mlx5e_tx_dim_work(struct work_struct *work);

/* UDP tunnel port add/delete and offload feature validation */
void mlx5e_add_vxlan_port(struct net_device *netdev, struct udp_tunnel_info *ti);
void mlx5e_del_vxlan_port(struct net_device *netdev, struct udp_tunnel_info *ti);
netdev_features_t mlx5e_features_check(struct sk_buff *skb,
				       struct net_device *netdev,
				       netdev_features_t features);
int mlx5e_set_features(struct net_device *netdev, netdev_features_t features);
#ifdef CONFIG_MLX5_ESWITCH
/* SR-IOV VF ndo callbacks, only with eswitch support */
int mlx5e_set_vf_mac(struct net_device *dev, int vf, u8 *mac);
int mlx5e_set_vf_rate(struct net_device *dev, int vf, int min_tx_rate, int max_tx_rate);
int mlx5e_get_vf_config(struct net_device *dev, int vf, struct ifla_vf_info *ivi);
int mlx5e_get_vf_stats(struct net_device *dev, int vf, struct ifla_vf_stats *vf_stats);
#endif
1154#endif /* __MLX5_EN_H__ */
1155