linux/drivers/net/ethernet/mellanox/mlx5/core/en.h
/*
 * Copyright (c) 2015-2016, Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#ifndef __MLX5_EN_H__
#define __MLX5_EN_H__

#include <linux/if_vlan.h>
#include <linux/etherdevice.h>
#include <linux/timecounter.h>
#include <linux/net_tstamp.h>
#include <linux/crash_dump.h>
#include <linux/mlx5/driver.h>
#include <linux/mlx5/qp.h>
#include <linux/mlx5/cq.h>
#include <linux/mlx5/port.h>
#include <linux/mlx5/vport.h>
#include <linux/mlx5/transobj.h>
#include <linux/mlx5/fs.h>
#include <linux/rhashtable.h>
#include <net/udp_tunnel.h>
#include <net/switchdev.h>
#include <net/xdp.h>
#include <linux/dim.h>
#include <linux/bits.h>
#include "wq.h"
#include "mlx5_core.h"
#include "en_stats.h"
#include "en/dcbnl.h"
#include "en/fs.h"
#include "lib/hv_vhca.h"

extern const struct net_device_ops mlx5e_netdev_ops;
struct page_pool;

#define MLX5E_METADATA_ETHER_TYPE (0x8CE4)
#define MLX5E_METADATA_ETHER_LEN 8

#define MLX5_SET_CFG(p, f, v) MLX5_SET(create_flow_group_in, p, f, v)

#define MLX5E_ETH_HARD_MTU (ETH_HLEN + VLAN_HLEN + ETH_FCS_LEN)

#define MLX5E_HW2SW_MTU(params, hwmtu) ((hwmtu) - ((params)->hard_mtu))
#define MLX5E_SW2HW_MTU(params, swmtu) ((swmtu) + ((params)->hard_mtu))
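
/* Example (illustrative): with the plain Ethernet hard_mtu above,
 * MLX5E_ETH_HARD_MTU = ETH_HLEN + VLAN_HLEN + ETH_FCS_LEN = 14 + 4 + 4 = 22
 * bytes, so a software MTU of 1500 maps to MLX5E_SW2HW_MTU(params, 1500) =
 * 1522, and MLX5E_HW2SW_MTU() performs the inverse conversion.
 */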

#define MLX5E_MAX_NUM_TC	8

#define MLX5_RX_HEADROOM NET_SKB_PAD
#define MLX5_SKB_FRAG_SZ(len)	(SKB_DATA_ALIGN(len) +	\
				 SKB_DATA_ALIGN(sizeof(struct skb_shared_info)))

#define MLX5E_RX_MAX_HEAD (256)

#define MLX5_MPWRQ_MIN_LOG_STRIDE_SZ(mdev) \
	(6 + MLX5_CAP_GEN(mdev, cache_line_128byte)) /* HW restriction */
#define MLX5_MPWRQ_LOG_STRIDE_SZ(mdev, req) \
	max_t(u32, MLX5_MPWRQ_MIN_LOG_STRIDE_SZ(mdev), req)
#define MLX5_MPWRQ_DEF_LOG_STRIDE_SZ(mdev) \
	MLX5_MPWRQ_LOG_STRIDE_SZ(mdev, order_base_2(MLX5E_RX_MAX_HEAD))
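
/* Worked example (illustrative): the HW minimum is 64-byte strides (log = 6),
 * or 128-byte strides (log = 7) when the cache_line_128byte capability is
 * set. The default requests order_base_2(MLX5E_RX_MAX_HEAD) = 8, so
 * MLX5_MPWRQ_DEF_LOG_STRIDE_SZ() evaluates to 8, i.e. 256-byte strides.
 */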

#define MLX5_MPWRQ_LOG_WQE_SZ			18
#define MLX5_MPWRQ_WQE_PAGE_ORDER  (MLX5_MPWRQ_LOG_WQE_SZ - PAGE_SHIFT > 0 ? \
				    MLX5_MPWRQ_LOG_WQE_SZ - PAGE_SHIFT : 0)
#define MLX5_MPWRQ_PAGES_PER_WQE		BIT(MLX5_MPWRQ_WQE_PAGE_ORDER)
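
/* Worked example (illustrative): a multi-packet WQE is 2^18 = 256 KiB. With
 * 4 KiB pages (PAGE_SHIFT = 12) the page order is 18 - 12 = 6, i.e. 64 pages
 * per WQE; with 64 KiB pages it drops to order 2, i.e. 4 pages per WQE.
 */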

#define MLX5_MTT_OCTW(npages) (ALIGN(npages, 8) / 2)
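
/* Illustrative reading (assuming the usual 8-byte MTT entry): MTT counts are
 * reported to HW in 16-byte octwords, two MTT entries each, hence the
 * alignment to 8 entries followed by the division by two above.
 */
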
/* Add another page to MLX5E_REQUIRED_WQE_MTTS as a buffer between
 * WQEs. This page will absorb write overflow by the hardware when
 * receiving packets larger than MTU. These oversized packets are
 * dropped by the driver at a later stage.
 */
#define MLX5E_REQUIRED_WQE_MTTS		(ALIGN(MLX5_MPWRQ_PAGES_PER_WQE + 1, 8))
#define MLX5E_LOG_ALIGNED_MPWQE_PPW	(ilog2(MLX5E_REQUIRED_WQE_MTTS))
#define MLX5E_REQUIRED_MTTS(wqes)	(wqes * MLX5E_REQUIRED_WQE_MTTS)
#define MLX5E_MAX_RQ_NUM_MTTS	\
	((1 << 16) * 2) /* So that MLX5_MTT_OCTW(num_mtts) fits into u16 */
#define MLX5E_ORDER2_MAX_PACKET_MTU (order_base_2(10 * 1024))
#define MLX5E_PARAMS_MAXIMUM_LOG_RQ_SIZE_MPW	\
		(ilog2(MLX5E_MAX_RQ_NUM_MTTS / MLX5E_REQUIRED_WQE_MTTS))
#define MLX5E_LOG_MAX_RQ_NUM_PACKETS_MPW \
	(MLX5E_PARAMS_MAXIMUM_LOG_RQ_SIZE_MPW + \
	 (MLX5_MPWRQ_LOG_WQE_SZ - MLX5E_ORDER2_MAX_PACKET_MTU))

#define MLX5E_MIN_SKB_FRAG_SZ		(MLX5_SKB_FRAG_SZ(MLX5_RX_HEADROOM))
#define MLX5E_LOG_MAX_RX_WQE_BULK	\
	(ilog2(PAGE_SIZE / roundup_pow_of_two(MLX5E_MIN_SKB_FRAG_SZ)))

#define MLX5E_PARAMS_MINIMUM_LOG_SQ_SIZE		0x6
#define MLX5E_PARAMS_DEFAULT_LOG_SQ_SIZE		0xa
#define MLX5E_PARAMS_MAXIMUM_LOG_SQ_SIZE		0xd

#define MLX5E_PARAMS_MINIMUM_LOG_RQ_SIZE (1 + MLX5E_LOG_MAX_RX_WQE_BULK)
#define MLX5E_PARAMS_DEFAULT_LOG_RQ_SIZE		0xa
#define MLX5E_PARAMS_MAXIMUM_LOG_RQ_SIZE min_t(u8, 0xd, \
					       MLX5E_LOG_MAX_RQ_NUM_PACKETS_MPW)

#define MLX5E_PARAMS_MINIMUM_LOG_RQ_SIZE_MPW		0x2

#define MLX5E_PARAMS_DEFAULT_LRO_WQE_SZ			(64 * 1024)
#define MLX5E_DEFAULT_LRO_TIMEOUT			32
#define MLX5E_LRO_TIMEOUT_ARR_SIZE			4

#define MLX5E_PARAMS_DEFAULT_RX_CQ_MODERATION_USEC	0x10
#define MLX5E_PARAMS_DEFAULT_RX_CQ_MODERATION_USEC_FROM_CQE 0x3
#define MLX5E_PARAMS_DEFAULT_RX_CQ_MODERATION_PKTS	0x20
#define MLX5E_PARAMS_DEFAULT_TX_CQ_MODERATION_USEC	0x10
#define MLX5E_PARAMS_DEFAULT_TX_CQ_MODERATION_USEC_FROM_CQE 0x10
#define MLX5E_PARAMS_DEFAULT_TX_CQ_MODERATION_PKTS	0x20
#define MLX5E_PARAMS_DEFAULT_MIN_RX_WQES		0x80
#define MLX5E_PARAMS_DEFAULT_MIN_RX_WQES_MPW		0x2

#define MLX5E_LOG_INDIR_RQT_SIZE       0x7
#define MLX5E_INDIR_RQT_SIZE           BIT(MLX5E_LOG_INDIR_RQT_SIZE)
#define MLX5E_MIN_NUM_CHANNELS         0x1
#define MLX5E_MAX_NUM_CHANNELS         MLX5E_INDIR_RQT_SIZE
#define MLX5E_MAX_NUM_SQS              (MLX5E_MAX_NUM_CHANNELS * MLX5E_MAX_NUM_TC)
#define MLX5E_TX_CQ_POLL_BUDGET        128
#define MLX5E_TX_XSK_POLL_BUDGET       64
#define MLX5E_SQ_RECOVER_MIN_INTERVAL  500 /* msecs */

#define MLX5E_UMR_WQE_INLINE_SZ \
	(sizeof(struct mlx5e_umr_wqe) + \
	 ALIGN(MLX5_MPWRQ_PAGES_PER_WQE * sizeof(struct mlx5_mtt), \
	       MLX5_UMR_MTT_ALIGNMENT))
#define MLX5E_UMR_WQEBBS \
	(DIV_ROUND_UP(MLX5E_UMR_WQE_INLINE_SZ, MLX5_SEND_WQE_BB))
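
/* Worked example (illustrative, assuming the usual segment sizes - 16-byte
 * ctrl + 48-byte UMR ctrl + 64-byte mkey seg - and 64 pages per WQE): the
 * inline size is 128 + 64 * 8 = 640 bytes, i.e. DIV_ROUND_UP(640, 64) = 10
 * WQE basic blocks per UMR WQE.
 */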

#define MLX5E_MSG_LEVEL			NETIF_MSG_LINK

#define mlx5e_dbg(mlevel, priv, format, ...)			\
do {								\
	if (NETIF_MSG_##mlevel & (priv)->msglevel)		\
		netdev_warn(priv->netdev, format,		\
			    ##__VA_ARGS__);			\
} while (0)
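
/* Usage sketch (illustrative): mlx5e_dbg(LINK, priv, "link up\n") emits the
 * message only when NETIF_MSG_LINK is set in priv->msglevel; the default
 * level is MLX5E_MSG_LEVEL above.
 */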

enum mlx5e_rq_group {
	MLX5E_RQ_GROUP_REGULAR,
	MLX5E_RQ_GROUP_XSK,
#define MLX5E_NUM_RQ_GROUPS(g) (1 + MLX5E_RQ_GROUP_##g)
};
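
/* The macro pastes the group name, e.g. MLX5E_NUM_RQ_GROUPS(REGULAR) == 1
 * and MLX5E_NUM_RQ_GROUPS(XSK) == 2.
 */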

static inline u8 mlx5e_get_num_lag_ports(struct mlx5_core_dev *mdev)
{
	if (mlx5_lag_is_lacp_owner(mdev))
		return 1;

	return clamp_t(u8, MLX5_CAP_GEN(mdev, num_lag_ports), 1, MLX5_MAX_PORTS);
}

static inline u16 mlx5_min_rx_wqes(int wq_type, u32 wq_size)
{
	switch (wq_type) {
	case MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ:
		return min_t(u16, MLX5E_PARAMS_DEFAULT_MIN_RX_WQES_MPW,
			     wq_size / 2);
	default:
		return min_t(u16, MLX5E_PARAMS_DEFAULT_MIN_RX_WQES,
			     wq_size / 2);
	}
}

/* Use this function to get the max number of channels (rxqs/txqs); it is
 * meant to be used only when creating the netdev.
 */
static inline int mlx5e_get_max_num_channels(struct mlx5_core_dev *mdev)
{
	return is_kdump_kernel() ?
		MLX5E_MIN_NUM_CHANNELS :
		min_t(int, mlx5_comp_vectors_count(mdev), MLX5E_MAX_NUM_CHANNELS);
}

struct mlx5e_tx_wqe {
	struct mlx5_wqe_ctrl_seg ctrl;
	struct mlx5_wqe_eth_seg  eth;
	struct mlx5_wqe_data_seg data[0];
};

struct mlx5e_rx_wqe_ll {
	struct mlx5_wqe_srq_next_seg  next;
	struct mlx5_wqe_data_seg      data[];
};

struct mlx5e_rx_wqe_cyc {
	struct mlx5_wqe_data_seg      data[0];
};

struct mlx5e_umr_wqe {
	struct mlx5_wqe_ctrl_seg       ctrl;
	struct mlx5_wqe_umr_ctrl_seg   uctrl;
	struct mlx5_mkey_seg           mkc;
	struct mlx5_mtt                inline_mtts[0];
};

extern const char mlx5e_self_tests[][ETH_GSTRING_LEN];

enum mlx5e_priv_flag {
	MLX5E_PFLAG_RX_CQE_BASED_MODER,
	MLX5E_PFLAG_TX_CQE_BASED_MODER,
	MLX5E_PFLAG_RX_CQE_COMPRESS,
	MLX5E_PFLAG_RX_STRIDING_RQ,
	MLX5E_PFLAG_RX_NO_CSUM_COMPLETE,
	MLX5E_PFLAG_XDP_TX_MPWQE,
	MLX5E_NUM_PFLAGS, /* Keep last */
};

#define MLX5E_SET_PFLAG(params, pflag, enable)			\
	do {							\
		if (enable)					\
			(params)->pflags |= BIT(pflag);		\
		else						\
			(params)->pflags &= ~(BIT(pflag));	\
	} while (0)

#define MLX5E_GET_PFLAG(params, pflag) (!!((params)->pflags & (BIT(pflag))))
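
/* Usage sketch (illustrative), e.g. toggling CQE compression on a
 * struct mlx5e_params *params and reading it back:
 *
 *	MLX5E_SET_PFLAG(params, MLX5E_PFLAG_RX_CQE_COMPRESS, true);
 *	if (MLX5E_GET_PFLAG(params, MLX5E_PFLAG_RX_CQE_COMPRESS))
 *		...;
 */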

struct mlx5e_params {
	u8  log_sq_size;
	u8  rq_wq_type;
	u8  log_rq_mtu_frames;
	u16 num_channels;
	u8  num_tc;
	bool rx_cqe_compress_def;
	bool tunneled_offload_en;
	struct dim_cq_moder rx_cq_moderation;
	struct dim_cq_moder tx_cq_moderation;
	bool lro_en;
	u8  tx_min_inline_mode;
	bool vlan_strip_disable;
	bool scatter_fcs_en;
	bool rx_dim_enabled;
	bool tx_dim_enabled;
	u32 lro_timeout;
	u32 pflags;
	struct bpf_prog *xdp_prog;
	struct mlx5e_xsk *xsk;
	unsigned int sw_mtu;
	int hard_mtu;
};

enum {
	MLX5E_RQ_STATE_ENABLED,
	MLX5E_RQ_STATE_RECOVERING,
	MLX5E_RQ_STATE_AM,
	MLX5E_RQ_STATE_NO_CSUM_COMPLETE,
	MLX5E_RQ_STATE_CSUM_FULL, /* cqe_csum_full hw bit is set */
	MLX5E_RQ_STATE_FPGA_TLS, /* FPGA TLS enabled */
};

struct mlx5e_cq {
	/* data path - accessed per cqe */
	struct mlx5_cqwq           wq;

	/* data path - accessed per napi poll */
	u16                        event_ctr;
	struct napi_struct        *napi;
	struct mlx5_core_cq        mcq;
	struct mlx5e_channel      *channel;

	/* control */
	struct mlx5_core_dev      *mdev;
	struct mlx5_wq_ctrl        wq_ctrl;
} ____cacheline_aligned_in_smp;

struct mlx5e_cq_decomp {
	/* cqe decompression */
	struct mlx5_cqe64          title;
	struct mlx5_mini_cqe8      mini_arr[MLX5_MINI_CQE_ARRAY_SIZE];
	u8                         mini_arr_idx;
	u16                        left;
	u16                        wqe_counter;
} ____cacheline_aligned_in_smp;

enum mlx5e_dma_map_type {
	MLX5E_DMA_MAP_SINGLE,
	MLX5E_DMA_MAP_PAGE
};

struct mlx5e_sq_dma {
	dma_addr_t              addr;
	u32                     size;
	enum mlx5e_dma_map_type type;
};

enum {
	MLX5E_SQ_STATE_ENABLED,
	MLX5E_SQ_STATE_RECOVERING,
	MLX5E_SQ_STATE_IPSEC,
	MLX5E_SQ_STATE_AM,
	MLX5E_SQ_STATE_TLS,
	MLX5E_SQ_STATE_VLAN_NEED_L2_INLINE,
	MLX5E_SQ_STATE_PENDING_XSK_TX,
};

struct mlx5e_txqsq {
	/* data path */

	/* dirtied @completion */
	u16                        cc;
	u32                        dma_fifo_cc;
	struct dim                 dim; /* Adaptive Moderation */

	/* dirtied @xmit */
	u16                        pc ____cacheline_aligned_in_smp;
	u32                        dma_fifo_pc;

	struct mlx5e_cq            cq;

	/* read only */
	struct mlx5_wq_cyc         wq;
	u32                        dma_fifo_mask;
	struct mlx5e_sq_stats     *stats;
	struct {
		struct mlx5e_sq_dma       *dma_fifo;
		struct mlx5e_tx_wqe_info  *wqe_info;
	} db;
	void __iomem              *uar_map;
	struct netdev_queue       *txq;
	u32                        sqn;
	u16                        stop_room;
	u8                         min_inline_mode;
	struct device             *pdev;
	__be32                     mkey_be;
	unsigned long              state;
	unsigned int               hw_mtu;
	struct hwtstamp_config    *tstamp;
	struct mlx5_clock         *clock;

	/* control path */
	struct mlx5_wq_ctrl        wq_ctrl;
	struct mlx5e_channel      *channel;
	int                        ch_ix;
	int                        txq_ix;
	u32                        rate_limit;
	struct work_struct         recover_work;
} ____cacheline_aligned_in_smp;

struct mlx5e_dma_info {
	dma_addr_t addr;
	union {
		struct page *page;
		struct xdp_buff *xsk;
	};
};

/* XDP packets can be transmitted in different ways. On completion, we need to
 * distinguish between them so that each can be cleaned up properly.
 */
enum mlx5e_xdp_xmit_mode {
	/* An xdp_frame was transmitted due to either XDP_REDIRECT from another
	 * device or XDP_TX from an XSK RQ. The frame has to be unmapped and
	 * returned.
	 */
	MLX5E_XDP_XMIT_MODE_FRAME,

	/* The xdp_frame was created in place as a result of XDP_TX from a
	 * regular RQ. No DMA remapping happened, and the page belongs to us.
	 */
	MLX5E_XDP_XMIT_MODE_PAGE,

	/* No xdp_frame was created at all, the transmit happened from a UMEM
	 * page. The UMEM Completion Ring producer pointer has to be increased.
	 */
	MLX5E_XDP_XMIT_MODE_XSK,
};

struct mlx5e_xdp_info {
	enum mlx5e_xdp_xmit_mode mode;
	union {
		struct {
			struct xdp_frame *xdpf;
			dma_addr_t dma_addr;
		} frame;
		struct {
			struct mlx5e_rq *rq;
			struct mlx5e_dma_info di;
		} page;
	};
};
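
/* Illustrative dispatch sketch (not the driver's actual completion code): a
 * completion handler is expected to switch on the mode recorded above,
 * roughly:
 *
 *	switch (xdpi.mode) {
 *	case MLX5E_XDP_XMIT_MODE_FRAME: ... unmap and return the frame ...
 *	case MLX5E_XDP_XMIT_MODE_PAGE:  ... release the page back to the RQ ...
 *	case MLX5E_XDP_XMIT_MODE_XSK:   ... advance the XSK completion ring ...
 *	}
 */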

struct mlx5e_xdp_xmit_data {
	dma_addr_t  dma_addr;
	void       *data;
	u32         len;
};

struct mlx5e_xdp_info_fifo {
	struct mlx5e_xdp_info *xi;
	u32 *cc;
	u32 *pc;
	u32 mask;
};
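
/* This is a power-of-two ring; assuming the usual producer/consumer
 * convention, entries are pushed at xi[(*pc)++ & mask] on xmit and popped at
 * xi[(*cc)++ & mask] on completion, so mask is the fifo size minus one.
 */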

struct mlx5e_xdp_mpwqe {
	/* Current MPWQE session */
	struct mlx5e_tx_wqe *wqe;
	u8                   ds_count;
	u8                   pkt_count;
	u8                   inline_on;
};

struct mlx5e_xdpsq;
typedef int (*mlx5e_fp_xmit_xdp_frame_check)(struct mlx5e_xdpsq *);
typedef bool (*mlx5e_fp_xmit_xdp_frame)(struct mlx5e_xdpsq *,
					struct mlx5e_xdp_xmit_data *,
					struct mlx5e_xdp_info *,
					int);

struct mlx5e_xdpsq {
	/* data path */

	/* dirtied @completion */
	u32                        xdpi_fifo_cc;
	u16                        cc;

	/* dirtied @xmit */
	u32                        xdpi_fifo_pc ____cacheline_aligned_in_smp;
	u16                        pc;
	struct mlx5_wqe_ctrl_seg   *doorbell_cseg;
	struct mlx5e_xdp_mpwqe     mpwqe;

	struct mlx5e_cq            cq;

	/* read only */
	struct xdp_umem           *umem;
	struct mlx5_wq_cyc         wq;
	struct mlx5e_xdpsq_stats  *stats;
	mlx5e_fp_xmit_xdp_frame_check xmit_xdp_frame_check;
	mlx5e_fp_xmit_xdp_frame    xmit_xdp_frame;
	struct {
		struct mlx5e_xdp_wqe_info *wqe_info;
		struct mlx5e_xdp_info_fifo xdpi_fifo;
	} db;
	void __iomem              *uar_map;
	u32                        sqn;
	struct device             *pdev;
	__be32                     mkey_be;
	u8                         min_inline_mode;
	unsigned long              state;
	unsigned int               hw_mtu;

	/* control path */
	struct mlx5_wq_ctrl        wq_ctrl;
	struct mlx5e_channel      *channel;
} ____cacheline_aligned_in_smp;

struct mlx5e_icosq {
	/* data path */
	u16                        cc;
	u16                        pc;

	struct mlx5_wqe_ctrl_seg  *doorbell_cseg;
	struct mlx5e_cq            cq;

	/* write@xmit, read@completion */
	struct {
		struct mlx5e_icosq_wqe_info *wqe_info;
	} db;

	/* read only */
	struct mlx5_wq_cyc         wq;
	void __iomem              *uar_map;
	u32                        sqn;
	unsigned long              state;

	/* control path */
	struct mlx5_wq_ctrl        wq_ctrl;
	struct mlx5e_channel      *channel;

	struct work_struct         recover_work;
} ____cacheline_aligned_in_smp;

struct mlx5e_wqe_frag_info {
	struct mlx5e_dma_info *di;
	u32 offset;
	bool last_in_page;
};

struct mlx5e_umr_dma_info {
	struct mlx5e_dma_info  dma_info[MLX5_MPWRQ_PAGES_PER_WQE];
};

struct mlx5e_mpw_info {
	struct mlx5e_umr_dma_info umr;
	u16 consumed_strides;
	DECLARE_BITMAP(xdp_xmit_bitmap, MLX5_MPWRQ_PAGES_PER_WQE);
};

#define MLX5E_MAX_RX_FRAGS 4

/* A single cache unit can serve one napi call (for a non-striding rq) or
 * an MPWQE (for a striding rq).
 */
#define MLX5E_CACHE_UNIT	(MLX5_MPWRQ_PAGES_PER_WQE > NAPI_POLL_WEIGHT ? \
				 MLX5_MPWRQ_PAGES_PER_WQE : NAPI_POLL_WEIGHT)
#define MLX5E_CACHE_SIZE	(4 * roundup_pow_of_two(MLX5E_CACHE_UNIT))
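
/* Worked example (illustrative, assuming 4 KiB pages and the kernel's
 * NAPI_POLL_WEIGHT of 64): MLX5E_CACHE_UNIT = max(64, 64) = 64, so
 * MLX5E_CACHE_SIZE = 4 * 64 = 256 entries.
 */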
struct mlx5e_page_cache {
	u32 head;
	u32 tail;
	struct mlx5e_dma_info page_cache[MLX5E_CACHE_SIZE];
};

struct mlx5e_rq;
typedef void (*mlx5e_fp_handle_rx_cqe)(struct mlx5e_rq*, struct mlx5_cqe64*);
typedef struct sk_buff *
(*mlx5e_fp_skb_from_cqe_mpwrq)(struct mlx5e_rq *rq, struct mlx5e_mpw_info *wi,
			       u16 cqe_bcnt, u32 head_offset, u32 page_idx);
typedef struct sk_buff *
(*mlx5e_fp_skb_from_cqe)(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe,
			 struct mlx5e_wqe_frag_info *wi, u32 cqe_bcnt);
typedef bool (*mlx5e_fp_post_rx_wqes)(struct mlx5e_rq *rq);
typedef void (*mlx5e_fp_dealloc_wqe)(struct mlx5e_rq*, u16);

int mlx5e_rq_set_handlers(struct mlx5e_rq *rq, struct mlx5e_params *params, bool xsk);

enum mlx5e_rq_flag {
	MLX5E_RQ_FLAG_XDP_XMIT,
	MLX5E_RQ_FLAG_XDP_REDIRECT,
};

struct mlx5e_rq_frag_info {
	int frag_size;
	int frag_stride;
};

struct mlx5e_rq_frags_info {
	struct mlx5e_rq_frag_info arr[MLX5E_MAX_RX_FRAGS];
	u8 num_frags;
	u8 log_num_frags;
	u8 wqe_bulk;
};

struct mlx5e_rq {
	/* data path */
	union {
		struct {
			struct mlx5_wq_cyc          wq;
			struct mlx5e_wqe_frag_info *frags;
			struct mlx5e_dma_info      *di;
			struct mlx5e_rq_frags_info  info;
			mlx5e_fp_skb_from_cqe       skb_from_cqe;
		} wqe;
		struct {
			struct mlx5_wq_ll      wq;
			struct mlx5e_umr_wqe   umr_wqe;
			struct mlx5e_mpw_info *info;
			mlx5e_fp_skb_from_cqe_mpwrq skb_from_cqe_mpwrq;
			u16                    num_strides;
			u16                    actual_wq_head;
			u8                     log_stride_sz;
			u8                     umr_in_progress;
			u8                     umr_last_bulk;
			u8                     umr_completed;
		} mpwqe;
	};
	struct {
		u16            headroom;
		u32            frame0_sz;
		u8             map_dir;   /* dma map direction */
	} buff;

	struct mlx5e_channel  *channel;
	struct device         *pdev;
	struct net_device     *netdev;
	struct mlx5e_rq_stats *stats;
	struct mlx5e_cq        cq;
	struct mlx5e_cq_decomp cqd;
	struct mlx5e_page_cache page_cache;
	struct hwtstamp_config *tstamp;
	struct mlx5_clock      *clock;

	mlx5e_fp_handle_rx_cqe handle_rx_cqe;
	mlx5e_fp_post_rx_wqes  post_wqes;
	mlx5e_fp_dealloc_wqe   dealloc_wqe;

	unsigned long          state;
	int                    ix;
	unsigned int           hw_mtu;

	struct dim         dim; /* Dynamic Interrupt Moderation */

	/* XDP */
	struct bpf_prog __rcu *xdp_prog;
	struct mlx5e_xdpsq    *xdpsq;
	DECLARE_BITMAP(flags, 8);
	struct page_pool      *page_pool;

	/* AF_XDP zero-copy */
	struct xdp_umem       *umem;

	struct work_struct     recover_work;

	/* control */
	struct mlx5_wq_ctrl    wq_ctrl;
	__be32                 mkey_be;
	u8                     wq_type;
	u32                    rqn;
	struct mlx5_core_dev  *mdev;
	struct mlx5_core_mkey  umr_mkey;
	struct mlx5e_dma_info  wqe_overflow;

	/* XDP read-mostly */
	struct xdp_rxq_info    xdp_rxq;
} ____cacheline_aligned_in_smp;

enum mlx5e_channel_state {
	MLX5E_CHANNEL_STATE_XSK,
	MLX5E_CHANNEL_NUM_STATES
};

struct mlx5e_channel {
	/* data path */
	struct mlx5e_rq            rq;
	struct mlx5e_xdpsq         rq_xdpsq;
	struct mlx5e_txqsq         sq[MLX5E_MAX_NUM_TC];
	struct mlx5e_icosq         icosq;   /* internal control operations */
	bool                       xdp;
	struct napi_struct         napi;
	struct device             *pdev;
	struct net_device         *netdev;
	__be32                     mkey_be;
	u8                         num_tc;
	u8                         lag_port;

	/* XDP_REDIRECT */
	struct mlx5e_xdpsq         xdpsq;

	/* AF_XDP zero-copy */
	struct mlx5e_rq            xskrq;
	struct mlx5e_xdpsq         xsksq;

	/* Async ICOSQ */
	struct mlx5e_icosq         async_icosq;
	/* async_icosq can be accessed from any CPU - the spinlock protects it. */
	spinlock_t                 async_icosq_lock;

	/* data path - accessed per napi poll */
	struct irq_desc *irq_desc;
	struct mlx5e_ch_stats     *stats;

	/* control */
	struct mlx5e_priv         *priv;
	struct mlx5_core_dev      *mdev;
	struct hwtstamp_config    *tstamp;
	DECLARE_BITMAP(state, MLX5E_CHANNEL_NUM_STATES);
	int                        ix;
	int                        cpu;
};

struct mlx5e_channels {
	struct mlx5e_channel **c;
	unsigned int           num;
	struct mlx5e_params    params;
};

struct mlx5e_channel_stats {
	struct mlx5e_ch_stats ch;
	struct mlx5e_sq_stats sq[MLX5E_MAX_NUM_TC];
	struct mlx5e_rq_stats rq;
	struct mlx5e_rq_stats xskrq;
	struct mlx5e_xdpsq_stats rq_xdpsq;
	struct mlx5e_xdpsq_stats xdpsq;
	struct mlx5e_xdpsq_stats xsksq;
} ____cacheline_aligned_in_smp;

enum {
	MLX5E_STATE_OPENED,
	MLX5E_STATE_DESTROYING,
	MLX5E_STATE_XDP_TX_ENABLED,
	MLX5E_STATE_XDP_ACTIVE,
};

struct mlx5e_rqt {
	u32              rqtn;
	bool             enabled;
};

struct mlx5e_tir {
	u32               tirn;
	struct mlx5e_rqt  rqt;
	struct list_head  list;
};

enum {
	MLX5E_TC_PRIO = 0,
	MLX5E_NIC_PRIO
};

struct mlx5e_rss_params {
	u32     indirection_rqt[MLX5E_INDIR_RQT_SIZE];
	u32     rx_hash_fields[MLX5E_NUM_INDIR_TIRS];
	u8      toeplitz_hash_key[40];
	u8      hfunc;
};

struct mlx5e_modify_sq_param {
	int curr_state;
	int next_state;
	int rl_update;
	int rl_index;
};

#if IS_ENABLED(CONFIG_PCI_HYPERV_INTERFACE)
struct mlx5e_hv_vhca_stats_agent {
	struct mlx5_hv_vhca_agent *agent;
	struct delayed_work        work;
	u16                        delay;
	void                      *buf;
};
#endif

struct mlx5e_xsk {
	/* UMEMs are stored separately from channels, because we don't want to
	 * lose them when channels are recreated. The kernel also stores UMEMs,
	 * but it doesn't distinguish between zero-copy and non-zero-copy
	 * UMEMs, so we rely on our own mechanism.
	 */
	struct xdp_umem **umems;
	u16 refcnt;
	bool ever_used;
};

/* Temporary storage for variables that are allocated when struct mlx5e_priv is
 * initialized, and used where we can't allocate them because those functions
 * must not fail. Use with care and make sure the same variable is not used
 * simultaneously by multiple users.
 */
struct mlx5e_scratchpad {
	cpumask_var_t cpumask;
};

struct mlx5e_priv {
	/* priv data path fields - start */
	struct mlx5e_txqsq *txq2sq[MLX5E_MAX_NUM_CHANNELS * MLX5E_MAX_NUM_TC];
	int channel_tc2realtxq[MLX5E_MAX_NUM_CHANNELS][MLX5E_MAX_NUM_TC];
#ifdef CONFIG_MLX5_CORE_EN_DCB
	struct mlx5e_dcbx_dp       dcbx_dp;
#endif
	/* priv data path fields - end */

	u32                        msglevel;
	unsigned long              state;
	struct mutex               state_lock; /* Protects Interface state */
	struct mlx5e_rq            drop_rq;

	struct mlx5e_channels      channels;
	u32                        tisn[MLX5_MAX_PORTS][MLX5E_MAX_NUM_TC];
	struct mlx5e_rqt           indir_rqt;
	struct mlx5e_tir           indir_tir[MLX5E_NUM_INDIR_TIRS];
	struct mlx5e_tir           inner_indir_tir[MLX5E_NUM_INDIR_TIRS];
	struct mlx5e_tir           direct_tir[MLX5E_MAX_NUM_CHANNELS];
	struct mlx5e_tir           xsk_tir[MLX5E_MAX_NUM_CHANNELS];
	struct mlx5e_rss_params    rss_params;
	u32                        tx_rates[MLX5E_MAX_NUM_SQS];

	struct mlx5e_flow_steering fs;

	struct workqueue_struct    *wq;
	struct work_struct         update_carrier_work;
	struct work_struct         set_rx_mode_work;
	struct work_struct         tx_timeout_work;
	struct work_struct         update_stats_work;
	struct work_struct         monitor_counters_work;
	struct mlx5_nb             monitor_counters_nb;

	struct mlx5_core_dev      *mdev;
	struct net_device         *netdev;
	struct mlx5e_stats         stats;
	struct mlx5e_channel_stats channel_stats[MLX5E_MAX_NUM_CHANNELS];
	u16                        max_nch;
	u8                         max_opened_tc;
	struct hwtstamp_config     tstamp;
	u16                        q_counter;
	u16                        drop_rq_q_counter;
	struct notifier_block      events_nb;

	struct udp_tunnel_nic_info nic_info;
#ifdef CONFIG_MLX5_CORE_EN_DCB
	struct mlx5e_dcbx          dcbx;
#endif

	const struct mlx5e_profile *profile;
	void                      *ppriv;
#ifdef CONFIG_MLX5_EN_IPSEC
	struct mlx5e_ipsec        *ipsec;
#endif
#ifdef CONFIG_MLX5_EN_TLS
	struct mlx5e_tls          *tls;
#endif
	struct devlink_health_reporter *tx_reporter;
	struct devlink_health_reporter *rx_reporter;
	struct devlink_port            dl_port;
	struct mlx5e_xsk           xsk;
#if IS_ENABLED(CONFIG_PCI_HYPERV_INTERFACE)
	struct mlx5e_hv_vhca_stats_agent stats_agent;
#endif
	struct mlx5e_scratchpad    scratchpad;
};

struct mlx5e_rx_handlers {
	mlx5e_fp_handle_rx_cqe handle_rx_cqe;
	mlx5e_fp_handle_rx_cqe handle_rx_cqe_mpwqe;
};

extern const struct mlx5e_rx_handlers mlx5e_rx_handlers_nic;

struct mlx5e_profile {
	int	(*init)(struct mlx5_core_dev *mdev,
			struct net_device *netdev,
			const struct mlx5e_profile *profile, void *ppriv);
	void	(*cleanup)(struct mlx5e_priv *priv);
	int	(*init_rx)(struct mlx5e_priv *priv);
	void	(*cleanup_rx)(struct mlx5e_priv *priv);
	int	(*init_tx)(struct mlx5e_priv *priv);
	void	(*cleanup_tx)(struct mlx5e_priv *priv);
	void	(*enable)(struct mlx5e_priv *priv);
	void	(*disable)(struct mlx5e_priv *priv);
	int	(*update_rx)(struct mlx5e_priv *priv);
	void	(*update_stats)(struct mlx5e_priv *priv);
	void	(*update_carrier)(struct mlx5e_priv *priv);
	unsigned int (*stats_grps_num)(struct mlx5e_priv *priv);
	mlx5e_stats_grp_t *stats_grps;
	const struct mlx5e_rx_handlers *rx_handlers;
	int	max_tc;
	u8	rq_groups;
};

void mlx5e_build_ptys2ethtool_map(void);

bool mlx5e_check_fragmented_striding_rq_cap(struct mlx5_core_dev *mdev);
bool mlx5e_striding_rq_possible(struct mlx5_core_dev *mdev,
				struct mlx5e_params *params);

void mlx5e_get_stats(struct net_device *dev, struct rtnl_link_stats64 *stats);
void mlx5e_fold_sw_stats64(struct mlx5e_priv *priv, struct rtnl_link_stats64 *s);

void mlx5e_init_l2_addr(struct mlx5e_priv *priv);
int mlx5e_self_test_num(struct mlx5e_priv *priv);
void mlx5e_self_test(struct net_device *ndev, struct ethtool_test *etest,
		     u64 *buf);
void mlx5e_set_rx_mode_work(struct work_struct *work);

int mlx5e_hwstamp_set(struct mlx5e_priv *priv, struct ifreq *ifr);
int mlx5e_hwstamp_get(struct mlx5e_priv *priv, struct ifreq *ifr);
int mlx5e_modify_rx_cqe_compression_locked(struct mlx5e_priv *priv, bool val);

int mlx5e_vlan_rx_add_vid(struct net_device *dev, __always_unused __be16 proto,
			  u16 vid);
int mlx5e_vlan_rx_kill_vid(struct net_device *dev, __always_unused __be16 proto,
			   u16 vid);
void mlx5e_timestamp_init(struct mlx5e_priv *priv);

struct mlx5e_redirect_rqt_param {
	bool is_rss;
	union {
		u32 rqn; /* Direct RQN (Non-RSS) */
		struct {
			u8 hfunc;
			struct mlx5e_channels *channels;
		} rss; /* RSS data */
	};
};

int mlx5e_redirect_rqt(struct mlx5e_priv *priv, u32 rqtn, int sz,
		       struct mlx5e_redirect_rqt_param rrp);
void mlx5e_build_indir_tir_ctx_hash(struct mlx5e_rss_params *rss_params,
				    const struct mlx5e_tirc_config *ttconfig,
				    void *tirc, bool inner);
void mlx5e_modify_tirs_hash(struct mlx5e_priv *priv, void *in);
struct mlx5e_tirc_config mlx5e_tirc_get_default_config(enum mlx5e_traffic_types tt);

struct mlx5e_xsk_param;

struct mlx5e_rq_param;
int mlx5e_open_rq(struct mlx5e_channel *c, struct mlx5e_params *params,
		  struct mlx5e_rq_param *param, struct mlx5e_xsk_param *xsk,
		  struct xdp_umem *umem, struct mlx5e_rq *rq);
int mlx5e_wait_for_min_rx_wqes(struct mlx5e_rq *rq, int wait_time);
void mlx5e_deactivate_rq(struct mlx5e_rq *rq);
void mlx5e_close_rq(struct mlx5e_rq *rq);

struct mlx5e_sq_param;
int mlx5e_open_icosq(struct mlx5e_channel *c, struct mlx5e_params *params,
		     struct mlx5e_sq_param *param, struct mlx5e_icosq *sq);
void mlx5e_close_icosq(struct mlx5e_icosq *sq);
int mlx5e_open_xdpsq(struct mlx5e_channel *c, struct mlx5e_params *params,
		     struct mlx5e_sq_param *param, struct xdp_umem *umem,
		     struct mlx5e_xdpsq *sq, bool is_redirect);
void mlx5e_close_xdpsq(struct mlx5e_xdpsq *sq);

struct mlx5e_cq_param;
int mlx5e_open_cq(struct mlx5e_channel *c, struct dim_cq_moder moder,
		  struct mlx5e_cq_param *param, struct mlx5e_cq *cq);
void mlx5e_close_cq(struct mlx5e_cq *cq);

int mlx5e_open_locked(struct net_device *netdev);
int mlx5e_close_locked(struct net_device *netdev);

int mlx5e_open_channels(struct mlx5e_priv *priv,
			struct mlx5e_channels *chs);
void mlx5e_close_channels(struct mlx5e_channels *chs);

/* Function pointer to be used to modify HW or kernel settings while
 * switching channels
 */
typedef int (*mlx5e_fp_preactivate)(struct mlx5e_priv *priv, void *context);
#define MLX5E_DEFINE_PREACTIVATE_WRAPPER_CTX(fn) \
int fn##_ctx(struct mlx5e_priv *priv, void *context) \
{ \
	return fn(priv); \
}
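
/* For example, MLX5E_DEFINE_PREACTIVATE_WRAPPER_CTX(mlx5e_set_dev_port_mtu)
 * expands to a mlx5e_set_dev_port_mtu_ctx() wrapper (prototyped below) that
 * simply drops the unused context argument.
 */
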
int mlx5e_safe_reopen_channels(struct mlx5e_priv *priv);
int mlx5e_safe_switch_channels(struct mlx5e_priv *priv,
			       struct mlx5e_channels *new_chs,
			       mlx5e_fp_preactivate preactivate,
			       void *context);
int mlx5e_num_channels_changed(struct mlx5e_priv *priv);
int mlx5e_num_channels_changed_ctx(struct mlx5e_priv *priv, void *context);
void mlx5e_activate_priv_channels(struct mlx5e_priv *priv);
void mlx5e_deactivate_priv_channels(struct mlx5e_priv *priv);

void mlx5e_build_default_indir_rqt(u32 *indirection_rqt, int len,
				   int num_channels);

void mlx5e_reset_tx_moderation(struct mlx5e_params *params, u8 cq_period_mode);
void mlx5e_reset_rx_moderation(struct mlx5e_params *params, u8 cq_period_mode);
void mlx5e_set_tx_cq_mode_params(struct mlx5e_params *params, u8 cq_period_mode);
void mlx5e_set_rx_cq_mode_params(struct mlx5e_params *params, u8 cq_period_mode);

void mlx5e_set_rq_type(struct mlx5_core_dev *mdev, struct mlx5e_params *params);
void mlx5e_init_rq_type_params(struct mlx5_core_dev *mdev,
			       struct mlx5e_params *params);
int mlx5e_modify_rq_state(struct mlx5e_rq *rq, int curr_state, int next_state);
void mlx5e_activate_rq(struct mlx5e_rq *rq);
void mlx5e_activate_icosq(struct mlx5e_icosq *icosq);
void mlx5e_deactivate_icosq(struct mlx5e_icosq *icosq);

int mlx5e_modify_sq(struct mlx5_core_dev *mdev, u32 sqn,
		    struct mlx5e_modify_sq_param *p);
void mlx5e_activate_txqsq(struct mlx5e_txqsq *sq);
void mlx5e_tx_disable_queue(struct netdev_queue *txq);

static inline bool mlx5_tx_swp_supported(struct mlx5_core_dev *mdev)
{
	return MLX5_CAP_ETH(mdev, swp) &&
		MLX5_CAP_ETH(mdev, swp_csum) && MLX5_CAP_ETH(mdev, swp_lso);
}

extern const struct ethtool_ops mlx5e_ethtool_ops;

int mlx5e_create_tir(struct mlx5_core_dev *mdev, struct mlx5e_tir *tir,
		     u32 *in);
void mlx5e_destroy_tir(struct mlx5_core_dev *mdev,
		       struct mlx5e_tir *tir);
int mlx5e_create_mdev_resources(struct mlx5_core_dev *mdev);
void mlx5e_destroy_mdev_resources(struct mlx5_core_dev *mdev);
int mlx5e_refresh_tirs(struct mlx5e_priv *priv, bool enable_uc_lb,
		       bool enable_mc_lb);
void mlx5e_mkey_set_relaxed_ordering(struct mlx5_core_dev *mdev, void *mkc);

/* common netdev helpers */
void mlx5e_create_q_counters(struct mlx5e_priv *priv);
void mlx5e_destroy_q_counters(struct mlx5e_priv *priv);
int mlx5e_open_drop_rq(struct mlx5e_priv *priv,
		       struct mlx5e_rq *drop_rq);
void mlx5e_close_drop_rq(struct mlx5e_rq *drop_rq);

int mlx5e_create_indirect_rqt(struct mlx5e_priv *priv);

int mlx5e_create_indirect_tirs(struct mlx5e_priv *priv, bool inner_ttc);
void mlx5e_destroy_indirect_tirs(struct mlx5e_priv *priv);

int mlx5e_create_direct_rqts(struct mlx5e_priv *priv, struct mlx5e_tir *tirs);
void mlx5e_destroy_direct_rqts(struct mlx5e_priv *priv, struct mlx5e_tir *tirs);
int mlx5e_create_direct_tirs(struct mlx5e_priv *priv, struct mlx5e_tir *tirs);
void mlx5e_destroy_direct_tirs(struct mlx5e_priv *priv, struct mlx5e_tir *tirs);
void mlx5e_destroy_rqt(struct mlx5e_priv *priv, struct mlx5e_rqt *rqt);

int mlx5e_create_tis(struct mlx5_core_dev *mdev, void *in, u32 *tisn);
void mlx5e_destroy_tis(struct mlx5_core_dev *mdev, u32 tisn);

int mlx5e_create_tises(struct mlx5e_priv *priv);
void mlx5e_destroy_tises(struct mlx5e_priv *priv);
int mlx5e_update_nic_rx(struct mlx5e_priv *priv);
void mlx5e_update_carrier(struct mlx5e_priv *priv);
int mlx5e_close(struct net_device *netdev);
int mlx5e_open(struct net_device *netdev);

void mlx5e_queue_update_stats(struct mlx5e_priv *priv);
int mlx5e_bits_invert(unsigned long a, int size);

int mlx5e_set_dev_port_mtu(struct mlx5e_priv *priv);
int mlx5e_set_dev_port_mtu_ctx(struct mlx5e_priv *priv, void *context);
int mlx5e_change_mtu(struct net_device *netdev, int new_mtu,
		     mlx5e_fp_preactivate preactivate);
void mlx5e_vxlan_set_netdev_info(struct mlx5e_priv *priv);

/* ethtool helpers */
void mlx5e_ethtool_get_drvinfo(struct mlx5e_priv *priv,
			       struct ethtool_drvinfo *drvinfo);
void mlx5e_ethtool_get_strings(struct mlx5e_priv *priv,
			       uint32_t stringset, uint8_t *data);
int mlx5e_ethtool_get_sset_count(struct mlx5e_priv *priv, int sset);
void mlx5e_ethtool_get_ethtool_stats(struct mlx5e_priv *priv,
				     struct ethtool_stats *stats, u64 *data);
void mlx5e_ethtool_get_ringparam(struct mlx5e_priv *priv,
				 struct ethtool_ringparam *param);
int mlx5e_ethtool_set_ringparam(struct mlx5e_priv *priv,
				struct ethtool_ringparam *param);
void mlx5e_ethtool_get_channels(struct mlx5e_priv *priv,
				struct ethtool_channels *ch);
int mlx5e_ethtool_set_channels(struct mlx5e_priv *priv,
			       struct ethtool_channels *ch);
int mlx5e_ethtool_get_coalesce(struct mlx5e_priv *priv,
			       struct ethtool_coalesce *coal);
int mlx5e_ethtool_set_coalesce(struct mlx5e_priv *priv,
			       struct ethtool_coalesce *coal);
int mlx5e_ethtool_get_link_ksettings(struct mlx5e_priv *priv,
				     struct ethtool_link_ksettings *link_ksettings);
int mlx5e_ethtool_set_link_ksettings(struct mlx5e_priv *priv,
				     const struct ethtool_link_ksettings *link_ksettings);
int mlx5e_get_rxfh(struct net_device *netdev, u32 *indir, u8 *key, u8 *hfunc);
int mlx5e_set_rxfh(struct net_device *dev, const u32 *indir, const u8 *key,
		   const u8 hfunc);
int mlx5e_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *info,
		    u32 *rule_locs);
int mlx5e_set_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd);
u32 mlx5e_ethtool_get_rxfh_key_size(struct mlx5e_priv *priv);
u32 mlx5e_ethtool_get_rxfh_indir_size(struct mlx5e_priv *priv);
int mlx5e_ethtool_get_ts_info(struct mlx5e_priv *priv,
			      struct ethtool_ts_info *info);
int mlx5e_ethtool_flash_device(struct mlx5e_priv *priv,
			       struct ethtool_flash *flash);
void mlx5e_ethtool_get_pauseparam(struct mlx5e_priv *priv,
				  struct ethtool_pauseparam *pauseparam);
int mlx5e_ethtool_set_pauseparam(struct mlx5e_priv *priv,
				 struct ethtool_pauseparam *pauseparam);

/* mlx5e generic netdev management API */
int mlx5e_netdev_init(struct net_device *netdev,
		      struct mlx5e_priv *priv,
		      struct mlx5_core_dev *mdev,
		      const struct mlx5e_profile *profile,
		      void *ppriv);
void mlx5e_netdev_cleanup(struct net_device *netdev, struct mlx5e_priv *priv);
struct net_device*
mlx5e_create_netdev(struct mlx5_core_dev *mdev, const struct mlx5e_profile *profile,
		    int nch, void *ppriv);
int mlx5e_attach_netdev(struct mlx5e_priv *priv);
void mlx5e_detach_netdev(struct mlx5e_priv *priv);
void mlx5e_destroy_netdev(struct mlx5e_priv *priv);
void mlx5e_set_netdev_mtu_boundaries(struct mlx5e_priv *priv);
void mlx5e_build_nic_params(struct mlx5e_priv *priv,
			    struct mlx5e_xsk *xsk,
			    struct mlx5e_rss_params *rss_params,
			    struct mlx5e_params *params,
			    u16 mtu);
void mlx5e_build_rq_params(struct mlx5_core_dev *mdev,
			   struct mlx5e_params *params);
void mlx5e_build_rss_params(struct mlx5e_rss_params *rss_params,
			    u16 num_channels);
void mlx5e_rx_dim_work(struct work_struct *work);
void mlx5e_tx_dim_work(struct work_struct *work);

netdev_features_t mlx5e_features_check(struct sk_buff *skb,
				       struct net_device *netdev,
				       netdev_features_t features);
int mlx5e_set_features(struct net_device *netdev, netdev_features_t features);
#ifdef CONFIG_MLX5_ESWITCH
int mlx5e_set_vf_mac(struct net_device *dev, int vf, u8 *mac);
int mlx5e_set_vf_rate(struct net_device *dev, int vf, int min_tx_rate, int max_tx_rate);
int mlx5e_get_vf_config(struct net_device *dev, int vf, struct ifla_vf_info *ivi);
int mlx5e_get_vf_stats(struct net_device *dev, int vf, struct ifla_vf_stats *vf_stats);
#endif
#endif /* __MLX5_EN_H__ */