/* linux/drivers/net/ethernet/mellanox/mlx5/core/en.h
 * (source-viewer navigation chrome removed)
 */
   1/*
   2 * Copyright (c) 2015-2016, Mellanox Technologies. All rights reserved.
   3 *
   4 * This software is available to you under a choice of one of two
   5 * licenses.  You may choose to be licensed under the terms of the GNU
   6 * General Public License (GPL) Version 2, available from the file
   7 * COPYING in the main directory of this source tree, or the
   8 * OpenIB.org BSD license below:
   9 *
  10 *     Redistribution and use in source and binary forms, with or
  11 *     without modification, are permitted provided that the following
  12 *     conditions are met:
  13 *
  14 *      - Redistributions of source code must retain the above
  15 *        copyright notice, this list of conditions and the following
  16 *        disclaimer.
  17 *
  18 *      - Redistributions in binary form must reproduce the above
  19 *        copyright notice, this list of conditions and the following
  20 *        disclaimer in the documentation and/or other materials
  21 *        provided with the distribution.
  22 *
  23 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
  24 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
  25 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
  26 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
  27 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
  28 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
  29 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
  30 * SOFTWARE.
  31 */
  32#ifndef __MLX5_EN_H__
  33#define __MLX5_EN_H__
  34
  35#include <linux/if_vlan.h>
  36#include <linux/etherdevice.h>
  37#include <linux/timecounter.h>
  38#include <linux/net_tstamp.h>
  39#include <linux/crash_dump.h>
  40#include <linux/mlx5/driver.h>
  41#include <linux/mlx5/qp.h>
  42#include <linux/mlx5/cq.h>
  43#include <linux/mlx5/port.h>
  44#include <linux/mlx5/vport.h>
  45#include <linux/mlx5/transobj.h>
  46#include <linux/mlx5/fs.h>
  47#include <linux/rhashtable.h>
  48#include <net/udp_tunnel.h>
  49#include <net/switchdev.h>
  50#include <net/xdp.h>
  51#include <linux/dim.h>
  52#include <linux/bits.h>
  53#include "wq.h"
  54#include "mlx5_core.h"
  55#include "en_stats.h"
  56#include "en/dcbnl.h"
  57#include "en/fs.h"
  58#include "en/qos.h"
  59#include "lib/hv_vhca.h"
  60#include "lib/clock.h"
  61
  62extern const struct net_device_ops mlx5e_netdev_ops;
  63struct page_pool;
  64
  65#define MLX5E_METADATA_ETHER_TYPE (0x8CE4)
  66#define MLX5E_METADATA_ETHER_LEN 8
  67
  68#define MLX5_SET_CFG(p, f, v) MLX5_SET(create_flow_group_in, p, f, v)
  69
  70#define MLX5E_ETH_HARD_MTU (ETH_HLEN + VLAN_HLEN + ETH_FCS_LEN)
  71
  72#define MLX5E_HW2SW_MTU(params, hwmtu) ((hwmtu) - ((params)->hard_mtu))
  73#define MLX5E_SW2HW_MTU(params, swmtu) ((swmtu) + ((params)->hard_mtu))
  74
  75#define MLX5E_MAX_NUM_TC        8
  76
  77#define MLX5_RX_HEADROOM NET_SKB_PAD
  78#define MLX5_SKB_FRAG_SZ(len)   (SKB_DATA_ALIGN(len) +  \
  79                                 SKB_DATA_ALIGN(sizeof(struct skb_shared_info)))
  80
  81#define MLX5E_RX_MAX_HEAD (256)
  82
  83#define MLX5_MPWRQ_MIN_LOG_STRIDE_SZ(mdev) \
  84        (6 + MLX5_CAP_GEN(mdev, cache_line_128byte)) /* HW restriction */
  85#define MLX5_MPWRQ_LOG_STRIDE_SZ(mdev, req) \
  86        max_t(u32, MLX5_MPWRQ_MIN_LOG_STRIDE_SZ(mdev), req)
  87#define MLX5_MPWRQ_DEF_LOG_STRIDE_SZ(mdev) \
  88        MLX5_MPWRQ_LOG_STRIDE_SZ(mdev, order_base_2(MLX5E_RX_MAX_HEAD))
  89
  90#define MLX5_MPWRQ_LOG_WQE_SZ                   18
  91#define MLX5_MPWRQ_WQE_PAGE_ORDER  (MLX5_MPWRQ_LOG_WQE_SZ - PAGE_SHIFT > 0 ? \
  92                                    MLX5_MPWRQ_LOG_WQE_SZ - PAGE_SHIFT : 0)
  93#define MLX5_MPWRQ_PAGES_PER_WQE                BIT(MLX5_MPWRQ_WQE_PAGE_ORDER)
  94
  95#define MLX5_ALIGN_MTTS(mtts)           (ALIGN(mtts, 8))
  96#define MLX5_ALIGNED_MTTS_OCTW(mtts)    ((mtts) / 2)
  97#define MLX5_MTT_OCTW(mtts)             (MLX5_ALIGNED_MTTS_OCTW(MLX5_ALIGN_MTTS(mtts)))
  98/* Add another page to MLX5E_REQUIRED_WQE_MTTS as a buffer between
  99 * WQEs, This page will absorb write overflow by the hardware, when
 100 * receiving packets larger than MTU. These oversize packets are
 101 * dropped by the driver at a later stage.
 102 */
 103#define MLX5E_REQUIRED_WQE_MTTS         (MLX5_ALIGN_MTTS(MLX5_MPWRQ_PAGES_PER_WQE + 1))
 104#define MLX5E_REQUIRED_MTTS(wqes)       (wqes * MLX5E_REQUIRED_WQE_MTTS)
 105#define MLX5E_MAX_RQ_NUM_MTTS   \
 106        ((1 << 16) * 2) /* So that MLX5_MTT_OCTW(num_mtts) fits into u16 */
 107#define MLX5E_ORDER2_MAX_PACKET_MTU (order_base_2(10 * 1024))
 108#define MLX5E_PARAMS_MAXIMUM_LOG_RQ_SIZE_MPW    \
 109                (ilog2(MLX5E_MAX_RQ_NUM_MTTS / MLX5E_REQUIRED_WQE_MTTS))
 110#define MLX5E_LOG_MAX_RQ_NUM_PACKETS_MPW \
 111        (MLX5E_PARAMS_MAXIMUM_LOG_RQ_SIZE_MPW + \
 112         (MLX5_MPWRQ_LOG_WQE_SZ - MLX5E_ORDER2_MAX_PACKET_MTU))
 113
 114#define MLX5E_MIN_SKB_FRAG_SZ           (MLX5_SKB_FRAG_SZ(MLX5_RX_HEADROOM))
 115#define MLX5E_LOG_MAX_RX_WQE_BULK       \
 116        (ilog2(PAGE_SIZE / roundup_pow_of_two(MLX5E_MIN_SKB_FRAG_SZ)))
 117
 118#define MLX5E_PARAMS_MINIMUM_LOG_SQ_SIZE                0x6
 119#define MLX5E_PARAMS_DEFAULT_LOG_SQ_SIZE                0xa
 120#define MLX5E_PARAMS_MAXIMUM_LOG_SQ_SIZE                0xd
 121
 122#define MLX5E_PARAMS_MINIMUM_LOG_RQ_SIZE (1 + MLX5E_LOG_MAX_RX_WQE_BULK)
 123#define MLX5E_PARAMS_DEFAULT_LOG_RQ_SIZE                0xa
 124#define MLX5E_PARAMS_MAXIMUM_LOG_RQ_SIZE min_t(u8, 0xd, \
 125                                               MLX5E_LOG_MAX_RQ_NUM_PACKETS_MPW)
 126
 127#define MLX5E_PARAMS_MINIMUM_LOG_RQ_SIZE_MPW            0x2
 128
 129#define MLX5E_PARAMS_DEFAULT_LRO_WQE_SZ                 (64 * 1024)
 130#define MLX5E_DEFAULT_LRO_TIMEOUT                       32
 131#define MLX5E_LRO_TIMEOUT_ARR_SIZE                      4
 132
 133#define MLX5E_PARAMS_DEFAULT_RX_CQ_MODERATION_USEC      0x10
 134#define MLX5E_PARAMS_DEFAULT_RX_CQ_MODERATION_USEC_FROM_CQE 0x3
 135#define MLX5E_PARAMS_DEFAULT_RX_CQ_MODERATION_PKTS      0x20
 136#define MLX5E_PARAMS_DEFAULT_TX_CQ_MODERATION_USEC      0x10
 137#define MLX5E_PARAMS_DEFAULT_TX_CQ_MODERATION_USEC_FROM_CQE 0x10
 138#define MLX5E_PARAMS_DEFAULT_TX_CQ_MODERATION_PKTS      0x20
 139#define MLX5E_PARAMS_DEFAULT_MIN_RX_WQES                0x80
 140#define MLX5E_PARAMS_DEFAULT_MIN_RX_WQES_MPW            0x2
 141
 142#define MLX5E_LOG_INDIR_RQT_SIZE       0x8
 143#define MLX5E_INDIR_RQT_SIZE           BIT(MLX5E_LOG_INDIR_RQT_SIZE)
 144#define MLX5E_MIN_NUM_CHANNELS         0x1
 145#define MLX5E_MAX_NUM_CHANNELS         (MLX5E_INDIR_RQT_SIZE / 2)
 146#define MLX5E_MAX_NUM_SQS              (MLX5E_MAX_NUM_CHANNELS * MLX5E_MAX_NUM_TC)
 147#define MLX5E_TX_CQ_POLL_BUDGET        128
 148#define MLX5E_TX_XSK_POLL_BUDGET       64
 149#define MLX5E_SQ_RECOVER_MIN_INTERVAL  500 /* msecs */
 150
 151#define MLX5E_UMR_WQE_INLINE_SZ \
 152        (sizeof(struct mlx5e_umr_wqe) + \
 153         ALIGN(MLX5_MPWRQ_PAGES_PER_WQE * sizeof(struct mlx5_mtt), \
 154               MLX5_UMR_MTT_ALIGNMENT))
 155#define MLX5E_UMR_WQEBBS \
 156        (DIV_ROUND_UP(MLX5E_UMR_WQE_INLINE_SZ, MLX5_SEND_WQE_BB))
 157
 158#define MLX5E_MSG_LEVEL                 NETIF_MSG_LINK
 159
 160#define mlx5e_dbg(mlevel, priv, format, ...)                    \
 161do {                                                            \
 162        if (NETIF_MSG_##mlevel & (priv)->msglevel)              \
 163                netdev_warn(priv->netdev, format,               \
 164                            ##__VA_ARGS__);                     \
 165} while (0)
 166
 167#define mlx5e_state_dereference(priv, p) \
 168        rcu_dereference_protected((p), lockdep_is_held(&(priv)->state_lock))
 169
/* RQ groups: every channel carries a regular RQ and may also carry an
 * XSK (AF_XDP) RQ (see struct mlx5e_channel.rq / .xskrq).
 * MLX5E_NUM_RQ_GROUPS(g) evaluates to the number of groups up to and
 * including group g, e.g. MLX5E_NUM_RQ_GROUPS(XSK) == 2.
 */
enum mlx5e_rq_group {
        MLX5E_RQ_GROUP_REGULAR,
        MLX5E_RQ_GROUP_XSK,
#define MLX5E_NUM_RQ_GROUPS(g) (1 + MLX5E_RQ_GROUP_##g)
};
 175
 176static inline u8 mlx5e_get_num_lag_ports(struct mlx5_core_dev *mdev)
 177{
 178        if (mlx5_lag_is_lacp_owner(mdev))
 179                return 1;
 180
 181        return clamp_t(u8, MLX5_CAP_GEN(mdev, num_lag_ports), 1, MLX5_MAX_PORTS);
 182}
 183
 184static inline u16 mlx5_min_rx_wqes(int wq_type, u32 wq_size)
 185{
 186        switch (wq_type) {
 187        case MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ:
 188                return min_t(u16, MLX5E_PARAMS_DEFAULT_MIN_RX_WQES_MPW,
 189                             wq_size / 2);
 190        default:
 191                return min_t(u16, MLX5E_PARAMS_DEFAULT_MIN_RX_WQES,
 192                             wq_size / 2);
 193        }
 194}
 195
 196/* Use this function to get max num channels (rxqs/txqs) only to create netdev */
 197static inline int mlx5e_get_max_num_channels(struct mlx5_core_dev *mdev)
 198{
 199        return is_kdump_kernel() ?
 200                MLX5E_MIN_NUM_CHANNELS :
 201                min_t(int, mlx5_comp_vectors_count(mdev), MLX5E_MAX_NUM_CHANNELS);
 202}
 203
 204struct mlx5e_tx_wqe {
 205        struct mlx5_wqe_ctrl_seg ctrl;
 206        struct mlx5_wqe_eth_seg  eth;
 207        struct mlx5_wqe_data_seg data[0];
 208};
 209
 210struct mlx5e_rx_wqe_ll {
 211        struct mlx5_wqe_srq_next_seg  next;
 212        struct mlx5_wqe_data_seg      data[];
 213};
 214
 215struct mlx5e_rx_wqe_cyc {
 216        struct mlx5_wqe_data_seg      data[0];
 217};
 218
 219struct mlx5e_umr_wqe {
 220        struct mlx5_wqe_ctrl_seg       ctrl;
 221        struct mlx5_wqe_umr_ctrl_seg   uctrl;
 222        struct mlx5_mkey_seg           mkc;
 223        struct mlx5_mtt                inline_mtts[0];
 224};
 225
 226extern const char mlx5e_self_tests[][ETH_GSTRING_LEN];
 227
/* Driver private flags. Each value is a bit index into
 * mlx5e_params.pflags, manipulated via MLX5E_SET_PFLAG/MLX5E_GET_PFLAG.
 */
enum mlx5e_priv_flag {
        MLX5E_PFLAG_RX_CQE_BASED_MODER,
        MLX5E_PFLAG_TX_CQE_BASED_MODER,
        MLX5E_PFLAG_RX_CQE_COMPRESS,
        MLX5E_PFLAG_RX_STRIDING_RQ,
        MLX5E_PFLAG_RX_NO_CSUM_COMPLETE,
        MLX5E_PFLAG_XDP_TX_MPWQE,
        MLX5E_PFLAG_SKB_TX_MPWQE,
        MLX5E_PFLAG_TX_PORT_TS,
        MLX5E_NUM_PFLAGS, /* Keep last */
};

/* Set or clear a single private-flag bit in params->pflags. */
#define MLX5E_SET_PFLAG(params, pflag, enable)                  \
        do {                                                    \
                if (enable)                                     \
                        (params)->pflags |= BIT(pflag);         \
                else                                            \
                        (params)->pflags &= ~(BIT(pflag));      \
        } while (0)

/* Test a single private-flag bit; evaluates to 0 or 1. */
#define MLX5E_GET_PFLAG(params, pflag) (!!((params)->pflags & (BIT(pflag))))
 249
/* Software datapath configuration. struct mlx5e_channels embeds its own
 * copy, so a channel set always carries the parameters it was built with.
 */
struct mlx5e_params {
        u8  log_sq_size;
        u8  rq_wq_type;
        u8  log_rq_mtu_frames;
        u16 num_channels;
        u8  num_tc;
        bool rx_cqe_compress_def;
        bool tunneled_offload_en;
        struct dim_cq_moder rx_cq_moderation;
        struct dim_cq_moder tx_cq_moderation;
        bool lro_en;
        u8  tx_min_inline_mode;
        bool vlan_strip_disable;
        bool scatter_fcs_en;
        bool rx_dim_enabled;
        bool tx_dim_enabled;
        u32 lro_timeout;
        u32 pflags;             /* bitmask of enum mlx5e_priv_flag */
        struct bpf_prog *xdp_prog;
        struct mlx5e_xsk *xsk;
        /* sw_mtu and hard_mtu are related via MLX5E_HW2SW_MTU/MLX5E_SW2HW_MTU */
        unsigned int sw_mtu;
        int hard_mtu;
};
 273
/* Bit numbers for the mlx5e_rq.state bitmask. */
enum {
        MLX5E_RQ_STATE_ENABLED,
        MLX5E_RQ_STATE_RECOVERING,
        MLX5E_RQ_STATE_AM, /* adaptive moderation — presumably gates rq.dim; confirm */
        MLX5E_RQ_STATE_NO_CSUM_COMPLETE,
        MLX5E_RQ_STATE_CSUM_FULL, /* cqe_csum_full hw bit is set */
        MLX5E_RQ_STATE_FPGA_TLS, /* FPGA TLS enabled */
        MLX5E_RQ_STATE_MINI_CQE_HW_STRIDX /* set when mini_cqe_resp_stride_index cap is used */
};
 283
/* Completion queue, embedded in every SQ and RQ. Fields are grouped by
 * how often the datapath touches them (per-CQE vs. per-NAPI-poll vs.
 * control path only).
 */
struct mlx5e_cq {
        /* data path - accessed per cqe */
        struct mlx5_cqwq           wq;

        /* data path - accessed per napi poll */
        u16                        event_ctr;
        struct napi_struct        *napi;
        struct mlx5_core_cq        mcq;
        struct mlx5e_ch_stats     *ch_stats;

        /* control */
        struct net_device         *netdev;
        struct mlx5_core_dev      *mdev;
        struct mlx5e_priv         *priv;
        struct mlx5_wq_ctrl        wq_ctrl;
} ____cacheline_aligned_in_smp;
 300
/* Scratch state for decompressing compressed CQEs: a "title" CQE plus
 * an array of mini CQEs. NOTE(review): field semantics inferred from
 * names — 'left' appears to count mini CQEs still to be expanded;
 * confirm against the RX datapath.
 */
struct mlx5e_cq_decomp {
        /* cqe decompression */
        struct mlx5_cqe64          title;
        struct mlx5_mini_cqe8      mini_arr[MLX5_MINI_CQE_ARRAY_SIZE];
        u8                         mini_arr_idx;
        u16                        left;
        u16                        wqe_counter;
} ____cacheline_aligned_in_smp;
 309
/* How a TX buffer was DMA-mapped — recorded so completion can undo the
 * mapping with the matching dma_unmap_* variant (verify in en_tx.c).
 */
enum mlx5e_dma_map_type {
        MLX5E_DMA_MAP_SINGLE,
        MLX5E_DMA_MAP_PAGE
};

/* One DMA mapping pending TX completion. */
struct mlx5e_sq_dma {
        dma_addr_t              addr;
        u32                     size;
        enum mlx5e_dma_map_type type;
};
 320
/* Bit numbers for the SQ state bitmask (mlx5e_txqsq/mlx5e_xdpsq/
 * mlx5e_icosq .state fields).
 */
enum {
        MLX5E_SQ_STATE_ENABLED,
        MLX5E_SQ_STATE_MPWQE,
        MLX5E_SQ_STATE_RECOVERING,
        MLX5E_SQ_STATE_IPSEC,
        MLX5E_SQ_STATE_AM,
        MLX5E_SQ_STATE_TLS,
        MLX5E_SQ_STATE_VLAN_NEED_L2_INLINE,
        MLX5E_SQ_STATE_PENDING_XSK_TX,
};
 331
/* State of the multi-packet WQE session currently being filled. */
struct mlx5e_tx_mpwqe {
        /* Current MPWQE session */
        struct mlx5e_tx_wqe *wqe;
        u32 bytes_count;
        u8 ds_count;
        u8 pkt_count;
        u8 inline_on;
};

/* Ring of SKB pointers awaiting TX completion. pc/cc point at counters
 * owned by the SQ; mask is presumably (power-of-two size - 1) — confirm.
 */
struct mlx5e_skb_fifo {
        struct sk_buff **fifo;
        u16 *pc;
        u16 *cc;
        u16 mask;
};
 347
 348struct mlx5e_ptpsq;
 349
/* Regular (netdev) transmit queue. Fields are grouped by which datapath
 * stage dirties them — completion vs. xmit — with the xmit group placed
 * on its own cache line (____cacheline_aligned_in_smp on pc) to limit
 * cache-line bouncing between the two contexts.
 */
struct mlx5e_txqsq {
        /* data path */

        /* dirtied @completion */
        u16                        cc;
        u16                        skb_fifo_cc;
        u32                        dma_fifo_cc;
        struct dim                 dim; /* Adaptive Moderation */

        /* dirtied @xmit */
        u16                        pc ____cacheline_aligned_in_smp;
        u16                        skb_fifo_pc;
        u32                        dma_fifo_pc;
        struct mlx5e_tx_mpwqe      mpwqe;

        struct mlx5e_cq            cq;

        /* read only */
        struct mlx5_wq_cyc         wq;
        u32                        dma_fifo_mask;
        struct mlx5e_sq_stats     *stats;
        struct {
                struct mlx5e_sq_dma       *dma_fifo;
                struct mlx5e_skb_fifo      skb_fifo;
                struct mlx5e_tx_wqe_info  *wqe_info;
        } db;
        void __iomem              *uar_map;
        struct netdev_queue       *txq;
        u32                        sqn;
        u16                        stop_room;
        u8                         min_inline_mode;
        struct device             *pdev;
        __be32                     mkey_be;
        unsigned long              state;
        unsigned int               hw_mtu;
        struct hwtstamp_config    *tstamp;
        struct mlx5_clock         *clock;
        struct net_device         *netdev;
        struct mlx5_core_dev      *mdev;
        struct mlx5e_priv         *priv;

        /* control path */
        struct mlx5_wq_ctrl        wq_ctrl;
        int                        ch_ix;
        int                        txq_ix;
        u32                        rate_limit;
        struct work_struct         recover_work;
        struct mlx5e_ptpsq        *ptpsq;
        cqe_ts_to_ns               ptp_cyc2time;
} ____cacheline_aligned_in_smp;
 400
/* A DMA address paired with its backing object: a regular page, or an
 * xdp_buff when the buffer comes from an AF_XDP pool.
 */
struct mlx5e_dma_info {
        dma_addr_t addr;
        union {
                struct page *page;
                struct xdp_buff *xsk;
        };
};
 408
/* XDP packets can be transmitted in different ways. On completion, we need to
 * distinguish between them to clean up things in a proper way.
 */
enum mlx5e_xdp_xmit_mode {
        /* An xdp_frame was transmitted due to either XDP_REDIRECT from another
         * device or XDP_TX from an XSK RQ. The frame has to be unmapped and
         * returned.
         */
        MLX5E_XDP_XMIT_MODE_FRAME,

        /* The xdp_frame was created in place as a result of XDP_TX from a
         * regular RQ. No DMA remapping happened, and the page belongs to us.
         */
        MLX5E_XDP_XMIT_MODE_PAGE,

        /* No xdp_frame was created at all, the transmit happened from a UMEM
         * page. The UMEM Completion Ring producer pointer has to be increased.
         */
        MLX5E_XDP_XMIT_MODE_XSK,
};

/* Per-descriptor XDP completion metadata; the active union arm is
 * selected by 'mode' (MODE_XSK carries no extra data).
 */
struct mlx5e_xdp_info {
        enum mlx5e_xdp_xmit_mode mode;
        union {
                struct {
                        struct xdp_frame *xdpf;
                        dma_addr_t dma_addr;
                } frame;
                struct {
                        struct mlx5e_rq *rq;
                        struct mlx5e_dma_info di;
                } page;
        };
};
 443
/* Descriptor of a buffer to transmit: CPU address, DMA address, length. */
struct mlx5e_xmit_data {
        dma_addr_t  dma_addr;
        void       *data;
        u32         len;
};

/* Ring of mlx5e_xdp_info entries awaiting completion; cc/pc point at
 * counters owned by the SQ, mask is presumably (size - 1) — confirm.
 */
struct mlx5e_xdp_info_fifo {
        struct mlx5e_xdp_info *xi;
        u32 *cc;
        u32 *pc;
        u32 mask;
};
 456
struct mlx5e_xdpsq;
/* Indirect-call hooks installed on the XDP SQ so the datapath can pick
 * the MPWQE or non-MPWQE transmit flavor at runtime.
 */
typedef int (*mlx5e_fp_xmit_xdp_frame_check)(struct mlx5e_xdpsq *);
typedef bool (*mlx5e_fp_xmit_xdp_frame)(struct mlx5e_xdpsq *,
                                        struct mlx5e_xmit_data *,
                                        struct mlx5e_xdp_info *,
                                        int);
 463
/* XDP transmit queue (XDP_TX, XDP_REDIRECT and AF_XDP TX). Laid out
 * like mlx5e_txqsq: completion-dirtied and xmit-dirtied fields on
 * separate cache lines, then read-only and control-path fields.
 */
struct mlx5e_xdpsq {
        /* data path */

        /* dirtied @completion */
        u32                        xdpi_fifo_cc;
        u16                        cc;

        /* dirtied @xmit */
        u32                        xdpi_fifo_pc ____cacheline_aligned_in_smp;
        u16                        pc;
        struct mlx5_wqe_ctrl_seg   *doorbell_cseg;
        struct mlx5e_tx_mpwqe      mpwqe;

        struct mlx5e_cq            cq;

        /* read only */
        struct xsk_buff_pool      *xsk_pool;
        struct mlx5_wq_cyc         wq;
        struct mlx5e_xdpsq_stats  *stats;
        mlx5e_fp_xmit_xdp_frame_check xmit_xdp_frame_check;
        mlx5e_fp_xmit_xdp_frame    xmit_xdp_frame;
        struct {
                struct mlx5e_xdp_wqe_info *wqe_info;
                struct mlx5e_xdp_info_fifo xdpi_fifo;
        } db;
        void __iomem              *uar_map;
        u32                        sqn;
        struct device             *pdev;
        __be32                     mkey_be;
        u8                         min_inline_mode;
        unsigned long              state;
        unsigned int               hw_mtu;

        /* control path */
        struct mlx5_wq_ctrl        wq_ctrl;
        struct mlx5e_channel      *channel;
} ____cacheline_aligned_in_smp;
 501
/* Internal control operations send queue (ICOSQ) — carries driver
 * internal work requests (e.g. UMR WQEs posted on behalf of the RQ,
 * see mlx5e_rq.icosq) rather than netdev traffic.
 */
struct mlx5e_icosq {
        /* data path */
        u16                        cc;
        u16                        pc;

        struct mlx5_wqe_ctrl_seg  *doorbell_cseg;
        struct mlx5e_cq            cq;

        /* write@xmit, read@completion */
        struct {
                struct mlx5e_icosq_wqe_info *wqe_info;
        } db;

        /* read only */
        struct mlx5_wq_cyc         wq;
        void __iomem              *uar_map;
        u32                        sqn;
        u16                        reserved_room;
        unsigned long              state;

        /* control path */
        struct mlx5_wq_ctrl        wq_ctrl;
        struct mlx5e_channel      *channel;

        struct work_struct         recover_work;
} ____cacheline_aligned_in_smp;
 528
/* Per-fragment bookkeeping for the legacy (non-striding) RQ. */
struct mlx5e_wqe_frag_info {
        struct mlx5e_dma_info *di;
        u32 offset;
        bool last_in_page;
};

/* The pages backing one multi-packet WQE. */
struct mlx5e_umr_dma_info {
        struct mlx5e_dma_info  dma_info[MLX5_MPWRQ_PAGES_PER_WQE];
};

/* Per-MPWQE state for the striding RQ; xdp_xmit_bitmap marks pages
 * handed to XDP_TX (one bit per page of the WQE).
 */
struct mlx5e_mpw_info {
        struct mlx5e_umr_dma_info umr;
        u16 consumed_strides;
        DECLARE_BITMAP(xdp_xmit_bitmap, MLX5_MPWRQ_PAGES_PER_WQE);
};
 544
#define MLX5E_MAX_RX_FRAGS 4

/* a single cache unit is capable to serve one napi call (for non-striding rq)
 * or a MPWQE (for striding rq).
 */
#define MLX5E_CACHE_UNIT        (MLX5_MPWRQ_PAGES_PER_WQE > NAPI_POLL_WEIGHT ? \
                                 MLX5_MPWRQ_PAGES_PER_WQE : NAPI_POLL_WEIGHT)
#define MLX5E_CACHE_SIZE        (4 * roundup_pow_of_two(MLX5E_CACHE_UNIT))
/* Small per-RQ recycling cache of RX pages, indexed by head/tail. */
struct mlx5e_page_cache {
        u32 head;
        u32 tail;
        struct mlx5e_dma_info page_cache[MLX5E_CACHE_SIZE];
};
 558
struct mlx5e_rq;
/* Indirect-call hooks installed on the RQ, letting the datapath select
 * the handler flavor (legacy vs. striding RQ, XSK vs. regular) at RQ
 * setup time via mlx5e_rq_set_handlers()/mlx5e_rq_set_trap_handlers().
 */
typedef void (*mlx5e_fp_handle_rx_cqe)(struct mlx5e_rq*, struct mlx5_cqe64*);
typedef struct sk_buff *
(*mlx5e_fp_skb_from_cqe_mpwrq)(struct mlx5e_rq *rq, struct mlx5e_mpw_info *wi,
                               u16 cqe_bcnt, u32 head_offset, u32 page_idx);
typedef struct sk_buff *
(*mlx5e_fp_skb_from_cqe)(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe,
                         struct mlx5e_wqe_frag_info *wi, u32 cqe_bcnt);
typedef bool (*mlx5e_fp_post_rx_wqes)(struct mlx5e_rq *rq);
typedef void (*mlx5e_fp_dealloc_wqe)(struct mlx5e_rq*, u16);

int mlx5e_rq_set_handlers(struct mlx5e_rq *rq, struct mlx5e_params *params, bool xsk);
void mlx5e_rq_set_trap_handlers(struct mlx5e_rq *rq, struct mlx5e_params *params);
 572
/* Bit numbers for the mlx5e_rq.flags bitmap. */
enum mlx5e_rq_flag {
        MLX5E_RQ_FLAG_XDP_XMIT,
        MLX5E_RQ_FLAG_XDP_REDIRECT,
};

/* Size/stride of one RX fragment within a WQE. */
struct mlx5e_rq_frag_info {
        int frag_size;
        int frag_stride;
};

/* Fragment layout of a legacy-RQ WQE (up to MLX5E_MAX_RX_FRAGS frags). */
struct mlx5e_rq_frags_info {
        struct mlx5e_rq_frag_info arr[MLX5E_MAX_RX_FRAGS];
        u8 num_frags;
        u8 log_num_frags;
        u8 wqe_bulk;
};
 589
/* Receive queue. The leading anonymous union holds WQ-type-specific
 * state: .wqe for the legacy cyclic RQ, .mpwqe for the striding
 * (multi-packet WQE) RQ; wq_type selects the active arm.
 */
struct mlx5e_rq {
        /* data path */
        union {
                struct {
                        struct mlx5_wq_cyc          wq;
                        struct mlx5e_wqe_frag_info *frags;
                        struct mlx5e_dma_info      *di;
                        struct mlx5e_rq_frags_info  info;
                        mlx5e_fp_skb_from_cqe       skb_from_cqe;
                } wqe;
                struct {
                        struct mlx5_wq_ll      wq;
                        struct mlx5e_umr_wqe   umr_wqe;
                        struct mlx5e_mpw_info *info;
                        mlx5e_fp_skb_from_cqe_mpwrq skb_from_cqe_mpwrq;
                        u16                    num_strides;
                        u16                    actual_wq_head;
                        u8                     log_stride_sz;
                        u8                     umr_in_progress;
                        u8                     umr_last_bulk;
                        u8                     umr_completed;
                } mpwqe;
        };
        struct {
                u16            headroom;
                u32            frame0_sz;
                u8             map_dir;   /* dma map direction */
        } buff;

        struct device         *pdev;
        struct net_device     *netdev;
        struct mlx5e_rq_stats *stats;
        struct mlx5e_cq        cq;
        struct mlx5e_cq_decomp cqd;
        struct mlx5e_page_cache page_cache;
        struct hwtstamp_config *tstamp;
        struct mlx5_clock      *clock;
        struct mlx5e_icosq    *icosq;
        struct mlx5e_priv     *priv;

        /* indirect-call handlers installed by mlx5e_rq_set_handlers() */
        mlx5e_fp_handle_rx_cqe handle_rx_cqe;
        mlx5e_fp_post_rx_wqes  post_wqes;
        mlx5e_fp_dealloc_wqe   dealloc_wqe;

        unsigned long          state;
        int                    ix;
        unsigned int           hw_mtu;

        struct dim         dim; /* Dynamic Interrupt Moderation */

        /* XDP */
        struct bpf_prog __rcu *xdp_prog;
        struct mlx5e_xdpsq    *xdpsq;
        DECLARE_BITMAP(flags, 8); /* enum mlx5e_rq_flag bits */
        struct page_pool      *page_pool;

        /* AF_XDP zero-copy */
        struct xsk_buff_pool  *xsk_pool;

        struct work_struct     recover_work;

        /* control */
        struct mlx5_wq_ctrl    wq_ctrl;
        __be32                 mkey_be;
        u8                     wq_type;
        u32                    rqn;
        struct mlx5_core_dev  *mdev;
        struct mlx5_core_mkey  umr_mkey;
        /* extra overflow page — see comment at MLX5E_REQUIRED_WQE_MTTS */
        struct mlx5e_dma_info  wqe_overflow;

        /* XDP read-mostly */
        struct xdp_rxq_info    xdp_rxq;
        cqe_ts_to_ns           ptp_cyc2time;
} ____cacheline_aligned_in_smp;
 664
/* Bit numbers for the mlx5e_channel.state bitmap. */
enum mlx5e_channel_state {
        MLX5E_CHANNEL_STATE_XSK,
        MLX5E_CHANNEL_NUM_STATES
};

/* One channel: the per-CPU bundle of queues sharing a NAPI context —
 * an RQ, per-TC TX SQs, XDP SQs, ICOSQs and optional AF_XDP queues.
 */
struct mlx5e_channel {
        /* data path */
        struct mlx5e_rq            rq;
        struct mlx5e_xdpsq         rq_xdpsq;
        struct mlx5e_txqsq         sq[MLX5E_MAX_NUM_TC];
        struct mlx5e_icosq         icosq;   /* internal control operations */
        struct mlx5e_txqsq __rcu * __rcu *qos_sqs;
        bool                       xdp;
        struct napi_struct         napi;
        struct device             *pdev;
        struct net_device         *netdev;
        __be32                     mkey_be;
        u16                        qos_sqs_size;
        u8                         num_tc;
        u8                         lag_port;

        /* XDP_REDIRECT */
        struct mlx5e_xdpsq         xdpsq;

        /* AF_XDP zero-copy */
        struct mlx5e_rq            xskrq;
        struct mlx5e_xdpsq         xsksq;

        /* Async ICOSQ */
        struct mlx5e_icosq         async_icosq;
        /* async_icosq can be accessed from any CPU - the spinlock protects it. */
        spinlock_t                 async_icosq_lock;

        /* data path - accessed per napi poll */
        const struct cpumask      *aff_mask;
        struct mlx5e_ch_stats     *stats;

        /* control */
        struct mlx5e_priv         *priv;
        struct mlx5_core_dev      *mdev;
        struct hwtstamp_config    *tstamp;
        DECLARE_BITMAP(state, MLX5E_CHANNEL_NUM_STATES);
        int                        ix;
        int                        cpu;
};
 710
 711struct mlx5e_port_ptp;
 712
/* The active set of channels plus the parameters they were opened with. */
struct mlx5e_channels {
        struct mlx5e_channel **c;
        struct mlx5e_port_ptp  *port_ptp;
        unsigned int           num;
        struct mlx5e_params    params;
};
 719
/* Per-channel statistics; kept outside struct mlx5e_channel so they
 * survive channel recreation (see mlx5e_priv.channel_stats).
 */
struct mlx5e_channel_stats {
        struct mlx5e_ch_stats ch;
        struct mlx5e_sq_stats sq[MLX5E_MAX_NUM_TC];
        struct mlx5e_rq_stats rq;
        struct mlx5e_rq_stats xskrq;
        struct mlx5e_xdpsq_stats rq_xdpsq;
        struct mlx5e_xdpsq_stats xdpsq;
        struct mlx5e_xdpsq_stats xsksq;
} ____cacheline_aligned_in_smp;

/* Statistics for the dedicated port PTP channel. */
struct mlx5e_port_ptp_stats {
        struct mlx5e_ch_stats ch;
        struct mlx5e_sq_stats sq[MLX5E_MAX_NUM_TC];
        struct mlx5e_ptp_cq_stats cq[MLX5E_MAX_NUM_TC];
} ____cacheline_aligned_in_smp;

/* Bit numbers for the mlx5e_priv.state bitmask. */
enum {
        MLX5E_STATE_OPENED,
        MLX5E_STATE_DESTROYING,
        MLX5E_STATE_XDP_TX_ENABLED,
        MLX5E_STATE_XDP_ACTIVE,
};
 742
/* RQ table (RQT) object handle. */
struct mlx5e_rqt {
        u32              rqtn;
        bool             enabled;
};

/* TIR (transport interface receive) object handle with its RQT. */
struct mlx5e_tir {
        u32               tirn;
        struct mlx5e_rqt  rqt;
        struct list_head  list;
};

/* Flow-table priorities (TC before NIC). */
enum {
        MLX5E_TC_PRIO = 0,
        MLX5E_NIC_PRIO
};
 758
/* RSS configuration: indirection table, per-TIR hash field selection,
 * Toeplitz key and the hash function in use.
 */
struct mlx5e_rss_params {
        u32     indirection_rqt[MLX5E_INDIR_RQT_SIZE];
        u32     rx_hash_fields[MLX5E_NUM_INDIR_TIRS];
        u8      toeplitz_hash_key[40];
        u8      hfunc;
};

/* Arguments for a MODIFY_SQ command (state transition, rate limiting,
 * QoS queue-group update).
 */
struct mlx5e_modify_sq_param {
        int curr_state;
        int next_state;
        int rl_update;
        int rl_index;
        bool qos_update;
        u16 qos_queue_group_id;
};
 774
#if IS_ENABLED(CONFIG_PCI_HYPERV_INTERFACE)
/* Periodic stats reporting agent for Hyper-V VHCA (see lib/hv_vhca.h). */
struct mlx5e_hv_vhca_stats_agent {
        struct mlx5_hv_vhca_agent *agent;
        struct delayed_work        work;
        u16                        delay;
        void                      *buf;
};
#endif

struct mlx5e_xsk {
        /* XSK buffer pools are stored separately from channels,
         * because we don't want to lose them when channels are
         * recreated. The kernel also stores buffer pool, but it doesn't
         * distinguish between zero-copy and non-zero-copy UMEMs, so
         * rely on our mechanism.
         */
        struct xsk_buff_pool **pools;
        u16 refcnt;
        bool ever_used;
};
 795
/* Temporary storage for variables that are allocated when struct mlx5e_priv is
 * initialized, and used where we can't allocate them because that functions
 * must not fail. Use with care and make sure the same variable is not used
 * simultaneously by multiple users.
 */
struct mlx5e_scratchpad {
        cpumask_var_t cpumask;
};

/* HTB (hierarchical token bucket) offload state: the qdisc class tree
 * mapped to device QoS nodes and the set of in-use queue ids.
 */
struct mlx5e_htb {
        DECLARE_HASHTABLE(qos_tc2node, order_base_2(MLX5E_QOS_MAX_LEAF_NODES));
        DECLARE_BITMAP(qos_used_qids, MLX5E_QOS_MAX_LEAF_NODES);
        struct mlx5e_sq_stats **qos_sq_stats;
        u16 max_qos_sqs;
        u16 maj_id;
        u16 defcls;
};
 813
 814struct mlx5e_trap;
 815
/* Per-netdev private state of the mlx5 ethernet driver. Data-path fields are
 * grouped first (see start/end markers); everything else is control path.
 */
struct mlx5e_priv {
	/* priv data path fields - start */
	/* +1 for port ptp ts */
	/* txq index -> SQ lookup; sized for regular channels, the extra port
	 * PTP channel and the QoS (HTB) leaf SQs.
	 */
	struct mlx5e_txqsq *txq2sq[(MLX5E_MAX_NUM_CHANNELS + 1) * MLX5E_MAX_NUM_TC +
				   MLX5E_QOS_MAX_LEAF_NODES];
	int channel_tc2realtxq[MLX5E_MAX_NUM_CHANNELS][MLX5E_MAX_NUM_TC];
	int port_ptp_tc2realtxq[MLX5E_MAX_NUM_TC];
#ifdef CONFIG_MLX5_CORE_EN_DCB
	struct mlx5e_dcbx_dp       dcbx_dp;
#endif
	/* priv data path fields - end */

	u32                        msglevel;
	unsigned long              state;
	struct mutex               state_lock; /* Protects Interface state */
	struct mlx5e_rq            drop_rq;

	struct mlx5e_channels      channels;
	u32                        tisn[MLX5_MAX_PORTS][MLX5E_MAX_NUM_TC];
	struct mlx5e_rqt           indir_rqt;
	struct mlx5e_tir           indir_tir[MLX5E_NUM_INDIR_TIRS];
	struct mlx5e_tir           inner_indir_tir[MLX5E_NUM_INDIR_TIRS];
	struct mlx5e_tir           direct_tir[MLX5E_MAX_NUM_CHANNELS];
	struct mlx5e_tir           xsk_tir[MLX5E_MAX_NUM_CHANNELS];
	struct mlx5e_rss_params    rss_params;
	u32                        tx_rates[MLX5E_MAX_NUM_SQS];

	struct mlx5e_flow_steering fs;

	/* Driver workqueue and the async works queued on it. */
	struct workqueue_struct    *wq;
	struct work_struct         update_carrier_work;
	struct work_struct         set_rx_mode_work;
	struct work_struct         tx_timeout_work;
	struct work_struct         update_stats_work;
	struct work_struct         monitor_counters_work;
	struct mlx5_nb             monitor_counters_nb;

	struct mlx5_core_dev      *mdev;
	struct net_device         *netdev;
	struct mlx5e_trap         *en_trap;
	struct mlx5e_stats         stats;
	struct mlx5e_channel_stats channel_stats[MLX5E_MAX_NUM_CHANNELS];
	struct mlx5e_channel_stats trap_stats;
	struct mlx5e_port_ptp_stats port_ptp_stats;
	u16                        max_nch;
	u8                         max_opened_tc;
	bool                       port_ptp_opened;
	struct hwtstamp_config     tstamp;
	u16                        q_counter;
	u16                        drop_rq_q_counter;
	struct notifier_block      events_nb;
	struct notifier_block      blocking_events_nb;
	int                        num_tc_x_num_ch;

	struct udp_tunnel_nic_info nic_info;
#ifdef CONFIG_MLX5_CORE_EN_DCB
	struct mlx5e_dcbx          dcbx;
#endif

	/* Profile ops for this netdev flavor and its profile-private data. */
	const struct mlx5e_profile *profile;
	void                      *ppriv;
#ifdef CONFIG_MLX5_EN_IPSEC
	struct mlx5e_ipsec        *ipsec;
#endif
#ifdef CONFIG_MLX5_EN_TLS
	struct mlx5e_tls          *tls;
#endif
	struct devlink_health_reporter *tx_reporter;
	struct devlink_health_reporter *rx_reporter;
	struct devlink_port            dl_port;
	struct mlx5e_xsk           xsk;
#if IS_ENABLED(CONFIG_PCI_HYPERV_INTERFACE)
	struct mlx5e_hv_vhca_stats_agent stats_agent;
#endif
	struct mlx5e_scratchpad    scratchpad;
	struct mlx5e_htb           htb;
};
 893
/* RX completion handlers, one for the regular RQ and one for the
 * multi-packet WQE (mpwqe) RQ; selected per profile.
 */
struct mlx5e_rx_handlers {
	mlx5e_fp_handle_rx_cqe handle_rx_cqe;
	mlx5e_fp_handle_rx_cqe handle_rx_cqe_mpwqe;
};
 898
 899extern const struct mlx5e_rx_handlers mlx5e_rx_handlers_nic;
 900
/* Ops table that specializes the generic mlx5e netdev for a particular
 * flavor (assigned to mlx5e_priv::profile). init/cleanup bracket the
 * object's lifetime; init_rx/init_tx and enable/disable bracket the
 * active state. NOTE(review): exact call ordering is enforced by the
 * callers in en_main.c — confirm there.
 */
struct mlx5e_profile {
	int	(*init)(struct mlx5_core_dev *mdev,
			struct net_device *netdev);
	void	(*cleanup)(struct mlx5e_priv *priv);
	int	(*init_rx)(struct mlx5e_priv *priv);
	void	(*cleanup_rx)(struct mlx5e_priv *priv);
	int	(*init_tx)(struct mlx5e_priv *priv);
	void	(*cleanup_tx)(struct mlx5e_priv *priv);
	void	(*enable)(struct mlx5e_priv *priv);
	void	(*disable)(struct mlx5e_priv *priv);
	int	(*update_rx)(struct mlx5e_priv *priv);
	void	(*update_stats)(struct mlx5e_priv *priv);
	void	(*update_carrier)(struct mlx5e_priv *priv);
	unsigned int (*stats_grps_num)(struct mlx5e_priv *priv);
	mlx5e_stats_grp_t *stats_grps;
	const struct mlx5e_rx_handlers *rx_handlers;
	int	max_tc;
	u8	rq_groups; /* divisor for max_nch — see mlx5e_calc_max_nch() */
};
 920
 921void mlx5e_build_ptys2ethtool_map(void);
 922
 923bool mlx5e_check_fragmented_striding_rq_cap(struct mlx5_core_dev *mdev);
 924bool mlx5e_striding_rq_possible(struct mlx5_core_dev *mdev,
 925                                struct mlx5e_params *params);
 926
 927void mlx5e_get_stats(struct net_device *dev, struct rtnl_link_stats64 *stats);
 928void mlx5e_fold_sw_stats64(struct mlx5e_priv *priv, struct rtnl_link_stats64 *s);
 929
 930void mlx5e_init_l2_addr(struct mlx5e_priv *priv);
 931int mlx5e_self_test_num(struct mlx5e_priv *priv);
 932void mlx5e_self_test(struct net_device *ndev, struct ethtool_test *etest,
 933                     u64 *buf);
 934void mlx5e_set_rx_mode_work(struct work_struct *work);
 935
 936int mlx5e_hwstamp_set(struct mlx5e_priv *priv, struct ifreq *ifr);
 937int mlx5e_hwstamp_get(struct mlx5e_priv *priv, struct ifreq *ifr);
 938int mlx5e_modify_rx_cqe_compression_locked(struct mlx5e_priv *priv, bool val);
 939
 940int mlx5e_vlan_rx_add_vid(struct net_device *dev, __always_unused __be16 proto,
 941                          u16 vid);
 942int mlx5e_vlan_rx_kill_vid(struct net_device *dev, __always_unused __be16 proto,
 943                           u16 vid);
 944void mlx5e_timestamp_init(struct mlx5e_priv *priv);
 945
/* Target description for mlx5e_redirect_rqt(): either a single direct RQ
 * or an RSS spread over a set of channels.
 */
struct mlx5e_redirect_rqt_param {
	bool is_rss; /* discriminator: selects the valid union member below */
	union {
		u32 rqn; /* Direct RQN (Non-RSS) */
		struct {
			u8 hfunc; /* hash function id — presumably an ETH_RSS_HASH_* value; verify */
			struct mlx5e_channels *channels;
		} rss; /* RSS data */
	};
};
 956
 957int mlx5e_redirect_rqt(struct mlx5e_priv *priv, u32 rqtn, int sz,
 958                       struct mlx5e_redirect_rqt_param rrp);
 959void mlx5e_build_indir_tir_ctx_hash(struct mlx5e_rss_params *rss_params,
 960                                    const struct mlx5e_tirc_config *ttconfig,
 961                                    void *tirc, bool inner);
 962void mlx5e_modify_tirs_hash(struct mlx5e_priv *priv, void *in);
 963struct mlx5e_tirc_config mlx5e_tirc_get_default_config(enum mlx5e_traffic_types tt);
 964
 965struct mlx5e_xsk_param;
 966
 967struct mlx5e_rq_param;
 968int mlx5e_open_rq(struct mlx5e_channel *c, struct mlx5e_params *params,
 969                  struct mlx5e_rq_param *param, struct mlx5e_xsk_param *xsk,
 970                  struct xsk_buff_pool *xsk_pool, struct mlx5e_rq *rq);
 971int mlx5e_wait_for_min_rx_wqes(struct mlx5e_rq *rq, int wait_time);
 972void mlx5e_deactivate_rq(struct mlx5e_rq *rq);
 973void mlx5e_close_rq(struct mlx5e_rq *rq);
 974int mlx5e_create_rq(struct mlx5e_rq *rq, struct mlx5e_rq_param *param);
 975void mlx5e_destroy_rq(struct mlx5e_rq *rq);
 976
 977struct mlx5e_sq_param;
 978int mlx5e_open_icosq(struct mlx5e_channel *c, struct mlx5e_params *params,
 979                     struct mlx5e_sq_param *param, struct mlx5e_icosq *sq);
 980void mlx5e_close_icosq(struct mlx5e_icosq *sq);
 981int mlx5e_open_xdpsq(struct mlx5e_channel *c, struct mlx5e_params *params,
 982                     struct mlx5e_sq_param *param, struct xsk_buff_pool *xsk_pool,
 983                     struct mlx5e_xdpsq *sq, bool is_redirect);
 984void mlx5e_close_xdpsq(struct mlx5e_xdpsq *sq);
 985
/* Context needed by mlx5e_open_cq() to create a completion queue without a
 * fully built channel (NOTE(review): presumably used for trap/PTP queues —
 * confirm at call sites).
 */
struct mlx5e_create_cq_param {
	struct napi_struct *napi;
	struct mlx5e_ch_stats *ch_stats;
	int node; /* NUMA node hint for allocations — confirm against callers */
	int ix;   /* queue/channel index */
};
 992
 993struct mlx5e_cq_param;
 994int mlx5e_open_cq(struct mlx5e_priv *priv, struct dim_cq_moder moder,
 995                  struct mlx5e_cq_param *param, struct mlx5e_create_cq_param *ccp,
 996                  struct mlx5e_cq *cq);
 997void mlx5e_close_cq(struct mlx5e_cq *cq);
 998
 999int mlx5e_open_locked(struct net_device *netdev);
1000int mlx5e_close_locked(struct net_device *netdev);
1001
1002int mlx5e_open_channels(struct mlx5e_priv *priv,
1003                        struct mlx5e_channels *chs);
1004void mlx5e_close_channels(struct mlx5e_channels *chs);
1005
/* Function pointer to be used to modify HW or kernel settings while
 * switching channels
 */
typedef int (*mlx5e_fp_preactivate)(struct mlx5e_priv *priv, void *context);
/* Expands to a wrapper named fn##_ctx() that adapts a one-argument callback
 * fn(priv) to the mlx5e_fp_preactivate signature by ignoring 'context'.
 */
#define MLX5E_DEFINE_PREACTIVATE_WRAPPER_CTX(fn) \
int fn##_ctx(struct mlx5e_priv *priv, void *context) \
{ \
	return fn(priv); \
}
1015int mlx5e_safe_reopen_channels(struct mlx5e_priv *priv);
1016int mlx5e_safe_switch_channels(struct mlx5e_priv *priv,
1017                               struct mlx5e_channels *new_chs,
1018                               mlx5e_fp_preactivate preactivate,
1019                               void *context);
1020int mlx5e_update_tx_netdev_queues(struct mlx5e_priv *priv);
1021int mlx5e_num_channels_changed(struct mlx5e_priv *priv);
1022int mlx5e_num_channels_changed_ctx(struct mlx5e_priv *priv, void *context);
1023void mlx5e_activate_priv_channels(struct mlx5e_priv *priv);
1024void mlx5e_deactivate_priv_channels(struct mlx5e_priv *priv);
1025
1026void mlx5e_build_default_indir_rqt(u32 *indirection_rqt, int len,
1027                                   int num_channels);
1028
1029void mlx5e_reset_tx_moderation(struct mlx5e_params *params, u8 cq_period_mode);
1030void mlx5e_reset_rx_moderation(struct mlx5e_params *params, u8 cq_period_mode);
1031void mlx5e_set_tx_cq_mode_params(struct mlx5e_params *params, u8 cq_period_mode);
1032void mlx5e_set_rx_cq_mode_params(struct mlx5e_params *params, u8 cq_period_mode);
1033
/* RQ / ICOSQ state management helpers.
 * Note: mlx5e_deactivate_rq() is already declared above with the other RQ
 * lifecycle helpers; the redundant second declaration was removed.
 */
void mlx5e_set_rq_type(struct mlx5_core_dev *mdev, struct mlx5e_params *params);
void mlx5e_init_rq_type_params(struct mlx5_core_dev *mdev,
			       struct mlx5e_params *params);
int mlx5e_modify_rq_state(struct mlx5e_rq *rq, int curr_state, int next_state);
void mlx5e_activate_rq(struct mlx5e_rq *rq);
void mlx5e_activate_icosq(struct mlx5e_icosq *icosq);
void mlx5e_deactivate_icosq(struct mlx5e_icosq *icosq);
1042
1043int mlx5e_modify_sq(struct mlx5_core_dev *mdev, u32 sqn,
1044                    struct mlx5e_modify_sq_param *p);
1045int mlx5e_open_txqsq(struct mlx5e_channel *c, u32 tisn, int txq_ix,
1046                     struct mlx5e_params *params, struct mlx5e_sq_param *param,
1047                     struct mlx5e_txqsq *sq, int tc, u16 qos_queue_group_id, u16 qos_qid);
1048void mlx5e_activate_txqsq(struct mlx5e_txqsq *sq);
1049void mlx5e_deactivate_txqsq(struct mlx5e_txqsq *sq);
1050void mlx5e_free_txqsq(struct mlx5e_txqsq *sq);
1051void mlx5e_tx_disable_queue(struct netdev_queue *txq);
1052int mlx5e_alloc_txqsq_db(struct mlx5e_txqsq *sq, int numa);
1053void mlx5e_free_txqsq_db(struct mlx5e_txqsq *sq);
1054struct mlx5e_create_sq_param;
1055int mlx5e_create_sq_rdy(struct mlx5_core_dev *mdev,
1056                        struct mlx5e_sq_param *param,
1057                        struct mlx5e_create_sq_param *csp,
1058                        u16 qos_queue_group_id,
1059                        u32 *sqn);
1060void mlx5e_tx_err_cqe_work(struct work_struct *recover_work);
1061void mlx5e_close_txqsq(struct mlx5e_txqsq *sq);
1062
1063static inline bool mlx5_tx_swp_supported(struct mlx5_core_dev *mdev)
1064{
1065        return MLX5_CAP_ETH(mdev, swp) &&
1066                MLX5_CAP_ETH(mdev, swp_csum) && MLX5_CAP_ETH(mdev, swp_lso);
1067}
1068
1069extern const struct ethtool_ops mlx5e_ethtool_ops;
1070
1071int mlx5e_create_tir(struct mlx5_core_dev *mdev, struct mlx5e_tir *tir,
1072                     u32 *in);
1073void mlx5e_destroy_tir(struct mlx5_core_dev *mdev,
1074                       struct mlx5e_tir *tir);
1075int mlx5e_create_mdev_resources(struct mlx5_core_dev *mdev);
1076void mlx5e_destroy_mdev_resources(struct mlx5_core_dev *mdev);
1077int mlx5e_refresh_tirs(struct mlx5e_priv *priv, bool enable_uc_lb,
1078                       bool enable_mc_lb);
1079void mlx5e_mkey_set_relaxed_ordering(struct mlx5_core_dev *mdev, void *mkc);
1080
1081/* common netdev helpers */
1082void mlx5e_create_q_counters(struct mlx5e_priv *priv);
1083void mlx5e_destroy_q_counters(struct mlx5e_priv *priv);
1084int mlx5e_open_drop_rq(struct mlx5e_priv *priv,
1085                       struct mlx5e_rq *drop_rq);
1086void mlx5e_close_drop_rq(struct mlx5e_rq *drop_rq);
1087int mlx5e_init_di_list(struct mlx5e_rq *rq, int wq_sz, int node);
1088void mlx5e_free_di_list(struct mlx5e_rq *rq);
1089
1090int mlx5e_create_indirect_rqt(struct mlx5e_priv *priv);
1091
1092int mlx5e_create_indirect_tirs(struct mlx5e_priv *priv, bool inner_ttc);
1093void mlx5e_destroy_indirect_tirs(struct mlx5e_priv *priv);
1094
1095int mlx5e_create_direct_rqts(struct mlx5e_priv *priv, struct mlx5e_tir *tirs);
1096void mlx5e_destroy_direct_rqts(struct mlx5e_priv *priv, struct mlx5e_tir *tirs);
1097int mlx5e_create_direct_tirs(struct mlx5e_priv *priv, struct mlx5e_tir *tirs);
1098void mlx5e_destroy_direct_tirs(struct mlx5e_priv *priv, struct mlx5e_tir *tirs);
1099void mlx5e_destroy_rqt(struct mlx5e_priv *priv, struct mlx5e_rqt *rqt);
1100
1101int mlx5e_create_tis(struct mlx5_core_dev *mdev, void *in, u32 *tisn);
1102void mlx5e_destroy_tis(struct mlx5_core_dev *mdev, u32 tisn);
1103
1104int mlx5e_create_tises(struct mlx5e_priv *priv);
1105void mlx5e_destroy_tises(struct mlx5e_priv *priv);
1106int mlx5e_update_nic_rx(struct mlx5e_priv *priv);
1107void mlx5e_update_carrier(struct mlx5e_priv *priv);
1108int mlx5e_close(struct net_device *netdev);
1109int mlx5e_open(struct net_device *netdev);
1110
1111void mlx5e_queue_update_stats(struct mlx5e_priv *priv);
1112int mlx5e_bits_invert(unsigned long a, int size);
1113
1114int mlx5e_set_dev_port_mtu(struct mlx5e_priv *priv);
1115int mlx5e_set_dev_port_mtu_ctx(struct mlx5e_priv *priv, void *context);
1116int mlx5e_change_mtu(struct net_device *netdev, int new_mtu,
1117                     mlx5e_fp_preactivate preactivate);
1118void mlx5e_vxlan_set_netdev_info(struct mlx5e_priv *priv);
1119
1120/* ethtool helpers */
1121void mlx5e_ethtool_get_drvinfo(struct mlx5e_priv *priv,
1122                               struct ethtool_drvinfo *drvinfo);
1123void mlx5e_ethtool_get_strings(struct mlx5e_priv *priv,
1124                               uint32_t stringset, uint8_t *data);
1125int mlx5e_ethtool_get_sset_count(struct mlx5e_priv *priv, int sset);
1126void mlx5e_ethtool_get_ethtool_stats(struct mlx5e_priv *priv,
1127                                     struct ethtool_stats *stats, u64 *data);
1128void mlx5e_ethtool_get_ringparam(struct mlx5e_priv *priv,
1129                                 struct ethtool_ringparam *param);
1130int mlx5e_ethtool_set_ringparam(struct mlx5e_priv *priv,
1131                                struct ethtool_ringparam *param);
1132void mlx5e_ethtool_get_channels(struct mlx5e_priv *priv,
1133                                struct ethtool_channels *ch);
1134int mlx5e_ethtool_set_channels(struct mlx5e_priv *priv,
1135                               struct ethtool_channels *ch);
1136int mlx5e_ethtool_get_coalesce(struct mlx5e_priv *priv,
1137                               struct ethtool_coalesce *coal);
1138int mlx5e_ethtool_set_coalesce(struct mlx5e_priv *priv,
1139                               struct ethtool_coalesce *coal);
1140int mlx5e_ethtool_get_link_ksettings(struct mlx5e_priv *priv,
1141                                     struct ethtool_link_ksettings *link_ksettings);
1142int mlx5e_ethtool_set_link_ksettings(struct mlx5e_priv *priv,
1143                                     const struct ethtool_link_ksettings *link_ksettings);
1144int mlx5e_get_rxfh(struct net_device *netdev, u32 *indir, u8 *key, u8 *hfunc);
1145int mlx5e_set_rxfh(struct net_device *dev, const u32 *indir, const u8 *key,
1146                   const u8 hfunc);
1147int mlx5e_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *info,
1148                    u32 *rule_locs);
1149int mlx5e_set_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd);
1150u32 mlx5e_ethtool_get_rxfh_key_size(struct mlx5e_priv *priv);
1151u32 mlx5e_ethtool_get_rxfh_indir_size(struct mlx5e_priv *priv);
1152int mlx5e_ethtool_get_ts_info(struct mlx5e_priv *priv,
1153                              struct ethtool_ts_info *info);
1154int mlx5e_ethtool_flash_device(struct mlx5e_priv *priv,
1155                               struct ethtool_flash *flash);
1156void mlx5e_ethtool_get_pauseparam(struct mlx5e_priv *priv,
1157                                  struct ethtool_pauseparam *pauseparam);
1158int mlx5e_ethtool_set_pauseparam(struct mlx5e_priv *priv,
1159                                 struct ethtool_pauseparam *pauseparam);
1160
1161/* mlx5e generic netdev management API */
1162static inline unsigned int
1163mlx5e_calc_max_nch(struct mlx5e_priv *priv, const struct mlx5e_profile *profile)
1164{
1165        return priv->netdev->num_rx_queues / max_t(u8, profile->rq_groups, 1);
1166}
1167
1168int mlx5e_priv_init(struct mlx5e_priv *priv,
1169                    struct net_device *netdev,
1170                    struct mlx5_core_dev *mdev);
1171void mlx5e_priv_cleanup(struct mlx5e_priv *priv);
1172struct net_device *
1173mlx5e_create_netdev(struct mlx5_core_dev *mdev, unsigned int txqs, unsigned int rxqs);
1174int mlx5e_attach_netdev(struct mlx5e_priv *priv);
1175void mlx5e_detach_netdev(struct mlx5e_priv *priv);
1176void mlx5e_destroy_netdev(struct mlx5e_priv *priv);
1177int mlx5e_netdev_change_profile(struct mlx5e_priv *priv,
1178                                const struct mlx5e_profile *new_profile, void *new_ppriv);
1179void mlx5e_set_netdev_mtu_boundaries(struct mlx5e_priv *priv);
1180void mlx5e_build_nic_params(struct mlx5e_priv *priv, struct mlx5e_xsk *xsk, u16 mtu);
1181void mlx5e_build_rq_params(struct mlx5_core_dev *mdev,
1182                           struct mlx5e_params *params);
1183void mlx5e_build_rss_params(struct mlx5e_rss_params *rss_params,
1184                            u16 num_channels);
1185void mlx5e_rx_dim_work(struct work_struct *work);
1186void mlx5e_tx_dim_work(struct work_struct *work);
1187
1188netdev_features_t mlx5e_features_check(struct sk_buff *skb,
1189                                       struct net_device *netdev,
1190                                       netdev_features_t features);
1191int mlx5e_set_features(struct net_device *netdev, netdev_features_t features);
1192#ifdef CONFIG_MLX5_ESWITCH
1193int mlx5e_set_vf_mac(struct net_device *dev, int vf, u8 *mac);
1194int mlx5e_set_vf_rate(struct net_device *dev, int vf, int min_tx_rate, int max_tx_rate);
1195int mlx5e_get_vf_config(struct net_device *dev, int vf, struct ifla_vf_info *ivi);
1196int mlx5e_get_vf_stats(struct net_device *dev, int vf, struct ifla_vf_stats *vf_stats);
1197#endif
1198#endif /* __MLX5_EN_H__ */
1199