/* linux/drivers/net/ethernet/mellanox/mlx5/core/en.h */
   1/*
   2 * Copyright (c) 2015-2016, Mellanox Technologies. All rights reserved.
   3 *
   4 * This software is available to you under a choice of one of two
   5 * licenses.  You may choose to be licensed under the terms of the GNU
   6 * General Public License (GPL) Version 2, available from the file
   7 * COPYING in the main directory of this source tree, or the
   8 * OpenIB.org BSD license below:
   9 *
  10 *     Redistribution and use in source and binary forms, with or
  11 *     without modification, are permitted provided that the following
  12 *     conditions are met:
  13 *
  14 *      - Redistributions of source code must retain the above
  15 *        copyright notice, this list of conditions and the following
  16 *        disclaimer.
  17 *
  18 *      - Redistributions in binary form must reproduce the above
  19 *        copyright notice, this list of conditions and the following
  20 *        disclaimer in the documentation and/or other materials
  21 *        provided with the distribution.
  22 *
  23 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
  24 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
  25 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
  26 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
  27 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
  28 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
  29 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
  30 * SOFTWARE.
  31 */
  32#ifndef __MLX5_EN_H__
  33#define __MLX5_EN_H__
  34
  35#include <linux/if_vlan.h>
  36#include <linux/etherdevice.h>
  37#include <linux/timecounter.h>
  38#include <linux/net_tstamp.h>
  39#include <linux/ptp_clock_kernel.h>
  40#include <linux/crash_dump.h>
  41#include <linux/mlx5/driver.h>
  42#include <linux/mlx5/qp.h>
  43#include <linux/mlx5/cq.h>
  44#include <linux/mlx5/port.h>
  45#include <linux/mlx5/vport.h>
  46#include <linux/mlx5/transobj.h>
  47#include <linux/mlx5/fs.h>
  48#include <linux/rhashtable.h>
  49#include <net/switchdev.h>
  50#include <net/xdp.h>
  51#include <linux/net_dim.h>
  52#include "wq.h"
  53#include "mlx5_core.h"
  54#include "en_stats.h"
  55#include "en/fs.h"
  56
  57extern const struct net_device_ops mlx5e_netdev_ops;
  58struct page_pool;
  59
  60#define MLX5E_METADATA_ETHER_TYPE (0x8CE4)
  61#define MLX5E_METADATA_ETHER_LEN 8
  62
  63#define MLX5_SET_CFG(p, f, v) MLX5_SET(create_flow_group_in, p, f, v)
  64
  65#define MLX5E_ETH_HARD_MTU (ETH_HLEN + VLAN_HLEN + ETH_FCS_LEN)
  66
  67#define MLX5E_HW2SW_MTU(params, hwmtu) ((hwmtu) - ((params)->hard_mtu))
  68#define MLX5E_SW2HW_MTU(params, swmtu) ((swmtu) + ((params)->hard_mtu))
  69
  70#define MLX5E_MAX_PRIORITY      8
  71#define MLX5E_MAX_DSCP          64
  72#define MLX5E_MAX_NUM_TC        8
  73
  74#define MLX5_RX_HEADROOM NET_SKB_PAD
  75#define MLX5_SKB_FRAG_SZ(len)   (SKB_DATA_ALIGN(len) +  \
  76                                 SKB_DATA_ALIGN(sizeof(struct skb_shared_info)))
  77
  78#define MLX5_MPWRQ_MIN_LOG_STRIDE_SZ(mdev) \
  79        (6 + MLX5_CAP_GEN(mdev, cache_line_128byte)) /* HW restriction */
  80#define MLX5_MPWRQ_LOG_STRIDE_SZ(mdev, req) \
  81        max_t(u32, MLX5_MPWRQ_MIN_LOG_STRIDE_SZ(mdev), req)
  82#define MLX5_MPWRQ_DEF_LOG_STRIDE_SZ(mdev)       MLX5_MPWRQ_LOG_STRIDE_SZ(mdev, 6)
  83#define MLX5_MPWRQ_CQE_CMPRS_LOG_STRIDE_SZ(mdev) MLX5_MPWRQ_LOG_STRIDE_SZ(mdev, 8)
  84#define MLX5E_MPWQE_STRIDE_SZ(mdev, cqe_cmprs) \
  85        (cqe_cmprs ? MLX5_MPWRQ_CQE_CMPRS_LOG_STRIDE_SZ(mdev) : \
  86        MLX5_MPWRQ_DEF_LOG_STRIDE_SZ(mdev))
  87
  88#define MLX5_MPWRQ_LOG_WQE_SZ                   18
  89#define MLX5_MPWRQ_WQE_PAGE_ORDER  (MLX5_MPWRQ_LOG_WQE_SZ - PAGE_SHIFT > 0 ? \
  90                                    MLX5_MPWRQ_LOG_WQE_SZ - PAGE_SHIFT : 0)
  91#define MLX5_MPWRQ_PAGES_PER_WQE                BIT(MLX5_MPWRQ_WQE_PAGE_ORDER)
  92
  93#define MLX5_MTT_OCTW(npages) (ALIGN(npages, 8) / 2)
  94#define MLX5E_REQUIRED_WQE_MTTS         (ALIGN(MLX5_MPWRQ_PAGES_PER_WQE, 8))
  95#define MLX5E_LOG_ALIGNED_MPWQE_PPW     (ilog2(MLX5E_REQUIRED_WQE_MTTS))
  96#define MLX5E_REQUIRED_MTTS(wqes)       (wqes * MLX5E_REQUIRED_WQE_MTTS)
  97#define MLX5E_MAX_RQ_NUM_MTTS   \
  98        ((1 << 16) * 2) /* So that MLX5_MTT_OCTW(num_mtts) fits into u16 */
  99#define MLX5E_ORDER2_MAX_PACKET_MTU (order_base_2(10 * 1024))
 100#define MLX5E_PARAMS_MAXIMUM_LOG_RQ_SIZE_MPW    \
 101                (ilog2(MLX5E_MAX_RQ_NUM_MTTS / MLX5E_REQUIRED_WQE_MTTS))
 102#define MLX5E_LOG_MAX_RQ_NUM_PACKETS_MPW \
 103        (MLX5E_PARAMS_MAXIMUM_LOG_RQ_SIZE_MPW + \
 104         (MLX5_MPWRQ_LOG_WQE_SZ - MLX5E_ORDER2_MAX_PACKET_MTU))
 105
 106#define MLX5E_MIN_SKB_FRAG_SZ           (MLX5_SKB_FRAG_SZ(MLX5_RX_HEADROOM))
 107#define MLX5E_LOG_MAX_RX_WQE_BULK       \
 108        (ilog2(PAGE_SIZE / roundup_pow_of_two(MLX5E_MIN_SKB_FRAG_SZ)))
 109
 110#define MLX5E_PARAMS_MINIMUM_LOG_SQ_SIZE                0x6
 111#define MLX5E_PARAMS_DEFAULT_LOG_SQ_SIZE                0xa
 112#define MLX5E_PARAMS_MAXIMUM_LOG_SQ_SIZE                0xd
 113
 114#define MLX5E_PARAMS_MINIMUM_LOG_RQ_SIZE (1 + MLX5E_LOG_MAX_RX_WQE_BULK)
 115#define MLX5E_PARAMS_DEFAULT_LOG_RQ_SIZE                0xa
 116#define MLX5E_PARAMS_MAXIMUM_LOG_RQ_SIZE min_t(u8, 0xd, \
 117                                               MLX5E_LOG_MAX_RQ_NUM_PACKETS_MPW)
 118
 119#define MLX5E_PARAMS_MINIMUM_LOG_RQ_SIZE_MPW            0x2
 120
 121#define MLX5E_RX_MAX_HEAD (256)
 122
 123#define MLX5E_PARAMS_DEFAULT_LRO_WQE_SZ                 (64 * 1024)
 124#define MLX5E_DEFAULT_LRO_TIMEOUT                       32
 125#define MLX5E_LRO_TIMEOUT_ARR_SIZE                      4
 126
 127#define MLX5E_PARAMS_DEFAULT_RX_CQ_MODERATION_USEC      0x10
 128#define MLX5E_PARAMS_DEFAULT_RX_CQ_MODERATION_USEC_FROM_CQE 0x3
 129#define MLX5E_PARAMS_DEFAULT_RX_CQ_MODERATION_PKTS      0x20
 130#define MLX5E_PARAMS_DEFAULT_TX_CQ_MODERATION_USEC      0x10
 131#define MLX5E_PARAMS_DEFAULT_TX_CQ_MODERATION_USEC_FROM_CQE 0x10
 132#define MLX5E_PARAMS_DEFAULT_TX_CQ_MODERATION_PKTS      0x20
 133#define MLX5E_PARAMS_DEFAULT_MIN_RX_WQES                0x80
 134#define MLX5E_PARAMS_DEFAULT_MIN_RX_WQES_MPW            0x2
 135
 136#define MLX5E_LOG_INDIR_RQT_SIZE       0x7
 137#define MLX5E_INDIR_RQT_SIZE           BIT(MLX5E_LOG_INDIR_RQT_SIZE)
 138#define MLX5E_MIN_NUM_CHANNELS         0x1
 139#define MLX5E_MAX_NUM_CHANNELS         (MLX5E_INDIR_RQT_SIZE >> 1)
 140#define MLX5E_MAX_NUM_SQS              (MLX5E_MAX_NUM_CHANNELS * MLX5E_MAX_NUM_TC)
 141#define MLX5E_TX_CQ_POLL_BUDGET        128
 142#define MLX5E_SQ_RECOVER_MIN_INTERVAL  500 /* msecs */
 143
 144#define MLX5E_UMR_WQE_INLINE_SZ \
 145        (sizeof(struct mlx5e_umr_wqe) + \
 146         ALIGN(MLX5_MPWRQ_PAGES_PER_WQE * sizeof(struct mlx5_mtt), \
 147               MLX5_UMR_MTT_ALIGNMENT))
 148#define MLX5E_UMR_WQEBBS \
 149        (DIV_ROUND_UP(MLX5E_UMR_WQE_INLINE_SZ, MLX5_SEND_WQE_BB))
 150#define MLX5E_ICOSQ_MAX_WQEBBS MLX5E_UMR_WQEBBS
 151
 152#define MLX5E_NUM_MAIN_GROUPS 9
 153
 154#define MLX5E_MSG_LEVEL                 NETIF_MSG_LINK
 155
 156#define mlx5e_dbg(mlevel, priv, format, ...)                    \
 157do {                                                            \
 158        if (NETIF_MSG_##mlevel & (priv)->msglevel)              \
 159                netdev_warn(priv->netdev, format,               \
 160                            ##__VA_ARGS__);                     \
 161} while (0)
 162
 163
 164static inline u16 mlx5_min_rx_wqes(int wq_type, u32 wq_size)
 165{
 166        switch (wq_type) {
 167        case MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ:
 168                return min_t(u16, MLX5E_PARAMS_DEFAULT_MIN_RX_WQES_MPW,
 169                             wq_size / 2);
 170        default:
 171                return min_t(u16, MLX5E_PARAMS_DEFAULT_MIN_RX_WQES,
 172                             wq_size / 2);
 173        }
 174}
 175
 176static inline int mlx5e_get_max_num_channels(struct mlx5_core_dev *mdev)
 177{
 178        return is_kdump_kernel() ?
 179                MLX5E_MIN_NUM_CHANNELS :
 180                min_t(int, mdev->priv.eq_table.num_comp_vectors,
 181                      MLX5E_MAX_NUM_CHANNELS);
 182}
 183
 184struct mlx5e_tx_wqe {
 185        struct mlx5_wqe_ctrl_seg ctrl;
 186        struct mlx5_wqe_eth_seg  eth;
 187        struct mlx5_wqe_data_seg data[0];
 188};
 189
 190struct mlx5e_rx_wqe_ll {
 191        struct mlx5_wqe_srq_next_seg  next;
 192        struct mlx5_wqe_data_seg      data[0];
 193};
 194
/* Cyclic RQ WQE: only the variable-length data segments. Kept as a GNU
 * zero-length array because a flexible array member may not be the sole
 * member of a struct.
 */
struct mlx5e_rx_wqe_cyc {
        struct mlx5_wqe_data_seg      data[0];
};
 198
/* UMR WQE used to (re)map MPWQE pages: control + UMR control + mkey
 * context, followed by inline MTT entries. inline_mtts stays a GNU
 * zero-length array (not a flexible array member) because this struct is
 * embedded by value inside struct mlx5e_rq.mpwqe below.
 */
struct mlx5e_umr_wqe {
        struct mlx5_wqe_ctrl_seg       ctrl;
        struct mlx5_wqe_umr_ctrl_seg   uctrl;
        struct mlx5_mkey_seg           mkc;
        struct mlx5_mtt                inline_mtts[0];
};
 205
extern const char mlx5e_self_tests[][ETH_GSTRING_LEN];

/* ethtool private flag names; order MUST match the bit order of
 * enum mlx5e_priv_flag below.
 */
static const char mlx5e_priv_flags[][ETH_GSTRING_LEN] = {
        "rx_cqe_moder",
        "tx_cqe_moder",
        "rx_cqe_compress",
        "rx_striding_rq",
};
 214
/* Bitmask values stored in mlx5e_params.pflags; one bit per entry of
 * mlx5e_priv_flags[] above, in the same order.
 */
enum mlx5e_priv_flag {
        MLX5E_PFLAG_RX_CQE_BASED_MODER = (1 << 0),
        MLX5E_PFLAG_TX_CQE_BASED_MODER = (1 << 1),
        MLX5E_PFLAG_RX_CQE_COMPRESS = (1 << 2),
        MLX5E_PFLAG_RX_STRIDING_RQ = (1 << 3),
};
 221
/* Set or clear a private flag bit in params->pflags. */
#define MLX5E_SET_PFLAG(params, pflag, enable)                  \
        do {                                                    \
                if (enable)                                     \
                        (params)->pflags |= (pflag);            \
                else                                            \
                        (params)->pflags &= ~(pflag);           \
        } while (0)

/* Test a private flag bit; evaluates to 0 or 1. */
#define MLX5E_GET_PFLAG(params, pflag) (!!((params)->pflags & (pflag)))

#ifdef CONFIG_MLX5_CORE_EN_DCB
#define MLX5E_MAX_BW_ALLOC 100 /* Max percentage of BW allocation */
#endif
 235
/* Software configuration of the netdev data path; a copy travels with each
 * mlx5e_channels set so channel reconfiguration can be staged atomically.
 */
struct mlx5e_params {
        u8  log_sq_size;
        u8  rq_wq_type;               /* MLX5_WQ_TYPE_* */
        u8  log_rq_mtu_frames;
        u16 num_channels;
        u8  num_tc;
        bool rx_cqe_compress_def;
        struct net_dim_cq_moder rx_cq_moderation;
        struct net_dim_cq_moder tx_cq_moderation;
        bool lro_en;
        u32 lro_wqe_sz;
        u8  tx_min_inline_mode;
        u8  rss_hfunc;
        u8  toeplitz_hash_key[40];
        u32 indirection_rqt[MLX5E_INDIR_RQT_SIZE];
        bool vlan_strip_disable;
        bool scatter_fcs_en;
        bool rx_dim_enabled;
        bool tx_dim_enabled;
        u32 lro_timeout;
        u32 pflags;                   /* enum mlx5e_priv_flag bits */
        struct bpf_prog *xdp_prog;
        unsigned int sw_mtu;
        int hard_mtu;
};
 261
#ifdef CONFIG_MLX5_CORE_EN_DCB
/* CEE DCBX configuration staged via ethtool/dcbnl before being applied. */
struct mlx5e_cee_config {
        /* bw pct for priority group */
        u8                         pg_bw_pct[CEE_DCBX_MAX_PGS];
        u8                         prio_to_pg_map[CEE_DCBX_MAX_PRIO];
        bool                       pfc_setting[CEE_DCBX_MAX_PRIO];
        bool                       pfc_enable;
};

/* Result codes for DCB set operations (whether a reset is required). */
enum {
        MLX5_DCB_CHG_RESET,
        MLX5_DCB_NO_CHG,
        MLX5_DCB_CHG_NO_RESET,
};

/* Control-path DCBX state. */
struct mlx5e_dcbx {
        enum mlx5_dcbx_oper_mode   mode;
        struct mlx5e_cee_config    cee_cfg; /* pending configuration */
        u8                         dscp_app_cnt;

        /* The only setting that cannot be read from FW */
        u8                         tc_tsa[IEEE_8021QAZ_MAX_TCS];
        u8                         cap;

        /* Buffer configuration */
        bool                       manual_buffer;
        u32                        cable_len;
        u32                        xoff;
};

/* Data-path DCBX state, kept separate for cache locality. */
struct mlx5e_dcbx_dp {
        u8                         dscp2prio[MLX5E_MAX_DSCP];
        u8                         trust_state;
};
#endif
 297
/* Bit numbers for mlx5e_rq.state. */
enum {
        MLX5E_RQ_STATE_ENABLED,
        MLX5E_RQ_STATE_AM,      /* adaptive (dynamic) interrupt moderation */
};
 302
/* Completion queue wrapper; fields are grouped by access frequency on the
 * data path, with control-path fields last.
 */
struct mlx5e_cq {
        /* data path - accessed per cqe */
        struct mlx5_cqwq           wq;

        /* data path - accessed per napi poll */
        u16                        event_ctr;
        struct napi_struct        *napi;
        struct mlx5_core_cq        mcq;
        struct mlx5e_channel      *channel;

        /* cqe decompression */
        struct mlx5_cqe64          title;
        struct mlx5_mini_cqe8      mini_arr[MLX5_MINI_CQE_ARRAY_SIZE];
        u8                         mini_arr_idx;
        u16                        decmprs_left;
        u16                        decmprs_wqe_counter;

        /* control */
        struct mlx5_core_dev      *mdev;
        struct mlx5_wq_ctrl        wq_ctrl;
} ____cacheline_aligned_in_smp;
 324
/* Per-WQE bookkeeping for TX completion processing. */
struct mlx5e_tx_wqe_info {
        struct sk_buff *skb;
        u32 num_bytes;
        u8  num_wqebbs;
        u8  num_dma;
};

/* How a TX buffer was DMA-mapped, so it can be unmapped the same way. */
enum mlx5e_dma_map_type {
        MLX5E_DMA_MAP_SINGLE,
        MLX5E_DMA_MAP_PAGE
};

/* One entry of the SQ DMA fifo, recorded at map time. */
struct mlx5e_sq_dma {
        dma_addr_t              addr;
        u32                     size;
        enum mlx5e_dma_map_type type;
};

/* Bit numbers for SQ state. */
enum {
        MLX5E_SQ_STATE_ENABLED,
        MLX5E_SQ_STATE_RECOVERING,
        MLX5E_SQ_STATE_IPSEC,
        MLX5E_SQ_STATE_AM,
        MLX5E_SQ_STATE_TLS,
        MLX5E_SQ_STATE_REDIRECT,
};

/* Per-WQE info for the internal control SQ (ICOSQ). */
struct mlx5e_sq_wqe_info {
        u8  opcode;
};
 355
/* TX send queue backing one netdev TX queue. Field layout is deliberate:
 * completion-dirtied, xmit-dirtied and read-only groups sit on separate
 * cachelines to avoid false sharing.
 */
struct mlx5e_txqsq {
        /* data path */

        /* dirtied @completion */
        u16                        cc;          /* consumer counter */
        u32                        dma_fifo_cc;
        struct net_dim             dim; /* Adaptive Moderation */

        /* dirtied @xmit */
        u16                        pc ____cacheline_aligned_in_smp; /* producer counter */
        u32                        dma_fifo_pc;

        struct mlx5e_cq            cq;

        /* read only */
        struct mlx5_wq_cyc         wq;
        u32                        dma_fifo_mask;
        struct mlx5e_sq_stats     *stats;
        struct {
                struct mlx5e_sq_dma       *dma_fifo;
                struct mlx5e_tx_wqe_info  *wqe_info;
        } db;
        void __iomem              *uar_map;
        struct netdev_queue       *txq;
        u32                        sqn;
        u8                         min_inline_mode;
        struct device             *pdev;
        __be32                     mkey_be;
        unsigned long              state;      /* MLX5E_SQ_STATE_* bits */
        struct hwtstamp_config    *tstamp;
        struct mlx5_clock         *clock;

        /* control path */
        struct mlx5_wq_ctrl        wq_ctrl;
        struct mlx5e_channel      *channel;
        int                        txq_ix;
        u32                        rate_limit;
        struct mlx5e_txqsq_recover {
                struct work_struct         recover_work;
                u64                        last_recover;
        } recover;
} ____cacheline_aligned_in_smp;
 398
/* A DMA-mapped page. */
struct mlx5e_dma_info {
        struct page     *page;
        dma_addr_t      addr;
};

/* Bookkeeping for an in-flight XDP transmit frame. */
struct mlx5e_xdp_info {
        struct xdp_frame      *xdpf;
        dma_addr_t            dma_addr;
        struct mlx5e_dma_info di;
};
 409
/* Send queue dedicated to XDP_TX / XDP_REDIRECT transmissions; same
 * cacheline grouping rationale as mlx5e_txqsq.
 */
struct mlx5e_xdpsq {
        /* data path */

        /* dirtied @completion */
        u16                        cc;
        bool                       redirect_flush;

        /* dirtied @xmit */
        u16                        pc ____cacheline_aligned_in_smp;
        bool                       doorbell;   /* doorbell ring pending */

        struct mlx5e_cq            cq;

        /* read only */
        struct mlx5_wq_cyc         wq;
        struct mlx5e_xdpsq_stats  *stats;
        struct {
                struct mlx5e_xdp_info     *xdpi;
        } db;
        void __iomem              *uar_map;
        u32                        sqn;
        struct device             *pdev;
        __be32                     mkey_be;
        u8                         min_inline_mode;
        unsigned long              state;
        unsigned int               hw_mtu;

        /* control path */
        struct mlx5_wq_ctrl        wq_ctrl;
        struct mlx5e_channel      *channel;
} ____cacheline_aligned_in_smp;
 441
/* Internal control operations SQ (e.g. posting UMR WQEs for striding RQ). */
struct mlx5e_icosq {
        /* data path */

        /* dirtied @xmit */
        u16                        pc ____cacheline_aligned_in_smp;

        struct mlx5e_cq            cq;

        /* write@xmit, read@completion */
        struct {
                struct mlx5e_sq_wqe_info *ico_wqe;
        } db;

        /* read only */
        struct mlx5_wq_cyc         wq;
        void __iomem              *uar_map;
        u32                        sqn;
        unsigned long              state;

        /* control path */
        struct mlx5_wq_ctrl        wq_ctrl;
        struct mlx5e_channel      *channel;
} ____cacheline_aligned_in_smp;
 465
 466static inline bool
 467mlx5e_wqc_has_room_for(struct mlx5_wq_cyc *wq, u16 cc, u16 pc, u16 n)
 468{
 469        return (mlx5_wq_cyc_ctr2ix(wq, cc - pc) >= n) || (cc == pc);
 470}
 471
/* Per-fragment RX descriptor: which mapped page, at what offset. */
struct mlx5e_wqe_frag_info {
        struct mlx5e_dma_info *di;
        u32 offset;
        bool last_in_page;
};

/* Pages backing one multi-packet WQE. */
struct mlx5e_umr_dma_info {
        struct mlx5e_dma_info  dma_info[MLX5_MPWRQ_PAGES_PER_WQE];
};

/* State of one in-flight multi-packet WQE. */
struct mlx5e_mpw_info {
        struct mlx5e_umr_dma_info umr;
        u16 consumed_strides;
        DECLARE_BITMAP(xdp_xmit_bitmap, MLX5_MPWRQ_PAGES_PER_WQE);
};

#define MLX5E_MAX_RX_FRAGS 4

/* a single cache unit is capable to serve one napi call (for non-striding rq)
 * or a MPWQE (for striding rq).
 */
#define MLX5E_CACHE_UNIT        (MLX5_MPWRQ_PAGES_PER_WQE > NAPI_POLL_WEIGHT ? \
                                 MLX5_MPWRQ_PAGES_PER_WQE : NAPI_POLL_WEIGHT)
#define MLX5E_CACHE_SIZE        (4 * roundup_pow_of_two(MLX5E_CACHE_UNIT))
/* Small ring of recently released pages, recycled before hitting the
 * page allocator / page pool.
 */
struct mlx5e_page_cache {
        u32 head;
        u32 tail;
        struct mlx5e_dma_info page_cache[MLX5E_CACHE_SIZE];
};
 501
struct mlx5e_rq;
/* Per-RQ dispatch function types, selected once at RQ creation so the hot
 * path avoids per-packet branching on the RQ type.
 */
typedef void (*mlx5e_fp_handle_rx_cqe)(struct mlx5e_rq*, struct mlx5_cqe64*);
typedef struct sk_buff *
(*mlx5e_fp_skb_from_cqe_mpwrq)(struct mlx5e_rq *rq, struct mlx5e_mpw_info *wi,
                               u16 cqe_bcnt, u32 head_offset, u32 page_idx);
typedef struct sk_buff *
(*mlx5e_fp_skb_from_cqe)(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe,
                         struct mlx5e_wqe_frag_info *wi, u32 cqe_bcnt);
typedef bool (*mlx5e_fp_post_rx_wqes)(struct mlx5e_rq *rq);
typedef void (*mlx5e_fp_dealloc_wqe)(struct mlx5e_rq*, u16);

/* Bits for mlx5e_rq.flags. */
enum mlx5e_rq_flag {
        MLX5E_RQ_FLAG_XDP_XMIT = BIT(0),
};

/* Size/stride of one RX fragment. */
struct mlx5e_rq_frag_info {
        int frag_size;
        int frag_stride;
};

/* Fragmentation layout of a non-striding RQ WQE. */
struct mlx5e_rq_frags_info {
        struct mlx5e_rq_frag_info arr[MLX5E_MAX_RX_FRAGS];
        u8 num_frags;
        u8 log_num_frags;
        u8 wqe_bulk;
};
 528
/* Receive queue. The leading union holds type-specific state: .wqe for the
 * cyclic (non-striding) RQ, .mpwqe for the striding (multi-packet WQE) RQ;
 * rq->wq_type selects which member is live.
 */
struct mlx5e_rq {
        /* data path */
        union {
                struct {
                        struct mlx5_wq_cyc          wq;
                        struct mlx5e_wqe_frag_info *frags;
                        struct mlx5e_dma_info      *di;
                        struct mlx5e_rq_frags_info  info;
                        mlx5e_fp_skb_from_cqe       skb_from_cqe;
                } wqe;
                struct {
                        struct mlx5_wq_ll      wq;
                        struct mlx5e_umr_wqe   umr_wqe;
                        struct mlx5e_mpw_info *info;
                        mlx5e_fp_skb_from_cqe_mpwrq skb_from_cqe_mpwrq;
                        u16                    num_strides;
                        u8                     log_stride_sz;
                        bool                   umr_in_progress;
                } mpwqe;
        };
        struct {
                u16            headroom;
                u8             map_dir;   /* dma map direction */
        } buff;

        struct mlx5e_channel  *channel;
        struct device         *pdev;
        struct net_device     *netdev;
        struct mlx5e_rq_stats *stats;
        struct mlx5e_cq        cq;
        struct mlx5e_page_cache page_cache;
        struct hwtstamp_config *tstamp;
        struct mlx5_clock      *clock;

        /* per-RQ dispatch, set once at creation */
        mlx5e_fp_handle_rx_cqe handle_rx_cqe;
        mlx5e_fp_post_rx_wqes  post_wqes;
        mlx5e_fp_dealloc_wqe   dealloc_wqe;

        unsigned long          state;      /* MLX5E_RQ_STATE_* bits */
        int                    ix;

        struct net_dim         dim; /* Dynamic Interrupt Moderation */

        /* XDP */
        struct bpf_prog       *xdp_prog;
        struct mlx5e_xdpsq     xdpsq;
        DECLARE_BITMAP(flags, 8);          /* enum mlx5e_rq_flag */
        struct page_pool      *page_pool;

        /* control */
        struct mlx5_wq_ctrl    wq_ctrl;
        __be32                 mkey_be;
        u8                     wq_type;
        u32                    rqn;
        struct mlx5_core_dev  *mdev;
        struct mlx5_core_mkey  umr_mkey;

        /* XDP read-mostly */
        struct xdp_rxq_info    xdp_rxq;
} ____cacheline_aligned_in_smp;
 589
/* One channel: an RQ, one TX SQ per TC, the internal control SQ and the
 * NAPI context, all served by a single completion vector / CPU.
 */
struct mlx5e_channel {
        /* data path */
        struct mlx5e_rq            rq;
        struct mlx5e_txqsq         sq[MLX5E_MAX_NUM_TC];
        struct mlx5e_icosq         icosq;   /* internal control operations */
        bool                       xdp;
        struct napi_struct         napi;
        struct device             *pdev;
        struct net_device         *netdev;
        __be32                     mkey_be;
        u8                         num_tc;

        /* XDP_REDIRECT */
        struct mlx5e_xdpsq         xdpsq;

        /* data path - accessed per napi poll */
        struct irq_desc *irq_desc;
        struct mlx5e_ch_stats     *stats;

        /* control */
        struct mlx5e_priv         *priv;
        struct mlx5_core_dev      *mdev;
        struct hwtstamp_config    *tstamp;
        int                        ix;
        int                        cpu;
};

/* The active channel set plus the parameters it was built from. */
struct mlx5e_channels {
        struct mlx5e_channel **c;
        unsigned int           num;
        struct mlx5e_params    params;
};

/* Per-channel statistics; persists across channel recreation. */
struct mlx5e_channel_stats {
        struct mlx5e_ch_stats ch;
        struct mlx5e_sq_stats sq[MLX5E_MAX_NUM_TC];
        struct mlx5e_rq_stats rq;
        struct mlx5e_xdpsq_stats rq_xdpsq;
        struct mlx5e_xdpsq_stats xdpsq;
} ____cacheline_aligned_in_smp;
 630
/* Bit numbers for mlx5e_priv.state. */
enum {
        MLX5E_STATE_ASYNC_EVENTS_ENABLED,
        MLX5E_STATE_OPENED,
        MLX5E_STATE_DESTROYING,
};

/* RQ table handle. */
struct mlx5e_rqt {
        u32              rqtn;
        bool             enabled;
};

/* TIR (transport interface receive) handle and its RQ table. */
struct mlx5e_tir {
        u32               tirn;
        struct mlx5e_rqt  rqt;
        struct list_head  list;
};

/* Flow table priorities within the NIC RX namespace. */
enum {
        MLX5E_TC_PRIO = 0,
        MLX5E_NIC_PRIO
};
 652
/* Per-netdev driver private data; data-path fields first, control-path
 * state (protected by state_lock) after.
 */
struct mlx5e_priv {
        /* priv data path fields - start */
        struct mlx5e_txqsq *txq2sq[MLX5E_MAX_NUM_CHANNELS * MLX5E_MAX_NUM_TC];
        int channel_tc2txq[MLX5E_MAX_NUM_CHANNELS][MLX5E_MAX_NUM_TC];
#ifdef CONFIG_MLX5_CORE_EN_DCB
        struct mlx5e_dcbx_dp       dcbx_dp;
#endif
        /* priv data path fields - end */

        u32                        msglevel;
        unsigned long              state;      /* MLX5E_STATE_* bits */
        struct mutex               state_lock; /* Protects Interface state */
        struct mlx5e_rq            drop_rq;    /* sink RQ while real RQs are down */

        struct mlx5e_channels      channels;
        u32                        tisn[MLX5E_MAX_NUM_TC];
        struct mlx5e_rqt           indir_rqt;
        struct mlx5e_tir           indir_tir[MLX5E_NUM_INDIR_TIRS];
        struct mlx5e_tir           inner_indir_tir[MLX5E_NUM_INDIR_TIRS];
        struct mlx5e_tir           direct_tir[MLX5E_MAX_NUM_CHANNELS];
        u32                        tx_rates[MLX5E_MAX_NUM_SQS];

        struct mlx5e_flow_steering fs;

        struct workqueue_struct    *wq;
        struct work_struct         update_carrier_work;
        struct work_struct         set_rx_mode_work;
        struct work_struct         tx_timeout_work;
        struct delayed_work        update_stats_work;

        struct mlx5_core_dev      *mdev;
        struct net_device         *netdev;
        struct mlx5e_stats         stats;
        struct mlx5e_channel_stats channel_stats[MLX5E_MAX_NUM_CHANNELS];
        u8                         max_opened_tc;
        struct hwtstamp_config     tstamp;
        u16                        q_counter;
        u16                        drop_rq_q_counter;
#ifdef CONFIG_MLX5_CORE_EN_DCB
        struct mlx5e_dcbx          dcbx;
#endif

        const struct mlx5e_profile *profile;
        void                      *ppriv;      /* profile-private data */
#ifdef CONFIG_MLX5_EN_IPSEC
        struct mlx5e_ipsec        *ipsec;
#endif
#ifdef CONFIG_MLX5_EN_TLS
        struct mlx5e_tls          *tls;
#endif
};
 704
/* Ops vector that specializes the common mlx5e netdev core for a given
 * role (native NIC, representor, ...).
 */
struct mlx5e_profile {
        void    (*init)(struct mlx5_core_dev *mdev,
                        struct net_device *netdev,
                        const struct mlx5e_profile *profile, void *ppriv);
        void    (*cleanup)(struct mlx5e_priv *priv);
        int     (*init_rx)(struct mlx5e_priv *priv);
        void    (*cleanup_rx)(struct mlx5e_priv *priv);
        int     (*init_tx)(struct mlx5e_priv *priv);
        void    (*cleanup_tx)(struct mlx5e_priv *priv);
        void    (*enable)(struct mlx5e_priv *priv);
        void    (*disable)(struct mlx5e_priv *priv);
        void    (*update_stats)(struct mlx5e_priv *priv);
        void    (*update_carrier)(struct mlx5e_priv *priv);
        int     (*max_nch)(struct mlx5_core_dev *mdev);
        struct {
                mlx5e_fp_handle_rx_cqe handle_rx_cqe;
                mlx5e_fp_handle_rx_cqe handle_rx_cqe_mpwqe;
        } rx_handlers;
        int     max_tc;
};
 725
void mlx5e_build_ptys2ethtool_map(void);

/* TX path */
u16 mlx5e_select_queue(struct net_device *dev, struct sk_buff *skb,
                       struct net_device *sb_dev,
                       select_queue_fallback_t fallback);
netdev_tx_t mlx5e_xmit(struct sk_buff *skb, struct net_device *dev);
netdev_tx_t mlx5e_sq_xmit(struct mlx5e_txqsq *sq, struct sk_buff *skb,
                          struct mlx5e_tx_wqe *wqe, u16 pi);

/* NAPI / completion processing */
void mlx5e_completion_event(struct mlx5_core_cq *mcq);
void mlx5e_cq_error_event(struct mlx5_core_cq *mcq, enum mlx5_event event);
int mlx5e_napi_poll(struct napi_struct *napi, int budget);
bool mlx5e_poll_tx_cq(struct mlx5e_cq *cq, int napi_budget);
int mlx5e_poll_rx_cq(struct mlx5e_cq *cq, int budget);
void mlx5e_free_txqsq_descs(struct mlx5e_txqsq *sq);

/* Striding RQ capability queries */
bool mlx5e_check_fragmented_striding_rq_cap(struct mlx5_core_dev *mdev);
bool mlx5e_striding_rq_possible(struct mlx5_core_dev *mdev,
                                struct mlx5e_params *params);

/* RX path */
void mlx5e_page_dma_unmap(struct mlx5e_rq *rq, struct mlx5e_dma_info *dma_info);
void mlx5e_page_release(struct mlx5e_rq *rq, struct mlx5e_dma_info *dma_info,
                        bool recycle);
void mlx5e_handle_rx_cqe(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe);
void mlx5e_handle_rx_cqe_mpwrq(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe);
bool mlx5e_post_rx_wqes(struct mlx5e_rq *rq);
bool mlx5e_post_rx_mpwqes(struct mlx5e_rq *rq);
void mlx5e_dealloc_rx_wqe(struct mlx5e_rq *rq, u16 ix);
void mlx5e_dealloc_rx_mpwqe(struct mlx5e_rq *rq, u16 ix);
struct sk_buff *
mlx5e_skb_from_cqe_mpwrq_linear(struct mlx5e_rq *rq, struct mlx5e_mpw_info *wi,
                                u16 cqe_bcnt, u32 head_offset, u32 page_idx);
struct sk_buff *
mlx5e_skb_from_cqe_mpwrq_nonlinear(struct mlx5e_rq *rq, struct mlx5e_mpw_info *wi,
                                   u16 cqe_bcnt, u32 head_offset, u32 page_idx);
struct sk_buff *
mlx5e_skb_from_cqe_linear(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe,
                          struct mlx5e_wqe_frag_info *wi, u32 cqe_bcnt);
struct sk_buff *
mlx5e_skb_from_cqe_nonlinear(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe,
                             struct mlx5e_wqe_frag_info *wi, u32 cqe_bcnt);

void mlx5e_update_stats(struct mlx5e_priv *priv);

/* Configuration / ethtool helpers */
void mlx5e_init_l2_addr(struct mlx5e_priv *priv);
int mlx5e_self_test_num(struct mlx5e_priv *priv);
void mlx5e_self_test(struct net_device *ndev, struct ethtool_test *etest,
                     u64 *buf);
void mlx5e_set_rx_mode_work(struct work_struct *work);

int mlx5e_hwstamp_set(struct mlx5e_priv *priv, struct ifreq *ifr);
int mlx5e_hwstamp_get(struct mlx5e_priv *priv, struct ifreq *ifr);
int mlx5e_modify_rx_cqe_compression_locked(struct mlx5e_priv *priv, bool val);

int mlx5e_vlan_rx_add_vid(struct net_device *dev, __always_unused __be16 proto,
                          u16 vid);
int mlx5e_vlan_rx_kill_vid(struct net_device *dev, __always_unused __be16 proto,
                           u16 vid);
void mlx5e_timestamp_init(struct mlx5e_priv *priv);
 785
/* Target description for RQ-table redirection: either a single direct RQN
 * or an RSS spread over a channel set (is_rss selects the union member).
 */
struct mlx5e_redirect_rqt_param {
        bool is_rss;
        union {
                u32 rqn; /* Direct RQN (Non-RSS) */
                struct {
                        u8 hfunc;
                        struct mlx5e_channels *channels;
                } rss; /* RSS data */
        };
};
 796
int mlx5e_redirect_rqt(struct mlx5e_priv *priv, u32 rqtn, int sz,
                       struct mlx5e_redirect_rqt_param rrp);
void mlx5e_build_indir_tir_ctx_hash(struct mlx5e_params *params,
                                    enum mlx5e_traffic_types tt,
                                    void *tirc, bool inner);

/* Open/close; callers hold priv->state_lock for the *_locked variants. */
int mlx5e_open_locked(struct net_device *netdev);
int mlx5e_close_locked(struct net_device *netdev);

int mlx5e_open_channels(struct mlx5e_priv *priv,
                        struct mlx5e_channels *chs);
void mlx5e_close_channels(struct mlx5e_channels *chs);

/* Function pointer to be used to modify HW settings while
 * switching channels
 */
typedef int (*mlx5e_fp_hw_modify)(struct mlx5e_priv *priv);
void mlx5e_switch_priv_channels(struct mlx5e_priv *priv,
                                struct mlx5e_channels *new_chs,
                                mlx5e_fp_hw_modify hw_modify);
void mlx5e_activate_priv_channels(struct mlx5e_priv *priv);
void mlx5e_deactivate_priv_channels(struct mlx5e_priv *priv);

/* Parameter builders */
void mlx5e_build_default_indir_rqt(u32 *indirection_rqt, int len,
                                   int num_channels);
void mlx5e_set_tx_cq_mode_params(struct mlx5e_params *params,
                                 u8 cq_period_mode);
void mlx5e_set_rx_cq_mode_params(struct mlx5e_params *params,
                                 u8 cq_period_mode);
void mlx5e_set_rq_type(struct mlx5_core_dev *mdev, struct mlx5e_params *params);
void mlx5e_init_rq_type_params(struct mlx5_core_dev *mdev,
                               struct mlx5e_params *params);
 829
 830static inline bool mlx5e_tunnel_inner_ft_supported(struct mlx5_core_dev *mdev)
 831{
 832        return (MLX5_CAP_ETH(mdev, tunnel_stateless_gre) &&
 833                MLX5_CAP_FLOWTABLE_NIC_RX(mdev, ft_field_support.inner_ip_version));
 834}
 835
 836static inline void mlx5e_sq_fetch_wqe(struct mlx5e_txqsq *sq,
 837                                      struct mlx5e_tx_wqe **wqe,
 838                                      u16 *pi)
 839{
 840        struct mlx5_wq_cyc *wq = &sq->wq;
 841
 842        *pi  = mlx5_wq_cyc_ctr2ix(wq, sq->pc);
 843        *wqe = mlx5_wq_cyc_get_wqe(wq, *pi);
 844        memset(*wqe, 0, sizeof(**wqe));
 845}
 846
 847static inline
 848struct mlx5e_tx_wqe *mlx5e_post_nop(struct mlx5_wq_cyc *wq, u32 sqn, u16 *pc)
 849{
 850        u16                         pi   = mlx5_wq_cyc_ctr2ix(wq, *pc);
 851        struct mlx5e_tx_wqe        *wqe  = mlx5_wq_cyc_get_wqe(wq, pi);
 852        struct mlx5_wqe_ctrl_seg   *cseg = &wqe->ctrl;
 853
 854        memset(cseg, 0, sizeof(*cseg));
 855
 856        cseg->opmod_idx_opcode = cpu_to_be32((*pc << 8) | MLX5_OPCODE_NOP);
 857        cseg->qpn_ds           = cpu_to_be32((sqn << 8) | 0x01);
 858
 859        (*pc)++;
 860
 861        return wqe;
 862}
 863
/* Ring the SQ doorbell for WQEs posted up to producer counter @pc.
 * Requests a CQE for the last WQE, publishes @pc to the doorbell record,
 * then writes the control segment to the device's UAR page. The two
 * barriers preserve the required WQE -> doorbell record -> doorbell
 * ordering; do not reorder these statements.
 */
static inline
void mlx5e_notify_hw(struct mlx5_wq_cyc *wq, u16 pc,
		     void __iomem *uar_map,
		     struct mlx5_wqe_ctrl_seg *ctrl)
{
	/* request a completion (CQE) for this WQE */
	ctrl->fm_ce_se = MLX5_WQE_CTRL_CQ_UPDATE;
	/* ensure wqe is visible to device before updating doorbell record */
	dma_wmb();

	*wq->db = cpu_to_be32(pc);

	/* ensure doorbell record is visible to device before ringing the
	 * doorbell
	 */
	wmb();

	mlx5_write64((__be32 *)ctrl, uar_map, NULL);
}
 882
 883static inline void mlx5e_cq_arm(struct mlx5e_cq *cq)
 884{
 885        struct mlx5_core_cq *mcq;
 886
 887        mcq = &cq->mcq;
 888        mlx5_cq_arm(mcq, MLX5_CQ_DB_REQ_NOT, mcq->uar->map, cq->wq.cc);
 889}
 890
 891extern const struct ethtool_ops mlx5e_ethtool_ops;
 892#ifdef CONFIG_MLX5_CORE_EN_DCB
 893extern const struct dcbnl_rtnl_ops mlx5e_dcbnl_ops;
 894int mlx5e_dcbnl_ieee_setets_core(struct mlx5e_priv *priv, struct ieee_ets *ets);
 895void mlx5e_dcbnl_initialize(struct mlx5e_priv *priv);
 896void mlx5e_dcbnl_init_app(struct mlx5e_priv *priv);
 897void mlx5e_dcbnl_delete_app(struct mlx5e_priv *priv);
 898#endif
 899
 900int mlx5e_create_tir(struct mlx5_core_dev *mdev,
 901                     struct mlx5e_tir *tir, u32 *in, int inlen);
 902void mlx5e_destroy_tir(struct mlx5_core_dev *mdev,
 903                       struct mlx5e_tir *tir);
 904int mlx5e_create_mdev_resources(struct mlx5_core_dev *mdev);
 905void mlx5e_destroy_mdev_resources(struct mlx5_core_dev *mdev);
 906int mlx5e_refresh_tirs(struct mlx5e_priv *priv, bool enable_uc_lb);
 907
 908/* common netdev helpers */
 909int mlx5e_create_indirect_rqt(struct mlx5e_priv *priv);
 910
 911int mlx5e_create_indirect_tirs(struct mlx5e_priv *priv);
 912void mlx5e_destroy_indirect_tirs(struct mlx5e_priv *priv);
 913
 914int mlx5e_create_direct_rqts(struct mlx5e_priv *priv);
 915void mlx5e_destroy_direct_rqts(struct mlx5e_priv *priv);
 916int mlx5e_create_direct_tirs(struct mlx5e_priv *priv);
 917void mlx5e_destroy_direct_tirs(struct mlx5e_priv *priv);
 918void mlx5e_destroy_rqt(struct mlx5e_priv *priv, struct mlx5e_rqt *rqt);
 919
 920int mlx5e_create_tis(struct mlx5_core_dev *mdev, int tc,
 921                     u32 underlay_qpn, u32 *tisn);
 922void mlx5e_destroy_tis(struct mlx5_core_dev *mdev, u32 tisn);
 923
 924int mlx5e_create_tises(struct mlx5e_priv *priv);
 925void mlx5e_cleanup_nic_tx(struct mlx5e_priv *priv);
 926int mlx5e_close(struct net_device *netdev);
 927int mlx5e_open(struct net_device *netdev);
 928void mlx5e_update_stats_work(struct work_struct *work);
 929
 930int mlx5e_bits_invert(unsigned long a, int size);
 931
 932typedef int (*change_hw_mtu_cb)(struct mlx5e_priv *priv);
 933int mlx5e_change_mtu(struct net_device *netdev, int new_mtu,
 934                     change_hw_mtu_cb set_mtu_cb);
 935
 936/* ethtool helpers */
 937void mlx5e_ethtool_get_drvinfo(struct mlx5e_priv *priv,
 938                               struct ethtool_drvinfo *drvinfo);
 939void mlx5e_ethtool_get_strings(struct mlx5e_priv *priv,
 940                               uint32_t stringset, uint8_t *data);
 941int mlx5e_ethtool_get_sset_count(struct mlx5e_priv *priv, int sset);
 942void mlx5e_ethtool_get_ethtool_stats(struct mlx5e_priv *priv,
 943                                     struct ethtool_stats *stats, u64 *data);
 944void mlx5e_ethtool_get_ringparam(struct mlx5e_priv *priv,
 945                                 struct ethtool_ringparam *param);
 946int mlx5e_ethtool_set_ringparam(struct mlx5e_priv *priv,
 947                                struct ethtool_ringparam *param);
 948void mlx5e_ethtool_get_channels(struct mlx5e_priv *priv,
 949                                struct ethtool_channels *ch);
 950int mlx5e_ethtool_set_channels(struct mlx5e_priv *priv,
 951                               struct ethtool_channels *ch);
 952int mlx5e_ethtool_get_coalesce(struct mlx5e_priv *priv,
 953                               struct ethtool_coalesce *coal);
 954int mlx5e_ethtool_set_coalesce(struct mlx5e_priv *priv,
 955                               struct ethtool_coalesce *coal);
 956int mlx5e_ethtool_get_ts_info(struct mlx5e_priv *priv,
 957                              struct ethtool_ts_info *info);
 958int mlx5e_ethtool_flash_device(struct mlx5e_priv *priv,
 959                               struct ethtool_flash *flash);
 960
 961/* mlx5e generic netdev management API */
 962struct net_device*
 963mlx5e_create_netdev(struct mlx5_core_dev *mdev, const struct mlx5e_profile *profile,
 964                    void *ppriv);
 965int mlx5e_attach_netdev(struct mlx5e_priv *priv);
 966void mlx5e_detach_netdev(struct mlx5e_priv *priv);
 967void mlx5e_destroy_netdev(struct mlx5e_priv *priv);
 968void mlx5e_build_nic_params(struct mlx5_core_dev *mdev,
 969                            struct mlx5e_params *params,
 970                            u16 max_channels, u16 mtu);
 971u8 mlx5e_params_calculate_tx_min_inline(struct mlx5_core_dev *mdev);
 972void mlx5e_rx_dim_work(struct work_struct *work);
 973void mlx5e_tx_dim_work(struct work_struct *work);
 974#endif /* __MLX5_EN_H__ */
 975