linux/drivers/net/ethernet/mellanox/mlx5/core/en.h
<<
>>
Prefs
   1/*
   2 * Copyright (c) 2015-2016, Mellanox Technologies. All rights reserved.
   3 *
   4 * This software is available to you under a choice of one of two
   5 * licenses.  You may choose to be licensed under the terms of the GNU
   6 * General Public License (GPL) Version 2, available from the file
   7 * COPYING in the main directory of this source tree, or the
   8 * OpenIB.org BSD license below:
   9 *
  10 *     Redistribution and use in source and binary forms, with or
  11 *     without modification, are permitted provided that the following
  12 *     conditions are met:
  13 *
  14 *      - Redistributions of source code must retain the above
  15 *        copyright notice, this list of conditions and the following
  16 *        disclaimer.
  17 *
  18 *      - Redistributions in binary form must reproduce the above
  19 *        copyright notice, this list of conditions and the following
  20 *        disclaimer in the documentation and/or other materials
  21 *        provided with the distribution.
  22 *
  23 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
  24 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
  25 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
  26 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
  27 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
  28 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
  29 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
  30 * SOFTWARE.
  31 */
  32#ifndef __MLX5_EN_H__
  33#define __MLX5_EN_H__
  34
  35#include <linux/if_vlan.h>
  36#include <linux/etherdevice.h>
  37#include <linux/timecounter.h>
  38#include <linux/net_tstamp.h>
  39#include <linux/ptp_clock_kernel.h>
  40#include <linux/crash_dump.h>
  41#include <linux/mlx5/driver.h>
  42#include <linux/mlx5/qp.h>
  43#include <linux/mlx5/cq.h>
  44#include <linux/mlx5/port.h>
  45#include <linux/mlx5/vport.h>
  46#include <linux/mlx5/transobj.h>
  47#include <linux/mlx5/fs.h>
  48#include <linux/rhashtable.h>
  49#include <net/switchdev.h>
  50#include <net/xdp.h>
  51#include <linux/net_dim.h>
  52#include "wq.h"
  53#include "mlx5_core.h"
  54#include "en_stats.h"
  55#include "en/fs.h"
  56
  57extern const struct net_device_ops mlx5e_netdev_ops;
  58struct page_pool;
  59
  60#define MLX5E_METADATA_ETHER_TYPE (0x8CE4)
  61#define MLX5E_METADATA_ETHER_LEN 8
  62
  63#define MLX5_SET_CFG(p, f, v) MLX5_SET(create_flow_group_in, p, f, v)
  64
  65#define MLX5E_ETH_HARD_MTU (ETH_HLEN + VLAN_HLEN + ETH_FCS_LEN)
  66
  67#define MLX5E_HW2SW_MTU(params, hwmtu) ((hwmtu) - ((params)->hard_mtu))
  68#define MLX5E_SW2HW_MTU(params, swmtu) ((swmtu) + ((params)->hard_mtu))
  69
  70#define MLX5E_MAX_PRIORITY      8
  71#define MLX5E_MAX_DSCP          64
  72#define MLX5E_MAX_NUM_TC        8
  73
  74#define MLX5_RX_HEADROOM NET_SKB_PAD
  75#define MLX5_SKB_FRAG_SZ(len)   (SKB_DATA_ALIGN(len) +  \
  76                                 SKB_DATA_ALIGN(sizeof(struct skb_shared_info)))
  77
  78#define MLX5_MPWRQ_MIN_LOG_STRIDE_SZ(mdev) \
  79        (6 + MLX5_CAP_GEN(mdev, cache_line_128byte)) /* HW restriction */
  80#define MLX5_MPWRQ_LOG_STRIDE_SZ(mdev, req) \
  81        max_t(u32, MLX5_MPWRQ_MIN_LOG_STRIDE_SZ(mdev), req)
  82#define MLX5_MPWRQ_DEF_LOG_STRIDE_SZ(mdev)       MLX5_MPWRQ_LOG_STRIDE_SZ(mdev, 6)
  83#define MLX5_MPWRQ_CQE_CMPRS_LOG_STRIDE_SZ(mdev) MLX5_MPWRQ_LOG_STRIDE_SZ(mdev, 8)
  84#define MLX5E_MPWQE_STRIDE_SZ(mdev, cqe_cmprs) \
  85        (cqe_cmprs ? MLX5_MPWRQ_CQE_CMPRS_LOG_STRIDE_SZ(mdev) : \
  86        MLX5_MPWRQ_DEF_LOG_STRIDE_SZ(mdev))
  87
  88#define MLX5_MPWRQ_LOG_WQE_SZ                   18
  89#define MLX5_MPWRQ_WQE_PAGE_ORDER  (MLX5_MPWRQ_LOG_WQE_SZ - PAGE_SHIFT > 0 ? \
  90                                    MLX5_MPWRQ_LOG_WQE_SZ - PAGE_SHIFT : 0)
  91#define MLX5_MPWRQ_PAGES_PER_WQE                BIT(MLX5_MPWRQ_WQE_PAGE_ORDER)
  92
  93#define MLX5_MTT_OCTW(npages) (ALIGN(npages, 8) / 2)
  94#define MLX5E_REQUIRED_WQE_MTTS         (ALIGN(MLX5_MPWRQ_PAGES_PER_WQE, 8))
  95#define MLX5E_LOG_ALIGNED_MPWQE_PPW     (ilog2(MLX5E_REQUIRED_WQE_MTTS))
  96#define MLX5E_REQUIRED_MTTS(wqes)       (wqes * MLX5E_REQUIRED_WQE_MTTS)
  97#define MLX5E_MAX_RQ_NUM_MTTS   \
  98        ((1 << 16) * 2) /* So that MLX5_MTT_OCTW(num_mtts) fits into u16 */
  99#define MLX5E_ORDER2_MAX_PACKET_MTU (order_base_2(10 * 1024))
 100#define MLX5E_PARAMS_MAXIMUM_LOG_RQ_SIZE_MPW    \
 101                (ilog2(MLX5E_MAX_RQ_NUM_MTTS / MLX5E_REQUIRED_WQE_MTTS))
 102#define MLX5E_LOG_MAX_RQ_NUM_PACKETS_MPW \
 103        (MLX5E_PARAMS_MAXIMUM_LOG_RQ_SIZE_MPW + \
 104         (MLX5_MPWRQ_LOG_WQE_SZ - MLX5E_ORDER2_MAX_PACKET_MTU))
 105
 106#define MLX5E_MIN_SKB_FRAG_SZ           (MLX5_SKB_FRAG_SZ(MLX5_RX_HEADROOM))
 107#define MLX5E_LOG_MAX_RX_WQE_BULK       \
 108        (ilog2(PAGE_SIZE / roundup_pow_of_two(MLX5E_MIN_SKB_FRAG_SZ)))
 109
 110#define MLX5E_PARAMS_MINIMUM_LOG_SQ_SIZE                0x6
 111#define MLX5E_PARAMS_DEFAULT_LOG_SQ_SIZE                0xa
 112#define MLX5E_PARAMS_MAXIMUM_LOG_SQ_SIZE                0xd
 113
 114#define MLX5E_PARAMS_MINIMUM_LOG_RQ_SIZE (1 + MLX5E_LOG_MAX_RX_WQE_BULK)
 115#define MLX5E_PARAMS_DEFAULT_LOG_RQ_SIZE                0xa
 116#define MLX5E_PARAMS_MAXIMUM_LOG_RQ_SIZE min_t(u8, 0xd, \
 117                                               MLX5E_LOG_MAX_RQ_NUM_PACKETS_MPW)
 118
 119#define MLX5E_PARAMS_MINIMUM_LOG_RQ_SIZE_MPW            0x2
 120
 121#define MLX5E_RX_MAX_HEAD (256)
 122
 123#define MLX5E_PARAMS_DEFAULT_LRO_WQE_SZ                 (64 * 1024)
 124#define MLX5E_DEFAULT_LRO_TIMEOUT                       32
 125#define MLX5E_LRO_TIMEOUT_ARR_SIZE                      4
 126
 127#define MLX5E_PARAMS_DEFAULT_RX_CQ_MODERATION_USEC      0x10
 128#define MLX5E_PARAMS_DEFAULT_RX_CQ_MODERATION_USEC_FROM_CQE 0x3
 129#define MLX5E_PARAMS_DEFAULT_RX_CQ_MODERATION_PKTS      0x20
 130#define MLX5E_PARAMS_DEFAULT_TX_CQ_MODERATION_USEC      0x10
 131#define MLX5E_PARAMS_DEFAULT_TX_CQ_MODERATION_USEC_FROM_CQE 0x10
 132#define MLX5E_PARAMS_DEFAULT_TX_CQ_MODERATION_PKTS      0x20
 133#define MLX5E_PARAMS_DEFAULT_MIN_RX_WQES                0x80
 134#define MLX5E_PARAMS_DEFAULT_MIN_RX_WQES_MPW            0x2
 135
 136#define MLX5E_LOG_INDIR_RQT_SIZE       0x7
 137#define MLX5E_INDIR_RQT_SIZE           BIT(MLX5E_LOG_INDIR_RQT_SIZE)
 138#define MLX5E_MIN_NUM_CHANNELS         0x1
 139#define MLX5E_MAX_NUM_CHANNELS         (MLX5E_INDIR_RQT_SIZE >> 1)
 140#define MLX5E_MAX_NUM_SQS              (MLX5E_MAX_NUM_CHANNELS * MLX5E_MAX_NUM_TC)
 141#define MLX5E_TX_CQ_POLL_BUDGET        128
 142#define MLX5E_SQ_RECOVER_MIN_INTERVAL  500 /* msecs */
 143
 144#define MLX5E_UMR_WQE_INLINE_SZ \
 145        (sizeof(struct mlx5e_umr_wqe) + \
 146         ALIGN(MLX5_MPWRQ_PAGES_PER_WQE * sizeof(struct mlx5_mtt), \
 147               MLX5_UMR_MTT_ALIGNMENT))
 148#define MLX5E_UMR_WQEBBS \
 149        (DIV_ROUND_UP(MLX5E_UMR_WQE_INLINE_SZ, MLX5_SEND_WQE_BB))
 150#define MLX5E_ICOSQ_MAX_WQEBBS MLX5E_UMR_WQEBBS
 151
 152#define MLX5E_NUM_MAIN_GROUPS 9
 153
 154#define MLX5E_MSG_LEVEL                 NETIF_MSG_LINK
 155
/* Conditionally emit a driver log message: printed only when the
 * NETIF_MSG_<mlevel> bit is set in priv->msglevel (the ethtool msglvl
 * mask).  Wrapped in do/while(0) so it behaves as a single statement.
 */
#define mlx5e_dbg(mlevel, priv, format, ...)                    \
do {                                                            \
	if (NETIF_MSG_##mlevel & (priv)->msglevel)              \
		netdev_warn(priv->netdev, format,               \
			    ##__VA_ARGS__);                     \
} while (0)
 162
 163
 164static inline u16 mlx5_min_rx_wqes(int wq_type, u32 wq_size)
 165{
 166        switch (wq_type) {
 167        case MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ:
 168                return min_t(u16, MLX5E_PARAMS_DEFAULT_MIN_RX_WQES_MPW,
 169                             wq_size / 2);
 170        default:
 171                return min_t(u16, MLX5E_PARAMS_DEFAULT_MIN_RX_WQES,
 172                             wq_size / 2);
 173        }
 174}
 175
 176/* Use this function to get max num channels (rxqs/txqs) only to create netdev */
 177static inline int mlx5e_get_max_num_channels(struct mlx5_core_dev *mdev)
 178{
 179        return is_kdump_kernel() ?
 180                MLX5E_MIN_NUM_CHANNELS :
 181                min_t(int, mdev->priv.eq_table.num_comp_vectors,
 182                      MLX5E_MAX_NUM_CHANNELS);
 183}
 184
 185/* Use this function to get max num channels after netdev was created */
 186static inline int mlx5e_get_netdev_max_channels(struct net_device *netdev)
 187{
 188        return min_t(unsigned int, netdev->num_rx_queues,
 189                     netdev->num_tx_queues);
 190}
 191
/* Layout of a TX send WQE: control segment, ethernet (inline header)
 * segment, then a variable number of scatter/gather data segments.
 */
struct mlx5e_tx_wqe {
	struct mlx5_wqe_ctrl_seg ctrl;
	struct mlx5_wqe_eth_seg  eth;
	struct mlx5_wqe_data_seg data[0]; /* trailing variable-length array */
};
 197
 198struct mlx5e_rx_wqe_ll {
 199        struct mlx5_wqe_srq_next_seg  next;
 200        struct mlx5_wqe_data_seg      data[0];
 201};
 202
 203struct mlx5e_rx_wqe_cyc {
 204        struct mlx5_wqe_data_seg      data[0];
 205};
 206
 207struct mlx5e_umr_wqe {
 208        struct mlx5_wqe_ctrl_seg       ctrl;
 209        struct mlx5_wqe_umr_ctrl_seg   uctrl;
 210        struct mlx5_mkey_seg           mkc;
 211        struct mlx5_mtt                inline_mtts[0];
 212};
 213
 214extern const char mlx5e_self_tests[][ETH_GSTRING_LEN];
 215
 216enum mlx5e_priv_flag {
 217        MLX5E_PFLAG_RX_CQE_BASED_MODER = (1 << 0),
 218        MLX5E_PFLAG_TX_CQE_BASED_MODER = (1 << 1),
 219        MLX5E_PFLAG_RX_CQE_COMPRESS = (1 << 2),
 220        MLX5E_PFLAG_RX_STRIDING_RQ = (1 << 3),
 221        MLX5E_PFLAG_RX_NO_CSUM_COMPLETE = (1 << 4),
 222};
 223
/* Set (enable != 0) or clear a MLX5E_PFLAG_* bit in (params)->pflags. */
#define MLX5E_SET_PFLAG(params, pflag, enable)                  \
	do {                                                    \
		if (enable)                                     \
			(params)->pflags |= (pflag);            \
		else                                            \
			(params)->pflags &= ~(pflag);           \
	} while (0)
 231
 232#define MLX5E_GET_PFLAG(params, pflag) (!!((params)->pflags & (pflag)))
 233
 234#ifdef CONFIG_MLX5_CORE_EN_DCB
 235#define MLX5E_MAX_BW_ALLOC 100 /* Max percentage of BW allocation */
 236#endif
 237
 238struct mlx5e_params {
 239        u8  log_sq_size;
 240        u8  rq_wq_type;
 241        u8  log_rq_mtu_frames;
 242        u16 num_channels;
 243        u8  num_tc;
 244        bool rx_cqe_compress_def;
 245        struct net_dim_cq_moder rx_cq_moderation;
 246        struct net_dim_cq_moder tx_cq_moderation;
 247        bool lro_en;
 248        u32 lro_wqe_sz;
 249        u8  tx_min_inline_mode;
 250        u8  rss_hfunc;
 251        u8  toeplitz_hash_key[40];
 252        u32 indirection_rqt[MLX5E_INDIR_RQT_SIZE];
 253        bool vlan_strip_disable;
 254        bool scatter_fcs_en;
 255        bool rx_dim_enabled;
 256        bool tx_dim_enabled;
 257        u32 lro_timeout;
 258        u32 pflags;
 259        struct bpf_prog *xdp_prog;
 260        unsigned int sw_mtu;
 261        int hard_mtu;
 262};
 263
 264#ifdef CONFIG_MLX5_CORE_EN_DCB
 265struct mlx5e_cee_config {
 266        /* bw pct for priority group */
 267        u8                         pg_bw_pct[CEE_DCBX_MAX_PGS];
 268        u8                         prio_to_pg_map[CEE_DCBX_MAX_PRIO];
 269        bool                       pfc_setting[CEE_DCBX_MAX_PRIO];
 270        bool                       pfc_enable;
 271};
 272
 273enum {
 274        MLX5_DCB_CHG_RESET,
 275        MLX5_DCB_NO_CHG,
 276        MLX5_DCB_CHG_NO_RESET,
 277};
 278
 279struct mlx5e_dcbx {
 280        enum mlx5_dcbx_oper_mode   mode;
 281        struct mlx5e_cee_config    cee_cfg; /* pending configuration */
 282        u8                         dscp_app_cnt;
 283
 284        /* The only setting that cannot be read from FW */
 285        u8                         tc_tsa[IEEE_8021QAZ_MAX_TCS];
 286        u8                         cap;
 287
 288        /* Buffer configuration */
 289        bool                       manual_buffer;
 290        u32                        cable_len;
 291        u32                        xoff;
 292};
 293
 294struct mlx5e_dcbx_dp {
 295        u8                         dscp2prio[MLX5E_MAX_DSCP];
 296        u8                         trust_state;
 297};
 298#endif
 299
 300enum {
 301        MLX5E_RQ_STATE_ENABLED,
 302        MLX5E_RQ_STATE_AM,
 303        MLX5E_RQ_STATE_NO_CSUM_COMPLETE,
 304};
 305
 306struct mlx5e_cq {
 307        /* data path - accessed per cqe */
 308        struct mlx5_cqwq           wq;
 309
 310        /* data path - accessed per napi poll */
 311        u16                        event_ctr;
 312        struct napi_struct        *napi;
 313        struct mlx5_core_cq        mcq;
 314        struct mlx5e_channel      *channel;
 315
 316        /* cqe decompression */
 317        struct mlx5_cqe64          title;
 318        struct mlx5_mini_cqe8      mini_arr[MLX5_MINI_CQE_ARRAY_SIZE];
 319        u8                         mini_arr_idx;
 320        u16                        decmprs_left;
 321        u16                        decmprs_wqe_counter;
 322
 323        /* control */
 324        struct mlx5_core_dev      *mdev;
 325        struct mlx5_wq_ctrl        wq_ctrl;
 326} ____cacheline_aligned_in_smp;
 327
 328struct mlx5e_tx_wqe_info {
 329        struct sk_buff *skb;
 330        u32 num_bytes;
 331        u8  num_wqebbs;
 332        u8  num_dma;
 333};
 334
 335enum mlx5e_dma_map_type {
 336        MLX5E_DMA_MAP_SINGLE,
 337        MLX5E_DMA_MAP_PAGE
 338};
 339
 340struct mlx5e_sq_dma {
 341        dma_addr_t              addr;
 342        u32                     size;
 343        enum mlx5e_dma_map_type type;
 344};
 345
 346enum {
 347        MLX5E_SQ_STATE_ENABLED,
 348        MLX5E_SQ_STATE_RECOVERING,
 349        MLX5E_SQ_STATE_IPSEC,
 350        MLX5E_SQ_STATE_AM,
 351        MLX5E_SQ_STATE_TLS,
 352        MLX5E_SQ_STATE_REDIRECT,
 353};
 354
 355struct mlx5e_sq_wqe_info {
 356        u8  opcode;
 357};
 358
 359struct mlx5e_txqsq {
 360        /* data path */
 361
 362        /* dirtied @completion */
 363        u16                        cc;
 364        u32                        dma_fifo_cc;
 365        struct net_dim             dim; /* Adaptive Moderation */
 366
 367        /* dirtied @xmit */
 368        u16                        pc ____cacheline_aligned_in_smp;
 369        u32                        dma_fifo_pc;
 370
 371        struct mlx5e_cq            cq;
 372
 373        /* read only */
 374        struct mlx5_wq_cyc         wq;
 375        u32                        dma_fifo_mask;
 376        struct mlx5e_sq_stats     *stats;
 377        struct {
 378                struct mlx5e_sq_dma       *dma_fifo;
 379                struct mlx5e_tx_wqe_info  *wqe_info;
 380        } db;
 381        void __iomem              *uar_map;
 382        struct netdev_queue       *txq;
 383        u32                        sqn;
 384        u8                         min_inline_mode;
 385        struct device             *pdev;
 386        __be32                     mkey_be;
 387        unsigned long              state;
 388        struct hwtstamp_config    *tstamp;
 389        struct mlx5_clock         *clock;
 390
 391        /* control path */
 392        struct mlx5_wq_ctrl        wq_ctrl;
 393        struct mlx5e_channel      *channel;
 394        int                        txq_ix;
 395        u32                        rate_limit;
 396        struct mlx5e_txqsq_recover {
 397                struct work_struct         recover_work;
 398                u64                        last_recover;
 399        } recover;
 400} ____cacheline_aligned_in_smp;
 401
 402struct mlx5e_dma_info {
 403        struct page     *page;
 404        dma_addr_t      addr;
 405};
 406
 407struct mlx5e_xdp_info {
 408        struct xdp_frame      *xdpf;
 409        dma_addr_t            dma_addr;
 410        struct mlx5e_dma_info di;
 411};
 412
 413struct mlx5e_xdpsq {
 414        /* data path */
 415
 416        /* dirtied @completion */
 417        u16                        cc;
 418        bool                       redirect_flush;
 419
 420        /* dirtied @xmit */
 421        u16                        pc ____cacheline_aligned_in_smp;
 422        bool                       doorbell;
 423
 424        struct mlx5e_cq            cq;
 425
 426        /* read only */
 427        struct mlx5_wq_cyc         wq;
 428        struct mlx5e_xdpsq_stats  *stats;
 429        struct {
 430                struct mlx5e_xdp_info     *xdpi;
 431        } db;
 432        void __iomem              *uar_map;
 433        u32                        sqn;
 434        struct device             *pdev;
 435        __be32                     mkey_be;
 436        u8                         min_inline_mode;
 437        unsigned long              state;
 438        unsigned int               hw_mtu;
 439
 440        /* control path */
 441        struct mlx5_wq_ctrl        wq_ctrl;
 442        struct mlx5e_channel      *channel;
 443} ____cacheline_aligned_in_smp;
 444
 445struct mlx5e_icosq {
 446        /* data path */
 447
 448        /* dirtied @xmit */
 449        u16                        pc ____cacheline_aligned_in_smp;
 450
 451        struct mlx5e_cq            cq;
 452
 453        /* write@xmit, read@completion */
 454        struct {
 455                struct mlx5e_sq_wqe_info *ico_wqe;
 456        } db;
 457
 458        /* read only */
 459        struct mlx5_wq_cyc         wq;
 460        void __iomem              *uar_map;
 461        u32                        sqn;
 462        unsigned long              state;
 463
 464        /* control path */
 465        struct mlx5_wq_ctrl        wq_ctrl;
 466        struct mlx5e_channel      *channel;
 467} ____cacheline_aligned_in_smp;
 468
/* Return true when the cyclic WQ has at least @n free WQEBB slots.
 * @cc (consumer) and @pc (producer) are free-running u16 counters;
 * mlx5_wq_cyc_ctr2ix() masks their difference down to a queue index,
 * so (cc - pc) masked gives the free space — except when the counters
 * are equal, where the masked value is 0 but the queue is actually
 * empty, hence the explicit (cc == pc) check.
 */
static inline bool
mlx5e_wqc_has_room_for(struct mlx5_wq_cyc *wq, u16 cc, u16 pc, u16 n)
{
	return (mlx5_wq_cyc_ctr2ix(wq, cc - pc) >= n) || (cc == pc);
}
 474
 475struct mlx5e_wqe_frag_info {
 476        struct mlx5e_dma_info *di;
 477        u32 offset;
 478        bool last_in_page;
 479};
 480
 481struct mlx5e_umr_dma_info {
 482        struct mlx5e_dma_info  dma_info[MLX5_MPWRQ_PAGES_PER_WQE];
 483};
 484
 485struct mlx5e_mpw_info {
 486        struct mlx5e_umr_dma_info umr;
 487        u16 consumed_strides;
 488        DECLARE_BITMAP(xdp_xmit_bitmap, MLX5_MPWRQ_PAGES_PER_WQE);
 489};
 490
 491#define MLX5E_MAX_RX_FRAGS 4
 492
 493/* a single cache unit is capable to serve one napi call (for non-striding rq)
 494 * or a MPWQE (for striding rq).
 495 */
 496#define MLX5E_CACHE_UNIT        (MLX5_MPWRQ_PAGES_PER_WQE > NAPI_POLL_WEIGHT ? \
 497                                 MLX5_MPWRQ_PAGES_PER_WQE : NAPI_POLL_WEIGHT)
 498#define MLX5E_CACHE_SIZE        (4 * roundup_pow_of_two(MLX5E_CACHE_UNIT))
 499struct mlx5e_page_cache {
 500        u32 head;
 501        u32 tail;
 502        struct mlx5e_dma_info page_cache[MLX5E_CACHE_SIZE];
 503};
 504
 505struct mlx5e_rq;
 506typedef void (*mlx5e_fp_handle_rx_cqe)(struct mlx5e_rq*, struct mlx5_cqe64*);
 507typedef struct sk_buff *
 508(*mlx5e_fp_skb_from_cqe_mpwrq)(struct mlx5e_rq *rq, struct mlx5e_mpw_info *wi,
 509                               u16 cqe_bcnt, u32 head_offset, u32 page_idx);
 510typedef struct sk_buff *
 511(*mlx5e_fp_skb_from_cqe)(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe,
 512                         struct mlx5e_wqe_frag_info *wi, u32 cqe_bcnt);
 513typedef bool (*mlx5e_fp_post_rx_wqes)(struct mlx5e_rq *rq);
 514typedef void (*mlx5e_fp_dealloc_wqe)(struct mlx5e_rq*, u16);
 515
 516enum mlx5e_rq_flag {
 517        MLX5E_RQ_FLAG_XDP_XMIT = BIT(0),
 518};
 519
 520struct mlx5e_rq_frag_info {
 521        int frag_size;
 522        int frag_stride;
 523};
 524
 525struct mlx5e_rq_frags_info {
 526        struct mlx5e_rq_frag_info arr[MLX5E_MAX_RX_FRAGS];
 527        u8 num_frags;
 528        u8 log_num_frags;
 529        u8 wqe_bulk;
 530};
 531
 532struct mlx5e_rq {
 533        /* data path */
 534        union {
 535                struct {
 536                        struct mlx5_wq_cyc          wq;
 537                        struct mlx5e_wqe_frag_info *frags;
 538                        struct mlx5e_dma_info      *di;
 539                        struct mlx5e_rq_frags_info  info;
 540                        mlx5e_fp_skb_from_cqe       skb_from_cqe;
 541                } wqe;
 542                struct {
 543                        struct mlx5_wq_ll      wq;
 544                        struct mlx5e_umr_wqe   umr_wqe;
 545                        struct mlx5e_mpw_info *info;
 546                        mlx5e_fp_skb_from_cqe_mpwrq skb_from_cqe_mpwrq;
 547                        u16                    num_strides;
 548                        u8                     log_stride_sz;
 549                        bool                   umr_in_progress;
 550                } mpwqe;
 551        };
 552        struct {
 553                u16            headroom;
 554                u8             map_dir;   /* dma map direction */
 555        } buff;
 556
 557        struct mlx5e_channel  *channel;
 558        struct device         *pdev;
 559        struct net_device     *netdev;
 560        struct mlx5e_rq_stats *stats;
 561        struct mlx5e_cq        cq;
 562        struct mlx5e_page_cache page_cache;
 563        struct hwtstamp_config *tstamp;
 564        struct mlx5_clock      *clock;
 565
 566        mlx5e_fp_handle_rx_cqe handle_rx_cqe;
 567        mlx5e_fp_post_rx_wqes  post_wqes;
 568        mlx5e_fp_dealloc_wqe   dealloc_wqe;
 569
 570        unsigned long          state;
 571        int                    ix;
 572        unsigned int           hw_mtu;
 573
 574        struct net_dim         dim; /* Dynamic Interrupt Moderation */
 575
 576        /* XDP */
 577        struct bpf_prog       *xdp_prog;
 578        struct mlx5e_xdpsq     xdpsq;
 579        DECLARE_BITMAP(flags, 8);
 580        struct page_pool      *page_pool;
 581
 582        /* control */
 583        struct mlx5_wq_ctrl    wq_ctrl;
 584        __be32                 mkey_be;
 585        u8                     wq_type;
 586        u32                    rqn;
 587        struct mlx5_core_dev  *mdev;
 588        struct mlx5_core_mkey  umr_mkey;
 589
 590        /* XDP read-mostly */
 591        struct xdp_rxq_info    xdp_rxq;
 592} ____cacheline_aligned_in_smp;
 593
 594struct mlx5e_channel {
 595        /* data path */
 596        struct mlx5e_rq            rq;
 597        struct mlx5e_txqsq         sq[MLX5E_MAX_NUM_TC];
 598        struct mlx5e_icosq         icosq;   /* internal control operations */
 599        bool                       xdp;
 600        struct napi_struct         napi;
 601        struct device             *pdev;
 602        struct net_device         *netdev;
 603        __be32                     mkey_be;
 604        u8                         num_tc;
 605
 606        /* XDP_REDIRECT */
 607        struct mlx5e_xdpsq         xdpsq;
 608
 609        /* data path - accessed per napi poll */
 610        struct irq_desc *irq_desc;
 611        struct mlx5e_ch_stats     *stats;
 612
 613        /* control */
 614        struct mlx5e_priv         *priv;
 615        struct mlx5_core_dev      *mdev;
 616        struct hwtstamp_config    *tstamp;
 617        int                        ix;
 618        int                        cpu;
 619};
 620
 621struct mlx5e_channels {
 622        struct mlx5e_channel **c;
 623        unsigned int           num;
 624        struct mlx5e_params    params;
 625};
 626
 627struct mlx5e_channel_stats {
 628        struct mlx5e_ch_stats ch;
 629        struct mlx5e_sq_stats sq[MLX5E_MAX_NUM_TC];
 630        struct mlx5e_rq_stats rq;
 631        struct mlx5e_xdpsq_stats rq_xdpsq;
 632        struct mlx5e_xdpsq_stats xdpsq;
 633} ____cacheline_aligned_in_smp;
 634
 635enum {
 636        MLX5E_STATE_ASYNC_EVENTS_ENABLED,
 637        MLX5E_STATE_OPENED,
 638        MLX5E_STATE_DESTROYING,
 639};
 640
 641struct mlx5e_rqt {
 642        u32              rqtn;
 643        bool             enabled;
 644};
 645
 646struct mlx5e_tir {
 647        u32               tirn;
 648        struct mlx5e_rqt  rqt;
 649        struct list_head  list;
 650};
 651
 652enum {
 653        MLX5E_TC_PRIO = 0,
 654        MLX5E_NIC_PRIO
 655};
 656
 657struct mlx5e_priv {
 658        /* priv data path fields - start */
 659        struct mlx5e_txqsq *txq2sq[MLX5E_MAX_NUM_CHANNELS * MLX5E_MAX_NUM_TC];
 660        int channel_tc2txq[MLX5E_MAX_NUM_CHANNELS][MLX5E_MAX_NUM_TC];
 661#ifdef CONFIG_MLX5_CORE_EN_DCB
 662        struct mlx5e_dcbx_dp       dcbx_dp;
 663#endif
 664        /* priv data path fields - end */
 665
 666        u32                        msglevel;
 667        unsigned long              state;
 668        struct mutex               state_lock; /* Protects Interface state */
 669        struct mlx5e_rq            drop_rq;
 670
 671        struct mlx5e_channels      channels;
 672        u32                        tisn[MLX5E_MAX_NUM_TC];
 673        struct mlx5e_rqt           indir_rqt;
 674        struct mlx5e_tir           indir_tir[MLX5E_NUM_INDIR_TIRS];
 675        struct mlx5e_tir           inner_indir_tir[MLX5E_NUM_INDIR_TIRS];
 676        struct mlx5e_tir           direct_tir[MLX5E_MAX_NUM_CHANNELS];
 677        u32                        tx_rates[MLX5E_MAX_NUM_SQS];
 678
 679        struct mlx5e_flow_steering fs;
 680
 681        struct workqueue_struct    *wq;
 682        struct work_struct         update_carrier_work;
 683        struct work_struct         set_rx_mode_work;
 684        struct work_struct         tx_timeout_work;
 685        struct work_struct         update_stats_work;
 686
 687        struct mlx5_core_dev      *mdev;
 688        struct net_device         *netdev;
 689        struct mlx5e_stats         stats;
 690        struct mlx5e_channel_stats channel_stats[MLX5E_MAX_NUM_CHANNELS];
 691        u8                         max_opened_tc;
 692        struct hwtstamp_config     tstamp;
 693        u16                        q_counter;
 694        u16                        drop_rq_q_counter;
 695#ifdef CONFIG_MLX5_CORE_EN_DCB
 696        struct mlx5e_dcbx          dcbx;
 697#endif
 698
 699        const struct mlx5e_profile *profile;
 700        void                      *ppriv;
 701#ifdef CONFIG_MLX5_EN_IPSEC
 702        struct mlx5e_ipsec        *ipsec;
 703#endif
 704#ifdef CONFIG_MLX5_EN_TLS
 705        struct mlx5e_tls          *tls;
 706#endif
 707};
 708
 709struct mlx5e_profile {
 710        int     (*init)(struct mlx5_core_dev *mdev,
 711                        struct net_device *netdev,
 712                        const struct mlx5e_profile *profile, void *ppriv);
 713        void    (*cleanup)(struct mlx5e_priv *priv);
 714        int     (*init_rx)(struct mlx5e_priv *priv);
 715        void    (*cleanup_rx)(struct mlx5e_priv *priv);
 716        int     (*init_tx)(struct mlx5e_priv *priv);
 717        void    (*cleanup_tx)(struct mlx5e_priv *priv);
 718        void    (*enable)(struct mlx5e_priv *priv);
 719        void    (*disable)(struct mlx5e_priv *priv);
 720        void    (*update_stats)(struct mlx5e_priv *priv);
 721        void    (*update_carrier)(struct mlx5e_priv *priv);
 722        struct {
 723                mlx5e_fp_handle_rx_cqe handle_rx_cqe;
 724                mlx5e_fp_handle_rx_cqe handle_rx_cqe_mpwqe;
 725        } rx_handlers;
 726        int     max_tc;
 727};
 728
 729void mlx5e_build_ptys2ethtool_map(void);
 730
 731u16 mlx5e_select_queue(struct net_device *dev, struct sk_buff *skb,
 732                       struct net_device *sb_dev,
 733                       select_queue_fallback_t fallback);
 734netdev_tx_t mlx5e_xmit(struct sk_buff *skb, struct net_device *dev);
 735netdev_tx_t mlx5e_sq_xmit(struct mlx5e_txqsq *sq, struct sk_buff *skb,
 736                          struct mlx5e_tx_wqe *wqe, u16 pi);
 737
 738void mlx5e_completion_event(struct mlx5_core_cq *mcq);
 739void mlx5e_cq_error_event(struct mlx5_core_cq *mcq, enum mlx5_event event);
 740int mlx5e_napi_poll(struct napi_struct *napi, int budget);
 741bool mlx5e_poll_tx_cq(struct mlx5e_cq *cq, int napi_budget);
 742int mlx5e_poll_rx_cq(struct mlx5e_cq *cq, int budget);
 743void mlx5e_free_txqsq_descs(struct mlx5e_txqsq *sq);
 744
 745bool mlx5e_check_fragmented_striding_rq_cap(struct mlx5_core_dev *mdev);
 746bool mlx5e_striding_rq_possible(struct mlx5_core_dev *mdev,
 747                                struct mlx5e_params *params);
 748
 749void mlx5e_page_dma_unmap(struct mlx5e_rq *rq, struct mlx5e_dma_info *dma_info);
 750void mlx5e_page_release(struct mlx5e_rq *rq, struct mlx5e_dma_info *dma_info,
 751                        bool recycle);
 752void mlx5e_handle_rx_cqe(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe);
 753void mlx5e_handle_rx_cqe_mpwrq(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe);
 754bool mlx5e_post_rx_wqes(struct mlx5e_rq *rq);
 755bool mlx5e_post_rx_mpwqes(struct mlx5e_rq *rq);
 756void mlx5e_dealloc_rx_wqe(struct mlx5e_rq *rq, u16 ix);
 757void mlx5e_dealloc_rx_mpwqe(struct mlx5e_rq *rq, u16 ix);
 758struct sk_buff *
 759mlx5e_skb_from_cqe_mpwrq_linear(struct mlx5e_rq *rq, struct mlx5e_mpw_info *wi,
 760                                u16 cqe_bcnt, u32 head_offset, u32 page_idx);
 761struct sk_buff *
 762mlx5e_skb_from_cqe_mpwrq_nonlinear(struct mlx5e_rq *rq, struct mlx5e_mpw_info *wi,
 763                                   u16 cqe_bcnt, u32 head_offset, u32 page_idx);
 764struct sk_buff *
 765mlx5e_skb_from_cqe_linear(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe,
 766                          struct mlx5e_wqe_frag_info *wi, u32 cqe_bcnt);
 767struct sk_buff *
 768mlx5e_skb_from_cqe_nonlinear(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe,
 769                             struct mlx5e_wqe_frag_info *wi, u32 cqe_bcnt);
 770
 771void mlx5e_update_stats(struct mlx5e_priv *priv);
 772
 773void mlx5e_init_l2_addr(struct mlx5e_priv *priv);
 774int mlx5e_self_test_num(struct mlx5e_priv *priv);
 775void mlx5e_self_test(struct net_device *ndev, struct ethtool_test *etest,
 776                     u64 *buf);
 777void mlx5e_set_rx_mode_work(struct work_struct *work);
 778
 779int mlx5e_hwstamp_set(struct mlx5e_priv *priv, struct ifreq *ifr);
 780int mlx5e_hwstamp_get(struct mlx5e_priv *priv, struct ifreq *ifr);
 781int mlx5e_modify_rx_cqe_compression_locked(struct mlx5e_priv *priv, bool val);
 782
 783int mlx5e_vlan_rx_add_vid(struct net_device *dev, __always_unused __be16 proto,
 784                          u16 vid);
 785int mlx5e_vlan_rx_kill_vid(struct net_device *dev, __always_unused __be16 proto,
 786                           u16 vid);
 787void mlx5e_timestamp_init(struct mlx5e_priv *priv);
 788
/* Argument bundle for mlx5e_redirect_rqt().  When @is_rss is false the
 * table entries point at the single direct @rqn; otherwise they are
 * presumably derived from @rss.channels using hash function @rss.hfunc
 * (see mlx5e_redirect_rqt() implementation for the exact fill rule).
 */
struct mlx5e_redirect_rqt_param {
	bool is_rss;
	union {
		u32 rqn; /* Direct RQN (Non-RSS) */
		struct {
			u8 hfunc;
			struct mlx5e_channels *channels;
		} rss; /* RSS data */
	};
};
 799
/* Repoint RQT 'rqtn' (of size 'sz') according to rrp (direct or RSS). */
int mlx5e_redirect_rqt(struct mlx5e_priv *priv, u32 rqtn, int sz,
		       struct mlx5e_redirect_rqt_param rrp);
void mlx5e_build_indir_tir_ctx_hash(struct mlx5e_params *params,
				    enum mlx5e_traffic_types tt,
				    void *tirc, bool inner);

/* "_locked" variants assume the caller already holds the device state lock. */
int mlx5e_open_locked(struct net_device *netdev);
int mlx5e_close_locked(struct net_device *netdev);

int mlx5e_open_channels(struct mlx5e_priv *priv,
			struct mlx5e_channels *chs);
void mlx5e_close_channels(struct mlx5e_channels *chs);

/* Function pointer to be used to modify HW settings while
 * switching channels
 */
typedef int (*mlx5e_fp_hw_modify)(struct mlx5e_priv *priv);
void mlx5e_switch_priv_channels(struct mlx5e_priv *priv,
				struct mlx5e_channels *new_chs,
				mlx5e_fp_hw_modify hw_modify);
void mlx5e_activate_priv_channels(struct mlx5e_priv *priv);
void mlx5e_deactivate_priv_channels(struct mlx5e_priv *priv);

/* Parameter builders for indirection tables, CQ moderation and RQ type */
void mlx5e_build_default_indir_rqt(u32 *indirection_rqt, int len,
				   int num_channels);
void mlx5e_set_tx_cq_mode_params(struct mlx5e_params *params,
				 u8 cq_period_mode);
void mlx5e_set_rx_cq_mode_params(struct mlx5e_params *params,
				 u8 cq_period_mode);
void mlx5e_set_rq_type(struct mlx5_core_dev *mdev, struct mlx5e_params *params);
void mlx5e_init_rq_type_params(struct mlx5_core_dev *mdev,
			       struct mlx5e_params *params);
 832
 833static inline bool mlx5e_tunnel_inner_ft_supported(struct mlx5_core_dev *mdev)
 834{
 835        return (MLX5_CAP_ETH(mdev, tunnel_stateless_gre) &&
 836                MLX5_CAP_FLOWTABLE_NIC_RX(mdev, ft_field_support.inner_ip_version));
 837}
 838
 839static inline void mlx5e_sq_fetch_wqe(struct mlx5e_txqsq *sq,
 840                                      struct mlx5e_tx_wqe **wqe,
 841                                      u16 *pi)
 842{
 843        struct mlx5_wq_cyc *wq = &sq->wq;
 844
 845        *pi  = mlx5_wq_cyc_ctr2ix(wq, sq->pc);
 846        *wqe = mlx5_wq_cyc_get_wqe(wq, *pi);
 847        memset(*wqe, 0, sizeof(**wqe));
 848}
 849
 850static inline
 851struct mlx5e_tx_wqe *mlx5e_post_nop(struct mlx5_wq_cyc *wq, u32 sqn, u16 *pc)
 852{
 853        u16                         pi   = mlx5_wq_cyc_ctr2ix(wq, *pc);
 854        struct mlx5e_tx_wqe        *wqe  = mlx5_wq_cyc_get_wqe(wq, pi);
 855        struct mlx5_wqe_ctrl_seg   *cseg = &wqe->ctrl;
 856
 857        memset(cseg, 0, sizeof(*cseg));
 858
 859        cseg->opmod_idx_opcode = cpu_to_be32((*pc << 8) | MLX5_OPCODE_NOP);
 860        cseg->qpn_ds           = cpu_to_be32((sqn << 8) | 0x01);
 861
 862        (*pc)++;
 863
 864        return wqe;
 865}
 866
/* Publish posted WQEs to the device: update the doorbell record with
 * the producer counter, then ring the doorbell by writing the control
 * segment to the UAR page. The two barriers enforce the required
 * ordering; do not reorder these statements.
 */
static inline
void mlx5e_notify_hw(struct mlx5_wq_cyc *wq, u16 pc,
		     void __iomem *uar_map,
		     struct mlx5_wqe_ctrl_seg *ctrl)
{
	/* Request a completion (CQ update) for this WQE. */
	ctrl->fm_ce_se = MLX5_WQE_CTRL_CQ_UPDATE;
	/* ensure wqe is visible to device before updating doorbell record */
	dma_wmb();

	*wq->db = cpu_to_be32(pc);

	/* ensure doorbell record is visible to device before ringing the
	 * doorbell
	 */
	wmb();

	/* Ring the doorbell: 64-bit write of the control segment to the UAR. */
	mlx5_write64((__be32 *)ctrl, uar_map, NULL);
}
 885
 886static inline void mlx5e_cq_arm(struct mlx5e_cq *cq)
 887{
 888        struct mlx5_core_cq *mcq;
 889
 890        mcq = &cq->mcq;
 891        mlx5_cq_arm(mcq, MLX5_CQ_DB_REQ_NOT, mcq->uar->map, cq->wq.cc);
 892}
 893
/* ethtool ops table; DCB (dcbnl) ops and helpers are compiled in only
 * when CONFIG_MLX5_CORE_EN_DCB is set.
 */
extern const struct ethtool_ops mlx5e_ethtool_ops;
#ifdef CONFIG_MLX5_CORE_EN_DCB
extern const struct dcbnl_rtnl_ops mlx5e_dcbnl_ops;
int mlx5e_dcbnl_ieee_setets_core(struct mlx5e_priv *priv, struct ieee_ets *ets);
void mlx5e_dcbnl_initialize(struct mlx5e_priv *priv);
void mlx5e_dcbnl_init_app(struct mlx5e_priv *priv);
void mlx5e_dcbnl_delete_app(struct mlx5e_priv *priv);
#endif
 902
/* TIR and mdev-level resource create/destroy; each create_* pairs with
 * the matching destroy_*.
 */
int mlx5e_create_tir(struct mlx5_core_dev *mdev,
		     struct mlx5e_tir *tir, u32 *in, int inlen);
void mlx5e_destroy_tir(struct mlx5_core_dev *mdev,
		       struct mlx5e_tir *tir);
int mlx5e_create_mdev_resources(struct mlx5_core_dev *mdev);
void mlx5e_destroy_mdev_resources(struct mlx5_core_dev *mdev);
int mlx5e_refresh_tirs(struct mlx5e_priv *priv, bool enable_uc_lb);

/* common netdev helpers */
void mlx5e_create_q_counters(struct mlx5e_priv *priv);
void mlx5e_destroy_q_counters(struct mlx5e_priv *priv);
int mlx5e_open_drop_rq(struct mlx5e_priv *priv,
		       struct mlx5e_rq *drop_rq);
void mlx5e_close_drop_rq(struct mlx5e_rq *drop_rq);

int mlx5e_create_indirect_rqt(struct mlx5e_priv *priv);

int mlx5e_create_indirect_tirs(struct mlx5e_priv *priv, bool inner_ttc);
void mlx5e_destroy_indirect_tirs(struct mlx5e_priv *priv, bool inner_ttc);

int mlx5e_create_direct_rqts(struct mlx5e_priv *priv);
void mlx5e_destroy_direct_rqts(struct mlx5e_priv *priv);
int mlx5e_create_direct_tirs(struct mlx5e_priv *priv);
void mlx5e_destroy_direct_tirs(struct mlx5e_priv *priv);
void mlx5e_destroy_rqt(struct mlx5e_priv *priv, struct mlx5e_rqt *rqt);

int mlx5e_create_tis(struct mlx5_core_dev *mdev, int tc,
		     u32 underlay_qpn, u32 *tisn);
void mlx5e_destroy_tis(struct mlx5_core_dev *mdev, u32 tisn);

int mlx5e_create_tises(struct mlx5e_priv *priv);
void mlx5e_cleanup_nic_tx(struct mlx5e_priv *priv);
int mlx5e_close(struct net_device *netdev);
int mlx5e_open(struct net_device *netdev);

void mlx5e_queue_update_stats(struct mlx5e_priv *priv);
int mlx5e_bits_invert(unsigned long a, int size);

/* MTU change with a driver-supplied callback to apply the HW MTU */
typedef int (*change_hw_mtu_cb)(struct mlx5e_priv *priv);
int mlx5e_change_mtu(struct net_device *netdev, int new_mtu,
		     change_hw_mtu_cb set_mtu_cb);
 944
/* ethtool helpers: backing implementations shared by the ethtool_ops
 * table (and, presumably, by representor ops — confirm at callers).
 */
void mlx5e_ethtool_get_drvinfo(struct mlx5e_priv *priv,
			       struct ethtool_drvinfo *drvinfo);
void mlx5e_ethtool_get_strings(struct mlx5e_priv *priv,
			       uint32_t stringset, uint8_t *data);
int mlx5e_ethtool_get_sset_count(struct mlx5e_priv *priv, int sset);
void mlx5e_ethtool_get_ethtool_stats(struct mlx5e_priv *priv,
				     struct ethtool_stats *stats, u64 *data);
void mlx5e_ethtool_get_ringparam(struct mlx5e_priv *priv,
				 struct ethtool_ringparam *param);
int mlx5e_ethtool_set_ringparam(struct mlx5e_priv *priv,
				struct ethtool_ringparam *param);
void mlx5e_ethtool_get_channels(struct mlx5e_priv *priv,
				struct ethtool_channels *ch);
int mlx5e_ethtool_set_channels(struct mlx5e_priv *priv,
			       struct ethtool_channels *ch);
int mlx5e_ethtool_get_coalesce(struct mlx5e_priv *priv,
			       struct ethtool_coalesce *coal);
int mlx5e_ethtool_set_coalesce(struct mlx5e_priv *priv,
			       struct ethtool_coalesce *coal);
u32 mlx5e_ethtool_get_rxfh_key_size(struct mlx5e_priv *priv);
u32 mlx5e_ethtool_get_rxfh_indir_size(struct mlx5e_priv *priv);
int mlx5e_ethtool_get_ts_info(struct mlx5e_priv *priv,
			      struct ethtool_ts_info *info);
int mlx5e_ethtool_flash_device(struct mlx5e_priv *priv,
			       struct ethtool_flash *flash);
 971
/* mlx5e generic netdev management API */
int mlx5e_netdev_init(struct net_device *netdev,
		      struct mlx5e_priv *priv,
		      struct mlx5_core_dev *mdev,
		      const struct mlx5e_profile *profile,
		      void *ppriv);
void mlx5e_netdev_cleanup(struct net_device *netdev, struct mlx5e_priv *priv);
/* Allocate a net_device driven by 'profile' with 'nch' channels. */
struct net_device*
mlx5e_create_netdev(struct mlx5_core_dev *mdev, const struct mlx5e_profile *profile,
		    int nch, void *ppriv);
int mlx5e_attach_netdev(struct mlx5e_priv *priv);
void mlx5e_detach_netdev(struct mlx5e_priv *priv);
void mlx5e_destroy_netdev(struct mlx5e_priv *priv);
/* Parameter/defaults builders used during netdev creation */
void mlx5e_build_nic_params(struct mlx5_core_dev *mdev,
			    struct mlx5e_params *params,
			    u16 max_channels, u16 mtu);
void mlx5e_build_rq_params(struct mlx5_core_dev *mdev,
			   struct mlx5e_params *params);
void mlx5e_build_rss_params(struct mlx5e_params *params);
u8 mlx5e_params_calculate_tx_min_inline(struct mlx5_core_dev *mdev);
/* DIM (dynamic interrupt moderation) work handlers */
void mlx5e_rx_dim_work(struct work_struct *work);
void mlx5e_tx_dim_work(struct work_struct *work);
#endif /* __MLX5_EN_H__ */
 995