/* linux/drivers/net/ethernet/mellanox/mlx5/core/en.h */
   1/*
   2 * Copyright (c) 2015-2016, Mellanox Technologies. All rights reserved.
   3 *
   4 * This software is available to you under a choice of one of two
   5 * licenses.  You may choose to be licensed under the terms of the GNU
   6 * General Public License (GPL) Version 2, available from the file
   7 * COPYING in the main directory of this source tree, or the
   8 * OpenIB.org BSD license below:
   9 *
  10 *     Redistribution and use in source and binary forms, with or
  11 *     without modification, are permitted provided that the following
  12 *     conditions are met:
  13 *
  14 *      - Redistributions of source code must retain the above
  15 *        copyright notice, this list of conditions and the following
  16 *        disclaimer.
  17 *
  18 *      - Redistributions in binary form must reproduce the above
  19 *        copyright notice, this list of conditions and the following
  20 *        disclaimer in the documentation and/or other materials
  21 *        provided with the distribution.
  22 *
  23 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
  24 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
  25 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
  26 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
  27 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
  28 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
  29 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
  30 * SOFTWARE.
  31 */
  32#ifndef __MLX5_EN_H__
  33#define __MLX5_EN_H__
  34
  35#include <linux/if_vlan.h>
  36#include <linux/etherdevice.h>
  37#include <linux/timecounter.h>
  38#include <linux/net_tstamp.h>
  39#include <linux/ptp_clock_kernel.h>
  40#include <linux/mlx5/driver.h>
  41#include <linux/mlx5/qp.h>
  42#include <linux/mlx5/cq.h>
  43#include <linux/mlx5/port.h>
  44#include <linux/mlx5/vport.h>
  45#include <linux/mlx5/transobj.h>
  46#include <linux/rhashtable.h>
  47#include <net/switchdev.h>
  48#include "wq.h"
  49#include "mlx5_core.h"
  50#include "en_stats.h"
  51
  52#define MLX5_SET_CFG(p, f, v) MLX5_SET(create_flow_group_in, p, f, v)
  53
  54#define MLX5E_MAX_NUM_TC        8
  55
  56#define MLX5E_PARAMS_MINIMUM_LOG_SQ_SIZE                0x6
  57#define MLX5E_PARAMS_DEFAULT_LOG_SQ_SIZE                0xa
  58#define MLX5E_PARAMS_MAXIMUM_LOG_SQ_SIZE                0xd
  59
  60#define MLX5E_PARAMS_MINIMUM_LOG_RQ_SIZE                0x1
  61#define MLX5E_PARAMS_DEFAULT_LOG_RQ_SIZE                0xa
  62#define MLX5E_PARAMS_MAXIMUM_LOG_RQ_SIZE                0xd
  63
  64#define MLX5E_PARAMS_MINIMUM_LOG_RQ_SIZE_MPW            0x1
  65#define MLX5E_PARAMS_DEFAULT_LOG_RQ_SIZE_MPW            0x3
  66#define MLX5E_PARAMS_MAXIMUM_LOG_RQ_SIZE_MPW            0x6
  67
  68#define MLX5_RX_HEADROOM NET_SKB_PAD
  69
  70#define MLX5_MPWRQ_LOG_STRIDE_SIZE              6  /* >= 6, HW restriction */
  71#define MLX5_MPWRQ_LOG_STRIDE_SIZE_CQE_COMPRESS 8  /* >= 6, HW restriction */
  72#define MLX5_MPWRQ_LOG_WQE_SZ                   18
  73#define MLX5_MPWRQ_WQE_PAGE_ORDER  (MLX5_MPWRQ_LOG_WQE_SZ - PAGE_SHIFT > 0 ? \
  74                                    MLX5_MPWRQ_LOG_WQE_SZ - PAGE_SHIFT : 0)
  75#define MLX5_MPWRQ_PAGES_PER_WQE                BIT(MLX5_MPWRQ_WQE_PAGE_ORDER)
  76#define MLX5_MPWRQ_STRIDES_PER_PAGE             (MLX5_MPWRQ_NUM_STRIDES >> \
  77                                                 MLX5_MPWRQ_WQE_PAGE_ORDER)
  78
  79#define MLX5_MTT_OCTW(npages) (ALIGN(npages, 8) / 2)
  80#define MLX5E_REQUIRED_MTTS(rqs, wqes)\
  81        (rqs * wqes * ALIGN(MLX5_MPWRQ_PAGES_PER_WQE, 8))
  82#define MLX5E_VALID_NUM_MTTS(num_mtts) (MLX5_MTT_OCTW(num_mtts) <= U16_MAX)
  83
  84#define MLX5_UMR_ALIGN                          (2048)
  85#define MLX5_MPWRQ_SMALL_PACKET_THRESHOLD       (128)
  86
  87#define MLX5E_PARAMS_DEFAULT_LRO_WQE_SZ                 (64 * 1024)
  88#define MLX5E_DEFAULT_LRO_TIMEOUT                       32
  89#define MLX5E_LRO_TIMEOUT_ARR_SIZE                      4
  90
  91#define MLX5E_PARAMS_DEFAULT_RX_CQ_MODERATION_USEC      0x10
  92#define MLX5E_PARAMS_DEFAULT_RX_CQ_MODERATION_USEC_FROM_CQE 0x3
  93#define MLX5E_PARAMS_DEFAULT_RX_CQ_MODERATION_PKTS      0x20
  94#define MLX5E_PARAMS_DEFAULT_TX_CQ_MODERATION_USEC      0x10
  95#define MLX5E_PARAMS_DEFAULT_TX_CQ_MODERATION_PKTS      0x20
  96#define MLX5E_PARAMS_DEFAULT_MIN_RX_WQES                0x80
  97#define MLX5E_PARAMS_DEFAULT_MIN_RX_WQES_MPW            0x2
  98
  99#define MLX5E_LOG_INDIR_RQT_SIZE       0x7
 100#define MLX5E_INDIR_RQT_SIZE           BIT(MLX5E_LOG_INDIR_RQT_SIZE)
 101#define MLX5E_MAX_NUM_CHANNELS         (MLX5E_INDIR_RQT_SIZE >> 1)
 102#define MLX5E_MAX_NUM_SQS              (MLX5E_MAX_NUM_CHANNELS * MLX5E_MAX_NUM_TC)
 103#define MLX5E_TX_CQ_POLL_BUDGET        128
 104#define MLX5E_UPDATE_STATS_INTERVAL    200 /* msecs */
 105#define MLX5E_SQ_BF_BUDGET             16
 106
 107#define MLX5E_ICOSQ_MAX_WQEBBS \
 108        (DIV_ROUND_UP(sizeof(struct mlx5e_umr_wqe), MLX5_SEND_WQE_BB))
 109
 110#define MLX5E_XDP_MIN_INLINE (ETH_HLEN + VLAN_HLEN)
 111#define MLX5E_XDP_IHS_DS_COUNT \
 112        DIV_ROUND_UP(MLX5E_XDP_MIN_INLINE - 2, MLX5_SEND_WQE_DS)
 113#define MLX5E_XDP_TX_DS_COUNT \
 114        (MLX5E_XDP_IHS_DS_COUNT + \
 115         (sizeof(struct mlx5e_tx_wqe) / MLX5_SEND_WQE_DS) + 1 /* SG DS */)
 116#define MLX5E_XDP_TX_WQEBBS \
 117        DIV_ROUND_UP(MLX5E_XDP_TX_DS_COUNT, MLX5_SEND_WQEBB_NUM_DS)
 118
 119#define MLX5E_NUM_MAIN_GROUPS 9
 120
 121static inline u16 mlx5_min_rx_wqes(int wq_type, u32 wq_size)
 122{
 123        switch (wq_type) {
 124        case MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ:
 125                return min_t(u16, MLX5E_PARAMS_DEFAULT_MIN_RX_WQES_MPW,
 126                             wq_size / 2);
 127        default:
 128                return min_t(u16, MLX5E_PARAMS_DEFAULT_MIN_RX_WQES,
 129                             wq_size / 2);
 130        }
 131}
 132
 133static inline int mlx5_min_log_rq_size(int wq_type)
 134{
 135        switch (wq_type) {
 136        case MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ:
 137                return MLX5E_PARAMS_MINIMUM_LOG_RQ_SIZE_MPW;
 138        default:
 139                return MLX5E_PARAMS_MINIMUM_LOG_RQ_SIZE;
 140        }
 141}
 142
 143static inline int mlx5_max_log_rq_size(int wq_type)
 144{
 145        switch (wq_type) {
 146        case MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ:
 147                return MLX5E_PARAMS_MAXIMUM_LOG_RQ_SIZE_MPW;
 148        default:
 149                return MLX5E_PARAMS_MAXIMUM_LOG_RQ_SIZE;
 150        }
 151}
 152
/* TX minimum-inline requirement modes.
 * NOTE(review): MLX5_INLINE_MODE_NOT_REQUIRED breaks the MLX5E_ prefix
 * convention used by its siblings — consider renaming for consistency.
 */
enum {
	MLX5E_INLINE_MODE_L2,
	MLX5E_INLINE_MODE_VPORT_CONTEXT,
	MLX5_INLINE_MODE_NOT_REQUIRED,
};
 158
/* Send WQE header: control segment followed by an Ethernet segment. */
struct mlx5e_tx_wqe {
	struct mlx5_wqe_ctrl_seg ctrl;
	struct mlx5_wqe_eth_seg  eth;
};

/* Receive WQE: next-segment header plus one data (scatter) segment. */
struct mlx5e_rx_wqe {
	struct mlx5_wqe_srq_next_seg  next;
	struct mlx5_wqe_data_seg      data;
};

/* UMR (user-mode memory registration) WQE layout: control, UMR control,
 * memory-key segment and one data segment.
 */
struct mlx5e_umr_wqe {
	struct mlx5_wqe_ctrl_seg       ctrl;
	struct mlx5_wqe_umr_ctrl_seg   uctrl;
	struct mlx5_mkey_seg           mkc;
	struct mlx5_wqe_data_seg       data;
};
 175
 176static const char mlx5e_priv_flags[][ETH_GSTRING_LEN] = {
 177        "rx_cqe_moder",
 178};
 179
 180enum mlx5e_priv_flag {
 181        MLX5E_PFLAG_RX_CQE_BASED_MODER = (1 << 0),
 182};
 183
 184#define MLX5E_SET_PRIV_FLAG(priv, pflag, enable)    \
 185        do {                                        \
 186                if (enable)                         \
 187                        priv->pflags |= pflag;      \
 188                else                                \
 189                        priv->pflags &= ~pflag;     \
 190        } while (0)
 191
 192#ifdef CONFIG_MLX5_CORE_EN_DCB
 193#define MLX5E_MAX_BW_ALLOC 100 /* Max percentage of BW allocation */
 194#endif
 195
 196struct mlx5e_cq_moder {
 197        u16 usec;
 198        u16 pkts;
 199};
 200
/* Run-time tunable parameters for one netdev instance (queue sizes, RSS,
 * moderation, offloads). Mostly written from ethtool/ndo control paths.
 */
struct mlx5e_params {
	u8  log_sq_size;                /* log2 SQ size */
	u8  rq_wq_type;                 /* MLX5_WQ_TYPE_* of the RQs */
	u8  mpwqe_log_stride_sz;
	u8  mpwqe_log_num_strides;
	u8  log_rq_size;                /* log2 RQ size */
	u16 num_channels;
	u8  num_tc;
	u8  rx_cq_period_mode;
	bool rx_cqe_compress_admin;     /* admin-requested CQE compression */
	bool rx_cqe_compress;           /* currently effective setting */
	struct mlx5e_cq_moder rx_cq_moderation;
	struct mlx5e_cq_moder tx_cq_moderation;
	u16 min_rx_wqes;
	bool lro_en;
	u32 lro_wqe_sz;
	u16 tx_max_inline;
	u8  tx_min_inline_mode;
	u8  rss_hfunc;                  /* RSS hash function selector */
	u8  toeplitz_hash_key[40];
	u32 indirection_rqt[MLX5E_INDIR_RQT_SIZE];
	bool vlan_strip_disable;
#ifdef CONFIG_MLX5_CORE_EN_DCB
	struct ieee_ets ets;
#endif
	bool rx_am_enabled;             /* RX adaptive moderation on/off */
	u32 lro_timeout;
};
 229
/* Hardware timestamping state: a timecounter/cyclecounter layered over the
 * device clock, plus the PTP clock registration.
 */
struct mlx5e_tstamp {
	rwlock_t                   lock;   /* presumably serializes clock/cycles access — confirm at call sites */
	struct cyclecounter        cycles;
	struct timecounter         clock;
	struct hwtstamp_config     hwtstamp_config;
	u32                        nominal_c_mult;
	unsigned long              overflow_period;   /* period of overflow_work, presumably to catch counter wrap */
	struct delayed_work        overflow_work;
	struct mlx5_core_dev      *mdev;
	struct ptp_clock          *ptp;
	struct ptp_clock_info      ptp_info;
};
 242
 243enum {
 244        MLX5E_RQ_STATE_ENABLED,
 245        MLX5E_RQ_STATE_UMR_WQE_IN_PROGRESS,
 246        MLX5E_RQ_STATE_AM,
 247};
 248
/* Completion queue wrapper shared by RQs and SQs; fields are grouped by the
 * path that touches them (per-CQE, per-NAPI-poll, control).
 */
struct mlx5e_cq {
	/* data path - accessed per cqe */
	struct mlx5_cqwq           wq;

	/* data path - accessed per napi poll */
	u16                        event_ctr;
	struct napi_struct        *napi;
	struct mlx5_core_cq        mcq;
	struct mlx5e_channel      *channel;
	struct mlx5e_priv         *priv;

	/* cqe decompression */
	struct mlx5_cqe64          title;      /* current "title" CQE being expanded */
	struct mlx5_mini_cqe8      mini_arr[MLX5_MINI_CQE_ARRAY_SIZE];
	u8                         mini_arr_idx;
	u16                        decmprs_left;
	u16                        decmprs_wqe_counter;

	/* control */
	struct mlx5_wq_ctrl        wq_ctrl;
} ____cacheline_aligned_in_smp;
 270
 271struct mlx5e_rq;
 272typedef void (*mlx5e_fp_handle_rx_cqe)(struct mlx5e_rq *rq,
 273                                       struct mlx5_cqe64 *cqe);
 274typedef int (*mlx5e_fp_alloc_wqe)(struct mlx5e_rq *rq, struct mlx5e_rx_wqe *wqe,
 275                                  u16 ix);
 276
 277typedef void (*mlx5e_fp_dealloc_wqe)(struct mlx5e_rq *rq, u16 ix);
 278
 279struct mlx5e_dma_info {
 280        struct page     *page;
 281        dma_addr_t      addr;
 282};
 283
 284struct mlx5e_rx_am_stats {
 285        int ppms; /* packets per msec */
 286        int epms; /* events per msec */
 287};
 288
 289struct mlx5e_rx_am_sample {
 290        ktime_t         time;
 291        unsigned int    pkt_ctr;
 292        u16             event_ctr;
 293};
 294
 295struct mlx5e_rx_am { /* Adaptive Moderation */
 296        u8                                      state;
 297        struct mlx5e_rx_am_stats                prev_stats;
 298        struct mlx5e_rx_am_sample               start_sample;
 299        struct work_struct                      work;
 300        u8                                      profile_ix;
 301        u8                                      mode;
 302        u8                                      tune_state;
 303        u8                                      steps_right;
 304        u8                                      steps_left;
 305        u8                                      tired;
 306};
 307
 308/* a single cache unit is capable to serve one napi call (for non-striding rq)
 309 * or a MPWQE (for striding rq).
 310 */
 311#define MLX5E_CACHE_UNIT        (MLX5_MPWRQ_PAGES_PER_WQE > NAPI_POLL_WEIGHT ? \
 312                                 MLX5_MPWRQ_PAGES_PER_WQE : NAPI_POLL_WEIGHT)
 313#define MLX5E_CACHE_SIZE        (2 * roundup_pow_of_two(MLX5E_CACHE_UNIT))
 314struct mlx5e_page_cache {
 315        u32 head;
 316        u32 tail;
 317        struct mlx5e_dma_info page_cache[MLX5E_CACHE_SIZE];
 318};
 319
/* Receive queue. The anonymous union holds per-WQE buffer bookkeeping:
 * dma_info for the linked-list RQ type, mpwqe for the striding RQ type
 * (which variant is live is determined by wq_type).
 */
struct mlx5e_rq {
	/* data path */
	struct mlx5_wq_ll      wq;

	union {
		struct mlx5e_dma_info *dma_info;
		struct {
			struct mlx5e_mpw_info *info;
			void                  *mtt_no_align;
			u32                    mtt_offset;
		} mpwqe;
	};
	struct {
		u8             page_order;
		u32            wqe_sz;    /* wqe data buffer size */
		u8             map_dir;   /* dma map direction */
	} buff;
	__be32                 mkey_be;

	struct device         *pdev;
	struct net_device     *netdev;
	struct mlx5e_tstamp   *tstamp;
	struct mlx5e_rq_stats  stats;
	struct mlx5e_cq        cq;
	struct mlx5e_page_cache page_cache;

	/* per-wq-type handlers, set at RQ creation */
	mlx5e_fp_handle_rx_cqe handle_rx_cqe;
	mlx5e_fp_alloc_wqe     alloc_wqe;
	mlx5e_fp_dealloc_wqe   dealloc_wqe;

	unsigned long          state;   /* MLX5E_RQ_STATE_* bits */
	int                    ix;      /* channel index */

	struct mlx5e_rx_am     am; /* Adaptive Moderation */
	struct bpf_prog       *xdp_prog;

	/* control */
	struct mlx5_wq_ctrl    wq_ctrl;
	u8                     wq_type;
	u32                    mpwqe_stride_sz;
	u32                    mpwqe_num_strides;
	u32                    rqn;     /* HW RQ number */
	struct mlx5e_channel  *channel;
	struct mlx5e_priv     *priv;
} ____cacheline_aligned_in_smp;
 365
 366struct mlx5e_umr_dma_info {
 367        __be64                *mtt;
 368        dma_addr_t             mtt_addr;
 369        struct mlx5e_dma_info  dma_info[MLX5_MPWRQ_PAGES_PER_WQE];
 370        struct mlx5e_umr_wqe   wqe;
 371};
 372
 373struct mlx5e_mpw_info {
 374        struct mlx5e_umr_dma_info umr;
 375        u16 consumed_strides;
 376        u16 skbs_frags[MLX5_MPWRQ_PAGES_PER_WQE];
 377};
 378
 379struct mlx5e_tx_wqe_info {
 380        u32 num_bytes;
 381        u8  num_wqebbs;
 382        u8  num_dma;
 383};
 384
 385enum mlx5e_dma_map_type {
 386        MLX5E_DMA_MAP_SINGLE,
 387        MLX5E_DMA_MAP_PAGE
 388};
 389
 390struct mlx5e_sq_dma {
 391        dma_addr_t              addr;
 392        u32                     size;
 393        enum mlx5e_dma_map_type type;
 394};
 395
 396enum {
 397        MLX5E_SQ_STATE_ENABLED,
 398        MLX5E_SQ_STATE_BF_ENABLE,
 399};
 400
 401struct mlx5e_sq_wqe_info {
 402        u8  opcode;
 403        u8  num_wqebbs;
 404};
 405
 406enum mlx5e_sq_type {
 407        MLX5E_SQ_TXQ,
 408        MLX5E_SQ_ICO,
 409        MLX5E_SQ_XDP
 410};
 411
/* Send queue. Serves regular TX, internal control operations (ICO) and XDP
 * transmit; the db union variant in use corresponds to @type (MLX5E_SQ_*).
 * cc/pc are the consumer/producer counters; fields are laid out so that
 * completion-dirtied and xmit-dirtied state sit on separate cache lines.
 */
struct mlx5e_sq {
	/* data path */

	/* dirtied @completion */
	u16                        cc;
	u32                        dma_fifo_cc;

	/* dirtied @xmit */
	u16                        pc ____cacheline_aligned_in_smp;
	u32                        dma_fifo_pc;
	u16                        bf_offset;   /* current BlueFlame buffer offset */
	u16                        prev_cc;
	u8                         bf_budget;
	struct mlx5e_sq_stats      stats;

	struct mlx5e_cq            cq;

	/* pointers to per tx element info: write@xmit, read@completion */
	union {
		struct {
			struct sk_buff           **skb;
			struct mlx5e_sq_dma       *dma_fifo;
			struct mlx5e_tx_wqe_info  *wqe_info;
		} txq;
		struct mlx5e_sq_wqe_info *ico_wqe;
		struct {
			struct mlx5e_sq_wqe_info  *wqe_info;
			struct mlx5e_dma_info     *di;
			bool                       doorbell;
		} xdp;
	} db;

	/* read only */
	struct mlx5_wq_cyc         wq;
	u32                        dma_fifo_mask;
	void __iomem              *uar_map;
	struct netdev_queue       *txq;
	u32                        sqn;         /* HW SQ number */
	u16                        bf_buf_size;
	u16                        max_inline;
	u8                         min_inline_mode;
	u16                        edge;
	struct device             *pdev;
	struct mlx5e_tstamp       *tstamp;
	__be32                     mkey_be;
	unsigned long              state;       /* MLX5E_SQ_STATE_* bits */

	/* control path */
	struct mlx5_wq_ctrl        wq_ctrl;
	struct mlx5_uar            uar;
	struct mlx5e_channel      *channel;
	int                        tc;
	u32                        rate_limit;
	u8                         type;        /* enum mlx5e_sq_type */
} ____cacheline_aligned_in_smp;
 467
 468static inline bool mlx5e_sq_has_room_for(struct mlx5e_sq *sq, u16 n)
 469{
 470        return (((sq->wq.sz_m1 & (sq->cc - sq->pc)) >= n) ||
 471                (sq->cc  == sq->pc));
 472}
 473
 474enum channel_flags {
 475        MLX5E_CHANNEL_NAPI_SCHED = 1,
 476};
 477
/* One RX/TX channel: an RQ, one SQ per TC, an XDP transmit SQ and an
 * internal-control SQ, all serviced by a single NAPI context.
 */
struct mlx5e_channel {
	/* data path */
	struct mlx5e_rq            rq;
	struct mlx5e_sq            xdp_sq;
	struct mlx5e_sq            sq[MLX5E_MAX_NUM_TC];
	struct mlx5e_sq            icosq;   /* internal control operations */
	bool                       xdp;     /* XDP program attached */
	struct napi_struct         napi;
	struct device             *pdev;
	struct net_device         *netdev;
	__be32                     mkey_be;
	u8                         num_tc;
	unsigned long              flags;   /* enum channel_flags bits */

	/* control */
	struct mlx5e_priv         *priv;
	int                        ix;      /* channel index */
	int                        cpu;     /* presumably the CPU this channel is pinned to — confirm at creation site */
};
 497
 498enum mlx5e_traffic_types {
 499        MLX5E_TT_IPV4_TCP,
 500        MLX5E_TT_IPV6_TCP,
 501        MLX5E_TT_IPV4_UDP,
 502        MLX5E_TT_IPV6_UDP,
 503        MLX5E_TT_IPV4_IPSEC_AH,
 504        MLX5E_TT_IPV6_IPSEC_AH,
 505        MLX5E_TT_IPV4_IPSEC_ESP,
 506        MLX5E_TT_IPV6_IPSEC_ESP,
 507        MLX5E_TT_IPV4,
 508        MLX5E_TT_IPV6,
 509        MLX5E_TT_ANY,
 510        MLX5E_NUM_TT,
 511        MLX5E_NUM_INDIR_TIRS = MLX5E_TT_ANY,
 512};
 513
 514enum {
 515        MLX5E_STATE_ASYNC_EVENTS_ENABLED,
 516        MLX5E_STATE_OPENED,
 517        MLX5E_STATE_DESTROYING,
 518};
 519
 520struct mlx5e_vxlan_db {
 521        spinlock_t                      lock; /* protect vxlan table */
 522        struct radix_tree_root          tree;
 523};
 524
 525struct mlx5e_l2_rule {
 526        u8  addr[ETH_ALEN + 2];
 527        struct mlx5_flow_rule *rule;
 528};
 529
 530struct mlx5e_flow_table {
 531        int num_groups;
 532        struct mlx5_flow_table *t;
 533        struct mlx5_flow_group **g;
 534};
 535
 536#define MLX5E_L2_ADDR_HASH_SIZE BIT(BITS_PER_BYTE)
 537
 538struct mlx5e_tc_table {
 539        struct mlx5_flow_table          *t;
 540
 541        struct rhashtable_params        ht_params;
 542        struct rhashtable               ht;
 543};
 544
 545struct mlx5e_vlan_table {
 546        struct mlx5e_flow_table         ft;
 547        unsigned long active_vlans[BITS_TO_LONGS(VLAN_N_VID)];
 548        struct mlx5_flow_rule   *active_vlans_rule[VLAN_N_VID];
 549        struct mlx5_flow_rule   *untagged_rule;
 550        struct mlx5_flow_rule   *any_vlan_rule;
 551        bool          filter_disabled;
 552};
 553
 554struct mlx5e_l2_table {
 555        struct mlx5e_flow_table    ft;
 556        struct hlist_head          netdev_uc[MLX5E_L2_ADDR_HASH_SIZE];
 557        struct hlist_head          netdev_mc[MLX5E_L2_ADDR_HASH_SIZE];
 558        struct mlx5e_l2_rule       broadcast;
 559        struct mlx5e_l2_rule       allmulti;
 560        struct mlx5e_l2_rule       promisc;
 561        bool                       broadcast_enabled;
 562        bool                       allmulti_enabled;
 563        bool                       promisc_enabled;
 564};
 565
 566/* L3/L4 traffic type classifier */
 567struct mlx5e_ttc_table {
 568        struct mlx5e_flow_table  ft;
 569        struct mlx5_flow_rule    *rules[MLX5E_NUM_TT];
 570};
 571
 572#define ARFS_HASH_SHIFT BITS_PER_BYTE
 573#define ARFS_HASH_SIZE BIT(BITS_PER_BYTE)
 574struct arfs_table {
 575        struct mlx5e_flow_table  ft;
 576        struct mlx5_flow_rule    *default_rule;
 577        struct hlist_head        rules_hash[ARFS_HASH_SIZE];
 578};
 579
 580enum  arfs_type {
 581        ARFS_IPV4_TCP,
 582        ARFS_IPV6_TCP,
 583        ARFS_IPV4_UDP,
 584        ARFS_IPV6_UDP,
 585        ARFS_NUM_TYPES,
 586};
 587
 588struct mlx5e_arfs_tables {
 589        struct arfs_table arfs_tables[ARFS_NUM_TYPES];
 590        /* Protect aRFS rules list */
 591        spinlock_t                     arfs_lock;
 592        struct list_head               rules;
 593        int                            last_filter_id;
 594        struct workqueue_struct        *wq;
 595};
 596
 597/* NIC prio FTS */
 598enum {
 599        MLX5E_VLAN_FT_LEVEL = 0,
 600        MLX5E_L2_FT_LEVEL,
 601        MLX5E_TTC_FT_LEVEL,
 602        MLX5E_ARFS_FT_LEVEL
 603};
 604
 605struct mlx5e_ethtool_table {
 606        struct mlx5_flow_table *ft;
 607        int                    num_rules;
 608};
 609
 610#define ETHTOOL_NUM_L3_L4_FTS 7
 611#define ETHTOOL_NUM_L2_FTS 4
 612
 613struct mlx5e_ethtool_steering {
 614        struct mlx5e_ethtool_table      l3_l4_ft[ETHTOOL_NUM_L3_L4_FTS];
 615        struct mlx5e_ethtool_table      l2_ft[ETHTOOL_NUM_L2_FTS];
 616        struct list_head                rules;
 617        int                             tot_num_rules;
 618};
 619
 620struct mlx5e_flow_steering {
 621        struct mlx5_flow_namespace      *ns;
 622        struct mlx5e_ethtool_steering   ethtool;
 623        struct mlx5e_tc_table           tc;
 624        struct mlx5e_vlan_table         vlan;
 625        struct mlx5e_l2_table           l2;
 626        struct mlx5e_ttc_table          ttc;
 627        struct mlx5e_arfs_tables        arfs;
 628};
 629
 630struct mlx5e_rqt {
 631        u32              rqtn;
 632        bool             enabled;
 633};
 634
 635struct mlx5e_tir {
 636        u32               tirn;
 637        struct mlx5e_rqt  rqt;
 638        struct list_head  list;
 639};
 640
 641enum {
 642        MLX5E_TC_PRIO = 0,
 643        MLX5E_NIC_PRIO
 644};
 645
 646struct mlx5e_profile {
 647        void    (*init)(struct mlx5_core_dev *mdev,
 648                        struct net_device *netdev,
 649                        const struct mlx5e_profile *profile, void *ppriv);
 650        void    (*cleanup)(struct mlx5e_priv *priv);
 651        int     (*init_rx)(struct mlx5e_priv *priv);
 652        void    (*cleanup_rx)(struct mlx5e_priv *priv);
 653        int     (*init_tx)(struct mlx5e_priv *priv);
 654        void    (*cleanup_tx)(struct mlx5e_priv *priv);
 655        void    (*enable)(struct mlx5e_priv *priv);
 656        void    (*disable)(struct mlx5e_priv *priv);
 657        void    (*update_stats)(struct mlx5e_priv *priv);
 658        int     (*max_nch)(struct mlx5_core_dev *mdev);
 659        int     max_tc;
 660};
 661
/* Per-netdev driver private state (netdev_priv). Data-path fields come
 * first; everything below them is control-path state.
 */
struct mlx5e_priv {
	/* priv data path fields - start */
	struct mlx5e_sq            **txq_to_sq_map;
	int channeltc_to_txq_map[MLX5E_MAX_NUM_CHANNELS][MLX5E_MAX_NUM_TC];
	struct bpf_prog *xdp_prog;
	/* priv data path fields - end */

	unsigned long              state;       /* MLX5E_STATE_* bits */
	struct mutex               state_lock; /* Protects Interface state */
	struct mlx5_core_mkey      umr_mkey;
	struct mlx5e_rq            drop_rq;

	struct mlx5e_channel     **channel;
	u32                        tisn[MLX5E_MAX_NUM_TC];
	struct mlx5e_rqt           indir_rqt;
	struct mlx5e_tir           indir_tir[MLX5E_NUM_INDIR_TIRS];
	struct mlx5e_tir           direct_tir[MLX5E_MAX_NUM_CHANNELS];
	u32                        tx_rates[MLX5E_MAX_NUM_SQS];

	struct mlx5e_flow_steering fs;
	struct mlx5e_vxlan_db      vxlan;

	struct mlx5e_params        params;
	struct workqueue_struct    *wq;
	struct work_struct         update_carrier_work;
	struct work_struct         set_rx_mode_work;
	struct work_struct         tx_timeout_work;
	struct delayed_work        update_stats_work;

	u32                        pflags;      /* MLX5E_PFLAG_* private flags */
	struct mlx5_core_dev      *mdev;
	struct net_device         *netdev;
	struct mlx5e_stats         stats;
	struct mlx5e_tstamp        tstamp;
	u16 q_counter;
	const struct mlx5e_profile *profile;
	void                      *ppriv;       /* opaque data passed to profile->init */
};
 700
 701void mlx5e_build_ptys2ethtool_map(void);
 702
 703void mlx5e_send_nop(struct mlx5e_sq *sq, bool notify_hw);
 704u16 mlx5e_select_queue(struct net_device *dev, struct sk_buff *skb,
 705                       void *accel_priv, select_queue_fallback_t fallback);
 706netdev_tx_t mlx5e_xmit(struct sk_buff *skb, struct net_device *dev);
 707
 708void mlx5e_completion_event(struct mlx5_core_cq *mcq);
 709void mlx5e_cq_error_event(struct mlx5_core_cq *mcq, enum mlx5_event event);
 710int mlx5e_napi_poll(struct napi_struct *napi, int budget);
 711bool mlx5e_poll_tx_cq(struct mlx5e_cq *cq, int napi_budget);
 712int mlx5e_poll_rx_cq(struct mlx5e_cq *cq, int budget);
 713void mlx5e_free_sq_descs(struct mlx5e_sq *sq);
 714
 715void mlx5e_page_release(struct mlx5e_rq *rq, struct mlx5e_dma_info *dma_info,
 716                        bool recycle);
 717void mlx5e_handle_rx_cqe(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe);
 718void mlx5e_handle_rx_cqe_mpwrq(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe);
 719bool mlx5e_post_rx_wqes(struct mlx5e_rq *rq);
 720int mlx5e_alloc_rx_wqe(struct mlx5e_rq *rq, struct mlx5e_rx_wqe *wqe, u16 ix);
 721int mlx5e_alloc_rx_mpwqe(struct mlx5e_rq *rq, struct mlx5e_rx_wqe *wqe, u16 ix);
 722void mlx5e_dealloc_rx_wqe(struct mlx5e_rq *rq, u16 ix);
 723void mlx5e_dealloc_rx_mpwqe(struct mlx5e_rq *rq, u16 ix);
 724void mlx5e_post_rx_mpwqe(struct mlx5e_rq *rq);
 725void mlx5e_free_rx_mpwqe(struct mlx5e_rq *rq, struct mlx5e_mpw_info *wi);
 726struct mlx5_cqe64 *mlx5e_get_cqe(struct mlx5e_cq *cq);
 727
 728void mlx5e_rx_am(struct mlx5e_rq *rq);
 729void mlx5e_rx_am_work(struct work_struct *work);
 730struct mlx5e_cq_moder mlx5e_am_get_def_profile(u8 rx_cq_period_mode);
 731
 732void mlx5e_update_stats(struct mlx5e_priv *priv);
 733
 734int mlx5e_create_flow_steering(struct mlx5e_priv *priv);
 735void mlx5e_destroy_flow_steering(struct mlx5e_priv *priv);
 736void mlx5e_init_l2_addr(struct mlx5e_priv *priv);
 737void mlx5e_destroy_flow_table(struct mlx5e_flow_table *ft);
 738int mlx5e_ethtool_get_flow(struct mlx5e_priv *priv, struct ethtool_rxnfc *info,
 739                           int location);
 740int mlx5e_ethtool_get_all_flows(struct mlx5e_priv *priv,
 741                                struct ethtool_rxnfc *info, u32 *rule_locs);
 742int mlx5e_ethtool_flow_replace(struct mlx5e_priv *priv,
 743                               struct ethtool_rx_flow_spec *fs);
 744int mlx5e_ethtool_flow_remove(struct mlx5e_priv *priv,
 745                              int location);
 746void mlx5e_ethtool_init_steering(struct mlx5e_priv *priv);
 747void mlx5e_ethtool_cleanup_steering(struct mlx5e_priv *priv);
 748void mlx5e_set_rx_mode_work(struct work_struct *work);
 749
 750void mlx5e_fill_hwstamp(struct mlx5e_tstamp *clock, u64 timestamp,
 751                        struct skb_shared_hwtstamps *hwts);
 752void mlx5e_timestamp_init(struct mlx5e_priv *priv);
 753void mlx5e_timestamp_cleanup(struct mlx5e_priv *priv);
 754int mlx5e_hwstamp_set(struct net_device *dev, struct ifreq *ifr);
 755int mlx5e_hwstamp_get(struct net_device *dev, struct ifreq *ifr);
 756void mlx5e_modify_rx_cqe_compression(struct mlx5e_priv *priv, bool val);
 757
 758int mlx5e_vlan_rx_add_vid(struct net_device *dev, __always_unused __be16 proto,
 759                          u16 vid);
 760int mlx5e_vlan_rx_kill_vid(struct net_device *dev, __always_unused __be16 proto,
 761                           u16 vid);
 762void mlx5e_enable_vlan_filter(struct mlx5e_priv *priv);
 763void mlx5e_disable_vlan_filter(struct mlx5e_priv *priv);
 764
 765int mlx5e_modify_rqs_vsd(struct mlx5e_priv *priv, bool vsd);
 766
 767int mlx5e_redirect_rqt(struct mlx5e_priv *priv, u32 rqtn, int sz, int ix);
 768void mlx5e_build_tir_ctx_hash(void *tirc, struct mlx5e_priv *priv);
 769
 770int mlx5e_open_locked(struct net_device *netdev);
 771int mlx5e_close_locked(struct net_device *netdev);
 772void mlx5e_build_default_indir_rqt(struct mlx5_core_dev *mdev,
 773                                   u32 *indirection_rqt, int len,
 774                                   int num_channels);
 775int mlx5e_get_max_linkspeed(struct mlx5_core_dev *mdev, u32 *speed);
 776
 777void mlx5e_set_rx_cq_mode_params(struct mlx5e_params *params,
 778                                 u8 cq_period_mode);
 779
/* Publish the doorbell record and ring the HW doorbell for @sq.
 * When bf_sz is non-zero the WQE ctrl segment is copied straight into the
 * BlueFlame buffer; otherwise only the 64-bit doorbell write is issued.
 * Barrier ordering here is load-bearing — do not reorder these statements.
 */
static inline void mlx5e_tx_notify_hw(struct mlx5e_sq *sq,
				      struct mlx5_wqe_ctrl_seg *ctrl, int bf_sz)
{
	u16 ofst = MLX5_BF_OFFSET + sq->bf_offset;

	/* ensure wqe is visible to device before updating doorbell record */
	dma_wmb();

	*sq->wq.db = cpu_to_be32(sq->pc);

	/* ensure doorbell record is visible to device before ringing the
	 * doorbell
	 */
	wmb();
	if (bf_sz)
		__iowrite64_copy(sq->uar_map + ofst, ctrl, bf_sz);
	else
		mlx5_write64((__be32 *)ctrl, sq->uar_map + ofst, NULL);
	/* flush the write-combining mapped buffer */
	wmb();

	/* alternate between the two BlueFlame buffer halves */
	sq->bf_offset ^= sq->bf_buf_size;
}
 803
 804static inline void mlx5e_cq_arm(struct mlx5e_cq *cq)
 805{
 806        struct mlx5_core_cq *mcq;
 807
 808        mcq = &cq->mcq;
 809        mlx5_cq_arm(mcq, MLX5_CQ_DB_REQ_NOT, mcq->uar->map, NULL, cq->wq.cc);
 810}
 811
 812static inline u32 mlx5e_get_wqe_mtt_offset(struct mlx5e_rq *rq, u16 wqe_ix)
 813{
 814        return rq->mpwqe.mtt_offset +
 815                wqe_ix * ALIGN(MLX5_MPWRQ_PAGES_PER_WQE, 8);
 816}
 817
 818static inline int mlx5e_get_max_num_channels(struct mlx5_core_dev *mdev)
 819{
 820        return min_t(int, mdev->priv.eq_table.num_comp_vectors,
 821                     MLX5E_MAX_NUM_CHANNELS);
 822}
 823
 824extern const struct ethtool_ops mlx5e_ethtool_ops;
 825#ifdef CONFIG_MLX5_CORE_EN_DCB
 826extern const struct dcbnl_rtnl_ops mlx5e_dcbnl_ops;
 827int mlx5e_dcbnl_ieee_setets_core(struct mlx5e_priv *priv, struct ieee_ets *ets);
 828#endif
 829
/* aRFS (accelerated Receive Flow Steering) hooks. Without CONFIG_RFS_ACCEL
 * table create/destroy become no-ops and enable/disable report "not
 * supported", so callers need no ifdefs of their own.
 */
#ifndef CONFIG_RFS_ACCEL
static inline int mlx5e_arfs_create_tables(struct mlx5e_priv *priv)
{
	return 0;
}

static inline void mlx5e_arfs_destroy_tables(struct mlx5e_priv *priv) {}

static inline int mlx5e_arfs_enable(struct mlx5e_priv *priv)
{
	/* NOTE(review): -ENOTSUPP is not a userspace-visible errno; newer
	 * kernel convention prefers -EOPNOTSUPP — confirm callers before
	 * changing.
	 */
	return -ENOTSUPP;
}

static inline int mlx5e_arfs_disable(struct mlx5e_priv *priv)
{
	return -ENOTSUPP;
}
#else
int mlx5e_arfs_create_tables(struct mlx5e_priv *priv);
void mlx5e_arfs_destroy_tables(struct mlx5e_priv *priv);
int mlx5e_arfs_enable(struct mlx5e_priv *priv);
int mlx5e_arfs_disable(struct mlx5e_priv *priv);
int mlx5e_rx_flow_steer(struct net_device *dev, const struct sk_buff *skb,
			u16 rxq_index, u32 flow_id);
#endif
 855
 856u16 mlx5e_get_max_inline_cap(struct mlx5_core_dev *mdev);
 857int mlx5e_create_tir(struct mlx5_core_dev *mdev,
 858                     struct mlx5e_tir *tir, u32 *in, int inlen);
 859void mlx5e_destroy_tir(struct mlx5_core_dev *mdev,
 860                       struct mlx5e_tir *tir);
 861int mlx5e_create_mdev_resources(struct mlx5_core_dev *mdev);
 862void mlx5e_destroy_mdev_resources(struct mlx5_core_dev *mdev);
 863int mlx5e_refresh_tirs_self_loopback_enable(struct mlx5_core_dev *mdev);
 864
 865struct mlx5_eswitch_rep;
 866int mlx5e_vport_rep_load(struct mlx5_eswitch *esw,
 867                         struct mlx5_eswitch_rep *rep);
 868void mlx5e_vport_rep_unload(struct mlx5_eswitch *esw,
 869                            struct mlx5_eswitch_rep *rep);
 870int mlx5e_nic_rep_load(struct mlx5_eswitch *esw, struct mlx5_eswitch_rep *rep);
 871void mlx5e_nic_rep_unload(struct mlx5_eswitch *esw,
 872                          struct mlx5_eswitch_rep *rep);
 873int mlx5e_add_sqs_fwd_rules(struct mlx5e_priv *priv);
 874void mlx5e_remove_sqs_fwd_rules(struct mlx5e_priv *priv);
 875int mlx5e_attr_get(struct net_device *dev, struct switchdev_attr *attr);
 876void mlx5e_handle_rx_cqe_rep(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe);
 877
 878int mlx5e_create_direct_rqts(struct mlx5e_priv *priv);
 879void mlx5e_destroy_rqt(struct mlx5e_priv *priv, struct mlx5e_rqt *rqt);
 880int mlx5e_create_direct_tirs(struct mlx5e_priv *priv);
 881void mlx5e_destroy_direct_tirs(struct mlx5e_priv *priv);
 882int mlx5e_create_tises(struct mlx5e_priv *priv);
 883void mlx5e_cleanup_nic_tx(struct mlx5e_priv *priv);
 884int mlx5e_close(struct net_device *netdev);
 885int mlx5e_open(struct net_device *netdev);
 886void mlx5e_update_stats_work(struct work_struct *work);
 887struct net_device *mlx5e_create_netdev(struct mlx5_core_dev *mdev,
 888                                       const struct mlx5e_profile *profile,
 889                                       void *ppriv);
 890void mlx5e_destroy_netdev(struct mlx5_core_dev *mdev, struct mlx5e_priv *priv);
 891int mlx5e_attach_netdev(struct mlx5_core_dev *mdev, struct net_device *netdev);
 892void mlx5e_detach_netdev(struct mlx5_core_dev *mdev, struct net_device *netdev);
 893struct rtnl_link_stats64 *
 894mlx5e_get_stats(struct net_device *dev, struct rtnl_link_stats64 *stats);
 895u32 mlx5e_choose_lro_timeout(struct mlx5_core_dev *mdev, u32 wanted_timeout);
 896
 897#endif /* __MLX5_EN_H__ */
 898