linux/drivers/net/ethernet/mellanox/mlx5/core/en_stats.c
/*
 * Copyright (c) 2017, Mellanox Technologies, Ltd.  All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include "lib/mlx5.h"
#include "en.h"
#include "en_accel/tls.h"
#include "en_accel/en_accel.h"
#include "en/ptp.h"

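/* Ethtool statistics are organized into groups supplied by the active
 * profile (priv->profile->stats_grps). Each group reports how many
 * counters it contributes and provides callbacks to fill counter names
 * (strings), fill counter values, and optionally refresh its state.
 */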
static unsigned int stats_grps_num(struct mlx5e_priv *priv)
{
        return !priv->profile->stats_grps_num ? 0 :
                priv->profile->stats_grps_num(priv);
}

unsigned int mlx5e_stats_total_num(struct mlx5e_priv *priv)
{
        mlx5e_stats_grp_t *stats_grps = priv->profile->stats_grps;
        const unsigned int num_stats_grps = stats_grps_num(priv);
        unsigned int total = 0;
        int i;

        for (i = 0; i < num_stats_grps; i++)
                total += stats_grps[i]->get_num_stats(priv);

        return total;
}

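/* Refresh only the groups that opted into ndo_get_stats64 updates via
 * MLX5E_NDO_UPDATE_STATS in their update_stats_mask. Groups are walked
 * in reverse order, matching mlx5e_stats_update() below.
 */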
void mlx5e_stats_update_ndo_stats(struct mlx5e_priv *priv)
{
        mlx5e_stats_grp_t *stats_grps = priv->profile->stats_grps;
        const unsigned int num_stats_grps = stats_grps_num(priv);
        int i;

        for (i = num_stats_grps - 1; i >= 0; i--)
                if (stats_grps[i]->update_stats &&
                    stats_grps[i]->update_stats_mask & MLX5E_NDO_UPDATE_STATS)
                        stats_grps[i]->update_stats(priv);
}

void mlx5e_stats_update(struct mlx5e_priv *priv)
{
        mlx5e_stats_grp_t *stats_grps = priv->profile->stats_grps;
        const unsigned int num_stats_grps = stats_grps_num(priv);
        int i;

        for (i = num_stats_grps - 1; i >= 0; i--)
                if (stats_grps[i]->update_stats)
                        stats_grps[i]->update_stats(priv);
}

void mlx5e_stats_fill(struct mlx5e_priv *priv, u64 *data, int idx)
{
        mlx5e_stats_grp_t *stats_grps = priv->profile->stats_grps;
        const unsigned int num_stats_grps = stats_grps_num(priv);
        int i;

        for (i = 0; i < num_stats_grps; i++)
                idx = stats_grps[i]->fill_stats(priv, data, idx);
}

void mlx5e_stats_fill_strings(struct mlx5e_priv *priv, u8 *data)
{
        mlx5e_stats_grp_t *stats_grps = priv->profile->stats_grps;
        const unsigned int num_stats_grps = stats_grps_num(priv);
        int i, idx = 0;

        for (i = 0; i < num_stats_grps; i++)
                idx = stats_grps[i]->fill_strings(priv, data, idx);
}
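
/* Usage sketch (assumed callers; the actual ethtool hooks live in
 * en_ethtool.c):
 *
 *      .get_sset_count(ETH_SS_STATS)  -> mlx5e_stats_total_num(priv)
 *      .get_strings(ETH_SS_STATS)     -> mlx5e_stats_fill_strings(priv, data)
 *      .get_ethtool_stats             -> mlx5e_stats_update(priv);
 *                                        mlx5e_stats_fill(priv, data, 0);
 */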

/* Concrete NIC Stats */

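/* Software counters, aggregated over all channels. MLX5E_DECLARE_STAT
 * expands to the counter's name string and its offset inside
 * struct mlx5e_sw_stats (see en_stats.h).
 */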
static const struct counter_desc sw_stats_desc[] = {
        { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_packets) },
        { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_bytes) },
        { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_packets) },
        { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_bytes) },
        { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_tso_packets) },
        { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_tso_bytes) },
        { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_tso_inner_packets) },
        { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_tso_inner_bytes) },
        { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_added_vlan_packets) },
        { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_nop) },
        { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_mpwqe_blks) },
        { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_mpwqe_pkts) },

#ifdef CONFIG_MLX5_EN_TLS
        { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_tls_encrypted_packets) },
        { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_tls_encrypted_bytes) },
        { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_tls_ooo) },
        { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_tls_dump_packets) },
        { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_tls_dump_bytes) },
        { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_tls_resync_bytes) },
        { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_tls_skip_no_sync_data) },
        { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_tls_drop_no_sync_data) },
        { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_tls_drop_bypass_req) },
#endif

        { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_lro_packets) },
        { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_lro_bytes) },
        { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_gro_packets) },
        { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_gro_bytes) },
        { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_gro_skbs) },
        { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_gro_match_packets) },
        { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_gro_large_hds) },
        { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_ecn_mark) },
        { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_removed_vlan_packets) },
        { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_csum_unnecessary) },
        { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_csum_none) },
        { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_csum_complete) },
        { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_csum_complete_tail) },
        { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_csum_complete_tail_slow) },
        { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_csum_unnecessary_inner) },
        { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_xdp_drop) },
        { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_xdp_redirect) },
        { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_xdp_tx_xmit) },
        { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_xdp_tx_mpwqe) },
        { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_xdp_tx_inlnw) },
        { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_xdp_tx_nops) },
        { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_xdp_tx_full) },
        { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_xdp_tx_err) },
        { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_xdp_tx_cqe) },
        { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_csum_none) },
        { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_csum_partial) },
        { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_csum_partial_inner) },
        { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_queue_stopped) },
        { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_queue_dropped) },
        { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_xmit_more) },
        { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_recover) },
        { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_cqes) },
        { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_queue_wake) },
        { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_cqe_err) },
        { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_xdp_xmit) },
        { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_xdp_mpwqe) },
        { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_xdp_inlnw) },
        { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_xdp_nops) },
        { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_xdp_full) },
        { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_xdp_err) },
        { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_xdp_cqes) },
        { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_wqe_err) },
        { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_mpwqe_filler_cqes) },
        { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_mpwqe_filler_strides) },
        { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_oversize_pkts_sw_drop) },
        { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_buff_alloc_err) },
        { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_cqe_compress_blks) },
        { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_cqe_compress_pkts) },
        { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_cache_reuse) },
        { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_cache_full) },
        { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_cache_empty) },
        { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_cache_busy) },
        { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_cache_waive) },
        { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_congst_umr) },
        { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_arfs_err) },
        { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_recover) },
#ifdef CONFIG_MLX5_EN_TLS
        { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_tls_decrypted_packets) },
        { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_tls_decrypted_bytes) },
        { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_tls_resync_req_pkt) },
        { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_tls_resync_req_start) },
        { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_tls_resync_req_end) },
        { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_tls_resync_req_skip) },
        { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_tls_resync_res_ok) },
        { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_tls_resync_res_retry) },
        { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_tls_resync_res_skip) },
        { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_tls_err) },
#endif
        { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, ch_events) },
        { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, ch_poll) },
        { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, ch_arm) },
        { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, ch_aff_change) },
        { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, ch_force_irq) },
        { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, ch_eq_rearm) },
        { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_xsk_packets) },
        { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_xsk_bytes) },
        { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_xsk_csum_complete) },
        { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_xsk_csum_unnecessary) },
        { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_xsk_csum_unnecessary_inner) },
        { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_xsk_csum_none) },
        { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_xsk_ecn_mark) },
        { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_xsk_removed_vlan_packets) },
        { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_xsk_xdp_drop) },
        { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_xsk_xdp_redirect) },
        { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_xsk_wqe_err) },
        { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_xsk_mpwqe_filler_cqes) },
        { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_xsk_mpwqe_filler_strides) },
        { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_xsk_oversize_pkts_sw_drop) },
        { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_xsk_buff_alloc_err) },
        { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_xsk_cqe_compress_blks) },
        { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_xsk_cqe_compress_pkts) },
        { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_xsk_congst_umr) },
        { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_xsk_arfs_err) },
        { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_xsk_xmit) },
        { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_xsk_mpwqe) },
        { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_xsk_inlnw) },
        { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_xsk_full) },
        { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_xsk_err) },
        { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_xsk_cqes) },
};

#define NUM_SW_COUNTERS                 ARRAY_SIZE(sw_stats_desc)

static MLX5E_DECLARE_STATS_GRP_OP_NUM_STATS(sw)
{
        return NUM_SW_COUNTERS;
}

static MLX5E_DECLARE_STATS_GRP_OP_FILL_STRS(sw)
{
        int i;

        for (i = 0; i < NUM_SW_COUNTERS; i++)
                strcpy(data + (idx++) * ETH_GSTRING_LEN, sw_stats_desc[i].format);
        return idx;
}

static MLX5E_DECLARE_STATS_GRP_OP_FILL_STATS(sw)
{
        int i;

        for (i = 0; i < NUM_SW_COUNTERS; i++)
                data[idx++] = MLX5E_READ_CTR64_CPU(&priv->stats.sw, sw_stats_desc, i);
        return idx;
}

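/* The helpers below fold one queue's counters into the device-wide
 * software stats. One helper per queue flavor: XDP_REDIRECT SQ, RQ XDP
 * SQ, AF_XDP (XSK) SQ and RQ, regular RQ, channel, and regular SQ.
 */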
static void mlx5e_stats_grp_sw_update_stats_xdp_red(struct mlx5e_sw_stats *s,
                                                    struct mlx5e_xdpsq_stats *xdpsq_red_stats)
{
        s->tx_xdp_xmit  += xdpsq_red_stats->xmit;
        s->tx_xdp_mpwqe += xdpsq_red_stats->mpwqe;
        s->tx_xdp_inlnw += xdpsq_red_stats->inlnw;
        s->tx_xdp_nops  += xdpsq_red_stats->nops;
        s->tx_xdp_full  += xdpsq_red_stats->full;
        s->tx_xdp_err   += xdpsq_red_stats->err;
        s->tx_xdp_cqes  += xdpsq_red_stats->cqes;
}

static void mlx5e_stats_grp_sw_update_stats_xdpsq(struct mlx5e_sw_stats *s,
                                                  struct mlx5e_xdpsq_stats *xdpsq_stats)
{
        s->rx_xdp_tx_xmit  += xdpsq_stats->xmit;
        s->rx_xdp_tx_mpwqe += xdpsq_stats->mpwqe;
        s->rx_xdp_tx_inlnw += xdpsq_stats->inlnw;
        s->rx_xdp_tx_nops  += xdpsq_stats->nops;
        s->rx_xdp_tx_full  += xdpsq_stats->full;
        s->rx_xdp_tx_err   += xdpsq_stats->err;
        s->rx_xdp_tx_cqe   += xdpsq_stats->cqes;
}

static void mlx5e_stats_grp_sw_update_stats_xsksq(struct mlx5e_sw_stats *s,
                                                  struct mlx5e_xdpsq_stats *xsksq_stats)
{
        s->tx_xsk_xmit  += xsksq_stats->xmit;
        s->tx_xsk_mpwqe += xsksq_stats->mpwqe;
        s->tx_xsk_inlnw += xsksq_stats->inlnw;
        s->tx_xsk_full  += xsksq_stats->full;
        s->tx_xsk_err   += xsksq_stats->err;
        s->tx_xsk_cqes  += xsksq_stats->cqes;
}

static void mlx5e_stats_grp_sw_update_stats_xskrq(struct mlx5e_sw_stats *s,
                                                  struct mlx5e_rq_stats *xskrq_stats)
{
        s->rx_xsk_packets                += xskrq_stats->packets;
        s->rx_xsk_bytes                  += xskrq_stats->bytes;
        s->rx_xsk_csum_complete          += xskrq_stats->csum_complete;
        s->rx_xsk_csum_unnecessary       += xskrq_stats->csum_unnecessary;
        s->rx_xsk_csum_unnecessary_inner += xskrq_stats->csum_unnecessary_inner;
        s->rx_xsk_csum_none              += xskrq_stats->csum_none;
        s->rx_xsk_ecn_mark               += xskrq_stats->ecn_mark;
        s->rx_xsk_removed_vlan_packets   += xskrq_stats->removed_vlan_packets;
        s->rx_xsk_xdp_drop               += xskrq_stats->xdp_drop;
        s->rx_xsk_xdp_redirect           += xskrq_stats->xdp_redirect;
        s->rx_xsk_wqe_err                += xskrq_stats->wqe_err;
        s->rx_xsk_mpwqe_filler_cqes      += xskrq_stats->mpwqe_filler_cqes;
        s->rx_xsk_mpwqe_filler_strides   += xskrq_stats->mpwqe_filler_strides;
        s->rx_xsk_oversize_pkts_sw_drop  += xskrq_stats->oversize_pkts_sw_drop;
        s->rx_xsk_buff_alloc_err         += xskrq_stats->buff_alloc_err;
        s->rx_xsk_cqe_compress_blks      += xskrq_stats->cqe_compress_blks;
        s->rx_xsk_cqe_compress_pkts      += xskrq_stats->cqe_compress_pkts;
        s->rx_xsk_congst_umr             += xskrq_stats->congst_umr;
        s->rx_xsk_arfs_err               += xskrq_stats->arfs_err;
}

static void mlx5e_stats_grp_sw_update_stats_rq_stats(struct mlx5e_sw_stats *s,
                                                     struct mlx5e_rq_stats *rq_stats)
{
        s->rx_packets                 += rq_stats->packets;
        s->rx_bytes                   += rq_stats->bytes;
        s->rx_lro_packets             += rq_stats->lro_packets;
        s->rx_lro_bytes               += rq_stats->lro_bytes;
        s->rx_gro_packets             += rq_stats->gro_packets;
        s->rx_gro_bytes               += rq_stats->gro_bytes;
        s->rx_gro_skbs                += rq_stats->gro_skbs;
        s->rx_gro_match_packets       += rq_stats->gro_match_packets;
        s->rx_gro_large_hds           += rq_stats->gro_large_hds;
        s->rx_ecn_mark                += rq_stats->ecn_mark;
        s->rx_removed_vlan_packets    += rq_stats->removed_vlan_packets;
        s->rx_csum_none               += rq_stats->csum_none;
        s->rx_csum_complete           += rq_stats->csum_complete;
        s->rx_csum_complete_tail      += rq_stats->csum_complete_tail;
        s->rx_csum_complete_tail_slow += rq_stats->csum_complete_tail_slow;
        s->rx_csum_unnecessary        += rq_stats->csum_unnecessary;
        s->rx_csum_unnecessary_inner  += rq_stats->csum_unnecessary_inner;
        s->rx_xdp_drop                += rq_stats->xdp_drop;
        s->rx_xdp_redirect            += rq_stats->xdp_redirect;
        s->rx_wqe_err                 += rq_stats->wqe_err;
        s->rx_mpwqe_filler_cqes       += rq_stats->mpwqe_filler_cqes;
        s->rx_mpwqe_filler_strides    += rq_stats->mpwqe_filler_strides;
        s->rx_oversize_pkts_sw_drop   += rq_stats->oversize_pkts_sw_drop;
        s->rx_buff_alloc_err          += rq_stats->buff_alloc_err;
        s->rx_cqe_compress_blks       += rq_stats->cqe_compress_blks;
        s->rx_cqe_compress_pkts       += rq_stats->cqe_compress_pkts;
        s->rx_cache_reuse             += rq_stats->cache_reuse;
        s->rx_cache_full              += rq_stats->cache_full;
        s->rx_cache_empty             += rq_stats->cache_empty;
        s->rx_cache_busy              += rq_stats->cache_busy;
        s->rx_cache_waive             += rq_stats->cache_waive;
        s->rx_congst_umr              += rq_stats->congst_umr;
        s->rx_arfs_err                += rq_stats->arfs_err;
        s->rx_recover                 += rq_stats->recover;
#ifdef CONFIG_MLX5_EN_TLS
        s->rx_tls_decrypted_packets   += rq_stats->tls_decrypted_packets;
        s->rx_tls_decrypted_bytes     += rq_stats->tls_decrypted_bytes;
        s->rx_tls_resync_req_pkt      += rq_stats->tls_resync_req_pkt;
        s->rx_tls_resync_req_start    += rq_stats->tls_resync_req_start;
        s->rx_tls_resync_req_end      += rq_stats->tls_resync_req_end;
        s->rx_tls_resync_req_skip     += rq_stats->tls_resync_req_skip;
        s->rx_tls_resync_res_ok       += rq_stats->tls_resync_res_ok;
        s->rx_tls_resync_res_retry    += rq_stats->tls_resync_res_retry;
        s->rx_tls_resync_res_skip     += rq_stats->tls_resync_res_skip;
        s->rx_tls_err                 += rq_stats->tls_err;
#endif
}

static void mlx5e_stats_grp_sw_update_stats_ch_stats(struct mlx5e_sw_stats *s,
                                                     struct mlx5e_ch_stats *ch_stats)
{
        s->ch_events      += ch_stats->events;
        s->ch_poll        += ch_stats->poll;
        s->ch_arm         += ch_stats->arm;
        s->ch_aff_change  += ch_stats->aff_change;
        s->ch_force_irq   += ch_stats->force_irq;
        s->ch_eq_rearm    += ch_stats->eq_rearm;
}

static void mlx5e_stats_grp_sw_update_stats_sq(struct mlx5e_sw_stats *s,
                                               struct mlx5e_sq_stats *sq_stats)
{
        s->tx_packets               += sq_stats->packets;
        s->tx_bytes                 += sq_stats->bytes;
        s->tx_tso_packets           += sq_stats->tso_packets;
        s->tx_tso_bytes             += sq_stats->tso_bytes;
        s->tx_tso_inner_packets     += sq_stats->tso_inner_packets;
        s->tx_tso_inner_bytes       += sq_stats->tso_inner_bytes;
        s->tx_added_vlan_packets    += sq_stats->added_vlan_packets;
        s->tx_nop                   += sq_stats->nop;
        s->tx_mpwqe_blks            += sq_stats->mpwqe_blks;
        s->tx_mpwqe_pkts            += sq_stats->mpwqe_pkts;
        s->tx_queue_stopped         += sq_stats->stopped;
        s->tx_queue_wake            += sq_stats->wake;
        s->tx_queue_dropped         += sq_stats->dropped;
        s->tx_cqe_err               += sq_stats->cqe_err;
        s->tx_recover               += sq_stats->recover;
        s->tx_xmit_more             += sq_stats->xmit_more;
        s->tx_csum_partial_inner    += sq_stats->csum_partial_inner;
        s->tx_csum_none             += sq_stats->csum_none;
        s->tx_csum_partial          += sq_stats->csum_partial;
#ifdef CONFIG_MLX5_EN_TLS
        s->tx_tls_encrypted_packets += sq_stats->tls_encrypted_packets;
        s->tx_tls_encrypted_bytes   += sq_stats->tls_encrypted_bytes;
        s->tx_tls_ooo               += sq_stats->tls_ooo;
        s->tx_tls_dump_bytes        += sq_stats->tls_dump_bytes;
        s->tx_tls_dump_packets      += sq_stats->tls_dump_packets;
        s->tx_tls_resync_bytes      += sq_stats->tls_resync_bytes;
        s->tx_tls_skip_no_sync_data += sq_stats->tls_skip_no_sync_data;
        s->tx_tls_drop_no_sync_data += sq_stats->tls_drop_no_sync_data;
        s->tx_tls_drop_bypass_req   += sq_stats->tls_drop_bypass_req;
#endif
        s->tx_cqes                  += sq_stats->cqes;
}

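/* Fold in the dedicated PTP channel's stats, if PTP SQs or the PTP RQ
 * were ever opened on this netdev.
 */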
static void mlx5e_stats_grp_sw_update_stats_ptp(struct mlx5e_priv *priv,
                                                struct mlx5e_sw_stats *s)
{
        int i;

        if (!priv->tx_ptp_opened && !priv->rx_ptp_opened)
                return;

        mlx5e_stats_grp_sw_update_stats_ch_stats(s, &priv->ptp_stats.ch);

        if (priv->tx_ptp_opened) {
                for (i = 0; i < priv->max_opened_tc; i++) {
                        mlx5e_stats_grp_sw_update_stats_sq(s, &priv->ptp_stats.sq[i]);

                        /* https://gcc.gnu.org/bugzilla/show_bug.cgi?id=92657 */
                        barrier();
                }
        }
        if (priv->rx_ptp_opened) {
                mlx5e_stats_grp_sw_update_stats_rq_stats(s, &priv->ptp_stats.rq);

                /* https://gcc.gnu.org/bugzilla/show_bug.cgi?id=92657 */
                barrier();
        }
}

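/* Fold in the HTB QoS SQ stats. The stats array grows as QoS SQs are
 * opened, hence the acquire/release pairing noted below.
 */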
static void mlx5e_stats_grp_sw_update_stats_qos(struct mlx5e_priv *priv,
                                                struct mlx5e_sw_stats *s)
{
        struct mlx5e_sq_stats **stats;
        u16 max_qos_sqs;
        int i;

        /* Pairs with smp_store_release in mlx5e_open_qos_sq. */
        max_qos_sqs = smp_load_acquire(&priv->htb.max_qos_sqs);
        stats = READ_ONCE(priv->htb.qos_sq_stats);

        for (i = 0; i < max_qos_sqs; i++) {
                mlx5e_stats_grp_sw_update_stats_sq(s, READ_ONCE(stats[i]));

                /* https://gcc.gnu.org/bugzilla/show_bug.cgi?id=92657 */
                barrier();
        }
}

static MLX5E_DECLARE_STATS_GRP_OP_UPDATE_STATS(sw)
{
        struct mlx5e_sw_stats *s = &priv->stats.sw;
        int i;

        memset(s, 0, sizeof(*s));

        for (i = 0; i < priv->stats_nch; i++) {
                struct mlx5e_channel_stats *channel_stats =
                        &priv->channel_stats[i];
                int j;

                mlx5e_stats_grp_sw_update_stats_rq_stats(s, &channel_stats->rq);
                mlx5e_stats_grp_sw_update_stats_xdpsq(s, &channel_stats->rq_xdpsq);
                mlx5e_stats_grp_sw_update_stats_ch_stats(s, &channel_stats->ch);
                /* xdp redirect */
                mlx5e_stats_grp_sw_update_stats_xdp_red(s, &channel_stats->xdpsq);
                /* AF_XDP zero-copy */
                mlx5e_stats_grp_sw_update_stats_xskrq(s, &channel_stats->xskrq);
                mlx5e_stats_grp_sw_update_stats_xsksq(s, &channel_stats->xsksq);

                for (j = 0; j < priv->max_opened_tc; j++) {
                        mlx5e_stats_grp_sw_update_stats_sq(s, &channel_stats->sq[j]);

                        /* https://gcc.gnu.org/bugzilla/show_bug.cgi?id=92657 */
                        barrier();
                }
        }
        mlx5e_stats_grp_sw_update_stats_ptp(priv, s);
        mlx5e_stats_grp_sw_update_stats_qos(priv, s);
}

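/* Device queue (Q) counters, queried from firmware: out-of-buffer drops
 * on the regular RQs, and packets dropped while the interface is down
 * (accounted on the drop RQ).
 */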
static const struct counter_desc q_stats_desc[] = {
        { MLX5E_DECLARE_STAT(struct mlx5e_qcounter_stats, rx_out_of_buffer) },
};

static const struct counter_desc drop_rq_stats_desc[] = {
        { MLX5E_DECLARE_STAT(struct mlx5e_qcounter_stats, rx_if_down_packets) },
};

#define NUM_Q_COUNTERS                  ARRAY_SIZE(q_stats_desc)
#define NUM_DROP_RQ_COUNTERS            ARRAY_SIZE(drop_rq_stats_desc)

static MLX5E_DECLARE_STATS_GRP_OP_NUM_STATS(qcnt)
{
        int num_stats = 0;

        if (priv->q_counter)
                num_stats += NUM_Q_COUNTERS;

        if (priv->drop_rq_q_counter)
                num_stats += NUM_DROP_RQ_COUNTERS;

        return num_stats;
}

static MLX5E_DECLARE_STATS_GRP_OP_FILL_STRS(qcnt)
{
        int i;

        for (i = 0; i < NUM_Q_COUNTERS && priv->q_counter; i++)
                strcpy(data + (idx++) * ETH_GSTRING_LEN,
                       q_stats_desc[i].format);

        for (i = 0; i < NUM_DROP_RQ_COUNTERS && priv->drop_rq_q_counter; i++)
                strcpy(data + (idx++) * ETH_GSTRING_LEN,
                       drop_rq_stats_desc[i].format);

        return idx;
}

static MLX5E_DECLARE_STATS_GRP_OP_FILL_STATS(qcnt)
{
        int i;

        for (i = 0; i < NUM_Q_COUNTERS && priv->q_counter; i++)
                data[idx++] = MLX5E_READ_CTR32_CPU(&priv->stats.qcnt,
                                                   q_stats_desc, i);
        for (i = 0; i < NUM_DROP_RQ_COUNTERS && priv->drop_rq_q_counter; i++)
                data[idx++] = MLX5E_READ_CTR32_CPU(&priv->stats.qcnt,
                                                   drop_rq_stats_desc, i);
        return idx;
}

static MLX5E_DECLARE_STATS_GRP_OP_UPDATE_STATS(qcnt)
{
        struct mlx5e_qcounter_stats *qcnt = &priv->stats.qcnt;
        u32 out[MLX5_ST_SZ_DW(query_q_counter_out)] = {};
        u32 in[MLX5_ST_SZ_DW(query_q_counter_in)] = {};
        int ret;

        MLX5_SET(query_q_counter_in, in, opcode, MLX5_CMD_OP_QUERY_Q_COUNTER);

        if (priv->q_counter) {
                MLX5_SET(query_q_counter_in, in, counter_set_id,
                         priv->q_counter);
                ret = mlx5_cmd_exec_inout(priv->mdev, query_q_counter, in, out);
                if (!ret)
                        qcnt->rx_out_of_buffer = MLX5_GET(query_q_counter_out,
                                                          out, out_of_buffer);
        }

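        /* The drop RQ's counter set reuses the out_of_buffer field:
         * traffic that arrives while the channels are closed lands on
         * the drop RQ, so its overflow count is what gets exposed as
         * rx_if_down_packets.
         */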
        if (priv->drop_rq_q_counter) {
                MLX5_SET(query_q_counter_in, in, counter_set_id,
                         priv->drop_rq_q_counter);
                ret = mlx5_cmd_exec_inout(priv->mdev, query_q_counter, in, out);
                if (!ret)
                        qcnt->rx_if_down_packets = MLX5_GET(query_q_counter_out,
                                                            out, out_of_buffer);
        }
}

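/* vNIC environment counters, queried via the QUERY_VNIC_ENV command. */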
#define VNIC_ENV_OFF(c) MLX5_BYTE_OFF(query_vnic_env_out, c)
static const struct counter_desc vnic_env_stats_steer_desc[] = {
        { "rx_steer_missed_packets",
                VNIC_ENV_OFF(vport_env.nic_receive_steering_discard) },
};

static const struct counter_desc vnic_env_stats_dev_oob_desc[] = {
        { "dev_internal_queue_oob",
                VNIC_ENV_OFF(vport_env.internal_rq_out_of_buffer) },
};

#define NUM_VNIC_ENV_STEER_COUNTERS(dev) \
        (MLX5_CAP_GEN(dev, nic_receive_steering_discard) ? \
         ARRAY_SIZE(vnic_env_stats_steer_desc) : 0)
#define NUM_VNIC_ENV_DEV_OOB_COUNTERS(dev) \
        (MLX5_CAP_GEN(dev, vnic_env_int_rq_oob) ? \
         ARRAY_SIZE(vnic_env_stats_dev_oob_desc) : 0)

static MLX5E_DECLARE_STATS_GRP_OP_NUM_STATS(vnic_env)
{
        return NUM_VNIC_ENV_STEER_COUNTERS(priv->mdev) +
                NUM_VNIC_ENV_DEV_OOB_COUNTERS(priv->mdev);
}

static MLX5E_DECLARE_STATS_GRP_OP_FILL_STRS(vnic_env)
{
        int i;

        for (i = 0; i < NUM_VNIC_ENV_STEER_COUNTERS(priv->mdev); i++)
                strcpy(data + (idx++) * ETH_GSTRING_LEN,
                       vnic_env_stats_steer_desc[i].format);

        for (i = 0; i < NUM_VNIC_ENV_DEV_OOB_COUNTERS(priv->mdev); i++)
                strcpy(data + (idx++) * ETH_GSTRING_LEN,
                       vnic_env_stats_dev_oob_desc[i].format);
        return idx;
}

static MLX5E_DECLARE_STATS_GRP_OP_FILL_STATS(vnic_env)
{
        int i;

        for (i = 0; i < NUM_VNIC_ENV_STEER_COUNTERS(priv->mdev); i++)
                data[idx++] = MLX5E_READ_CTR64_BE(priv->stats.vnic.query_vnic_env_out,
                                                  vnic_env_stats_steer_desc, i);

        for (i = 0; i < NUM_VNIC_ENV_DEV_OOB_COUNTERS(priv->mdev); i++)
                data[idx++] = MLX5E_READ_CTR32_BE(priv->stats.vnic.query_vnic_env_out,
                                                  vnic_env_stats_dev_oob_desc, i);
        return idx;
}

static MLX5E_DECLARE_STATS_GRP_OP_UPDATE_STATS(vnic_env)
{
        u32 *out = (u32 *)priv->stats.vnic.query_vnic_env_out;
        u32 in[MLX5_ST_SZ_DW(query_vnic_env_in)] = {};
        struct mlx5_core_dev *mdev = priv->mdev;

        if (!MLX5_CAP_GEN(priv->mdev, nic_receive_steering_discard))
                return;

        MLX5_SET(query_vnic_env_in, in, opcode, MLX5_CMD_OP_QUERY_VNIC_ENV);
        mlx5_cmd_exec_inout(mdev, query_vnic_env, in, out);
}

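/* Per-vport traffic counters (64-bit big-endian), queried via the
 * QUERY_VPORT_COUNTER command.
 */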
#define VPORT_COUNTER_OFF(c) MLX5_BYTE_OFF(query_vport_counter_out, c)
static const struct counter_desc vport_stats_desc[] = {
        { "rx_vport_unicast_packets",
                VPORT_COUNTER_OFF(received_eth_unicast.packets) },
        { "rx_vport_unicast_bytes",
                VPORT_COUNTER_OFF(received_eth_unicast.octets) },
        { "tx_vport_unicast_packets",
                VPORT_COUNTER_OFF(transmitted_eth_unicast.packets) },
        { "tx_vport_unicast_bytes",
                VPORT_COUNTER_OFF(transmitted_eth_unicast.octets) },
        { "rx_vport_multicast_packets",
                VPORT_COUNTER_OFF(received_eth_multicast.packets) },
        { "rx_vport_multicast_bytes",
                VPORT_COUNTER_OFF(received_eth_multicast.octets) },
        { "tx_vport_multicast_packets",
                VPORT_COUNTER_OFF(transmitted_eth_multicast.packets) },
        { "tx_vport_multicast_bytes",
                VPORT_COUNTER_OFF(transmitted_eth_multicast.octets) },
        { "rx_vport_broadcast_packets",
                VPORT_COUNTER_OFF(received_eth_broadcast.packets) },
        { "rx_vport_broadcast_bytes",
                VPORT_COUNTER_OFF(received_eth_broadcast.octets) },
        { "tx_vport_broadcast_packets",
                VPORT_COUNTER_OFF(transmitted_eth_broadcast.packets) },
        { "tx_vport_broadcast_bytes",
                VPORT_COUNTER_OFF(transmitted_eth_broadcast.octets) },
        { "rx_vport_rdma_unicast_packets",
                VPORT_COUNTER_OFF(received_ib_unicast.packets) },
        { "rx_vport_rdma_unicast_bytes",
                VPORT_COUNTER_OFF(received_ib_unicast.octets) },
        { "tx_vport_rdma_unicast_packets",
                VPORT_COUNTER_OFF(transmitted_ib_unicast.packets) },
        { "tx_vport_rdma_unicast_bytes",
                VPORT_COUNTER_OFF(transmitted_ib_unicast.octets) },
        { "rx_vport_rdma_multicast_packets",
                VPORT_COUNTER_OFF(received_ib_multicast.packets) },
        { "rx_vport_rdma_multicast_bytes",
                VPORT_COUNTER_OFF(received_ib_multicast.octets) },
        { "tx_vport_rdma_multicast_packets",
                VPORT_COUNTER_OFF(transmitted_ib_multicast.packets) },
        { "tx_vport_rdma_multicast_bytes",
                VPORT_COUNTER_OFF(transmitted_ib_multicast.octets) },
};

#define NUM_VPORT_COUNTERS              ARRAY_SIZE(vport_stats_desc)

static MLX5E_DECLARE_STATS_GRP_OP_NUM_STATS(vport)
{
        return NUM_VPORT_COUNTERS;
}

static MLX5E_DECLARE_STATS_GRP_OP_FILL_STRS(vport)
{
        int i;

        for (i = 0; i < NUM_VPORT_COUNTERS; i++)
                strcpy(data + (idx++) * ETH_GSTRING_LEN, vport_stats_desc[i].format);
        return idx;
}

static MLX5E_DECLARE_STATS_GRP_OP_FILL_STATS(vport)
{
        int i;

        for (i = 0; i < NUM_VPORT_COUNTERS; i++)
                data[idx++] = MLX5E_READ_CTR64_BE(priv->stats.vport.query_vport_out,
                                                  vport_stats_desc, i);
        return idx;
}

static MLX5E_DECLARE_STATS_GRP_OP_UPDATE_STATS(vport)
{
        u32 *out = (u32 *)priv->stats.vport.query_vport_out;
        u32 in[MLX5_ST_SZ_DW(query_vport_counter_in)] = {};
        struct mlx5_core_dev *mdev = priv->mdev;

        MLX5_SET(query_vport_counter_in, in, opcode, MLX5_CMD_OP_QUERY_VPORT_COUNTER);
        mlx5_cmd_exec_inout(mdev, query_vport_counter, in, out);
}

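/* Physical port counters are read through the PPCNT access register,
 * one counter group per query. Each 64-bit counter is stored big-endian
 * as a {high, low} dword pair; the c##_high token resolves to the byte
 * offset of the start of the full 64-bit value.
 */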
#define PPORT_802_3_OFF(c) \
        MLX5_BYTE_OFF(ppcnt_reg, \
                      counter_set.eth_802_3_cntrs_grp_data_layout.c##_high)
static const struct counter_desc pport_802_3_stats_desc[] = {
        { "tx_packets_phy", PPORT_802_3_OFF(a_frames_transmitted_ok) },
        { "rx_packets_phy", PPORT_802_3_OFF(a_frames_received_ok) },
        { "rx_crc_errors_phy", PPORT_802_3_OFF(a_frame_check_sequence_errors) },
        { "tx_bytes_phy", PPORT_802_3_OFF(a_octets_transmitted_ok) },
        { "rx_bytes_phy", PPORT_802_3_OFF(a_octets_received_ok) },
        { "tx_multicast_phy", PPORT_802_3_OFF(a_multicast_frames_xmitted_ok) },
        { "tx_broadcast_phy", PPORT_802_3_OFF(a_broadcast_frames_xmitted_ok) },
        { "rx_multicast_phy", PPORT_802_3_OFF(a_multicast_frames_received_ok) },
        { "rx_broadcast_phy", PPORT_802_3_OFF(a_broadcast_frames_received_ok) },
        { "rx_in_range_len_errors_phy", PPORT_802_3_OFF(a_in_range_length_errors) },
        { "rx_out_of_range_len_phy", PPORT_802_3_OFF(a_out_of_range_length_field) },
        { "rx_oversize_pkts_phy", PPORT_802_3_OFF(a_frame_too_long_errors) },
        { "rx_symbol_err_phy", PPORT_802_3_OFF(a_symbol_error_during_carrier) },
        { "tx_mac_control_phy", PPORT_802_3_OFF(a_mac_control_frames_transmitted) },
        { "rx_mac_control_phy", PPORT_802_3_OFF(a_mac_control_frames_received) },
        { "rx_unsupported_op_phy", PPORT_802_3_OFF(a_unsupported_opcodes_received) },
        { "rx_pause_ctrl_phy", PPORT_802_3_OFF(a_pause_mac_ctrl_frames_received) },
        { "tx_pause_ctrl_phy", PPORT_802_3_OFF(a_pause_mac_ctrl_frames_transmitted) },
};

#define NUM_PPORT_802_3_COUNTERS        ARRAY_SIZE(pport_802_3_stats_desc)

static MLX5E_DECLARE_STATS_GRP_OP_NUM_STATS(802_3)
{
        return NUM_PPORT_802_3_COUNTERS;
}

static MLX5E_DECLARE_STATS_GRP_OP_FILL_STRS(802_3)
{
        int i;

        for (i = 0; i < NUM_PPORT_802_3_COUNTERS; i++)
                strcpy(data + (idx++) * ETH_GSTRING_LEN, pport_802_3_stats_desc[i].format);
        return idx;
}

static MLX5E_DECLARE_STATS_GRP_OP_FILL_STATS(802_3)
{
        int i;

        for (i = 0; i < NUM_PPORT_802_3_COUNTERS; i++)
                data[idx++] = MLX5E_READ_CTR64_BE(&priv->stats.pport.IEEE_802_3_counters,
                                                  pport_802_3_stats_desc, i);
        return idx;
}

#define MLX5_BASIC_PPCNT_SUPPORTED(mdev) \
        (MLX5_CAP_GEN(mdev, pcam_reg) ? MLX5_CAP_PCAM_REG(mdev, ppcnt) : 1)

static MLX5E_DECLARE_STATS_GRP_OP_UPDATE_STATS(802_3)
{
        struct mlx5e_pport_stats *pstats = &priv->stats.pport;
        struct mlx5_core_dev *mdev = priv->mdev;
        u32 in[MLX5_ST_SZ_DW(ppcnt_reg)] = {0};
        int sz = MLX5_ST_SZ_BYTES(ppcnt_reg);
        void *out;

        if (!MLX5_BASIC_PPCNT_SUPPORTED(mdev))
                return;

        MLX5_SET(ppcnt_reg, in, local_port, 1);
        out = pstats->IEEE_802_3_counters;
        MLX5_SET(ppcnt_reg, in, grp, MLX5_IEEE_802_3_COUNTERS_GROUP);
        mlx5_core_access_reg(mdev, in, sz, out, sz, MLX5_REG_PPCNT, 0, 0);
}

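/* Read one 64-bit big-endian counter straight out of a raw PPCNT query
 * buffer, given the counter set layout and field name.
 */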
#define MLX5E_READ_CTR64_BE_F(ptr, set, c)              \
        be64_to_cpu(*(__be64 *)((char *)ptr +           \
                MLX5_BYTE_OFF(ppcnt_reg,                \
                              counter_set.set.c##_high)))

static int mlx5e_stats_get_ieee(struct mlx5_core_dev *mdev,
                                u32 *ppcnt_ieee_802_3)
{
        u32 in[MLX5_ST_SZ_DW(ppcnt_reg)] = {};
        int sz = MLX5_ST_SZ_BYTES(ppcnt_reg);

        if (!MLX5_BASIC_PPCNT_SUPPORTED(mdev))
                return -EOPNOTSUPP;

        MLX5_SET(ppcnt_reg, in, local_port, 1);
        MLX5_SET(ppcnt_reg, in, grp, MLX5_IEEE_802_3_COUNTERS_GROUP);
        return mlx5_core_access_reg(mdev, in, sz, ppcnt_ieee_802_3,
                                    sz, MLX5_REG_PPCNT, 0, 0);
}

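/* The standard ethtool stats groups below (pause, PHY, MAC, control)
 * are served from a fresh IEEE 802.3 PPCNT query rather than from
 * cached state.
 */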
void mlx5e_stats_pause_get(struct mlx5e_priv *priv,
                           struct ethtool_pause_stats *pause_stats)
{
        u32 ppcnt_ieee_802_3[MLX5_ST_SZ_DW(ppcnt_reg)];
        struct mlx5_core_dev *mdev = priv->mdev;

        if (mlx5e_stats_get_ieee(mdev, ppcnt_ieee_802_3))
                return;

        pause_stats->tx_pause_frames =
                MLX5E_READ_CTR64_BE_F(ppcnt_ieee_802_3,
                                      eth_802_3_cntrs_grp_data_layout,
                                      a_pause_mac_ctrl_frames_transmitted);
        pause_stats->rx_pause_frames =
                MLX5E_READ_CTR64_BE_F(ppcnt_ieee_802_3,
                                      eth_802_3_cntrs_grp_data_layout,
                                      a_pause_mac_ctrl_frames_received);
}

void mlx5e_stats_eth_phy_get(struct mlx5e_priv *priv,
                             struct ethtool_eth_phy_stats *phy_stats)
{
        u32 ppcnt_ieee_802_3[MLX5_ST_SZ_DW(ppcnt_reg)];
        struct mlx5_core_dev *mdev = priv->mdev;

        if (mlx5e_stats_get_ieee(mdev, ppcnt_ieee_802_3))
                return;

        phy_stats->SymbolErrorDuringCarrier =
                MLX5E_READ_CTR64_BE_F(ppcnt_ieee_802_3,
                                      eth_802_3_cntrs_grp_data_layout,
                                      a_symbol_error_during_carrier);
}

void mlx5e_stats_eth_mac_get(struct mlx5e_priv *priv,
                             struct ethtool_eth_mac_stats *mac_stats)
{
        u32 ppcnt_ieee_802_3[MLX5_ST_SZ_DW(ppcnt_reg)];
        struct mlx5_core_dev *mdev = priv->mdev;

        if (mlx5e_stats_get_ieee(mdev, ppcnt_ieee_802_3))
                return;

#define RD(name)                                                        \
        MLX5E_READ_CTR64_BE_F(ppcnt_ieee_802_3,                         \
                              eth_802_3_cntrs_grp_data_layout,          \
                              name)

        mac_stats->FramesTransmittedOK  = RD(a_frames_transmitted_ok);
        mac_stats->FramesReceivedOK     = RD(a_frames_received_ok);
        mac_stats->FrameCheckSequenceErrors = RD(a_frame_check_sequence_errors);
        mac_stats->OctetsTransmittedOK  = RD(a_octets_transmitted_ok);
        mac_stats->OctetsReceivedOK     = RD(a_octets_received_ok);
        mac_stats->MulticastFramesXmittedOK = RD(a_multicast_frames_xmitted_ok);
        mac_stats->BroadcastFramesXmittedOK = RD(a_broadcast_frames_xmitted_ok);
        mac_stats->MulticastFramesReceivedOK = RD(a_multicast_frames_received_ok);
        mac_stats->BroadcastFramesReceivedOK = RD(a_broadcast_frames_received_ok);
        mac_stats->InRangeLengthErrors  = RD(a_in_range_length_errors);
        mac_stats->OutOfRangeLengthField = RD(a_out_of_range_length_field);
        mac_stats->FrameTooLongErrors   = RD(a_frame_too_long_errors);
#undef RD
}

void mlx5e_stats_eth_ctrl_get(struct mlx5e_priv *priv,
                              struct ethtool_eth_ctrl_stats *ctrl_stats)
{
        u32 ppcnt_ieee_802_3[MLX5_ST_SZ_DW(ppcnt_reg)];
        struct mlx5_core_dev *mdev = priv->mdev;

        if (mlx5e_stats_get_ieee(mdev, ppcnt_ieee_802_3))
                return;

        ctrl_stats->MACControlFramesTransmitted =
                MLX5E_READ_CTR64_BE_F(ppcnt_ieee_802_3,
                                      eth_802_3_cntrs_grp_data_layout,
                                      a_mac_control_frames_transmitted);
        ctrl_stats->MACControlFramesReceived =
                MLX5E_READ_CTR64_BE_F(ppcnt_ieee_802_3,
                                      eth_802_3_cntrs_grp_data_layout,
                                      a_mac_control_frames_received);
        ctrl_stats->UnsupportedOpcodesReceived =
                MLX5E_READ_CTR64_BE_F(ppcnt_ieee_802_3,
                                      eth_802_3_cntrs_grp_data_layout,
                                      a_unsupported_opcodes_received);
}

#define PPORT_2863_OFF(c) \
        MLX5_BYTE_OFF(ppcnt_reg, \
                      counter_set.eth_2863_cntrs_grp_data_layout.c##_high)
static const struct counter_desc pport_2863_stats_desc[] = {
        { "rx_discards_phy", PPORT_2863_OFF(if_in_discards) },
        { "tx_discards_phy", PPORT_2863_OFF(if_out_discards) },
        { "tx_errors_phy", PPORT_2863_OFF(if_out_errors) },
};

#define NUM_PPORT_2863_COUNTERS         ARRAY_SIZE(pport_2863_stats_desc)

static MLX5E_DECLARE_STATS_GRP_OP_NUM_STATS(2863)
{
        return NUM_PPORT_2863_COUNTERS;
}

static MLX5E_DECLARE_STATS_GRP_OP_FILL_STRS(2863)
{
        int i;

        for (i = 0; i < NUM_PPORT_2863_COUNTERS; i++)
                strcpy(data + (idx++) * ETH_GSTRING_LEN, pport_2863_stats_desc[i].format);
        return idx;
}

static MLX5E_DECLARE_STATS_GRP_OP_FILL_STATS(2863)
{
        int i;

        for (i = 0; i < NUM_PPORT_2863_COUNTERS; i++)
                data[idx++] = MLX5E_READ_CTR64_BE(&priv->stats.pport.RFC_2863_counters,
                                                  pport_2863_stats_desc, i);
        return idx;
}

static MLX5E_DECLARE_STATS_GRP_OP_UPDATE_STATS(2863)
{
        struct mlx5e_pport_stats *pstats = &priv->stats.pport;
        struct mlx5_core_dev *mdev = priv->mdev;
        u32 in[MLX5_ST_SZ_DW(ppcnt_reg)] = {0};
        int sz = MLX5_ST_SZ_BYTES(ppcnt_reg);
        void *out;

        MLX5_SET(ppcnt_reg, in, local_port, 1);
        out = pstats->RFC_2863_counters;
        MLX5_SET(ppcnt_reg, in, grp, MLX5_RFC_2863_COUNTERS_GROUP);
        mlx5_core_access_reg(mdev, in, sz, out, sz, MLX5_REG_PPCNT, 0, 0);
}

#define PPORT_2819_OFF(c) \
        MLX5_BYTE_OFF(ppcnt_reg, \
                      counter_set.eth_2819_cntrs_grp_data_layout.c##_high)
static const struct counter_desc pport_2819_stats_desc[] = {
        { "rx_undersize_pkts_phy", PPORT_2819_OFF(ether_stats_undersize_pkts) },
        { "rx_fragments_phy", PPORT_2819_OFF(ether_stats_fragments) },
        { "rx_jabbers_phy", PPORT_2819_OFF(ether_stats_jabbers) },
        { "rx_64_bytes_phy", PPORT_2819_OFF(ether_stats_pkts64octets) },
        { "rx_65_to_127_bytes_phy", PPORT_2819_OFF(ether_stats_pkts65to127octets) },
        { "rx_128_to_255_bytes_phy", PPORT_2819_OFF(ether_stats_pkts128to255octets) },
        { "rx_256_to_511_bytes_phy", PPORT_2819_OFF(ether_stats_pkts256to511octets) },
        { "rx_512_to_1023_bytes_phy", PPORT_2819_OFF(ether_stats_pkts512to1023octets) },
        { "rx_1024_to_1518_bytes_phy", PPORT_2819_OFF(ether_stats_pkts1024to1518octets) },
        { "rx_1519_to_2047_bytes_phy", PPORT_2819_OFF(ether_stats_pkts1519to2047octets) },
        { "rx_2048_to_4095_bytes_phy", PPORT_2819_OFF(ether_stats_pkts2048to4095octets) },
        { "rx_4096_to_8191_bytes_phy", PPORT_2819_OFF(ether_stats_pkts4096to8191octets) },
        { "rx_8192_to_10239_bytes_phy", PPORT_2819_OFF(ether_stats_pkts8192to10239octets) },
};

#define NUM_PPORT_2819_COUNTERS         ARRAY_SIZE(pport_2819_stats_desc)

static MLX5E_DECLARE_STATS_GRP_OP_NUM_STATS(2819)
{
        return NUM_PPORT_2819_COUNTERS;
}

static MLX5E_DECLARE_STATS_GRP_OP_FILL_STRS(2819)
{
        int i;

        for (i = 0; i < NUM_PPORT_2819_COUNTERS; i++)
                strcpy(data + (idx++) * ETH_GSTRING_LEN, pport_2819_stats_desc[i].format);
        return idx;
}

static MLX5E_DECLARE_STATS_GRP_OP_FILL_STATS(2819)
{
        int i;

        for (i = 0; i < NUM_PPORT_2819_COUNTERS; i++)
                data[idx++] = MLX5E_READ_CTR64_BE(&priv->stats.pport.RFC_2819_counters,
                                                  pport_2819_stats_desc, i);
        return idx;
}

static MLX5E_DECLARE_STATS_GRP_OP_UPDATE_STATS(2819)
{
        struct mlx5e_pport_stats *pstats = &priv->stats.pport;
        struct mlx5_core_dev *mdev = priv->mdev;
        u32 in[MLX5_ST_SZ_DW(ppcnt_reg)] = {0};
        int sz = MLX5_ST_SZ_BYTES(ppcnt_reg);
        void *out;

        if (!MLX5_BASIC_PPCNT_SUPPORTED(mdev))
                return;

        MLX5_SET(ppcnt_reg, in, local_port, 1);
        out = pstats->RFC_2819_counters;
        MLX5_SET(ppcnt_reg, in, grp, MLX5_RFC_2819_COUNTERS_GROUP);
        mlx5_core_access_reg(mdev, in, sz, out, sz, MLX5_REG_PPCNT, 0, 0);
}

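/* RMON packet-size histogram ranges; these must stay in sync with the
 * rmon->hist[] assignments in mlx5e_stats_rmon_get() below.
 */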
static const struct ethtool_rmon_hist_range mlx5e_rmon_ranges[] = {
        {    0,    64 },
        {   65,   127 },
        {  128,   255 },
        {  256,   511 },
        {  512,  1023 },
        { 1024,  1518 },
        { 1519,  2047 },
        { 2048,  4095 },
        { 4096,  8191 },
        { 8192, 10239 },
        {}
};

void mlx5e_stats_rmon_get(struct mlx5e_priv *priv,
                          struct ethtool_rmon_stats *rmon,
                          const struct ethtool_rmon_hist_range **ranges)
{
        u32 ppcnt_RFC_2819_counters[MLX5_ST_SZ_DW(ppcnt_reg)];
        struct mlx5_core_dev *mdev = priv->mdev;
        u32 in[MLX5_ST_SZ_DW(ppcnt_reg)] = {0};
        int sz = MLX5_ST_SZ_BYTES(ppcnt_reg);

        MLX5_SET(ppcnt_reg, in, local_port, 1);
        MLX5_SET(ppcnt_reg, in, grp, MLX5_RFC_2819_COUNTERS_GROUP);
        if (mlx5_core_access_reg(mdev, in, sz, ppcnt_RFC_2819_counters,
                                 sz, MLX5_REG_PPCNT, 0, 0))
                return;

#define RD(name)                                                \
        MLX5E_READ_CTR64_BE_F(ppcnt_RFC_2819_counters,          \
                              eth_2819_cntrs_grp_data_layout,   \
                              name)

        rmon->undersize_pkts    = RD(ether_stats_undersize_pkts);
        rmon->fragments         = RD(ether_stats_fragments);
        rmon->jabbers           = RD(ether_stats_jabbers);

        rmon->hist[0]           = RD(ether_stats_pkts64octets);
        rmon->hist[1]           = RD(ether_stats_pkts65to127octets);
        rmon->hist[2]           = RD(ether_stats_pkts128to255octets);
        rmon->hist[3]           = RD(ether_stats_pkts256to511octets);
        rmon->hist[4]           = RD(ether_stats_pkts512to1023octets);
        rmon->hist[5]           = RD(ether_stats_pkts1024to1518octets);
        rmon->hist[6]           = RD(ether_stats_pkts1519to2047octets);
        rmon->hist[7]           = RD(ether_stats_pkts2048to4095octets);
        rmon->hist[8]           = RD(ether_stats_pkts4096to8191octets);
        rmon->hist[9]           = RD(ether_stats_pkts8192to10239octets);
#undef RD

        *ranges = mlx5e_rmon_ranges;
}

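/* Physical layer counters. The statistical group (PCS symbol errors,
 * corrected bits, per-lane errors) is capability-gated; only the
 * link_down_events counter is exposed unconditionally.
 */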
1054#define PPORT_PHY_STATISTICAL_OFF(c) \
1055        MLX5_BYTE_OFF(ppcnt_reg, \
1056                      counter_set.phys_layer_statistical_cntrs.c##_high)
1057static const struct counter_desc pport_phy_statistical_stats_desc[] = {
1058        { "rx_pcs_symbol_err_phy", PPORT_PHY_STATISTICAL_OFF(phy_symbol_errors) },
1059        { "rx_corrected_bits_phy", PPORT_PHY_STATISTICAL_OFF(phy_corrected_bits) },
1060};
1061
1062static const struct counter_desc
1063pport_phy_statistical_err_lanes_stats_desc[] = {
1064        { "rx_err_lane_0_phy", PPORT_PHY_STATISTICAL_OFF(phy_corrected_bits_lane0) },
1065        { "rx_err_lane_1_phy", PPORT_PHY_STATISTICAL_OFF(phy_corrected_bits_lane1) },
1066        { "rx_err_lane_2_phy", PPORT_PHY_STATISTICAL_OFF(phy_corrected_bits_lane2) },
1067        { "rx_err_lane_3_phy", PPORT_PHY_STATISTICAL_OFF(phy_corrected_bits_lane3) },
1068};
1069
1070#define NUM_PPORT_PHY_STATISTICAL_COUNTERS \
1071        ARRAY_SIZE(pport_phy_statistical_stats_desc)
1072#define NUM_PPORT_PHY_STATISTICAL_PER_LANE_COUNTERS \
1073        ARRAY_SIZE(pport_phy_statistical_err_lanes_stats_desc)
1074
1075static MLX5E_DECLARE_STATS_GRP_OP_NUM_STATS(phy)
1076{
1077        struct mlx5_core_dev *mdev = priv->mdev;
1078        int num_stats;
1079
1080        /* "1" for link_down_events special counter */
1081        num_stats = 1;
1082
1083        num_stats += MLX5_CAP_PCAM_FEATURE(mdev, ppcnt_statistical_group) ?
1084                     NUM_PPORT_PHY_STATISTICAL_COUNTERS : 0;
1085
1086        num_stats += MLX5_CAP_PCAM_FEATURE(mdev, per_lane_error_counters) ?
1087                     NUM_PPORT_PHY_STATISTICAL_PER_LANE_COUNTERS : 0;
1088
1089        return num_stats;
1090}
1091
1092static MLX5E_DECLARE_STATS_GRP_OP_FILL_STRS(phy)
1093{
1094        struct mlx5_core_dev *mdev = priv->mdev;
1095        int i;
1096
1097        strcpy(data + (idx++) * ETH_GSTRING_LEN, "link_down_events_phy");
1098
1099        if (!MLX5_CAP_PCAM_FEATURE(mdev, ppcnt_statistical_group))
1100                return idx;
1101
1102        for (i = 0; i < NUM_PPORT_PHY_STATISTICAL_COUNTERS; i++)
1103                strcpy(data + (idx++) * ETH_GSTRING_LEN,
1104                       pport_phy_statistical_stats_desc[i].format);
1105
1106        if (MLX5_CAP_PCAM_FEATURE(mdev, per_lane_error_counters))
1107                for (i = 0; i < NUM_PPORT_PHY_STATISTICAL_PER_LANE_COUNTERS; i++)
1108                        strcpy(data + (idx++) * ETH_GSTRING_LEN,
1109                               pport_phy_statistical_err_lanes_stats_desc[i].format);
1110
1111        return idx;
1112}
1113
1114static MLX5E_DECLARE_STATS_GRP_OP_FILL_STATS(phy)
1115{
1116        struct mlx5_core_dev *mdev = priv->mdev;
1117        int i;
1118
1119        /* link_down_events_phy has special handling since it is not stored in __be64 format */
1120        data[idx++] = MLX5_GET(ppcnt_reg, priv->stats.pport.phy_counters,
1121                               counter_set.phys_layer_cntrs.link_down_events);
1122
1123        if (!MLX5_CAP_PCAM_FEATURE(mdev, ppcnt_statistical_group))
1124                return idx;
1125
1126        for (i = 0; i < NUM_PPORT_PHY_STATISTICAL_COUNTERS; i++)
1127                data[idx++] =
1128                        MLX5E_READ_CTR64_BE(&priv->stats.pport.phy_statistical_counters,
1129                                            pport_phy_statistical_stats_desc, i);
1130
1131        if (MLX5_CAP_PCAM_FEATURE(mdev, per_lane_error_counters))
1132                for (i = 0; i < NUM_PPORT_PHY_STATISTICAL_PER_LANE_COUNTERS; i++)
1133                        data[idx++] =
1134                                MLX5E_READ_CTR64_BE(&priv->stats.pport.phy_statistical_counters,
1135                                                    pport_phy_statistical_err_lanes_stats_desc,
1136                                                    i);
1137        return idx;
1138}
1139
1140static MLX5E_DECLARE_STATS_GRP_OP_UPDATE_STATS(phy)
1141{
1142        struct mlx5e_pport_stats *pstats = &priv->stats.pport;
1143        struct mlx5_core_dev *mdev = priv->mdev;
1144        u32 in[MLX5_ST_SZ_DW(ppcnt_reg)] = {0};
1145        int sz = MLX5_ST_SZ_BYTES(ppcnt_reg);
1146        void *out;
1147
1148        MLX5_SET(ppcnt_reg, in, local_port, 1);
1149        out = pstats->phy_counters;
1150        MLX5_SET(ppcnt_reg, in, grp, MLX5_PHYSICAL_LAYER_COUNTERS_GROUP);
1151        mlx5_core_access_reg(mdev, in, sz, out, sz, MLX5_REG_PPCNT, 0, 0);
1152
1153        if (!MLX5_CAP_PCAM_FEATURE(mdev, ppcnt_statistical_group))
1154                return;
1155
1156        out = pstats->phy_statistical_counters;
1157        MLX5_SET(ppcnt_reg, in, grp, MLX5_PHYSICAL_LAYER_STATISTICAL_GROUP);
1158        mlx5_core_access_reg(mdev, in, sz, out, sz, MLX5_REG_PPCNT, 0, 0);
1159}
1160
1161void mlx5e_stats_fec_get(struct mlx5e_priv *priv,
1162                         struct ethtool_fec_stats *fec_stats)
1163{
1164        u32 ppcnt_phy_statistical[MLX5_ST_SZ_DW(ppcnt_reg)];
1165        struct mlx5_core_dev *mdev = priv->mdev;
1166        u32 in[MLX5_ST_SZ_DW(ppcnt_reg)] = {0};
1167        int sz = MLX5_ST_SZ_BYTES(ppcnt_reg);
1168
1169        if (!MLX5_CAP_PCAM_FEATURE(mdev, ppcnt_statistical_group))
1170                return;
1171
1172        MLX5_SET(ppcnt_reg, in, local_port, 1);
1173        MLX5_SET(ppcnt_reg, in, grp, MLX5_PHYSICAL_LAYER_STATISTICAL_GROUP);
1174        if (mlx5_core_access_reg(mdev, in, sz, ppcnt_phy_statistical,
1175                                 sz, MLX5_REG_PPCNT, 0, 0))
1176                return;
1177
1178        fec_stats->corrected_bits.total =
1179                MLX5E_READ_CTR64_BE_F(ppcnt_phy_statistical,
1180                                      phys_layer_statistical_cntrs,
1181                                      phy_corrected_bits);
1182}
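
/*
 * Editor's sketch: conceptually, the MLX5E_READ_CTR64_BE{,_F} helpers
 * load a big-endian 64-bit word at a byte offset produced by
 * MLX5_BYTE_OFF() and convert it to host order.  A hypothetical
 * open-coded equivalent, assuming @payload points at the raw register
 * output:
 */
static inline u64 example_read_ctr64_be(const void *payload, size_t off)
{
        __be64 v;

        memcpy(&v, (const u8 *)payload + off, sizeof(v));
        return be64_to_cpu(v);
}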
1183
1184#define PPORT_ETH_EXT_OFF(c) \
1185        MLX5_BYTE_OFF(ppcnt_reg, \
1186                      counter_set.eth_extended_cntrs_grp_data_layout.c##_high)
1187static const struct counter_desc pport_eth_ext_stats_desc[] = {
1188        { "rx_buffer_passed_thres_phy", PPORT_ETH_EXT_OFF(rx_buffer_almost_full) },
1189};
1190
1191#define NUM_PPORT_ETH_EXT_COUNTERS      ARRAY_SIZE(pport_eth_ext_stats_desc)
1192
1193static MLX5E_DECLARE_STATS_GRP_OP_NUM_STATS(eth_ext)
1194{
1195        if (MLX5_CAP_PCAM_FEATURE((priv)->mdev, rx_buffer_fullness_counters))
1196                return NUM_PPORT_ETH_EXT_COUNTERS;
1197
1198        return 0;
1199}
1200
1201static MLX5E_DECLARE_STATS_GRP_OP_FILL_STRS(eth_ext)
1202{
1203        int i;
1204
1205        if (MLX5_CAP_PCAM_FEATURE((priv)->mdev, rx_buffer_fullness_counters))
1206                for (i = 0; i < NUM_PPORT_ETH_EXT_COUNTERS; i++)
1207                        strcpy(data + (idx++) * ETH_GSTRING_LEN,
1208                               pport_eth_ext_stats_desc[i].format);
1209        return idx;
1210}
1211
1212static MLX5E_DECLARE_STATS_GRP_OP_FILL_STATS(eth_ext)
1213{
1214        int i;
1215
1216        if (MLX5_CAP_PCAM_FEATURE((priv)->mdev, rx_buffer_fullness_counters))
1217                for (i = 0; i < NUM_PPORT_ETH_EXT_COUNTERS; i++)
1218                        data[idx++] =
1219                                MLX5E_READ_CTR64_BE(&priv->stats.pport.eth_ext_counters,
1220                                                    pport_eth_ext_stats_desc, i);
1221        return idx;
1222}
1223
1224static MLX5E_DECLARE_STATS_GRP_OP_UPDATE_STATS(eth_ext)
1225{
1226        struct mlx5e_pport_stats *pstats = &priv->stats.pport;
1227        struct mlx5_core_dev *mdev = priv->mdev;
1228        u32 in[MLX5_ST_SZ_DW(ppcnt_reg)] = {0};
1229        int sz = MLX5_ST_SZ_BYTES(ppcnt_reg);
1230        void *out;
1231
1232        if (!MLX5_CAP_PCAM_FEATURE(mdev, rx_buffer_fullness_counters))
1233                return;
1234
1235        MLX5_SET(ppcnt_reg, in, local_port, 1);
1236        out = pstats->eth_ext_counters;
1237        MLX5_SET(ppcnt_reg, in, grp, MLX5_ETHERNET_EXTENDED_COUNTERS_GROUP);
1238        mlx5_core_access_reg(mdev, in, sz, out, sz, MLX5_REG_PPCNT, 0, 0);
1239}
1240
1241#define PCIE_PERF_OFF(c) \
1242        MLX5_BYTE_OFF(mpcnt_reg, counter_set.pcie_perf_cntrs_grp_data_layout.c)
1243static const struct counter_desc pcie_perf_stats_desc[] = {
1244        { "rx_pci_signal_integrity", PCIE_PERF_OFF(rx_errors) },
1245        { "tx_pci_signal_integrity", PCIE_PERF_OFF(tx_errors) },
1246};
1247
1248#define PCIE_PERF_OFF64(c) \
1249        MLX5_BYTE_OFF(mpcnt_reg, counter_set.pcie_perf_cntrs_grp_data_layout.c##_high)
1250static const struct counter_desc pcie_perf_stats_desc64[] = {
1251        { "outbound_pci_buffer_overflow", PCIE_PERF_OFF64(tx_overflow_buffer_pkt) },
1252};
1253
1254static const struct counter_desc pcie_perf_stall_stats_desc[] = {
1255        { "outbound_pci_stalled_rd", PCIE_PERF_OFF(outbound_stalled_reads) },
1256        { "outbound_pci_stalled_wr", PCIE_PERF_OFF(outbound_stalled_writes) },
1257        { "outbound_pci_stalled_rd_events", PCIE_PERF_OFF(outbound_stalled_reads_events) },
1258        { "outbound_pci_stalled_wr_events", PCIE_PERF_OFF(outbound_stalled_writes_events) },
1259};
1260
1261#define NUM_PCIE_PERF_COUNTERS          ARRAY_SIZE(pcie_perf_stats_desc)
1262#define NUM_PCIE_PERF_COUNTERS64        ARRAY_SIZE(pcie_perf_stats_desc64)
1263#define NUM_PCIE_PERF_STALL_COUNTERS    ARRAY_SIZE(pcie_perf_stall_stats_desc)
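
/*
 * Editor's note: PCIE_PERF_OFF64() appends "_high" because 64-bit mpcnt
 * counters are laid out as adjacent <name>_high/<name>_low 32-bit words,
 * e.g. (hypothetical sketch of the layout):
 *
 *      __be32 tx_overflow_buffer_pkt_high;     at offset
 *      __be32 tx_overflow_buffer_pkt_low;      at offset + 4
 *
 * Recording the offset of the _high half lets MLX5E_READ_CTR64_BE fetch
 * both words as one big-endian quadword.
 */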
1264
1265static MLX5E_DECLARE_STATS_GRP_OP_NUM_STATS(pcie)
1266{
1267        int num_stats = 0;
1268
1269        if (MLX5_CAP_MCAM_FEATURE((priv)->mdev, pcie_performance_group))
1270                num_stats += NUM_PCIE_PERF_COUNTERS;
1271
1272        if (MLX5_CAP_MCAM_FEATURE((priv)->mdev, tx_overflow_buffer_pkt))
1273                num_stats += NUM_PCIE_PERF_COUNTERS64;
1274
1275        if (MLX5_CAP_MCAM_FEATURE((priv)->mdev, pcie_outbound_stalled))
1276                num_stats += NUM_PCIE_PERF_STALL_COUNTERS;
1277
1278        return num_stats;
1279}
1280
1281static MLX5E_DECLARE_STATS_GRP_OP_FILL_STRS(pcie)
1282{
1283        int i;
1284
1285        if (MLX5_CAP_MCAM_FEATURE((priv)->mdev, pcie_performance_group))
1286                for (i = 0; i < NUM_PCIE_PERF_COUNTERS; i++)
1287                        strcpy(data + (idx++) * ETH_GSTRING_LEN,
1288                               pcie_perf_stats_desc[i].format);
1289
1290        if (MLX5_CAP_MCAM_FEATURE((priv)->mdev, tx_overflow_buffer_pkt))
1291                for (i = 0; i < NUM_PCIE_PERF_COUNTERS64; i++)
1292                        strcpy(data + (idx++) * ETH_GSTRING_LEN,
1293                               pcie_perf_stats_desc64[i].format);
1294
1295        if (MLX5_CAP_MCAM_FEATURE((priv)->mdev, pcie_outbound_stalled))
1296                for (i = 0; i < NUM_PCIE_PERF_STALL_COUNTERS; i++)
1297                        strcpy(data + (idx++) * ETH_GSTRING_LEN,
1298                               pcie_perf_stall_stats_desc[i].format);
1299        return idx;
1300}
1301
1302static MLX5E_DECLARE_STATS_GRP_OP_FILL_STATS(pcie)
1303{
1304        int i;
1305
1306        if (MLX5_CAP_MCAM_FEATURE((priv)->mdev, pcie_performance_group))
1307                for (i = 0; i < NUM_PCIE_PERF_COUNTERS; i++)
1308                        data[idx++] =
1309                                MLX5E_READ_CTR32_BE(&priv->stats.pcie.pcie_perf_counters,
1310                                                    pcie_perf_stats_desc, i);
1311
1312        if (MLX5_CAP_MCAM_FEATURE((priv)->mdev, tx_overflow_buffer_pkt))
1313                for (i = 0; i < NUM_PCIE_PERF_COUNTERS64; i++)
1314                        data[idx++] =
1315                                MLX5E_READ_CTR64_BE(&priv->stats.pcie.pcie_perf_counters,
1316                                                    pcie_perf_stats_desc64, i);
1317
1318        if (MLX5_CAP_MCAM_FEATURE((priv)->mdev, pcie_outbound_stalled))
1319                for (i = 0; i < NUM_PCIE_PERF_STALL_COUNTERS; i++)
1320                        data[idx++] =
1321                                MLX5E_READ_CTR32_BE(&priv->stats.pcie.pcie_perf_counters,
1322                                                    pcie_perf_stall_stats_desc, i);
1323        return idx;
1324}
1325
1326static MLX5E_DECLARE_STATS_GRP_OP_UPDATE_STATS(pcie)
1327{
1328        struct mlx5e_pcie_stats *pcie_stats = &priv->stats.pcie;
1329        struct mlx5_core_dev *mdev = priv->mdev;
1330        u32 in[MLX5_ST_SZ_DW(mpcnt_reg)] = {0};
1331        int sz = MLX5_ST_SZ_BYTES(mpcnt_reg);
1332        void *out;
1333
1334        if (!MLX5_CAP_MCAM_FEATURE(mdev, pcie_performance_group))
1335                return;
1336
1337        out = pcie_stats->pcie_perf_counters;
1338        MLX5_SET(mpcnt_reg, in, grp, MLX5_PCIE_PERFORMANCE_COUNTERS_GROUP);
1339        mlx5_core_access_reg(mdev, in, sz, out, sz, MLX5_REG_MPCNT, 0, 0);
1340}
1341
1342#define PPORT_PER_TC_PRIO_OFF(c) \
1343        MLX5_BYTE_OFF(ppcnt_reg, \
1344                      counter_set.eth_per_tc_prio_grp_data_layout.c##_high)
1345
1346static const struct counter_desc pport_per_tc_prio_stats_desc[] = {
1347        { "rx_prio%d_buf_discard", PPORT_PER_TC_PRIO_OFF(no_buffer_discard_uc) },
1348};
1349
1350#define NUM_PPORT_PER_TC_PRIO_COUNTERS  ARRAY_SIZE(pport_per_tc_prio_stats_desc)
1351
1352#define PPORT_PER_TC_CONGEST_PRIO_OFF(c) \
1353        MLX5_BYTE_OFF(ppcnt_reg, \
1354                      counter_set.eth_per_tc_congest_prio_grp_data_layout.c##_high)
1355
1356static const struct counter_desc pport_per_tc_congest_prio_stats_desc[] = {
1357        { "rx_prio%d_cong_discard", PPORT_PER_TC_CONGEST_PRIO_OFF(wred_discard) },
1358        { "rx_prio%d_marked", PPORT_PER_TC_CONGEST_PRIO_OFF(ecn_marked_tc) },
1359};
1360
1361#define NUM_PPORT_PER_TC_CONGEST_PRIO_COUNTERS \
1362        ARRAY_SIZE(pport_per_tc_congest_prio_stats_desc)
1363
1364static int mlx5e_grp_per_tc_prio_get_num_stats(struct mlx5e_priv *priv)
1365{
1366        struct mlx5_core_dev *mdev = priv->mdev;
1367
1368        if (!MLX5_CAP_GEN(mdev, sbcam_reg))
1369                return 0;
1370
1371        return NUM_PPORT_PER_TC_PRIO_COUNTERS * NUM_PPORT_PRIO;
1372}
1373
1374static MLX5E_DECLARE_STATS_GRP_OP_FILL_STRS(per_port_buff_congest)
1375{
1376        struct mlx5_core_dev *mdev = priv->mdev;
1377        int i, prio;
1378
1379        if (!MLX5_CAP_GEN(mdev, sbcam_reg))
1380                return idx;
1381
1382        for (prio = 0; prio < NUM_PPORT_PRIO; prio++) {
1383                for (i = 0; i < NUM_PPORT_PER_TC_PRIO_COUNTERS; i++)
1384                        sprintf(data + (idx++) * ETH_GSTRING_LEN,
1385                                pport_per_tc_prio_stats_desc[i].format, prio);
1386                for (i = 0; i < NUM_PPORT_PER_TC_CONGEST_PRIO_COUNTERS; i++)
1387                        sprintf(data + (idx++) * ETH_GSTRING_LEN,
1388                                pport_per_tc_congest_prio_stats_desc[i].format, prio);
1389        }
1390
1391        return idx;
1392}
1393
1394static MLX5E_DECLARE_STATS_GRP_OP_FILL_STATS(per_port_buff_congest)
1395{
1396        struct mlx5e_pport_stats *pport = &priv->stats.pport;
1397        struct mlx5_core_dev *mdev = priv->mdev;
1398        int i, prio;
1399
1400        if (!MLX5_CAP_GEN(mdev, sbcam_reg))
1401                return idx;
1402
1403        for (prio = 0; prio < NUM_PPORT_PRIO; prio++) {
1404                for (i = 0; i < NUM_PPORT_PER_TC_PRIO_COUNTERS; i++)
1405                        data[idx++] =
1406                                MLX5E_READ_CTR64_BE(&pport->per_tc_prio_counters[prio],
1407                                                    pport_per_tc_prio_stats_desc, i);
1408                for (i = 0; i < NUM_PPORT_PER_TC_CONGEST_PRIO_COUNTERS; i++)
1409                        data[idx++] =
1410                                MLX5E_READ_CTR64_BE(&pport->per_tc_congest_prio_counters[prio],
1411                                                    pport_per_tc_congest_prio_stats_desc, i);
1412        }
1413
1414        return idx;
1415}
1416
1417static void mlx5e_grp_per_tc_prio_update_stats(struct mlx5e_priv *priv)
1418{
1419        struct mlx5e_pport_stats *pstats = &priv->stats.pport;
1420        struct mlx5_core_dev *mdev = priv->mdev;
1421        u32 in[MLX5_ST_SZ_DW(ppcnt_reg)] = {};
1422        int sz = MLX5_ST_SZ_BYTES(ppcnt_reg);
1423        void *out;
1424        int prio;
1425
1426        if (!MLX5_CAP_GEN(mdev, sbcam_reg))
1427                return;
1428
1429        MLX5_SET(ppcnt_reg, in, pnat, 2);
1430        MLX5_SET(ppcnt_reg, in, grp, MLX5_PER_TRAFFIC_CLASS_COUNTERS_GROUP);
1431        for (prio = 0; prio < NUM_PPORT_PRIO; prio++) {
1432                out = pstats->per_tc_prio_counters[prio];
1433                MLX5_SET(ppcnt_reg, in, prio_tc, prio);
1434                mlx5_core_access_reg(mdev, in, sz, out, sz, MLX5_REG_PPCNT, 0, 0);
1435        }
1436}
1437
1438static int mlx5e_grp_per_tc_congest_prio_get_num_stats(struct mlx5e_priv *priv)
1439{
1440        struct mlx5_core_dev *mdev = priv->mdev;
1441
1442        if (!MLX5_CAP_GEN(mdev, sbcam_reg))
1443                return 0;
1444
1445        return NUM_PPORT_PER_TC_CONGEST_PRIO_COUNTERS * NUM_PPORT_PRIO;
1446}
1447
1448static void mlx5e_grp_per_tc_congest_prio_update_stats(struct mlx5e_priv *priv)
1449{
1450        struct mlx5e_pport_stats *pstats = &priv->stats.pport;
1451        struct mlx5_core_dev *mdev = priv->mdev;
1452        u32 in[MLX5_ST_SZ_DW(ppcnt_reg)] = {};
1453        int sz = MLX5_ST_SZ_BYTES(ppcnt_reg);
1454        void *out;
1455        int prio;
1456
1457        if (!MLX5_CAP_GEN(mdev, sbcam_reg))
1458                return;
1459
1460        MLX5_SET(ppcnt_reg, in, pnat, 2);
1461        MLX5_SET(ppcnt_reg, in, grp, MLX5_PER_TRAFFIC_CLASS_CONGESTION_GROUP);
1462        for (prio = 0; prio < NUM_PPORT_PRIO; prio++) {
1463                out = pstats->per_tc_congest_prio_counters[prio];
1464                MLX5_SET(ppcnt_reg, in, prio_tc, prio);
1465                mlx5_core_access_reg(mdev, in, sz, out, sz, MLX5_REG_PPCNT, 0, 0);
1466        }
1467}
1468
1469static MLX5E_DECLARE_STATS_GRP_OP_NUM_STATS(per_port_buff_congest)
1470{
1471        return mlx5e_grp_per_tc_prio_get_num_stats(priv) +
1472                mlx5e_grp_per_tc_congest_prio_get_num_stats(priv);
1473}
1474
1475static MLX5E_DECLARE_STATS_GRP_OP_UPDATE_STATS(per_port_buff_congest)
1476{
1477        mlx5e_grp_per_tc_prio_update_stats(priv);
1478        mlx5e_grp_per_tc_congest_prio_update_stats(priv);
1479}
1480
1481#define PPORT_PER_PRIO_OFF(c) \
1482        MLX5_BYTE_OFF(ppcnt_reg, \
1483                      counter_set.eth_per_prio_grp_data_layout.c##_high)
1484static const struct counter_desc pport_per_prio_traffic_stats_desc[] = {
1485        { "rx_prio%d_bytes", PPORT_PER_PRIO_OFF(rx_octets) },
1486        { "rx_prio%d_packets", PPORT_PER_PRIO_OFF(rx_frames) },
1487        { "rx_prio%d_discards", PPORT_PER_PRIO_OFF(rx_discards) },
1488        { "tx_prio%d_bytes", PPORT_PER_PRIO_OFF(tx_octets) },
1489        { "tx_prio%d_packets", PPORT_PER_PRIO_OFF(tx_frames) },
1490};
1491
1492#define NUM_PPORT_PER_PRIO_TRAFFIC_COUNTERS     ARRAY_SIZE(pport_per_prio_traffic_stats_desc)
1493
1494static int mlx5e_grp_per_prio_traffic_get_num_stats(void)
1495{
1496        return NUM_PPORT_PER_PRIO_TRAFFIC_COUNTERS * NUM_PPORT_PRIO;
1497}
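
/*
 * Editor's example: with NUM_PPORT_PER_PRIO_TRAFFIC_COUNTERS == 5 and
 * (assuming) NUM_PPORT_PRIO == 8, this group contributes 40 entries,
 * named by expanding the formats above, e.g. "rx_prio3_bytes".
 */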
1498
1499static int mlx5e_grp_per_prio_traffic_fill_strings(struct mlx5e_priv *priv,
1500                                                   u8 *data,
1501                                                   int idx)
1502{
1503        int i, prio;
1504
1505        for (prio = 0; prio < NUM_PPORT_PRIO; prio++) {
1506                for (i = 0; i < NUM_PPORT_PER_PRIO_TRAFFIC_COUNTERS; i++)
1507                        sprintf(data + (idx++) * ETH_GSTRING_LEN,
1508                                pport_per_prio_traffic_stats_desc[i].format, prio);
1509        }
1510
1511        return idx;
1512}
1513
1514static int mlx5e_grp_per_prio_traffic_fill_stats(struct mlx5e_priv *priv,
1515                                                 u64 *data,
1516                                                 int idx)
1517{
1518        int i, prio;
1519
1520        for (prio = 0; prio < NUM_PPORT_PRIO; prio++) {
1521                for (i = 0; i < NUM_PPORT_PER_PRIO_TRAFFIC_COUNTERS; i++)
1522                        data[idx++] =
1523                                MLX5E_READ_CTR64_BE(&priv->stats.pport.per_prio_counters[prio],
1524                                                    pport_per_prio_traffic_stats_desc, i);
1525        }
1526
1527        return idx;
1528}
1529
1530static const struct counter_desc pport_per_prio_pfc_stats_desc[] = {
1531        /* %s is "global" or "prio{i}" */
1532        { "rx_%s_pause", PPORT_PER_PRIO_OFF(rx_pause) },
1533        { "rx_%s_pause_duration", PPORT_PER_PRIO_OFF(rx_pause_duration) },
1534        { "tx_%s_pause", PPORT_PER_PRIO_OFF(tx_pause) },
1535        { "tx_%s_pause_duration", PPORT_PER_PRIO_OFF(tx_pause_duration) },
1536        { "rx_%s_pause_transition", PPORT_PER_PRIO_OFF(rx_pause_transition) },
1537};
1538
1539static const struct counter_desc pport_pfc_stall_stats_desc[] = {
1540        { "tx_pause_storm_warning_events", PPORT_PER_PRIO_OFF(device_stall_minor_watermark_cnt) },
1541        { "tx_pause_storm_error_events", PPORT_PER_PRIO_OFF(device_stall_critical_watermark_cnt) },
1542};
1543
1544#define NUM_PPORT_PER_PRIO_PFC_COUNTERS         ARRAY_SIZE(pport_per_prio_pfc_stats_desc)
1545#define NUM_PPORT_PFC_STALL_COUNTERS(priv)      (ARRAY_SIZE(pport_pfc_stall_stats_desc) * \
1546                                                 MLX5_CAP_PCAM_FEATURE((priv)->mdev, pfcc_mask) * \
1547                                                 MLX5_CAP_DEBUG((priv)->mdev, stall_detect))
1548
1549static unsigned long mlx5e_query_pfc_combined(struct mlx5e_priv *priv)
1550{
1551        struct mlx5_core_dev *mdev = priv->mdev;
1552        u8 pfc_en_tx;
1553        u8 pfc_en_rx;
1554        int err;
1555
1556        if (MLX5_CAP_GEN(mdev, port_type) != MLX5_CAP_PORT_TYPE_ETH)
1557                return 0;
1558
1559        err = mlx5_query_port_pfc(mdev, &pfc_en_tx, &pfc_en_rx);
1560
1561        return err ? 0 : pfc_en_tx | pfc_en_rx;
1562}
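
/*
 * Editor's example: pfc_en_tx and pfc_en_rx are per-priority bitmaps, so
 * OR-ing them yields the priorities with PFC enabled in either direction:
 *
 *      pfc_en_tx = 0x05 (prio 0, 2), pfc_en_rx = 0x06 (prio 1, 2)
 *      => combined = 0x07; hweight8(0x07) == 3 priorities reported
 */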
1563
1564static bool mlx5e_query_global_pause_combined(struct mlx5e_priv *priv)
1565{
1566        struct mlx5_core_dev *mdev = priv->mdev;
1567        u32 rx_pause;
1568        u32 tx_pause;
1569        int err;
1570
1571        if (MLX5_CAP_GEN(mdev, port_type) != MLX5_CAP_PORT_TYPE_ETH)
1572                return false;
1573
1574        err = mlx5_query_port_pause(mdev, &rx_pause, &tx_pause);
1575
1576        return err ? false : rx_pause | tx_pause;
1577}
1578
1579static int mlx5e_grp_per_prio_pfc_get_num_stats(struct mlx5e_priv *priv)
1580{
1581        return (mlx5e_query_global_pause_combined(priv) +
1582                hweight8(mlx5e_query_pfc_combined(priv))) *
1583                NUM_PPORT_PER_PRIO_PFC_COUNTERS +
1584                NUM_PPORT_PFC_STALL_COUNTERS(priv);
1585}
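
/*
 * Editor's example: with global pause enabled, PFC active on three
 * priorities, and both stall-detect capability bits set, this returns
 * (1 + 3) * NUM_PPORT_PER_PRIO_PFC_COUNTERS + 2 = (1 + 3) * 5 + 2 = 22.
 */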
1586
1587static int mlx5e_grp_per_prio_pfc_fill_strings(struct mlx5e_priv *priv,
1588                                               u8 *data,
1589                                               int idx)
1590{
1591        unsigned long pfc_combined;
1592        int i, prio;
1593
1594        pfc_combined = mlx5e_query_pfc_combined(priv);
1595        for_each_set_bit(prio, &pfc_combined, NUM_PPORT_PRIO) {
1596                for (i = 0; i < NUM_PPORT_PER_PRIO_PFC_COUNTERS; i++) {
1597                        char pfc_string[ETH_GSTRING_LEN];
1598
1599                        snprintf(pfc_string, sizeof(pfc_string), "prio%d", prio);
1600                        sprintf(data + (idx++) * ETH_GSTRING_LEN,
1601                                pport_per_prio_pfc_stats_desc[i].format, pfc_string);
1602                }
1603        }
1604
1605        if (mlx5e_query_global_pause_combined(priv)) {
1606                for (i = 0; i < NUM_PPORT_PER_PRIO_PFC_COUNTERS; i++) {
1607                        sprintf(data + (idx++) * ETH_GSTRING_LEN,
1608                                pport_per_prio_pfc_stats_desc[i].format, "global");
1609                }
1610        }
1611
1612        for (i = 0; i < NUM_PPORT_PFC_STALL_COUNTERS(priv); i++)
1613                strcpy(data + (idx++) * ETH_GSTRING_LEN,
1614                       pport_pfc_stall_stats_desc[i].format);
1615
1616        return idx;
1617}
1618
1619static int mlx5e_grp_per_prio_pfc_fill_stats(struct mlx5e_priv *priv,
1620                                             u64 *data,
1621                                             int idx)
1622{
1623        unsigned long pfc_combined;
1624        int i, prio;
1625
1626        pfc_combined = mlx5e_query_pfc_combined(priv);
1627        for_each_set_bit(prio, &pfc_combined, NUM_PPORT_PRIO) {
1628                for (i = 0; i < NUM_PPORT_PER_PRIO_PFC_COUNTERS; i++) {
1629                        data[idx++] =
1630                                MLX5E_READ_CTR64_BE(&priv->stats.pport.per_prio_counters[prio],
1631                                                    pport_per_prio_pfc_stats_desc, i);
1632                }
1633        }
1634
1635        if (mlx5e_query_global_pause_combined(priv)) {
1636                for (i = 0; i < NUM_PPORT_PER_PRIO_PFC_COUNTERS; i++) {
1637                        data[idx++] =
1638                                MLX5E_READ_CTR64_BE(&priv->stats.pport.per_prio_counters[0],
1639                                                    pport_per_prio_pfc_stats_desc, i);
1640                }
1641        }
1642
1643        for (i = 0; i < NUM_PPORT_PFC_STALL_COUNTERS(priv); i++)
1644                data[idx++] = MLX5E_READ_CTR64_BE(&priv->stats.pport.per_prio_counters[0],
1645                                                  pport_pfc_stall_stats_desc, i);
1646
1647        return idx;
1648}
1649
1650static MLX5E_DECLARE_STATS_GRP_OP_NUM_STATS(per_prio)
1651{
1652        return mlx5e_grp_per_prio_traffic_get_num_stats() +
1653                mlx5e_grp_per_prio_pfc_get_num_stats(priv);
1654}
1655
1656static MLX5E_DECLARE_STATS_GRP_OP_FILL_STRS(per_prio)
1657{
1658        idx = mlx5e_grp_per_prio_traffic_fill_strings(priv, data, idx);
1659        idx = mlx5e_grp_per_prio_pfc_fill_strings(priv, data, idx);
1660        return idx;
1661}
1662
1663static MLX5E_DECLARE_STATS_GRP_OP_FILL_STATS(per_prio)
1664{
1665        idx = mlx5e_grp_per_prio_traffic_fill_stats(priv, data, idx);
1666        idx = mlx5e_grp_per_prio_pfc_fill_stats(priv, data, idx);
1667        return idx;
1668}
1669
1670static MLX5E_DECLARE_STATS_GRP_OP_UPDATE_STATS(per_prio)
1671{
1672        struct mlx5e_pport_stats *pstats = &priv->stats.pport;
1673        struct mlx5_core_dev *mdev = priv->mdev;
1674        u32 in[MLX5_ST_SZ_DW(ppcnt_reg)] = {0};
1675        int sz = MLX5_ST_SZ_BYTES(ppcnt_reg);
1676        int prio;
1677        void *out;
1678
1679        if (!MLX5_BASIC_PPCNT_SUPPORTED(mdev))
1680                return;
1681
1682        MLX5_SET(ppcnt_reg, in, local_port, 1);
1683        MLX5_SET(ppcnt_reg, in, grp, MLX5_PER_PRIORITY_COUNTERS_GROUP);
1684        for (prio = 0; prio < NUM_PPORT_PRIO; prio++) {
1685                out = pstats->per_prio_counters[prio];
1686                MLX5_SET(ppcnt_reg, in, prio_tc, prio);
1687                mlx5_core_access_reg(mdev, in, sz, out, sz,
1688                                     MLX5_REG_PPCNT, 0, 0);
1689        }
1690}
1691
1692static const struct counter_desc mlx5e_pme_status_desc[] = {
1693        { "module_unplug",       sizeof(u64) * MLX5_MODULE_STATUS_UNPLUGGED },
1694};
1695
1696static const struct counter_desc mlx5e_pme_error_desc[] = {
1697        { "module_bus_stuck",    sizeof(u64) * MLX5_MODULE_EVENT_ERROR_BUS_STUCK },
1698        { "module_high_temp",    sizeof(u64) * MLX5_MODULE_EVENT_ERROR_HIGH_TEMPERATURE },
1699        { "module_bad_shorted",  sizeof(u64) * MLX5_MODULE_EVENT_ERROR_BAD_CABLE },
1700};
1701
1702#define NUM_PME_STATUS_STATS            ARRAY_SIZE(mlx5e_pme_status_desc)
1703#define NUM_PME_ERR_STATS               ARRAY_SIZE(mlx5e_pme_error_desc)
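
/*
 * Editor's note: unlike the big-endian register dumps above, the port
 * module-event counters are host-order u64 arrays indexed by event type,
 * so each descriptor's offset is simply sizeof(u64) * <enum value> and
 * the values are read with MLX5E_READ_CTR64_CPU below.
 */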
1704
1705static MLX5E_DECLARE_STATS_GRP_OP_NUM_STATS(pme)
1706{
1707        return NUM_PME_STATUS_STATS + NUM_PME_ERR_STATS;
1708}
1709
1710static MLX5E_DECLARE_STATS_GRP_OP_FILL_STRS(pme)
1711{
1712        int i;
1713
1714        for (i = 0; i < NUM_PME_STATUS_STATS; i++)
1715                strcpy(data + (idx++) * ETH_GSTRING_LEN, mlx5e_pme_status_desc[i].format);
1716
1717        for (i = 0; i < NUM_PME_ERR_STATS; i++)
1718                strcpy(data + (idx++) * ETH_GSTRING_LEN, mlx5e_pme_error_desc[i].format);
1719
1720        return idx;
1721}
1722
1723static MLX5E_DECLARE_STATS_GRP_OP_FILL_STATS(pme)
1724{
1725        struct mlx5_pme_stats pme_stats;
1726        int i;
1727
1728        mlx5_get_pme_stats(priv->mdev, &pme_stats);
1729
1730        for (i = 0; i < NUM_PME_STATUS_STATS; i++)
1731                data[idx++] = MLX5E_READ_CTR64_CPU(pme_stats.status_counters,
1732                                                   mlx5e_pme_status_desc, i);
1733
1734        for (i = 0; i < NUM_PME_ERR_STATS; i++)
1735                data[idx++] = MLX5E_READ_CTR64_CPU(pme_stats.error_counters,
1736                                                   mlx5e_pme_error_desc, i);
1737
1738        return idx;
1739}
1740
1741static MLX5E_DECLARE_STATS_GRP_OP_UPDATE_STATS(pme) { return; }
1742
1743static MLX5E_DECLARE_STATS_GRP_OP_NUM_STATS(tls)
1744{
1745        return mlx5e_tls_get_count(priv);
1746}
1747
1748static MLX5E_DECLARE_STATS_GRP_OP_FILL_STRS(tls)
1749{
1750        return idx + mlx5e_tls_get_strings(priv, data + idx * ETH_GSTRING_LEN);
1751}
1752
1753static MLX5E_DECLARE_STATS_GRP_OP_FILL_STATS(tls)
1754{
1755        return idx + mlx5e_tls_get_stats(priv, data + idx);
1756}
1757
1758static MLX5E_DECLARE_STATS_GRP_OP_UPDATE_STATS(tls) { return; }
1759
1760static const struct counter_desc rq_stats_desc[] = {
1761        { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, packets) },
1762        { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, bytes) },
1763        { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, csum_complete) },
1764        { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, csum_complete_tail) },
1765        { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, csum_complete_tail_slow) },
1766        { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, csum_unnecessary) },
1767        { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, csum_unnecessary_inner) },
1768        { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, csum_none) },
1769        { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, xdp_drop) },
1770        { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, xdp_redirect) },
1771        { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, lro_packets) },
1772        { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, lro_bytes) },
1773        { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, gro_packets) },
1774        { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, gro_bytes) },
1775        { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, gro_skbs) },
1776        { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, gro_match_packets) },
1777        { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, gro_large_hds) },
1778        { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, ecn_mark) },
1779        { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, removed_vlan_packets) },
1780        { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, wqe_err) },
1781        { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, mpwqe_filler_cqes) },
1782        { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, mpwqe_filler_strides) },
1783        { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, oversize_pkts_sw_drop) },
1784        { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, buff_alloc_err) },
1785        { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, cqe_compress_blks) },
1786        { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, cqe_compress_pkts) },
1787        { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, cache_reuse) },
1788        { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, cache_full) },
1789        { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, cache_empty) },
1790        { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, cache_busy) },
1791        { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, cache_waive) },
1792        { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, congst_umr) },
1793        { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, arfs_err) },
1794        { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, recover) },
1795#ifdef CONFIG_MLX5_EN_TLS
1796        { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, tls_decrypted_packets) },
1797        { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, tls_decrypted_bytes) },
1798        { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, tls_resync_req_pkt) },
1799        { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, tls_resync_req_start) },
1800        { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, tls_resync_req_end) },
1801        { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, tls_resync_req_skip) },
1802        { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, tls_resync_res_ok) },
1803        { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, tls_resync_res_retry) },
1804        { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, tls_resync_res_skip) },
1805        { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, tls_err) },
1806#endif
1807};
1808
1809static const struct counter_desc sq_stats_desc[] = {
1810        { MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, packets) },
1811        { MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, bytes) },
1812        { MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, tso_packets) },
1813        { MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, tso_bytes) },
1814        { MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, tso_inner_packets) },
1815        { MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, tso_inner_bytes) },
1816        { MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, csum_partial) },
1817        { MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, csum_partial_inner) },
1818        { MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, added_vlan_packets) },
1819        { MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, nop) },
1820        { MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, mpwqe_blks) },
1821        { MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, mpwqe_pkts) },
1822#ifdef CONFIG_MLX5_EN_TLS
1823        { MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, tls_encrypted_packets) },
1824        { MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, tls_encrypted_bytes) },
1825        { MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, tls_ooo) },
1826        { MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, tls_dump_packets) },
1827        { MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, tls_dump_bytes) },
1828        { MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, tls_resync_bytes) },
1829        { MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, tls_skip_no_sync_data) },
1830        { MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, tls_drop_no_sync_data) },
1831        { MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, tls_drop_bypass_req) },
1832#endif
1833        { MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, csum_none) },
1834        { MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, stopped) },
1835        { MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, dropped) },
1836        { MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, xmit_more) },
1837        { MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, recover) },
1838        { MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, cqes) },
1839        { MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, wake) },
1840        { MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, cqe_err) },
1841};
1842
1843static const struct counter_desc rq_xdpsq_stats_desc[] = {
1844        { MLX5E_DECLARE_RQ_XDPSQ_STAT(struct mlx5e_xdpsq_stats, xmit) },
1845        { MLX5E_DECLARE_RQ_XDPSQ_STAT(struct mlx5e_xdpsq_stats, mpwqe) },
1846        { MLX5E_DECLARE_RQ_XDPSQ_STAT(struct mlx5e_xdpsq_stats, inlnw) },
1847        { MLX5E_DECLARE_RQ_XDPSQ_STAT(struct mlx5e_xdpsq_stats, nops) },
1848        { MLX5E_DECLARE_RQ_XDPSQ_STAT(struct mlx5e_xdpsq_stats, full) },
1849        { MLX5E_DECLARE_RQ_XDPSQ_STAT(struct mlx5e_xdpsq_stats, err) },
1850        { MLX5E_DECLARE_RQ_XDPSQ_STAT(struct mlx5e_xdpsq_stats, cqes) },
1851};
1852
1853static const struct counter_desc xdpsq_stats_desc[] = {
1854        { MLX5E_DECLARE_XDPSQ_STAT(struct mlx5e_xdpsq_stats, xmit) },
1855        { MLX5E_DECLARE_XDPSQ_STAT(struct mlx5e_xdpsq_stats, mpwqe) },
1856        { MLX5E_DECLARE_XDPSQ_STAT(struct mlx5e_xdpsq_stats, inlnw) },
1857        { MLX5E_DECLARE_XDPSQ_STAT(struct mlx5e_xdpsq_stats, nops) },
1858        { MLX5E_DECLARE_XDPSQ_STAT(struct mlx5e_xdpsq_stats, full) },
1859        { MLX5E_DECLARE_XDPSQ_STAT(struct mlx5e_xdpsq_stats, err) },
1860        { MLX5E_DECLARE_XDPSQ_STAT(struct mlx5e_xdpsq_stats, cqes) },
1861};
1862
1863static const struct counter_desc xskrq_stats_desc[] = {
1864        { MLX5E_DECLARE_XSKRQ_STAT(struct mlx5e_rq_stats, packets) },
1865        { MLX5E_DECLARE_XSKRQ_STAT(struct mlx5e_rq_stats, bytes) },
1866        { MLX5E_DECLARE_XSKRQ_STAT(struct mlx5e_rq_stats, csum_complete) },
1867        { MLX5E_DECLARE_XSKRQ_STAT(struct mlx5e_rq_stats, csum_unnecessary) },
1868        { MLX5E_DECLARE_XSKRQ_STAT(struct mlx5e_rq_stats, csum_unnecessary_inner) },
1869        { MLX5E_DECLARE_XSKRQ_STAT(struct mlx5e_rq_stats, csum_none) },
1870        { MLX5E_DECLARE_XSKRQ_STAT(struct mlx5e_rq_stats, ecn_mark) },
1871        { MLX5E_DECLARE_XSKRQ_STAT(struct mlx5e_rq_stats, removed_vlan_packets) },
1872        { MLX5E_DECLARE_XSKRQ_STAT(struct mlx5e_rq_stats, xdp_drop) },
1873        { MLX5E_DECLARE_XSKRQ_STAT(struct mlx5e_rq_stats, xdp_redirect) },
1874        { MLX5E_DECLARE_XSKRQ_STAT(struct mlx5e_rq_stats, wqe_err) },
1875        { MLX5E_DECLARE_XSKRQ_STAT(struct mlx5e_rq_stats, mpwqe_filler_cqes) },
1876        { MLX5E_DECLARE_XSKRQ_STAT(struct mlx5e_rq_stats, mpwqe_filler_strides) },
1877        { MLX5E_DECLARE_XSKRQ_STAT(struct mlx5e_rq_stats, oversize_pkts_sw_drop) },
1878        { MLX5E_DECLARE_XSKRQ_STAT(struct mlx5e_rq_stats, buff_alloc_err) },
1879        { MLX5E_DECLARE_XSKRQ_STAT(struct mlx5e_rq_stats, cqe_compress_blks) },
1880        { MLX5E_DECLARE_XSKRQ_STAT(struct mlx5e_rq_stats, cqe_compress_pkts) },
1881        { MLX5E_DECLARE_XSKRQ_STAT(struct mlx5e_rq_stats, congst_umr) },
1882        { MLX5E_DECLARE_XSKRQ_STAT(struct mlx5e_rq_stats, arfs_err) },
1883};
1884
1885static const struct counter_desc xsksq_stats_desc[] = {
1886        { MLX5E_DECLARE_XSKSQ_STAT(struct mlx5e_xdpsq_stats, xmit) },
1887        { MLX5E_DECLARE_XSKSQ_STAT(struct mlx5e_xdpsq_stats, mpwqe) },
1888        { MLX5E_DECLARE_XSKSQ_STAT(struct mlx5e_xdpsq_stats, inlnw) },
1889        { MLX5E_DECLARE_XSKSQ_STAT(struct mlx5e_xdpsq_stats, full) },
1890        { MLX5E_DECLARE_XSKSQ_STAT(struct mlx5e_xdpsq_stats, err) },
1891        { MLX5E_DECLARE_XSKSQ_STAT(struct mlx5e_xdpsq_stats, cqes) },
1892};
1893
1894static const struct counter_desc ch_stats_desc[] = {
1895        { MLX5E_DECLARE_CH_STAT(struct mlx5e_ch_stats, events) },
1896        { MLX5E_DECLARE_CH_STAT(struct mlx5e_ch_stats, poll) },
1897        { MLX5E_DECLARE_CH_STAT(struct mlx5e_ch_stats, arm) },
1898        { MLX5E_DECLARE_CH_STAT(struct mlx5e_ch_stats, aff_change) },
1899        { MLX5E_DECLARE_CH_STAT(struct mlx5e_ch_stats, force_irq) },
1900        { MLX5E_DECLARE_CH_STAT(struct mlx5e_ch_stats, eq_rearm) },
1901};
1902
1903static const struct counter_desc ptp_sq_stats_desc[] = {
1904        { MLX5E_DECLARE_PTP_TX_STAT(struct mlx5e_sq_stats, packets) },
1905        { MLX5E_DECLARE_PTP_TX_STAT(struct mlx5e_sq_stats, bytes) },
1906        { MLX5E_DECLARE_PTP_TX_STAT(struct mlx5e_sq_stats, csum_partial) },
1907        { MLX5E_DECLARE_PTP_TX_STAT(struct mlx5e_sq_stats, csum_partial_inner) },
1908        { MLX5E_DECLARE_PTP_TX_STAT(struct mlx5e_sq_stats, added_vlan_packets) },
1909        { MLX5E_DECLARE_PTP_TX_STAT(struct mlx5e_sq_stats, nop) },
1910        { MLX5E_DECLARE_PTP_TX_STAT(struct mlx5e_sq_stats, csum_none) },
1911        { MLX5E_DECLARE_PTP_TX_STAT(struct mlx5e_sq_stats, stopped) },
1912        { MLX5E_DECLARE_PTP_TX_STAT(struct mlx5e_sq_stats, dropped) },
1913        { MLX5E_DECLARE_PTP_TX_STAT(struct mlx5e_sq_stats, xmit_more) },
1914        { MLX5E_DECLARE_PTP_TX_STAT(struct mlx5e_sq_stats, recover) },
1915        { MLX5E_DECLARE_PTP_TX_STAT(struct mlx5e_sq_stats, cqes) },
1916        { MLX5E_DECLARE_PTP_TX_STAT(struct mlx5e_sq_stats, wake) },
1917        { MLX5E_DECLARE_PTP_TX_STAT(struct mlx5e_sq_stats, cqe_err) },
1918};
1919
1920static const struct counter_desc ptp_ch_stats_desc[] = {
1921        { MLX5E_DECLARE_PTP_CH_STAT(struct mlx5e_ch_stats, events) },
1922        { MLX5E_DECLARE_PTP_CH_STAT(struct mlx5e_ch_stats, poll) },
1923        { MLX5E_DECLARE_PTP_CH_STAT(struct mlx5e_ch_stats, arm) },
1924        { MLX5E_DECLARE_PTP_CH_STAT(struct mlx5e_ch_stats, eq_rearm) },
1925};
1926
1927static const struct counter_desc ptp_cq_stats_desc[] = {
1928        { MLX5E_DECLARE_PTP_CQ_STAT(struct mlx5e_ptp_cq_stats, cqe) },
1929        { MLX5E_DECLARE_PTP_CQ_STAT(struct mlx5e_ptp_cq_stats, err_cqe) },
1930        { MLX5E_DECLARE_PTP_CQ_STAT(struct mlx5e_ptp_cq_stats, abort) },
1931        { MLX5E_DECLARE_PTP_CQ_STAT(struct mlx5e_ptp_cq_stats, abort_abs_diff_ns) },
1932};
1933
1934static const struct counter_desc ptp_rq_stats_desc[] = {
1935        { MLX5E_DECLARE_PTP_RQ_STAT(struct mlx5e_rq_stats, packets) },
1936        { MLX5E_DECLARE_PTP_RQ_STAT(struct mlx5e_rq_stats, bytes) },
1937        { MLX5E_DECLARE_PTP_RQ_STAT(struct mlx5e_rq_stats, csum_complete) },
1938        { MLX5E_DECLARE_PTP_RQ_STAT(struct mlx5e_rq_stats, csum_complete_tail) },
1939        { MLX5E_DECLARE_PTP_RQ_STAT(struct mlx5e_rq_stats, csum_complete_tail_slow) },
1940        { MLX5E_DECLARE_PTP_RQ_STAT(struct mlx5e_rq_stats, csum_unnecessary) },
1941        { MLX5E_DECLARE_PTP_RQ_STAT(struct mlx5e_rq_stats, csum_unnecessary_inner) },
1942        { MLX5E_DECLARE_PTP_RQ_STAT(struct mlx5e_rq_stats, csum_none) },
1943        { MLX5E_DECLARE_PTP_RQ_STAT(struct mlx5e_rq_stats, xdp_drop) },
1944        { MLX5E_DECLARE_PTP_RQ_STAT(struct mlx5e_rq_stats, xdp_redirect) },
1945        { MLX5E_DECLARE_PTP_RQ_STAT(struct mlx5e_rq_stats, lro_packets) },
1946        { MLX5E_DECLARE_PTP_RQ_STAT(struct mlx5e_rq_stats, lro_bytes) },
1947        { MLX5E_DECLARE_PTP_RQ_STAT(struct mlx5e_rq_stats, ecn_mark) },
1948        { MLX5E_DECLARE_PTP_RQ_STAT(struct mlx5e_rq_stats, removed_vlan_packets) },
1949        { MLX5E_DECLARE_PTP_RQ_STAT(struct mlx5e_rq_stats, wqe_err) },
1950        { MLX5E_DECLARE_PTP_RQ_STAT(struct mlx5e_rq_stats, mpwqe_filler_cqes) },
1951        { MLX5E_DECLARE_PTP_RQ_STAT(struct mlx5e_rq_stats, mpwqe_filler_strides) },
1952        { MLX5E_DECLARE_PTP_RQ_STAT(struct mlx5e_rq_stats, oversize_pkts_sw_drop) },
1953        { MLX5E_DECLARE_PTP_RQ_STAT(struct mlx5e_rq_stats, buff_alloc_err) },
1954        { MLX5E_DECLARE_PTP_RQ_STAT(struct mlx5e_rq_stats, cqe_compress_blks) },
1955        { MLX5E_DECLARE_PTP_RQ_STAT(struct mlx5e_rq_stats, cqe_compress_pkts) },
1956        { MLX5E_DECLARE_PTP_RQ_STAT(struct mlx5e_rq_stats, cache_reuse) },
1957        { MLX5E_DECLARE_PTP_RQ_STAT(struct mlx5e_rq_stats, cache_full) },
1958        { MLX5E_DECLARE_PTP_RQ_STAT(struct mlx5e_rq_stats, cache_empty) },
1959        { MLX5E_DECLARE_PTP_RQ_STAT(struct mlx5e_rq_stats, cache_busy) },
1960        { MLX5E_DECLARE_PTP_RQ_STAT(struct mlx5e_rq_stats, cache_waive) },
1961        { MLX5E_DECLARE_PTP_RQ_STAT(struct mlx5e_rq_stats, congst_umr) },
1962        { MLX5E_DECLARE_PTP_RQ_STAT(struct mlx5e_rq_stats, arfs_err) },
1963        { MLX5E_DECLARE_PTP_RQ_STAT(struct mlx5e_rq_stats, recover) },
1964};
1965
1966static const struct counter_desc qos_sq_stats_desc[] = {
1967        { MLX5E_DECLARE_QOS_TX_STAT(struct mlx5e_sq_stats, packets) },
1968        { MLX5E_DECLARE_QOS_TX_STAT(struct mlx5e_sq_stats, bytes) },
1969        { MLX5E_DECLARE_QOS_TX_STAT(struct mlx5e_sq_stats, tso_packets) },
1970        { MLX5E_DECLARE_QOS_TX_STAT(struct mlx5e_sq_stats, tso_bytes) },
1971        { MLX5E_DECLARE_QOS_TX_STAT(struct mlx5e_sq_stats, tso_inner_packets) },
1972        { MLX5E_DECLARE_QOS_TX_STAT(struct mlx5e_sq_stats, tso_inner_bytes) },
1973        { MLX5E_DECLARE_QOS_TX_STAT(struct mlx5e_sq_stats, csum_partial) },
1974        { MLX5E_DECLARE_QOS_TX_STAT(struct mlx5e_sq_stats, csum_partial_inner) },
1975        { MLX5E_DECLARE_QOS_TX_STAT(struct mlx5e_sq_stats, added_vlan_packets) },
1976        { MLX5E_DECLARE_QOS_TX_STAT(struct mlx5e_sq_stats, nop) },
1977        { MLX5E_DECLARE_QOS_TX_STAT(struct mlx5e_sq_stats, mpwqe_blks) },
1978        { MLX5E_DECLARE_QOS_TX_STAT(struct mlx5e_sq_stats, mpwqe_pkts) },
1979#ifdef CONFIG_MLX5_EN_TLS
1980        { MLX5E_DECLARE_QOS_TX_STAT(struct mlx5e_sq_stats, tls_encrypted_packets) },
1981        { MLX5E_DECLARE_QOS_TX_STAT(struct mlx5e_sq_stats, tls_encrypted_bytes) },
1982        { MLX5E_DECLARE_QOS_TX_STAT(struct mlx5e_sq_stats, tls_ooo) },
1983        { MLX5E_DECLARE_QOS_TX_STAT(struct mlx5e_sq_stats, tls_dump_packets) },
1984        { MLX5E_DECLARE_QOS_TX_STAT(struct mlx5e_sq_stats, tls_dump_bytes) },
1985        { MLX5E_DECLARE_QOS_TX_STAT(struct mlx5e_sq_stats, tls_resync_bytes) },
1986        { MLX5E_DECLARE_QOS_TX_STAT(struct mlx5e_sq_stats, tls_skip_no_sync_data) },
1987        { MLX5E_DECLARE_QOS_TX_STAT(struct mlx5e_sq_stats, tls_drop_no_sync_data) },
1988        { MLX5E_DECLARE_QOS_TX_STAT(struct mlx5e_sq_stats, tls_drop_bypass_req) },
1989#endif
1990        { MLX5E_DECLARE_QOS_TX_STAT(struct mlx5e_sq_stats, csum_none) },
1991        { MLX5E_DECLARE_QOS_TX_STAT(struct mlx5e_sq_stats, stopped) },
1992        { MLX5E_DECLARE_QOS_TX_STAT(struct mlx5e_sq_stats, dropped) },
1993        { MLX5E_DECLARE_QOS_TX_STAT(struct mlx5e_sq_stats, xmit_more) },
1994        { MLX5E_DECLARE_QOS_TX_STAT(struct mlx5e_sq_stats, recover) },
1995        { MLX5E_DECLARE_QOS_TX_STAT(struct mlx5e_sq_stats, cqes) },
1996        { MLX5E_DECLARE_QOS_TX_STAT(struct mlx5e_sq_stats, wake) },
1997        { MLX5E_DECLARE_QOS_TX_STAT(struct mlx5e_sq_stats, cqe_err) },
1998};
1999
2000#define NUM_RQ_STATS                    ARRAY_SIZE(rq_stats_desc)
2001#define NUM_SQ_STATS                    ARRAY_SIZE(sq_stats_desc)
2002#define NUM_XDPSQ_STATS                 ARRAY_SIZE(xdpsq_stats_desc)
2003#define NUM_RQ_XDPSQ_STATS              ARRAY_SIZE(rq_xdpsq_stats_desc)
2004#define NUM_XSKRQ_STATS                 ARRAY_SIZE(xskrq_stats_desc)
2005#define NUM_XSKSQ_STATS                 ARRAY_SIZE(xsksq_stats_desc)
2006#define NUM_CH_STATS                    ARRAY_SIZE(ch_stats_desc)
2007#define NUM_PTP_SQ_STATS                ARRAY_SIZE(ptp_sq_stats_desc)
2008#define NUM_PTP_CH_STATS                ARRAY_SIZE(ptp_ch_stats_desc)
2009#define NUM_PTP_CQ_STATS                ARRAY_SIZE(ptp_cq_stats_desc)
2010#define NUM_PTP_RQ_STATS                ARRAY_SIZE(ptp_rq_stats_desc)
2011#define NUM_QOS_SQ_STATS                ARRAY_SIZE(qos_sq_stats_desc)
2012
2013static MLX5E_DECLARE_STATS_GRP_OP_NUM_STATS(qos)
2014{
2015        /* Pairs with smp_store_release in mlx5e_open_qos_sq. */
2016        return NUM_QOS_SQ_STATS * smp_load_acquire(&priv->htb.max_qos_sqs);
2017}
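
/*
 * Editor's sketch of the publish/consume pairing assumed here (the
 * writer side paraphrases mlx5e_open_qos_sq, which is not shown; names
 * are hypothetical): the stats slot must be visible before the enlarged
 * count is released, so a reader that acquires max_qos_sqs can safely
 * dereference every slot below it.
 */
static inline void example_publish_qos_sq(struct mlx5e_sq_stats **slots,
                                          u16 *max_qos_sqs, u16 qid,
                                          struct mlx5e_sq_stats *s)
{
        WRITE_ONCE(slots[qid], s);                      /* publish the slot... */
        smp_store_release(max_qos_sqs, (u16)(qid + 1)); /* ...then the count */
}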
2018
2019static MLX5E_DECLARE_STATS_GRP_OP_FILL_STRS(qos)
2020{
2021        /* Pairs with smp_store_release in mlx5e_open_qos_sq. */
2022        u16 max_qos_sqs = smp_load_acquire(&priv->htb.max_qos_sqs);
2023        int i, qid;
2024
2025        for (qid = 0; qid < max_qos_sqs; qid++)
2026                for (i = 0; i < NUM_QOS_SQ_STATS; i++)
2027                        sprintf(data + (idx++) * ETH_GSTRING_LEN,
2028                                qos_sq_stats_desc[i].format, qid);
2029
2030        return idx;
2031}
2032
2033static MLX5E_DECLARE_STATS_GRP_OP_FILL_STATS(qos)
2034{
2035        struct mlx5e_sq_stats **stats;
2036        u16 max_qos_sqs;
2037        int i, qid;
2038
2039        /* Pairs with smp_store_release in mlx5e_open_qos_sq. */
2040        max_qos_sqs = smp_load_acquire(&priv->htb.max_qos_sqs);
2041        stats = READ_ONCE(priv->htb.qos_sq_stats);
2042
2043        for (qid = 0; qid < max_qos_sqs; qid++) {
2044                struct mlx5e_sq_stats *s = READ_ONCE(stats[qid]);
2045
2046                for (i = 0; i < NUM_QOS_SQ_STATS; i++)
2047                        data[idx++] = MLX5E_READ_CTR64_CPU(s, qos_sq_stats_desc, i);
2048        }
2049
2050        return idx;
2051}
2052
2053static MLX5E_DECLARE_STATS_GRP_OP_UPDATE_STATS(qos) { return; }
2054
2055static MLX5E_DECLARE_STATS_GRP_OP_NUM_STATS(ptp)
2056{
2057        int num = NUM_PTP_CH_STATS;
2058
2059        if (!priv->tx_ptp_opened && !priv->rx_ptp_opened)
2060                return 0;
2061
2062        if (priv->tx_ptp_opened)
2063                num += (NUM_PTP_SQ_STATS + NUM_PTP_CQ_STATS) * priv->max_opened_tc;
2064        if (priv->rx_ptp_opened)
2065                num += NUM_PTP_RQ_STATS;
2066
2067        return num;
2068}
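
/*
 * Editor's note: the channel-level PTP counters are shared by both
 * directions, so they are counted once whenever either the TX or the RX
 * PTP side has been opened; the SQ/CQ rows then scale with
 * max_opened_tc, while the RQ rows are a single fixed set.
 */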
2069
2070static MLX5E_DECLARE_STATS_GRP_OP_FILL_STRS(ptp)
2071{
2072        int i, tc;
2073
2074        if (!priv->tx_ptp_opened && !priv->rx_ptp_opened)
2075                return idx;
2076
2077        for (i = 0; i < NUM_PTP_CH_STATS; i++)
2078                strcpy(data + (idx++) * ETH_GSTRING_LEN,
2079                       ptp_ch_stats_desc[i].format);
2080
2081        if (priv->tx_ptp_opened) {
2082                for (tc = 0; tc < priv->max_opened_tc; tc++)
2083                        for (i = 0; i < NUM_PTP_SQ_STATS; i++)
2084                                sprintf(data + (idx++) * ETH_GSTRING_LEN,
2085                                        ptp_sq_stats_desc[i].format, tc);
2086
2087                for (tc = 0; tc < priv->max_opened_tc; tc++)
2088                        for (i = 0; i < NUM_PTP_CQ_STATS; i++)
2089                                sprintf(data + (idx++) * ETH_GSTRING_LEN,
2090                                        ptp_cq_stats_desc[i].format, tc);
2091        }
2092        if (priv->rx_ptp_opened) {
2093                for (i = 0; i < NUM_PTP_RQ_STATS; i++)
2094                        sprintf(data + (idx++) * ETH_GSTRING_LEN,
2095                                ptp_rq_stats_desc[i].format, MLX5E_PTP_CHANNEL_IX);
2096        }
2097        return idx;
2098}
2099
2100static MLX5E_DECLARE_STATS_GRP_OP_FILL_STATS(ptp)
2101{
2102        int i, tc;
2103
2104        if (!priv->tx_ptp_opened && !priv->rx_ptp_opened)
2105                return idx;
2106
2107        for (i = 0; i < NUM_PTP_CH_STATS; i++)
2108                data[idx++] =
2109                        MLX5E_READ_CTR64_CPU(&priv->ptp_stats.ch,
2110                                             ptp_ch_stats_desc, i);
2111
2112        if (priv->tx_ptp_opened) {
2113                for (tc = 0; tc < priv->max_opened_tc; tc++)
2114                        for (i = 0; i < NUM_PTP_SQ_STATS; i++)
2115                                data[idx++] =
2116                                        MLX5E_READ_CTR64_CPU(&priv->ptp_stats.sq[tc],
2117                                                             ptp_sq_stats_desc, i);
2118
2119                for (tc = 0; tc < priv->max_opened_tc; tc++)
2120                        for (i = 0; i < NUM_PTP_CQ_STATS; i++)
2121                                data[idx++] =
2122                                        MLX5E_READ_CTR64_CPU(&priv->ptp_stats.cq[tc],
2123                                                             ptp_cq_stats_desc, i);
2124        }
2125        if (priv->rx_ptp_opened) {
2126                for (i = 0; i < NUM_PTP_RQ_STATS; i++)
2127                        data[idx++] =
2128                                MLX5E_READ_CTR64_CPU(&priv->ptp_stats.rq,
2129                                                     ptp_rq_stats_desc, i);
2130        }
2131        return idx;
2132}
2133
2134static MLX5E_DECLARE_STATS_GRP_OP_UPDATE_STATS(ptp) { return; }
2135
2136static MLX5E_DECLARE_STATS_GRP_OP_NUM_STATS(channels)
2137{
2138        int max_nch = priv->stats_nch;
2139
2140        return (NUM_RQ_STATS * max_nch) +
2141               (NUM_CH_STATS * max_nch) +
2142               (NUM_SQ_STATS * max_nch * priv->max_opened_tc) +
2143               (NUM_RQ_XDPSQ_STATS * max_nch) +
2144               (NUM_XDPSQ_STATS * max_nch) +
2145               (NUM_XSKRQ_STATS * max_nch * priv->xsk.ever_used) +
2146               (NUM_XSKSQ_STATS * max_nch * priv->xsk.ever_used);
2147}
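
/*
 * Editor's note: priv->xsk.ever_used is a bool used as a 0/1 multiplier
 * here and as a loop bound below, so XSK rows appear only once an XSK
 * socket has ever been attached; since the flag latches on, the string
 * and data layouts stay in sync across subsequent dumps.
 */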
2148
2149static MLX5E_DECLARE_STATS_GRP_OP_FILL_STRS(channels)
2150{
2151        bool is_xsk = priv->xsk.ever_used;
2152        int max_nch = priv->stats_nch;
2153        int i, j, tc;
2154
2155        for (i = 0; i < max_nch; i++)
2156                for (j = 0; j < NUM_CH_STATS; j++)
2157                        sprintf(data + (idx++) * ETH_GSTRING_LEN,
2158                                ch_stats_desc[j].format, i);
2159
2160        for (i = 0; i < max_nch; i++) {
2161                for (j = 0; j < NUM_RQ_STATS; j++)
2162                        sprintf(data + (idx++) * ETH_GSTRING_LEN,
2163                                rq_stats_desc[j].format, i);
2164                for (j = 0; j < NUM_XSKRQ_STATS * is_xsk; j++)
2165                        sprintf(data + (idx++) * ETH_GSTRING_LEN,
2166                                xskrq_stats_desc[j].format, i);
2167                for (j = 0; j < NUM_RQ_XDPSQ_STATS; j++)
2168                        sprintf(data + (idx++) * ETH_GSTRING_LEN,
2169                                rq_xdpsq_stats_desc[j].format, i);
2170        }
2171
2172        for (tc = 0; tc < priv->max_opened_tc; tc++)
2173                for (i = 0; i < max_nch; i++)
2174                        for (j = 0; j < NUM_SQ_STATS; j++)
2175                                sprintf(data + (idx++) * ETH_GSTRING_LEN,
2176                                        sq_stats_desc[j].format,
2177                                        i + tc * max_nch);
2178
2179        for (i = 0; i < max_nch; i++) {
2180                for (j = 0; j < NUM_XSKSQ_STATS * is_xsk; j++)
2181                        sprintf(data + (idx++) * ETH_GSTRING_LEN,
2182                                xsksq_stats_desc[j].format, i);
2183                for (j = 0; j < NUM_XDPSQ_STATS; j++)
2184                        sprintf(data + (idx++) * ETH_GSTRING_LEN,
2185                                xdpsq_stats_desc[j].format, i);
2186        }
2187
2188        return idx;
2189}
2190
2191static MLX5E_DECLARE_STATS_GRP_OP_FILL_STATS(channels)
2192{
2193        bool is_xsk = priv->xsk.ever_used;
2194        int max_nch = priv->stats_nch;
2195        int i, j, tc;
2196
2197        for (i = 0; i < max_nch; i++)
2198                for (j = 0; j < NUM_CH_STATS; j++)
2199                        data[idx++] =
2200                                MLX5E_READ_CTR64_CPU(&priv->channel_stats[i].ch,
2201                                                     ch_stats_desc, j);
2202
2203        for (i = 0; i < max_nch; i++) {
2204                for (j = 0; j < NUM_RQ_STATS; j++)
2205                        data[idx++] =
2206                                MLX5E_READ_CTR64_CPU(&priv->channel_stats[i].rq,
2207                                                     rq_stats_desc, j);
2208                for (j = 0; j < NUM_XSKRQ_STATS * is_xsk; j++)
2209                        data[idx++] =
2210                                MLX5E_READ_CTR64_CPU(&priv->channel_stats[i].xskrq,
2211                                                     xskrq_stats_desc, j);
2212                for (j = 0; j < NUM_RQ_XDPSQ_STATS; j++)
2213                        data[idx++] =
2214                                MLX5E_READ_CTR64_CPU(&priv->channel_stats[i].rq_xdpsq,
2215                                                     rq_xdpsq_stats_desc, j);
2216        }
2217
2218        for (tc = 0; tc < priv->max_opened_tc; tc++)
2219                for (i = 0; i < max_nch; i++)
2220                        for (j = 0; j < NUM_SQ_STATS; j++)
2221                                data[idx++] =
2222                                        MLX5E_READ_CTR64_CPU(&priv->channel_stats[i].sq[tc],
2223                                                             sq_stats_desc, j);
2224
2225        for (i = 0; i < max_nch; i++) {
2226                for (j = 0; j < NUM_XSKSQ_STATS * is_xsk; j++)
2227                        data[idx++] =
2228                                MLX5E_READ_CTR64_CPU(&priv->channel_stats[i].xsksq,
2229                                                     xsksq_stats_desc, j);
2230                for (j = 0; j < NUM_XDPSQ_STATS; j++)
2231                        data[idx++] =
2232                                MLX5E_READ_CTR64_CPU(&priv->channel_stats[i].xdpsq,
2233                                                     xdpsq_stats_desc, j);
2234        }
2235
2236        return idx;
2237}
2238
2239static MLX5E_DECLARE_STATS_GRP_OP_UPDATE_STATS(channels) { return; }
2240
2241MLX5E_DEFINE_STATS_GRP(sw, 0);
2242MLX5E_DEFINE_STATS_GRP(qcnt, MLX5E_NDO_UPDATE_STATS);
2243MLX5E_DEFINE_STATS_GRP(vnic_env, 0);
2244MLX5E_DEFINE_STATS_GRP(vport, MLX5E_NDO_UPDATE_STATS);
2245MLX5E_DEFINE_STATS_GRP(802_3, MLX5E_NDO_UPDATE_STATS);
2246MLX5E_DEFINE_STATS_GRP(2863, 0);
2247MLX5E_DEFINE_STATS_GRP(2819, 0);
2248MLX5E_DEFINE_STATS_GRP(phy, 0);
2249MLX5E_DEFINE_STATS_GRP(pcie, 0);
2250MLX5E_DEFINE_STATS_GRP(per_prio, 0);
2251MLX5E_DEFINE_STATS_GRP(pme, 0);
2252MLX5E_DEFINE_STATS_GRP(channels, 0);
2253MLX5E_DEFINE_STATS_GRP(per_port_buff_congest, 0);
2254MLX5E_DEFINE_STATS_GRP(eth_ext, 0);
2255static MLX5E_DEFINE_STATS_GRP(tls, 0);
2256static MLX5E_DEFINE_STATS_GRP(ptp, 0);
2257static MLX5E_DEFINE_STATS_GRP(qos, 0);
2258
2259/* The stats groups are listed in reverse of the order in which update_stats() invokes them */
2260mlx5e_stats_grp_t mlx5e_nic_stats_grps[] = {
2261        &MLX5E_STATS_GRP(sw),
2262        &MLX5E_STATS_GRP(qcnt),
2263        &MLX5E_STATS_GRP(vnic_env),
2264        &MLX5E_STATS_GRP(vport),
2265        &MLX5E_STATS_GRP(802_3),
2266        &MLX5E_STATS_GRP(2863),
2267        &MLX5E_STATS_GRP(2819),
2268        &MLX5E_STATS_GRP(phy),
2269        &MLX5E_STATS_GRP(eth_ext),
2270        &MLX5E_STATS_GRP(pcie),
2271        &MLX5E_STATS_GRP(per_prio),
2272        &MLX5E_STATS_GRP(pme),
2273#ifdef CONFIG_MLX5_EN_IPSEC
2274        &MLX5E_STATS_GRP(ipsec_sw),
2275        &MLX5E_STATS_GRP(ipsec_hw),
2276#endif
2277        &MLX5E_STATS_GRP(tls),
2278        &MLX5E_STATS_GRP(channels),
2279        &MLX5E_STATS_GRP(per_port_buff_congest),
2280        &MLX5E_STATS_GRP(ptp),
2281        &MLX5E_STATS_GRP(qos),
2282};
2283
2284unsigned int mlx5e_nic_stats_grps_num(struct mlx5e_priv *priv)
2285{
2286        return ARRAY_SIZE(mlx5e_nic_stats_grps);
2287}
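
/*
 * Editor's sketch: a profile exposes the table above through its stats
 * callbacks, roughly like this (struct initializer abridged; surrounding
 * fields omitted):
 *
 *      .stats_grps     = mlx5e_nic_stats_grps,
 *      .stats_grps_num = mlx5e_nic_stats_grps_num,
 */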
2288