/* linux/drivers/net/ethernet/mellanox/mlx5/core/en_stats.c */
   1/*
   2 * Copyright (c) 2017, Mellanox Technologies, Ltd.  All rights reserved.
   3 *
   4 * This software is available to you under a choice of one of two
   5 * licenses.  You may choose to be licensed under the terms of the GNU
   6 * General Public License (GPL) Version 2, available from the file
   7 * COPYING in the main directory of this source tree, or the
   8 * OpenIB.org BSD license below:
   9 *
  10 *     Redistribution and use in source and binary forms, with or
  11 *     without modification, are permitted provided that the following
  12 *     conditions are met:
  13 *
  14 *      - Redistributions of source code must retain the above
  15 *        copyright notice, this list of conditions and the following
  16 *        disclaimer.
  17 *
  18 *      - Redistributions in binary form must reproduce the above
  19 *        copyright notice, this list of conditions and the following
  20 *        disclaimer in the documentation and/or other materials
  21 *        provided with the distribution.
  22 *
  23 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
  24 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
  25 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
  26 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
  27 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
  28 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
  29 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
  30 * SOFTWARE.
  31 */
  32
  33#include "lib/mlx5.h"
  34#include "en.h"
  35#include "en_accel/ipsec.h"
  36#include "en_accel/tls.h"
  37
/* Software counters exposed via "ethtool -S".
 * Each entry binds an ethtool string to a field of struct mlx5e_sw_stats;
 * the values are aggregated from the per-channel counters in
 * mlx5e_grp_sw_update_stats() below.
 */
static const struct counter_desc sw_stats_desc[] = {
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_packets) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_bytes) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_packets) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_bytes) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_tso_packets) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_tso_bytes) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_tso_inner_packets) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_tso_inner_bytes) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_added_vlan_packets) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_nop) },

	/* TX TLS offload counters - compiled in only with kTLS support */
#ifdef CONFIG_MLX5_EN_TLS
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_tls_encrypted_packets) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_tls_encrypted_bytes) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_tls_ctx) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_tls_ooo) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_tls_resync_bytes) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_tls_drop_no_sync_data) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_tls_drop_bypass_req) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_tls_dump_packets) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_tls_dump_bytes) },
#endif

	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_lro_packets) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_lro_bytes) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_ecn_mark) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_removed_vlan_packets) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_csum_unnecessary) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_csum_none) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_csum_complete) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_csum_complete_tail) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_csum_complete_tail_slow) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_csum_unnecessary_inner) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_xdp_drop) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_xdp_redirect) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_xdp_tx_xmit) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_xdp_tx_mpwqe) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_xdp_tx_inlnw) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_xdp_tx_full) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_xdp_tx_err) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_xdp_tx_cqe) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_csum_none) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_csum_partial) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_csum_partial_inner) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_queue_stopped) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_queue_dropped) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_xmit_more) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_recover) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_cqes) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_queue_wake) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_cqe_err) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_xdp_xmit) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_xdp_mpwqe) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_xdp_inlnw) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_xdp_full) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_xdp_err) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_xdp_cqes) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_wqe_err) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_mpwqe_filler_cqes) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_mpwqe_filler_strides) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_oversize_pkts_sw_drop) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_buff_alloc_err) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_cqe_compress_blks) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_cqe_compress_pkts) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_cache_reuse) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_cache_full) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_cache_empty) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_cache_busy) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_cache_waive) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_congst_umr) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_arfs_err) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, ch_events) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, ch_poll) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, ch_arm) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, ch_aff_change) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, ch_force_irq) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, ch_eq_rearm) },
	/* AF_XDP (XSK) RX/TX counters */
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_xsk_packets) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_xsk_bytes) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_xsk_csum_complete) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_xsk_csum_unnecessary) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_xsk_csum_unnecessary_inner) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_xsk_csum_none) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_xsk_ecn_mark) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_xsk_removed_vlan_packets) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_xsk_xdp_drop) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_xsk_xdp_redirect) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_xsk_wqe_err) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_xsk_mpwqe_filler_cqes) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_xsk_mpwqe_filler_strides) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_xsk_oversize_pkts_sw_drop) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_xsk_buff_alloc_err) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_xsk_cqe_compress_blks) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_xsk_cqe_compress_pkts) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_xsk_congst_umr) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_xsk_arfs_err) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_xsk_xmit) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_xsk_mpwqe) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_xsk_inlnw) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_xsk_full) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_xsk_err) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_xsk_cqes) },
};
 142
 143#define NUM_SW_COUNTERS                 ARRAY_SIZE(sw_stats_desc)
 144
 145static int mlx5e_grp_sw_get_num_stats(struct mlx5e_priv *priv)
 146{
 147        return NUM_SW_COUNTERS;
 148}
 149
 150static int mlx5e_grp_sw_fill_strings(struct mlx5e_priv *priv, u8 *data, int idx)
 151{
 152        int i;
 153
 154        for (i = 0; i < NUM_SW_COUNTERS; i++)
 155                strcpy(data + (idx++) * ETH_GSTRING_LEN, sw_stats_desc[i].format);
 156        return idx;
 157}
 158
 159static int mlx5e_grp_sw_fill_stats(struct mlx5e_priv *priv, u64 *data, int idx)
 160{
 161        int i;
 162
 163        for (i = 0; i < NUM_SW_COUNTERS; i++)
 164                data[idx++] = MLX5E_READ_CTR64_CPU(&priv->stats.sw, sw_stats_desc, i);
 165        return idx;
 166}
 167
/* Re-aggregate the netdev-wide SW stats from the per-channel counters.
 *
 * The aggregate is rebuilt from scratch (memset below) on every call, then
 * summed over all max_nch channels: the regular RQ, both XDP SQs (the
 * RQ-attached one and the redirect one), the XSK RQ/SQ, the channel (NAPI)
 * counters, and one SQ per opened TC.
 *
 * NOTE(review): the per-channel counters are read without synchronization
 * while the data path may still be updating them - presumably acceptable
 * tearing for statistics; confirm against the driver's locking model.
 */
static void mlx5e_grp_sw_update_stats(struct mlx5e_priv *priv)
{
	struct mlx5e_sw_stats *s = &priv->stats.sw;
	int i;

	memset(s, 0, sizeof(*s));

	for (i = 0; i < priv->max_nch; i++) {
		struct mlx5e_channel_stats *channel_stats =
			&priv->channel_stats[i];
		/* xdpsq_red_stats: SQ used for XDP_REDIRECT targeting this dev;
		 * xdpsq_stats: SQ attached to the RQ for XDP_TX.
		 */
		struct mlx5e_xdpsq_stats *xdpsq_red_stats = &channel_stats->xdpsq;
		struct mlx5e_xdpsq_stats *xdpsq_stats = &channel_stats->rq_xdpsq;
		struct mlx5e_xdpsq_stats *xsksq_stats = &channel_stats->xsksq;
		struct mlx5e_rq_stats *xskrq_stats = &channel_stats->xskrq;
		struct mlx5e_rq_stats *rq_stats = &channel_stats->rq;
		struct mlx5e_ch_stats *ch_stats = &channel_stats->ch;
		int j;

		s->rx_packets	+= rq_stats->packets;
		s->rx_bytes	+= rq_stats->bytes;
		s->rx_lro_packets += rq_stats->lro_packets;
		s->rx_lro_bytes += rq_stats->lro_bytes;
		s->rx_ecn_mark	+= rq_stats->ecn_mark;
		s->rx_removed_vlan_packets += rq_stats->removed_vlan_packets;
		s->rx_csum_none += rq_stats->csum_none;
		s->rx_csum_complete += rq_stats->csum_complete;
		s->rx_csum_complete_tail += rq_stats->csum_complete_tail;
		s->rx_csum_complete_tail_slow += rq_stats->csum_complete_tail_slow;
		s->rx_csum_unnecessary += rq_stats->csum_unnecessary;
		s->rx_csum_unnecessary_inner += rq_stats->csum_unnecessary_inner;
		s->rx_xdp_drop     += rq_stats->xdp_drop;
		s->rx_xdp_redirect += rq_stats->xdp_redirect;
		s->rx_xdp_tx_xmit  += xdpsq_stats->xmit;
		s->rx_xdp_tx_mpwqe += xdpsq_stats->mpwqe;
		s->rx_xdp_tx_inlnw += xdpsq_stats->inlnw;
		s->rx_xdp_tx_full  += xdpsq_stats->full;
		s->rx_xdp_tx_err   += xdpsq_stats->err;
		s->rx_xdp_tx_cqe   += xdpsq_stats->cqes;
		s->rx_wqe_err	+= rq_stats->wqe_err;
		s->rx_mpwqe_filler_cqes    += rq_stats->mpwqe_filler_cqes;
		s->rx_mpwqe_filler_strides += rq_stats->mpwqe_filler_strides;
		s->rx_oversize_pkts_sw_drop += rq_stats->oversize_pkts_sw_drop;
		s->rx_buff_alloc_err += rq_stats->buff_alloc_err;
		s->rx_cqe_compress_blks += rq_stats->cqe_compress_blks;
		s->rx_cqe_compress_pkts += rq_stats->cqe_compress_pkts;
		s->rx_cache_reuse += rq_stats->cache_reuse;
		s->rx_cache_full  += rq_stats->cache_full;
		s->rx_cache_empty += rq_stats->cache_empty;
		s->rx_cache_busy  += rq_stats->cache_busy;
		s->rx_cache_waive += rq_stats->cache_waive;
		s->rx_congst_umr  += rq_stats->congst_umr;
		s->rx_arfs_err    += rq_stats->arfs_err;
		s->ch_events      += ch_stats->events;
		s->ch_poll        += ch_stats->poll;
		s->ch_arm         += ch_stats->arm;
		s->ch_aff_change  += ch_stats->aff_change;
		s->ch_force_irq   += ch_stats->force_irq;
		s->ch_eq_rearm    += ch_stats->eq_rearm;
		/* xdp redirect */
		s->tx_xdp_xmit    += xdpsq_red_stats->xmit;
		s->tx_xdp_mpwqe   += xdpsq_red_stats->mpwqe;
		s->tx_xdp_inlnw   += xdpsq_red_stats->inlnw;
		s->tx_xdp_full    += xdpsq_red_stats->full;
		s->tx_xdp_err     += xdpsq_red_stats->err;
		s->tx_xdp_cqes    += xdpsq_red_stats->cqes;
		/* AF_XDP zero-copy */
		s->rx_xsk_packets                += xskrq_stats->packets;
		s->rx_xsk_bytes                  += xskrq_stats->bytes;
		s->rx_xsk_csum_complete          += xskrq_stats->csum_complete;
		s->rx_xsk_csum_unnecessary       += xskrq_stats->csum_unnecessary;
		s->rx_xsk_csum_unnecessary_inner += xskrq_stats->csum_unnecessary_inner;
		s->rx_xsk_csum_none              += xskrq_stats->csum_none;
		s->rx_xsk_ecn_mark               += xskrq_stats->ecn_mark;
		s->rx_xsk_removed_vlan_packets   += xskrq_stats->removed_vlan_packets;
		s->rx_xsk_xdp_drop               += xskrq_stats->xdp_drop;
		s->rx_xsk_xdp_redirect           += xskrq_stats->xdp_redirect;
		s->rx_xsk_wqe_err                += xskrq_stats->wqe_err;
		s->rx_xsk_mpwqe_filler_cqes      += xskrq_stats->mpwqe_filler_cqes;
		s->rx_xsk_mpwqe_filler_strides   += xskrq_stats->mpwqe_filler_strides;
		s->rx_xsk_oversize_pkts_sw_drop  += xskrq_stats->oversize_pkts_sw_drop;
		s->rx_xsk_buff_alloc_err         += xskrq_stats->buff_alloc_err;
		s->rx_xsk_cqe_compress_blks      += xskrq_stats->cqe_compress_blks;
		s->rx_xsk_cqe_compress_pkts      += xskrq_stats->cqe_compress_pkts;
		s->rx_xsk_congst_umr             += xskrq_stats->congst_umr;
		s->rx_xsk_arfs_err               += xskrq_stats->arfs_err;
		s->tx_xsk_xmit                   += xsksq_stats->xmit;
		s->tx_xsk_mpwqe                  += xsksq_stats->mpwqe;
		s->tx_xsk_inlnw                  += xsksq_stats->inlnw;
		s->tx_xsk_full                   += xsksq_stats->full;
		s->tx_xsk_err                    += xsksq_stats->err;
		s->tx_xsk_cqes                   += xsksq_stats->cqes;

		/* One regular TX SQ per opened traffic class. */
		for (j = 0; j < priv->max_opened_tc; j++) {
			struct mlx5e_sq_stats *sq_stats = &channel_stats->sq[j];

			s->tx_packets		+= sq_stats->packets;
			s->tx_bytes		+= sq_stats->bytes;
			s->tx_tso_packets	+= sq_stats->tso_packets;
			s->tx_tso_bytes		+= sq_stats->tso_bytes;
			s->tx_tso_inner_packets += sq_stats->tso_inner_packets;
			s->tx_tso_inner_bytes	+= sq_stats->tso_inner_bytes;
			s->tx_added_vlan_packets += sq_stats->added_vlan_packets;
			s->tx_nop		+= sq_stats->nop;
			s->tx_queue_stopped	+= sq_stats->stopped;
			s->tx_queue_wake	+= sq_stats->wake;
			s->tx_queue_dropped	+= sq_stats->dropped;
			s->tx_cqe_err		+= sq_stats->cqe_err;
			s->tx_recover		+= sq_stats->recover;
			s->tx_xmit_more		+= sq_stats->xmit_more;
			s->tx_csum_partial_inner += sq_stats->csum_partial_inner;
			s->tx_csum_none		+= sq_stats->csum_none;
			s->tx_csum_partial	+= sq_stats->csum_partial;
#ifdef CONFIG_MLX5_EN_TLS
			s->tx_tls_encrypted_packets += sq_stats->tls_encrypted_packets;
			s->tx_tls_encrypted_bytes   += sq_stats->tls_encrypted_bytes;
			s->tx_tls_ctx               += sq_stats->tls_ctx;
			s->tx_tls_ooo               += sq_stats->tls_ooo;
			s->tx_tls_resync_bytes      += sq_stats->tls_resync_bytes;
			s->tx_tls_drop_no_sync_data += sq_stats->tls_drop_no_sync_data;
			s->tx_tls_drop_bypass_req   += sq_stats->tls_drop_bypass_req;
			s->tx_tls_dump_bytes        += sq_stats->tls_dump_bytes;
			s->tx_tls_dump_packets      += sq_stats->tls_dump_packets;
#endif
			s->tx_cqes		+= sq_stats->cqes;
		}
	}
}
 295
/* Device queue counters: rx_out_of_buffer is reported by the regular RQ
 * queue counter, rx_if_down_packets by the "drop RQ" counter that absorbs
 * traffic while the interface is down.
 */
static const struct counter_desc q_stats_desc[] = {
	{ MLX5E_DECLARE_STAT(struct mlx5e_qcounter_stats, rx_out_of_buffer) },
};

static const struct counter_desc drop_rq_stats_desc[] = {
	{ MLX5E_DECLARE_STAT(struct mlx5e_qcounter_stats, rx_if_down_packets) },
};

#define NUM_Q_COUNTERS			ARRAY_SIZE(q_stats_desc)
#define NUM_DROP_RQ_COUNTERS		ARRAY_SIZE(drop_rq_stats_desc)
 306
 307static int mlx5e_grp_q_get_num_stats(struct mlx5e_priv *priv)
 308{
 309        int num_stats = 0;
 310
 311        if (priv->q_counter)
 312                num_stats += NUM_Q_COUNTERS;
 313
 314        if (priv->drop_rq_q_counter)
 315                num_stats += NUM_DROP_RQ_COUNTERS;
 316
 317        return num_stats;
 318}
 319
 320static int mlx5e_grp_q_fill_strings(struct mlx5e_priv *priv, u8 *data, int idx)
 321{
 322        int i;
 323
 324        for (i = 0; i < NUM_Q_COUNTERS && priv->q_counter; i++)
 325                strcpy(data + (idx++) * ETH_GSTRING_LEN,
 326                       q_stats_desc[i].format);
 327
 328        for (i = 0; i < NUM_DROP_RQ_COUNTERS && priv->drop_rq_q_counter; i++)
 329                strcpy(data + (idx++) * ETH_GSTRING_LEN,
 330                       drop_rq_stats_desc[i].format);
 331
 332        return idx;
 333}
 334
 335static int mlx5e_grp_q_fill_stats(struct mlx5e_priv *priv, u64 *data, int idx)
 336{
 337        int i;
 338
 339        for (i = 0; i < NUM_Q_COUNTERS && priv->q_counter; i++)
 340                data[idx++] = MLX5E_READ_CTR32_CPU(&priv->stats.qcnt,
 341                                                   q_stats_desc, i);
 342        for (i = 0; i < NUM_DROP_RQ_COUNTERS && priv->drop_rq_q_counter; i++)
 343                data[idx++] = MLX5E_READ_CTR32_CPU(&priv->stats.qcnt,
 344                                                   drop_rq_stats_desc, i);
 345        return idx;
 346}
 347
/* Query the firmware queue counters and cache the results in
 * priv->stats.qcnt. Failures leave the previously cached values in place.
 *
 * Note: rx_if_down_packets is deliberately read from the out_of_buffer
 * field of the drop-RQ counter's query - the drop RQ has no posted buffers,
 * so every packet that hits it while the interface is down is accounted
 * there as an out-of-buffer event.
 */
static void mlx5e_grp_q_update_stats(struct mlx5e_priv *priv)
{
	struct mlx5e_qcounter_stats *qcnt = &priv->stats.qcnt;
	u32 out[MLX5_ST_SZ_DW(query_q_counter_out)];

	if (priv->q_counter &&
	    !mlx5_core_query_q_counter(priv->mdev, priv->q_counter, 0, out,
				       sizeof(out)))
		qcnt->rx_out_of_buffer = MLX5_GET(query_q_counter_out,
						  out, out_of_buffer);
	if (priv->drop_rq_q_counter &&
	    !mlx5_core_query_q_counter(priv->mdev, priv->drop_rq_q_counter, 0,
				       out, sizeof(out)))
		qcnt->rx_if_down_packets = MLX5_GET(query_q_counter_out, out,
						    out_of_buffer);
}
 364
/* vNIC environment counters, read from the QUERY_VNIC_ENV command output
 * that mlx5e_grp_vnic_env_update_stats() caches in priv->stats.vnic.
 */
#define VNIC_ENV_OFF(c) MLX5_BYTE_OFF(query_vnic_env_out, c)
static const struct counter_desc vnic_env_stats_desc[] = {
	{ "rx_steer_missed_packets",
		VNIC_ENV_OFF(vport_env.nic_receive_steering_discard) },
};

#define NUM_VNIC_ENV_COUNTERS		ARRAY_SIZE(vnic_env_stats_desc)
 372
 373static int mlx5e_grp_vnic_env_get_num_stats(struct mlx5e_priv *priv)
 374{
 375        return MLX5_CAP_GEN(priv->mdev, nic_receive_steering_discard) ?
 376                NUM_VNIC_ENV_COUNTERS : 0;
 377}
 378
 379static int mlx5e_grp_vnic_env_fill_strings(struct mlx5e_priv *priv, u8 *data,
 380                                           int idx)
 381{
 382        int i;
 383
 384        if (!MLX5_CAP_GEN(priv->mdev, nic_receive_steering_discard))
 385                return idx;
 386
 387        for (i = 0; i < NUM_VNIC_ENV_COUNTERS; i++)
 388                strcpy(data + (idx++) * ETH_GSTRING_LEN,
 389                       vnic_env_stats_desc[i].format);
 390        return idx;
 391}
 392
 393static int mlx5e_grp_vnic_env_fill_stats(struct mlx5e_priv *priv, u64 *data,
 394                                         int idx)
 395{
 396        int i;
 397
 398        if (!MLX5_CAP_GEN(priv->mdev, nic_receive_steering_discard))
 399                return idx;
 400
 401        for (i = 0; i < NUM_VNIC_ENV_COUNTERS; i++)
 402                data[idx++] = MLX5E_READ_CTR64_BE(priv->stats.vnic.query_vnic_env_out,
 403                                                  vnic_env_stats_desc, i);
 404        return idx;
 405}
 406
/* Issue QUERY_VNIC_ENV for our own vport (other_vport = 0) and cache the
 * raw, big-endian command output in priv->stats.vnic for the fill helpers.
 * Skipped entirely when the capability is not advertised; a command failure
 * leaves the previous snapshot in place (return value is ignored).
 */
static void mlx5e_grp_vnic_env_update_stats(struct mlx5e_priv *priv)
{
	u32 *out = (u32 *)priv->stats.vnic.query_vnic_env_out;
	int outlen = MLX5_ST_SZ_BYTES(query_vnic_env_out);
	u32 in[MLX5_ST_SZ_DW(query_vnic_env_in)] = {0};
	struct mlx5_core_dev *mdev = priv->mdev;

	if (!MLX5_CAP_GEN(priv->mdev, nic_receive_steering_discard))
		return;

	MLX5_SET(query_vnic_env_in, in, opcode,
		 MLX5_CMD_OP_QUERY_VNIC_ENV);
	MLX5_SET(query_vnic_env_in, in, op_mod, 0);
	MLX5_SET(query_vnic_env_in, in, other_vport, 0);
	mlx5_cmd_exec(mdev, in, sizeof(in), out, outlen);
}
 423
/* Per-vport counters, read from the QUERY_VPORT_COUNTER command output
 * cached in priv->stats.vport. Covers unicast/multicast/broadcast Ethernet
 * traffic plus the RDMA (IB) traffic classes.
 */
#define VPORT_COUNTER_OFF(c) MLX5_BYTE_OFF(query_vport_counter_out, c)
static const struct counter_desc vport_stats_desc[] = {
	{ "rx_vport_unicast_packets",
		VPORT_COUNTER_OFF(received_eth_unicast.packets) },
	{ "rx_vport_unicast_bytes",
		VPORT_COUNTER_OFF(received_eth_unicast.octets) },
	{ "tx_vport_unicast_packets",
		VPORT_COUNTER_OFF(transmitted_eth_unicast.packets) },
	{ "tx_vport_unicast_bytes",
		VPORT_COUNTER_OFF(transmitted_eth_unicast.octets) },
	{ "rx_vport_multicast_packets",
		VPORT_COUNTER_OFF(received_eth_multicast.packets) },
	{ "rx_vport_multicast_bytes",
		VPORT_COUNTER_OFF(received_eth_multicast.octets) },
	{ "tx_vport_multicast_packets",
		VPORT_COUNTER_OFF(transmitted_eth_multicast.packets) },
	{ "tx_vport_multicast_bytes",
		VPORT_COUNTER_OFF(transmitted_eth_multicast.octets) },
	{ "rx_vport_broadcast_packets",
		VPORT_COUNTER_OFF(received_eth_broadcast.packets) },
	{ "rx_vport_broadcast_bytes",
		VPORT_COUNTER_OFF(received_eth_broadcast.octets) },
	{ "tx_vport_broadcast_packets",
		VPORT_COUNTER_OFF(transmitted_eth_broadcast.packets) },
	{ "tx_vport_broadcast_bytes",
		VPORT_COUNTER_OFF(transmitted_eth_broadcast.octets) },
	{ "rx_vport_rdma_unicast_packets",
		VPORT_COUNTER_OFF(received_ib_unicast.packets) },
	{ "rx_vport_rdma_unicast_bytes",
		VPORT_COUNTER_OFF(received_ib_unicast.octets) },
	{ "tx_vport_rdma_unicast_packets",
		VPORT_COUNTER_OFF(transmitted_ib_unicast.packets) },
	{ "tx_vport_rdma_unicast_bytes",
		VPORT_COUNTER_OFF(transmitted_ib_unicast.octets) },
	{ "rx_vport_rdma_multicast_packets",
		VPORT_COUNTER_OFF(received_ib_multicast.packets) },
	{ "rx_vport_rdma_multicast_bytes",
		VPORT_COUNTER_OFF(received_ib_multicast.octets) },
	{ "tx_vport_rdma_multicast_packets",
		VPORT_COUNTER_OFF(transmitted_ib_multicast.packets) },
	{ "tx_vport_rdma_multicast_bytes",
		VPORT_COUNTER_OFF(transmitted_ib_multicast.octets) },
};

#define NUM_VPORT_COUNTERS		ARRAY_SIZE(vport_stats_desc)
 469
 470static int mlx5e_grp_vport_get_num_stats(struct mlx5e_priv *priv)
 471{
 472        return NUM_VPORT_COUNTERS;
 473}
 474
 475static int mlx5e_grp_vport_fill_strings(struct mlx5e_priv *priv, u8 *data,
 476                                        int idx)
 477{
 478        int i;
 479
 480        for (i = 0; i < NUM_VPORT_COUNTERS; i++)
 481                strcpy(data + (idx++) * ETH_GSTRING_LEN, vport_stats_desc[i].format);
 482        return idx;
 483}
 484
 485static int mlx5e_grp_vport_fill_stats(struct mlx5e_priv *priv, u64 *data,
 486                                      int idx)
 487{
 488        int i;
 489
 490        for (i = 0; i < NUM_VPORT_COUNTERS; i++)
 491                data[idx++] = MLX5E_READ_CTR64_BE(priv->stats.vport.query_vport_out,
 492                                                  vport_stats_desc, i);
 493        return idx;
 494}
 495
/* Issue QUERY_VPORT_COUNTER for our own vport (other_vport = 0) and cache
 * the raw, big-endian command output in priv->stats.vport for the fill
 * helpers. A command failure leaves the previous snapshot in place.
 */
static void mlx5e_grp_vport_update_stats(struct mlx5e_priv *priv)
{
	int outlen = MLX5_ST_SZ_BYTES(query_vport_counter_out);
	u32 *out = (u32 *)priv->stats.vport.query_vport_out;
	u32 in[MLX5_ST_SZ_DW(query_vport_counter_in)] = {0};
	struct mlx5_core_dev *mdev = priv->mdev;

	MLX5_SET(query_vport_counter_in, in, opcode, MLX5_CMD_OP_QUERY_VPORT_COUNTER);
	MLX5_SET(query_vport_counter_in, in, op_mod, 0);
	MLX5_SET(query_vport_counter_in, in, other_vport, 0);
	mlx5_cmd_exec(mdev, in, sizeof(in), out, outlen);
}
 508
/* IEEE 802.3 physical-port counters from the PPCNT register. The macro
 * resolves a counter name to the byte offset of its _high 32-bit word, so
 * MLX5E_READ_CTR64_BE can read the full 64-bit big-endian value.
 */
#define PPORT_802_3_OFF(c) \
	MLX5_BYTE_OFF(ppcnt_reg, \
		      counter_set.eth_802_3_cntrs_grp_data_layout.c##_high)
static const struct counter_desc pport_802_3_stats_desc[] = {
	{ "tx_packets_phy", PPORT_802_3_OFF(a_frames_transmitted_ok) },
	{ "rx_packets_phy", PPORT_802_3_OFF(a_frames_received_ok) },
	{ "rx_crc_errors_phy", PPORT_802_3_OFF(a_frame_check_sequence_errors) },
	{ "tx_bytes_phy", PPORT_802_3_OFF(a_octets_transmitted_ok) },
	{ "rx_bytes_phy", PPORT_802_3_OFF(a_octets_received_ok) },
	{ "tx_multicast_phy", PPORT_802_3_OFF(a_multicast_frames_xmitted_ok) },
	{ "tx_broadcast_phy", PPORT_802_3_OFF(a_broadcast_frames_xmitted_ok) },
	{ "rx_multicast_phy", PPORT_802_3_OFF(a_multicast_frames_received_ok) },
	{ "rx_broadcast_phy", PPORT_802_3_OFF(a_broadcast_frames_received_ok) },
	{ "rx_in_range_len_errors_phy", PPORT_802_3_OFF(a_in_range_length_errors) },
	{ "rx_out_of_range_len_phy", PPORT_802_3_OFF(a_out_of_range_length_field) },
	{ "rx_oversize_pkts_phy", PPORT_802_3_OFF(a_frame_too_long_errors) },
	{ "rx_symbol_err_phy", PPORT_802_3_OFF(a_symbol_error_during_carrier) },
	{ "tx_mac_control_phy", PPORT_802_3_OFF(a_mac_control_frames_transmitted) },
	{ "rx_mac_control_phy", PPORT_802_3_OFF(a_mac_control_frames_received) },
	{ "rx_unsupported_op_phy", PPORT_802_3_OFF(a_unsupported_opcodes_received) },
	{ "rx_pause_ctrl_phy", PPORT_802_3_OFF(a_pause_mac_ctrl_frames_received) },
	{ "tx_pause_ctrl_phy", PPORT_802_3_OFF(a_pause_mac_ctrl_frames_transmitted) },
};

#define NUM_PPORT_802_3_COUNTERS	ARRAY_SIZE(pport_802_3_stats_desc)
 534
 535static int mlx5e_grp_802_3_get_num_stats(struct mlx5e_priv *priv)
 536{
 537        return NUM_PPORT_802_3_COUNTERS;
 538}
 539
 540static int mlx5e_grp_802_3_fill_strings(struct mlx5e_priv *priv, u8 *data,
 541                                        int idx)
 542{
 543        int i;
 544
 545        for (i = 0; i < NUM_PPORT_802_3_COUNTERS; i++)
 546                strcpy(data + (idx++) * ETH_GSTRING_LEN, pport_802_3_stats_desc[i].format);
 547        return idx;
 548}
 549
 550static int mlx5e_grp_802_3_fill_stats(struct mlx5e_priv *priv, u64 *data,
 551                                      int idx)
 552{
 553        int i;
 554
 555        for (i = 0; i < NUM_PPORT_802_3_COUNTERS; i++)
 556                data[idx++] = MLX5E_READ_CTR64_BE(&priv->stats.pport.IEEE_802_3_counters,
 557                                                  pport_802_3_stats_desc, i);
 558        return idx;
 559}
 560
/* PPCNT is assumed supported unless the PCAM register explicitly says
 * otherwise (devices without PCAM predate the capability bit).
 */
#define MLX5_BASIC_PPCNT_SUPPORTED(mdev) \
	(MLX5_CAP_GEN(mdev, pcam_reg) ? MLX5_CAP_PCAM_REG(mdev, ppcnt) : 1)

/* Read the IEEE 802.3 group of the PPCNT register for local port 1 into
 * priv->stats.pport.IEEE_802_3_counters (raw big-endian register layout).
 * Non-static: also called from outside this file.
 */
void mlx5e_grp_802_3_update_stats(struct mlx5e_priv *priv)
{
	struct mlx5e_pport_stats *pstats = &priv->stats.pport;
	struct mlx5_core_dev *mdev = priv->mdev;
	u32 in[MLX5_ST_SZ_DW(ppcnt_reg)] = {0};
	int sz = MLX5_ST_SZ_BYTES(ppcnt_reg);
	void *out;

	if (!MLX5_BASIC_PPCNT_SUPPORTED(mdev))
		return;

	MLX5_SET(ppcnt_reg, in, local_port, 1);
	out = pstats->IEEE_802_3_counters;
	MLX5_SET(ppcnt_reg, in, grp, MLX5_IEEE_802_3_COUNTERS_GROUP);
	mlx5_core_access_reg(mdev, in, sz, out, sz, MLX5_REG_PPCNT, 0, 0);
}
 580
/* RFC 2863 (interfaces MIB) physical-port counters from the PPCNT register;
 * same _high-word offset trick as PPORT_802_3_OFF above.
 */
#define PPORT_2863_OFF(c) \
	MLX5_BYTE_OFF(ppcnt_reg, \
		      counter_set.eth_2863_cntrs_grp_data_layout.c##_high)
static const struct counter_desc pport_2863_stats_desc[] = {
	{ "rx_discards_phy", PPORT_2863_OFF(if_in_discards) },
	{ "tx_discards_phy", PPORT_2863_OFF(if_out_discards) },
	{ "tx_errors_phy", PPORT_2863_OFF(if_out_errors) },
};

#define NUM_PPORT_2863_COUNTERS		ARRAY_SIZE(pport_2863_stats_desc)
 591
 592static int mlx5e_grp_2863_get_num_stats(struct mlx5e_priv *priv)
 593{
 594        return NUM_PPORT_2863_COUNTERS;
 595}
 596
 597static int mlx5e_grp_2863_fill_strings(struct mlx5e_priv *priv, u8 *data,
 598                                       int idx)
 599{
 600        int i;
 601
 602        for (i = 0; i < NUM_PPORT_2863_COUNTERS; i++)
 603                strcpy(data + (idx++) * ETH_GSTRING_LEN, pport_2863_stats_desc[i].format);
 604        return idx;
 605}
 606
 607static int mlx5e_grp_2863_fill_stats(struct mlx5e_priv *priv, u64 *data,
 608                                     int idx)
 609{
 610        int i;
 611
 612        for (i = 0; i < NUM_PPORT_2863_COUNTERS; i++)
 613                data[idx++] = MLX5E_READ_CTR64_BE(&priv->stats.pport.RFC_2863_counters,
 614                                                  pport_2863_stats_desc, i);
 615        return idx;
 616}
 617
/* Refresh the cached RFC 2863 PPCNT group from the hardware.
 * NOTE(review): unlike the 802.3/2819/per-prio groups this one has no
 * MLX5_BASIC_PPCNT_SUPPORTED() gate — presumably always available.
 */
static void mlx5e_grp_2863_update_stats(struct mlx5e_priv *priv)
{
        struct mlx5e_pport_stats *pstats = &priv->stats.pport;
        struct mlx5_core_dev *mdev = priv->mdev;
        u32 in[MLX5_ST_SZ_DW(ppcnt_reg)] = {0};
        int sz = MLX5_ST_SZ_BYTES(ppcnt_reg);
        void *out;

        MLX5_SET(ppcnt_reg, in, local_port, 1);
        out = pstats->RFC_2863_counters;
        MLX5_SET(ppcnt_reg, in, grp, MLX5_RFC_2863_COUNTERS_GROUP);
        mlx5_core_access_reg(mdev, in, sz, out, sz, MLX5_REG_PPCNT, 0, 0);
}
 631
/* Byte offset of the high dword of a 64-bit RFC 2819 counter within PPCNT. */
#define PPORT_2819_OFF(c) \
        MLX5_BYTE_OFF(ppcnt_reg, \
                      counter_set.eth_2819_cntrs_grp_data_layout.c##_high)
/* RFC 2819 (RMON) physical-port counters, mostly RX size histograms. */
static const struct counter_desc pport_2819_stats_desc[] = {
        { "rx_undersize_pkts_phy", PPORT_2819_OFF(ether_stats_undersize_pkts) },
        { "rx_fragments_phy", PPORT_2819_OFF(ether_stats_fragments) },
        { "rx_jabbers_phy", PPORT_2819_OFF(ether_stats_jabbers) },
        { "rx_64_bytes_phy", PPORT_2819_OFF(ether_stats_pkts64octets) },
        { "rx_65_to_127_bytes_phy", PPORT_2819_OFF(ether_stats_pkts65to127octets) },
        { "rx_128_to_255_bytes_phy", PPORT_2819_OFF(ether_stats_pkts128to255octets) },
        { "rx_256_to_511_bytes_phy", PPORT_2819_OFF(ether_stats_pkts256to511octets) },
        { "rx_512_to_1023_bytes_phy", PPORT_2819_OFF(ether_stats_pkts512to1023octets) },
        { "rx_1024_to_1518_bytes_phy", PPORT_2819_OFF(ether_stats_pkts1024to1518octets) },
        { "rx_1519_to_2047_bytes_phy", PPORT_2819_OFF(ether_stats_pkts1519to2047octets) },
        { "rx_2048_to_4095_bytes_phy", PPORT_2819_OFF(ether_stats_pkts2048to4095octets) },
        { "rx_4096_to_8191_bytes_phy", PPORT_2819_OFF(ether_stats_pkts4096to8191octets) },
        { "rx_8192_to_10239_bytes_phy", PPORT_2819_OFF(ether_stats_pkts8192to10239octets) },
};

#define NUM_PPORT_2819_COUNTERS         ARRAY_SIZE(pport_2819_stats_desc)
 652
/* Number of ethtool entries contributed by the RFC 2819 group. */
static int mlx5e_grp_2819_get_num_stats(struct mlx5e_priv *priv)
{
        return NUM_PPORT_2819_COUNTERS;
}
 657
 658static int mlx5e_grp_2819_fill_strings(struct mlx5e_priv *priv, u8 *data,
 659                                       int idx)
 660{
 661        int i;
 662
 663        for (i = 0; i < NUM_PPORT_2819_COUNTERS; i++)
 664                strcpy(data + (idx++) * ETH_GSTRING_LEN, pport_2819_stats_desc[i].format);
 665        return idx;
 666}
 667
 668static int mlx5e_grp_2819_fill_stats(struct mlx5e_priv *priv, u64 *data,
 669                                     int idx)
 670{
 671        int i;
 672
 673        for (i = 0; i < NUM_PPORT_2819_COUNTERS; i++)
 674                data[idx++] = MLX5E_READ_CTR64_BE(&priv->stats.pport.RFC_2819_counters,
 675                                                  pport_2819_stats_desc, i);
 676        return idx;
 677}
 678
/* Refresh the cached RFC 2819 PPCNT group from the hardware. */
static void mlx5e_grp_2819_update_stats(struct mlx5e_priv *priv)
{
        struct mlx5e_pport_stats *pstats = &priv->stats.pport;
        struct mlx5_core_dev *mdev = priv->mdev;
        u32 in[MLX5_ST_SZ_DW(ppcnt_reg)] = {0};
        int sz = MLX5_ST_SZ_BYTES(ppcnt_reg);
        void *out;

        if (!MLX5_BASIC_PPCNT_SUPPORTED(mdev))
                return;

        MLX5_SET(ppcnt_reg, in, local_port, 1);
        out = pstats->RFC_2819_counters;
        MLX5_SET(ppcnt_reg, in, grp, MLX5_RFC_2819_COUNTERS_GROUP);
        mlx5_core_access_reg(mdev, in, sz, out, sz, MLX5_REG_PPCNT, 0, 0);
}
 695
/* Byte offset of the high dword of a 64-bit physical-layer statistical
 * counter within PPCNT.
 */
#define PPORT_PHY_STATISTICAL_OFF(c) \
        MLX5_BYTE_OFF(ppcnt_reg, \
                      counter_set.phys_layer_statistical_cntrs.c##_high)
static const struct counter_desc pport_phy_statistical_stats_desc[] = {
        { "rx_pcs_symbol_err_phy", PPORT_PHY_STATISTICAL_OFF(phy_symbol_errors) },
        { "rx_corrected_bits_phy", PPORT_PHY_STATISTICAL_OFF(phy_corrected_bits) },
};

/* Per-lane corrected-bits counters; exposed only when the
 * per_lane_error_counters PCAM capability is set.
 */
static const struct counter_desc
pport_phy_statistical_err_lanes_stats_desc[] = {
        { "rx_err_lane_0_phy", PPORT_PHY_STATISTICAL_OFF(phy_corrected_bits_lane0) },
        { "rx_err_lane_1_phy", PPORT_PHY_STATISTICAL_OFF(phy_corrected_bits_lane1) },
        { "rx_err_lane_2_phy", PPORT_PHY_STATISTICAL_OFF(phy_corrected_bits_lane2) },
        { "rx_err_lane_3_phy", PPORT_PHY_STATISTICAL_OFF(phy_corrected_bits_lane3) },
};

#define NUM_PPORT_PHY_STATISTICAL_COUNTERS \
        ARRAY_SIZE(pport_phy_statistical_stats_desc)
#define NUM_PPORT_PHY_STATISTICAL_PER_LANE_COUNTERS \
        ARRAY_SIZE(pport_phy_statistical_err_lanes_stats_desc)
 716
 717static int mlx5e_grp_phy_get_num_stats(struct mlx5e_priv *priv)
 718{
 719        struct mlx5_core_dev *mdev = priv->mdev;
 720        int num_stats;
 721
 722        /* "1" for link_down_events special counter */
 723        num_stats = 1;
 724
 725        num_stats += MLX5_CAP_PCAM_FEATURE(mdev, ppcnt_statistical_group) ?
 726                     NUM_PPORT_PHY_STATISTICAL_COUNTERS : 0;
 727
 728        num_stats += MLX5_CAP_PCAM_FEATURE(mdev, per_lane_error_counters) ?
 729                     NUM_PPORT_PHY_STATISTICAL_PER_LANE_COUNTERS : 0;
 730
 731        return num_stats;
 732}
 733
 734static int mlx5e_grp_phy_fill_strings(struct mlx5e_priv *priv, u8 *data,
 735                                      int idx)
 736{
 737        struct mlx5_core_dev *mdev = priv->mdev;
 738        int i;
 739
 740        strcpy(data + (idx++) * ETH_GSTRING_LEN, "link_down_events_phy");
 741
 742        if (!MLX5_CAP_PCAM_FEATURE(mdev, ppcnt_statistical_group))
 743                return idx;
 744
 745        for (i = 0; i < NUM_PPORT_PHY_STATISTICAL_COUNTERS; i++)
 746                strcpy(data + (idx++) * ETH_GSTRING_LEN,
 747                       pport_phy_statistical_stats_desc[i].format);
 748
 749        if (MLX5_CAP_PCAM_FEATURE(mdev, per_lane_error_counters))
 750                for (i = 0; i < NUM_PPORT_PHY_STATISTICAL_PER_LANE_COUNTERS; i++)
 751                        strcpy(data + (idx++) * ETH_GSTRING_LEN,
 752                               pport_phy_statistical_err_lanes_stats_desc[i].format);
 753
 754        return idx;
 755}
 756
/* Append the PHY group values in the same order the strings were
 * emitted: link_down_events, then the statistical counters, then
 * (when supported) the per-lane error counters.
 */
static int mlx5e_grp_phy_fill_stats(struct mlx5e_priv *priv, u64 *data, int idx)
{
        struct mlx5_core_dev *mdev = priv->mdev;
        int i;

        /* link_down_events_phy has special handling since it is not stored in __be64 format */
        data[idx++] = MLX5_GET(ppcnt_reg, priv->stats.pport.phy_counters,
                               counter_set.phys_layer_cntrs.link_down_events);

        if (!MLX5_CAP_PCAM_FEATURE(mdev, ppcnt_statistical_group))
                return idx;

        for (i = 0; i < NUM_PPORT_PHY_STATISTICAL_COUNTERS; i++)
                data[idx++] =
                        MLX5E_READ_CTR64_BE(&priv->stats.pport.phy_statistical_counters,
                                            pport_phy_statistical_stats_desc, i);

        if (MLX5_CAP_PCAM_FEATURE(mdev, per_lane_error_counters))
                for (i = 0; i < NUM_PPORT_PHY_STATISTICAL_PER_LANE_COUNTERS; i++)
                        data[idx++] =
                                MLX5E_READ_CTR64_BE(&priv->stats.pport.phy_statistical_counters,
                                                    pport_phy_statistical_err_lanes_stats_desc,
                                                    i);
        return idx;
}
 782
/* Refresh the cached physical-layer PPCNT groups: the plain counters
 * group always, the statistical group only when PCAM advertises it.
 */
static void mlx5e_grp_phy_update_stats(struct mlx5e_priv *priv)
{
        struct mlx5e_pport_stats *pstats = &priv->stats.pport;
        struct mlx5_core_dev *mdev = priv->mdev;
        u32 in[MLX5_ST_SZ_DW(ppcnt_reg)] = {0};
        int sz = MLX5_ST_SZ_BYTES(ppcnt_reg);
        void *out;

        MLX5_SET(ppcnt_reg, in, local_port, 1);
        out = pstats->phy_counters;
        MLX5_SET(ppcnt_reg, in, grp, MLX5_PHYSICAL_LAYER_COUNTERS_GROUP);
        mlx5_core_access_reg(mdev, in, sz, out, sz, MLX5_REG_PPCNT, 0, 0);

        if (!MLX5_CAP_PCAM_FEATURE(mdev, ppcnt_statistical_group))
                return;

        /* Reuse the request buffer; only the group id changes. */
        out = pstats->phy_statistical_counters;
        MLX5_SET(ppcnt_reg, in, grp, MLX5_PHYSICAL_LAYER_STATISTICAL_GROUP);
        mlx5_core_access_reg(mdev, in, sz, out, sz, MLX5_REG_PPCNT, 0, 0);
}
 803
/* Byte offset of the high dword of a 64-bit extended Ethernet counter
 * within PPCNT.
 */
#define PPORT_ETH_EXT_OFF(c) \
        MLX5_BYTE_OFF(ppcnt_reg, \
                      counter_set.eth_extended_cntrs_grp_data_layout.c##_high)
static const struct counter_desc pport_eth_ext_stats_desc[] = {
        { "rx_buffer_passed_thres_phy", PPORT_ETH_EXT_OFF(rx_buffer_almost_full) },
};

#define NUM_PPORT_ETH_EXT_COUNTERS      ARRAY_SIZE(pport_eth_ext_stats_desc)
 812
 813static int mlx5e_grp_eth_ext_get_num_stats(struct mlx5e_priv *priv)
 814{
 815        if (MLX5_CAP_PCAM_FEATURE((priv)->mdev, rx_buffer_fullness_counters))
 816                return NUM_PPORT_ETH_EXT_COUNTERS;
 817
 818        return 0;
 819}
 820
 821static int mlx5e_grp_eth_ext_fill_strings(struct mlx5e_priv *priv, u8 *data,
 822                                          int idx)
 823{
 824        int i;
 825
 826        if (MLX5_CAP_PCAM_FEATURE((priv)->mdev, rx_buffer_fullness_counters))
 827                for (i = 0; i < NUM_PPORT_ETH_EXT_COUNTERS; i++)
 828                        strcpy(data + (idx++) * ETH_GSTRING_LEN,
 829                               pport_eth_ext_stats_desc[i].format);
 830        return idx;
 831}
 832
 833static int mlx5e_grp_eth_ext_fill_stats(struct mlx5e_priv *priv, u64 *data,
 834                                        int idx)
 835{
 836        int i;
 837
 838        if (MLX5_CAP_PCAM_FEATURE((priv)->mdev, rx_buffer_fullness_counters))
 839                for (i = 0; i < NUM_PPORT_ETH_EXT_COUNTERS; i++)
 840                        data[idx++] =
 841                                MLX5E_READ_CTR64_BE(&priv->stats.pport.eth_ext_counters,
 842                                                    pport_eth_ext_stats_desc, i);
 843        return idx;
 844}
 845
/* Refresh the cached extended Ethernet PPCNT group when supported. */
static void mlx5e_grp_eth_ext_update_stats(struct mlx5e_priv *priv)
{
        struct mlx5e_pport_stats *pstats = &priv->stats.pport;
        struct mlx5_core_dev *mdev = priv->mdev;
        u32 in[MLX5_ST_SZ_DW(ppcnt_reg)] = {0};
        int sz = MLX5_ST_SZ_BYTES(ppcnt_reg);
        void *out;

        if (!MLX5_CAP_PCAM_FEATURE(mdev, rx_buffer_fullness_counters))
                return;

        MLX5_SET(ppcnt_reg, in, local_port, 1);
        out = pstats->eth_ext_counters;
        MLX5_SET(ppcnt_reg, in, grp, MLX5_ETHERNET_EXTENDED_COUNTERS_GROUP);
        mlx5_core_access_reg(mdev, in, sz, out, sz, MLX5_REG_PPCNT, 0, 0);
}
 862
/* Byte offset of a 32-bit PCIe performance counter within MPCNT. */
#define PCIE_PERF_OFF(c) \
        MLX5_BYTE_OFF(mpcnt_reg, counter_set.pcie_perf_cntrs_grp_data_layout.c)
static const struct counter_desc pcie_perf_stats_desc[] = {
        { "rx_pci_signal_integrity", PCIE_PERF_OFF(rx_errors) },
        { "tx_pci_signal_integrity", PCIE_PERF_OFF(tx_errors) },
};

/* Byte offset of the high dword of a 64-bit PCIe counter within MPCNT. */
#define PCIE_PERF_OFF64(c) \
        MLX5_BYTE_OFF(mpcnt_reg, counter_set.pcie_perf_cntrs_grp_data_layout.c##_high)
static const struct counter_desc pcie_perf_stats_desc64[] = {
        { "outbound_pci_buffer_overflow", PCIE_PERF_OFF64(tx_overflow_buffer_pkt) },
};

static const struct counter_desc pcie_perf_stall_stats_desc[] = {
        { "outbound_pci_stalled_rd", PCIE_PERF_OFF(outbound_stalled_reads) },
        { "outbound_pci_stalled_wr", PCIE_PERF_OFF(outbound_stalled_writes) },
        { "outbound_pci_stalled_rd_events", PCIE_PERF_OFF(outbound_stalled_reads_events) },
        { "outbound_pci_stalled_wr_events", PCIE_PERF_OFF(outbound_stalled_writes_events) },
};

#define NUM_PCIE_PERF_COUNTERS          ARRAY_SIZE(pcie_perf_stats_desc)
#define NUM_PCIE_PERF_COUNTERS64        ARRAY_SIZE(pcie_perf_stats_desc64)
#define NUM_PCIE_PERF_STALL_COUNTERS    ARRAY_SIZE(pcie_perf_stall_stats_desc)
 886
 887static int mlx5e_grp_pcie_get_num_stats(struct mlx5e_priv *priv)
 888{
 889        int num_stats = 0;
 890
 891        if (MLX5_CAP_MCAM_FEATURE((priv)->mdev, pcie_performance_group))
 892                num_stats += NUM_PCIE_PERF_COUNTERS;
 893
 894        if (MLX5_CAP_MCAM_FEATURE((priv)->mdev, tx_overflow_buffer_pkt))
 895                num_stats += NUM_PCIE_PERF_COUNTERS64;
 896
 897        if (MLX5_CAP_MCAM_FEATURE((priv)->mdev, pcie_outbound_stalled))
 898                num_stats += NUM_PCIE_PERF_STALL_COUNTERS;
 899
 900        return num_stats;
 901}
 902
 903static int mlx5e_grp_pcie_fill_strings(struct mlx5e_priv *priv, u8 *data,
 904                                       int idx)
 905{
 906        int i;
 907
 908        if (MLX5_CAP_MCAM_FEATURE((priv)->mdev, pcie_performance_group))
 909                for (i = 0; i < NUM_PCIE_PERF_COUNTERS; i++)
 910                        strcpy(data + (idx++) * ETH_GSTRING_LEN,
 911                               pcie_perf_stats_desc[i].format);
 912
 913        if (MLX5_CAP_MCAM_FEATURE((priv)->mdev, tx_overflow_buffer_pkt))
 914                for (i = 0; i < NUM_PCIE_PERF_COUNTERS64; i++)
 915                        strcpy(data + (idx++) * ETH_GSTRING_LEN,
 916                               pcie_perf_stats_desc64[i].format);
 917
 918        if (MLX5_CAP_MCAM_FEATURE((priv)->mdev, pcie_outbound_stalled))
 919                for (i = 0; i < NUM_PCIE_PERF_STALL_COUNTERS; i++)
 920                        strcpy(data + (idx++) * ETH_GSTRING_LEN,
 921                               pcie_perf_stall_stats_desc[i].format);
 922        return idx;
 923}
 924
 925static int mlx5e_grp_pcie_fill_stats(struct mlx5e_priv *priv, u64 *data,
 926                                     int idx)
 927{
 928        int i;
 929
 930        if (MLX5_CAP_MCAM_FEATURE((priv)->mdev, pcie_performance_group))
 931                for (i = 0; i < NUM_PCIE_PERF_COUNTERS; i++)
 932                        data[idx++] =
 933                                MLX5E_READ_CTR32_BE(&priv->stats.pcie.pcie_perf_counters,
 934                                                    pcie_perf_stats_desc, i);
 935
 936        if (MLX5_CAP_MCAM_FEATURE((priv)->mdev, tx_overflow_buffer_pkt))
 937                for (i = 0; i < NUM_PCIE_PERF_COUNTERS64; i++)
 938                        data[idx++] =
 939                                MLX5E_READ_CTR64_BE(&priv->stats.pcie.pcie_perf_counters,
 940                                                    pcie_perf_stats_desc64, i);
 941
 942        if (MLX5_CAP_MCAM_FEATURE((priv)->mdev, pcie_outbound_stalled))
 943                for (i = 0; i < NUM_PCIE_PERF_STALL_COUNTERS; i++)
 944                        data[idx++] =
 945                                MLX5E_READ_CTR32_BE(&priv->stats.pcie.pcie_perf_counters,
 946                                                    pcie_perf_stall_stats_desc, i);
 947        return idx;
 948}
 949
/* Refresh the cached PCIe MPCNT performance group. Gated only on
 * pcie_performance_group; the fill helpers read the other sub-groups
 * out of this same buffer.
 */
static void mlx5e_grp_pcie_update_stats(struct mlx5e_priv *priv)
{
        struct mlx5e_pcie_stats *pcie_stats = &priv->stats.pcie;
        struct mlx5_core_dev *mdev = priv->mdev;
        u32 in[MLX5_ST_SZ_DW(mpcnt_reg)] = {0};
        int sz = MLX5_ST_SZ_BYTES(mpcnt_reg);
        void *out;

        if (!MLX5_CAP_MCAM_FEATURE(mdev, pcie_performance_group))
                return;

        out = pcie_stats->pcie_perf_counters;
        MLX5_SET(mpcnt_reg, in, grp, MLX5_PCIE_PERFORMANCE_COUNTERS_GROUP);
        mlx5_core_access_reg(mdev, in, sz, out, sz, MLX5_REG_MPCNT, 0, 0);
}
 965
/* Byte offset of the high dword of a 64-bit per-priority counter within
 * PPCNT.
 */
#define PPORT_PER_PRIO_OFF(c) \
        MLX5_BYTE_OFF(ppcnt_reg, \
                      counter_set.eth_per_prio_grp_data_layout.c##_high)
/* Per-priority traffic counters; %d is filled with the priority number. */
static const struct counter_desc pport_per_prio_traffic_stats_desc[] = {
        { "rx_prio%d_bytes", PPORT_PER_PRIO_OFF(rx_octets) },
        { "rx_prio%d_packets", PPORT_PER_PRIO_OFF(rx_frames) },
        { "tx_prio%d_bytes", PPORT_PER_PRIO_OFF(tx_octets) },
        { "tx_prio%d_packets", PPORT_PER_PRIO_OFF(tx_frames) },
};

#define NUM_PPORT_PER_PRIO_TRAFFIC_COUNTERS     ARRAY_SIZE(pport_per_prio_traffic_stats_desc)
 977
/* One full set of traffic counters per priority. */
static int mlx5e_grp_per_prio_traffic_get_num_stats(void)
{
        return NUM_PPORT_PER_PRIO_TRAFFIC_COUNTERS * NUM_PPORT_PRIO;
}
 982
 983static int mlx5e_grp_per_prio_traffic_fill_strings(struct mlx5e_priv *priv,
 984                                                   u8 *data,
 985                                                   int idx)
 986{
 987        int i, prio;
 988
 989        for (prio = 0; prio < NUM_PPORT_PRIO; prio++) {
 990                for (i = 0; i < NUM_PPORT_PER_PRIO_TRAFFIC_COUNTERS; i++)
 991                        sprintf(data + (idx++) * ETH_GSTRING_LEN,
 992                                pport_per_prio_traffic_stats_desc[i].format, prio);
 993        }
 994
 995        return idx;
 996}
 997
 998static int mlx5e_grp_per_prio_traffic_fill_stats(struct mlx5e_priv *priv,
 999                                                 u64 *data,
1000                                                 int idx)
1001{
1002        int i, prio;
1003
1004        for (prio = 0; prio < NUM_PPORT_PRIO; prio++) {
1005                for (i = 0; i < NUM_PPORT_PER_PRIO_TRAFFIC_COUNTERS; i++)
1006                        data[idx++] =
1007                                MLX5E_READ_CTR64_BE(&priv->stats.pport.per_prio_counters[prio],
1008                                                    pport_per_prio_traffic_stats_desc, i);
1009        }
1010
1011        return idx;
1012}
1013
/* PFC/pause counters, read either per priority or from the prio-0 buffer
 * for global pause.
 */
static const struct counter_desc pport_per_prio_pfc_stats_desc[] = {
        /* %s is "global" or "prio{i}" */
        { "rx_%s_pause", PPORT_PER_PRIO_OFF(rx_pause) },
        { "rx_%s_pause_duration", PPORT_PER_PRIO_OFF(rx_pause_duration) },
        { "tx_%s_pause", PPORT_PER_PRIO_OFF(tx_pause) },
        { "tx_%s_pause_duration", PPORT_PER_PRIO_OFF(tx_pause_duration) },
        { "rx_%s_pause_transition", PPORT_PER_PRIO_OFF(rx_pause_transition) },
};

static const struct counter_desc pport_pfc_stall_stats_desc[] = {
        { "tx_pause_storm_warning_events", PPORT_PER_PRIO_OFF(device_stall_minor_watermark_cnt) },
        { "tx_pause_storm_error_events", PPORT_PER_PRIO_OFF(device_stall_critical_watermark_cnt) },
};

#define NUM_PPORT_PER_PRIO_PFC_COUNTERS         ARRAY_SIZE(pport_per_prio_pfc_stats_desc)
/* Evaluates to 0 unless both the pfcc_mask and stall_detect caps are set. */
#define NUM_PPORT_PFC_STALL_COUNTERS(priv)      (ARRAY_SIZE(pport_pfc_stall_stats_desc) * \
                                                 MLX5_CAP_PCAM_FEATURE((priv)->mdev, pfcc_mask) * \
                                                 MLX5_CAP_DEBUG((priv)->mdev, stall_detect))
1032
1033static unsigned long mlx5e_query_pfc_combined(struct mlx5e_priv *priv)
1034{
1035        struct mlx5_core_dev *mdev = priv->mdev;
1036        u8 pfc_en_tx;
1037        u8 pfc_en_rx;
1038        int err;
1039
1040        if (MLX5_CAP_GEN(mdev, port_type) != MLX5_CAP_PORT_TYPE_ETH)
1041                return 0;
1042
1043        err = mlx5_query_port_pfc(mdev, &pfc_en_tx, &pfc_en_rx);
1044
1045        return err ? 0 : pfc_en_tx | pfc_en_rx;
1046}
1047
1048static bool mlx5e_query_global_pause_combined(struct mlx5e_priv *priv)
1049{
1050        struct mlx5_core_dev *mdev = priv->mdev;
1051        u32 rx_pause;
1052        u32 tx_pause;
1053        int err;
1054
1055        if (MLX5_CAP_GEN(mdev, port_type) != MLX5_CAP_PORT_TYPE_ETH)
1056                return false;
1057
1058        err = mlx5_query_port_pause(mdev, &rx_pause, &tx_pause);
1059
1060        return err ? false : rx_pause | tx_pause;
1061}
1062
/* One PFC counter set per enabled priority, plus one "global" set when
 * global pause is on, plus the stall counters when supported.
 */
static int mlx5e_grp_per_prio_pfc_get_num_stats(struct mlx5e_priv *priv)
{
        return (mlx5e_query_global_pause_combined(priv) +
                hweight8(mlx5e_query_pfc_combined(priv))) *
                NUM_PPORT_PER_PRIO_PFC_COUNTERS +
                NUM_PPORT_PFC_STALL_COUNTERS(priv);
}
1070
/* Append PFC counter names: a "prioN" set per enabled priority, a
 * "global" set when global pause is on, then the stall counter names.
 * Order must match mlx5e_grp_per_prio_pfc_fill_stats().
 */
static int mlx5e_grp_per_prio_pfc_fill_strings(struct mlx5e_priv *priv,
                                               u8 *data,
                                               int idx)
{
        unsigned long pfc_combined;
        int i, prio;

        pfc_combined = mlx5e_query_pfc_combined(priv);
        for_each_set_bit(prio, &pfc_combined, NUM_PPORT_PRIO) {
                for (i = 0; i < NUM_PPORT_PER_PRIO_PFC_COUNTERS; i++) {
                        char pfc_string[ETH_GSTRING_LEN];

                        snprintf(pfc_string, sizeof(pfc_string), "prio%d", prio);
                        sprintf(data + (idx++) * ETH_GSTRING_LEN,
                                pport_per_prio_pfc_stats_desc[i].format, pfc_string);
                }
        }

        if (mlx5e_query_global_pause_combined(priv)) {
                for (i = 0; i < NUM_PPORT_PER_PRIO_PFC_COUNTERS; i++) {
                        sprintf(data + (idx++) * ETH_GSTRING_LEN,
                                pport_per_prio_pfc_stats_desc[i].format, "global");
                }
        }

        for (i = 0; i < NUM_PPORT_PFC_STALL_COUNTERS(priv); i++)
                strcpy(data + (idx++) * ETH_GSTRING_LEN,
                       pport_pfc_stall_stats_desc[i].format);

        return idx;
}
1102
/* Append PFC counter values: per enabled priority, then the prio-0
 * buffer for the "global" pause set, then the stall counters (also kept
 * in the prio-0 buffer). Order must match the strings helper.
 */
static int mlx5e_grp_per_prio_pfc_fill_stats(struct mlx5e_priv *priv,
                                             u64 *data,
                                             int idx)
{
        unsigned long pfc_combined;
        int i, prio;

        pfc_combined = mlx5e_query_pfc_combined(priv);
        for_each_set_bit(prio, &pfc_combined, NUM_PPORT_PRIO) {
                for (i = 0; i < NUM_PPORT_PER_PRIO_PFC_COUNTERS; i++) {
                        data[idx++] =
                                MLX5E_READ_CTR64_BE(&priv->stats.pport.per_prio_counters[prio],
                                                    pport_per_prio_pfc_stats_desc, i);
                }
        }

        if (mlx5e_query_global_pause_combined(priv)) {
                for (i = 0; i < NUM_PPORT_PER_PRIO_PFC_COUNTERS; i++) {
                        data[idx++] =
                                MLX5E_READ_CTR64_BE(&priv->stats.pport.per_prio_counters[0],
                                                    pport_per_prio_pfc_stats_desc, i);
                }
        }

        for (i = 0; i < NUM_PPORT_PFC_STALL_COUNTERS(priv); i++)
                data[idx++] = MLX5E_READ_CTR64_BE(&priv->stats.pport.per_prio_counters[0],
                                                  pport_pfc_stall_stats_desc, i);

        return idx;
}
1133
/* Total per-priority entries: traffic counters plus PFC/pause counters. */
static int mlx5e_grp_per_prio_get_num_stats(struct mlx5e_priv *priv)
{
        return mlx5e_grp_per_prio_traffic_get_num_stats() +
                mlx5e_grp_per_prio_pfc_get_num_stats(priv);
}
1139
/* Strings for the per-priority group: traffic names, then PFC names. */
static int mlx5e_grp_per_prio_fill_strings(struct mlx5e_priv *priv, u8 *data,
                                           int idx)
{
        idx = mlx5e_grp_per_prio_traffic_fill_strings(priv, data, idx);
        idx = mlx5e_grp_per_prio_pfc_fill_strings(priv, data, idx);
        return idx;
}
1147
/* Values for the per-priority group, same order as the strings. */
static int mlx5e_grp_per_prio_fill_stats(struct mlx5e_priv *priv, u64 *data,
                                         int idx)
{
        idx = mlx5e_grp_per_prio_traffic_fill_stats(priv, data, idx);
        idx = mlx5e_grp_per_prio_pfc_fill_stats(priv, data, idx);
        return idx;
}
1155
/* Refresh the per-priority PPCNT group, issuing one register read per
 * priority into its own cache buffer.
 */
static void mlx5e_grp_per_prio_update_stats(struct mlx5e_priv *priv)
{
        struct mlx5e_pport_stats *pstats = &priv->stats.pport;
        struct mlx5_core_dev *mdev = priv->mdev;
        u32 in[MLX5_ST_SZ_DW(ppcnt_reg)] = {0};
        int sz = MLX5_ST_SZ_BYTES(ppcnt_reg);
        int prio;
        void *out;

        if (!MLX5_BASIC_PPCNT_SUPPORTED(mdev))
                return;

        MLX5_SET(ppcnt_reg, in, local_port, 1);
        MLX5_SET(ppcnt_reg, in, grp, MLX5_PER_PRIORITY_COUNTERS_GROUP);
        for (prio = 0; prio < NUM_PPORT_PRIO; prio++) {
                out = pstats->per_prio_counters[prio];
                MLX5_SET(ppcnt_reg, in, prio_tc, prio);
                mlx5_core_access_reg(mdev, in, sz, out, sz,
                                     MLX5_REG_PPCNT, 0, 0);
        }
}
1177
/* Port-module-event counters; the offset selects the u64 slot for the
 * given event type within the core's counter arrays.
 */
static const struct counter_desc mlx5e_pme_status_desc[] = {
        { "module_unplug",       sizeof(u64) * MLX5_MODULE_STATUS_UNPLUGGED },
};

static const struct counter_desc mlx5e_pme_error_desc[] = {
        { "module_bus_stuck",    sizeof(u64) * MLX5_MODULE_EVENT_ERROR_BUS_STUCK },
        { "module_high_temp",    sizeof(u64) * MLX5_MODULE_EVENT_ERROR_HIGH_TEMPERATURE },
        { "module_bad_shorted",  sizeof(u64) * MLX5_MODULE_EVENT_ERROR_BAD_CABLE },
};

#define NUM_PME_STATUS_STATS            ARRAY_SIZE(mlx5e_pme_status_desc)
#define NUM_PME_ERR_STATS               ARRAY_SIZE(mlx5e_pme_error_desc)
1190
/* Status plus error counters; always present. */
static int mlx5e_grp_pme_get_num_stats(struct mlx5e_priv *priv)
{
        return NUM_PME_STATUS_STATS + NUM_PME_ERR_STATS;
}
1195
1196static int mlx5e_grp_pme_fill_strings(struct mlx5e_priv *priv, u8 *data,
1197                                      int idx)
1198{
1199        int i;
1200
1201        for (i = 0; i < NUM_PME_STATUS_STATS; i++)
1202                strcpy(data + (idx++) * ETH_GSTRING_LEN, mlx5e_pme_status_desc[i].format);
1203
1204        for (i = 0; i < NUM_PME_ERR_STATS; i++)
1205                strcpy(data + (idx++) * ETH_GSTRING_LEN, mlx5e_pme_error_desc[i].format);
1206
1207        return idx;
1208}
1209
/* Snapshot the port-module-event counters from the core driver and
 * append status values followed by error values.
 */
static int mlx5e_grp_pme_fill_stats(struct mlx5e_priv *priv, u64 *data,
                                    int idx)
{
        struct mlx5_pme_stats pme_stats;
        int i;

        mlx5_get_pme_stats(priv->mdev, &pme_stats);

        for (i = 0; i < NUM_PME_STATUS_STATS; i++)
                data[idx++] = MLX5E_READ_CTR64_CPU(pme_stats.status_counters,
                                                   mlx5e_pme_status_desc, i);

        for (i = 0; i < NUM_PME_ERR_STATS; i++)
                data[idx++] = MLX5E_READ_CTR64_CPU(pme_stats.error_counters,
                                                   mlx5e_pme_error_desc, i);

        return idx;
}
1228
/* IPsec group: thin adapters that delegate to en_accel/ipsec. */
static int mlx5e_grp_ipsec_get_num_stats(struct mlx5e_priv *priv)
{
        return mlx5e_ipsec_get_count(priv);
}

/* The helper returns how many strings it wrote; advance idx by that. */
static int mlx5e_grp_ipsec_fill_strings(struct mlx5e_priv *priv, u8 *data,
                                        int idx)
{
        return idx + mlx5e_ipsec_get_strings(priv,
                                             data + idx * ETH_GSTRING_LEN);
}

/* The helper returns how many values it wrote; advance idx by that. */
static int mlx5e_grp_ipsec_fill_stats(struct mlx5e_priv *priv, u64 *data,
                                      int idx)
{
        return idx + mlx5e_ipsec_get_stats(priv, data + idx);
}

static void mlx5e_grp_ipsec_update_stats(struct mlx5e_priv *priv)
{
        mlx5e_ipsec_update_stats(priv);
}
1251
/* TLS group: thin adapters that delegate to en_accel/tls. */
static int mlx5e_grp_tls_get_num_stats(struct mlx5e_priv *priv)
{
        return mlx5e_tls_get_count(priv);
}

/* The helper returns how many strings it wrote; advance idx by that. */
static int mlx5e_grp_tls_fill_strings(struct mlx5e_priv *priv, u8 *data,
                                      int idx)
{
        return idx + mlx5e_tls_get_strings(priv, data + idx * ETH_GSTRING_LEN);
}

/* The helper returns how many values it wrote; advance idx by that. */
static int mlx5e_grp_tls_fill_stats(struct mlx5e_priv *priv, u64 *data, int idx)
{
        return idx + mlx5e_tls_get_stats(priv, data + idx);
}
1267
/* Per-RQ (receive queue) software counters exposed per channel. */
static const struct counter_desc rq_stats_desc[] = {
        { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, packets) },
        { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, bytes) },
        { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, csum_complete) },
        { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, csum_complete_tail) },
        { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, csum_complete_tail_slow) },
        { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, csum_unnecessary) },
        { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, csum_unnecessary_inner) },
        { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, csum_none) },
        { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, xdp_drop) },
        { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, xdp_redirect) },
        { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, lro_packets) },
        { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, lro_bytes) },
        { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, ecn_mark) },
        { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, removed_vlan_packets) },
        { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, wqe_err) },
        { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, mpwqe_filler_cqes) },
        { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, mpwqe_filler_strides) },
        { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, oversize_pkts_sw_drop) },
        { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, buff_alloc_err) },
        { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, cqe_compress_blks) },
        { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, cqe_compress_pkts) },
        { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, cache_reuse) },
        { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, cache_full) },
        { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, cache_empty) },
        { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, cache_busy) },
        { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, cache_waive) },
        { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, congst_umr) },
        { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, arfs_err) },
};
1298
/* Per-SQ (send queue) software counters; the TLS entries are compiled in
 * only with CONFIG_MLX5_EN_TLS.
 */
static const struct counter_desc sq_stats_desc[] = {
        { MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, packets) },
        { MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, bytes) },
        { MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, tso_packets) },
        { MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, tso_bytes) },
        { MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, tso_inner_packets) },
        { MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, tso_inner_bytes) },
        { MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, csum_partial) },
        { MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, csum_partial_inner) },
        { MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, added_vlan_packets) },
        { MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, nop) },
#ifdef CONFIG_MLX5_EN_TLS
        { MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, tls_encrypted_packets) },
        { MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, tls_encrypted_bytes) },
        { MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, tls_ctx) },
        { MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, tls_ooo) },
        { MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, tls_drop_no_sync_data) },
        { MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, tls_drop_bypass_req) },
        { MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, tls_dump_packets) },
        { MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, tls_dump_bytes) },
#endif
        { MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, csum_none) },
        { MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, stopped) },
        { MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, dropped) },
        { MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, xmit_more) },
        { MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, recover) },
        { MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, cqes) },
        { MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, wake) },
        { MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, cqe_err) },
};
1329
/* Counters for the XDP-TX SQ attached to each RQ (XDP_TX action path).
 * Same layout as xdpsq_stats_desc below but printed with a different
 * ethtool prefix via MLX5E_DECLARE_RQ_XDPSQ_STAT.
 */
static const struct counter_desc rq_xdpsq_stats_desc[] = {
        { MLX5E_DECLARE_RQ_XDPSQ_STAT(struct mlx5e_xdpsq_stats, xmit) },
        { MLX5E_DECLARE_RQ_XDPSQ_STAT(struct mlx5e_xdpsq_stats, mpwqe) },
        { MLX5E_DECLARE_RQ_XDPSQ_STAT(struct mlx5e_xdpsq_stats, inlnw) },
        { MLX5E_DECLARE_RQ_XDPSQ_STAT(struct mlx5e_xdpsq_stats, full) },
        { MLX5E_DECLARE_RQ_XDPSQ_STAT(struct mlx5e_xdpsq_stats, err) },
        { MLX5E_DECLARE_RQ_XDPSQ_STAT(struct mlx5e_xdpsq_stats, cqes) },
};
1338
/* Counters for the per-channel XDP_REDIRECT transmit SQ. */
static const struct counter_desc xdpsq_stats_desc[] = {
        { MLX5E_DECLARE_XDPSQ_STAT(struct mlx5e_xdpsq_stats, xmit) },
        { MLX5E_DECLARE_XDPSQ_STAT(struct mlx5e_xdpsq_stats, mpwqe) },
        { MLX5E_DECLARE_XDPSQ_STAT(struct mlx5e_xdpsq_stats, inlnw) },
        { MLX5E_DECLARE_XDPSQ_STAT(struct mlx5e_xdpsq_stats, full) },
        { MLX5E_DECLARE_XDPSQ_STAT(struct mlx5e_xdpsq_stats, err) },
        { MLX5E_DECLARE_XDPSQ_STAT(struct mlx5e_xdpsq_stats, cqes) },
};
1347
/* Counters for the AF_XDP (XSK) RQ; reuses struct mlx5e_rq_stats but
 * exposes only the subset relevant to the XSK path, under an XSK-specific
 * ethtool prefix.  These strings/values are emitted only when an XSK
 * socket has ever been used on the device (priv->xsk.ever_used).
 */
static const struct counter_desc xskrq_stats_desc[] = {
        { MLX5E_DECLARE_XSKRQ_STAT(struct mlx5e_rq_stats, packets) },
        { MLX5E_DECLARE_XSKRQ_STAT(struct mlx5e_rq_stats, bytes) },
        { MLX5E_DECLARE_XSKRQ_STAT(struct mlx5e_rq_stats, csum_complete) },
        { MLX5E_DECLARE_XSKRQ_STAT(struct mlx5e_rq_stats, csum_unnecessary) },
        { MLX5E_DECLARE_XSKRQ_STAT(struct mlx5e_rq_stats, csum_unnecessary_inner) },
        { MLX5E_DECLARE_XSKRQ_STAT(struct mlx5e_rq_stats, csum_none) },
        { MLX5E_DECLARE_XSKRQ_STAT(struct mlx5e_rq_stats, ecn_mark) },
        { MLX5E_DECLARE_XSKRQ_STAT(struct mlx5e_rq_stats, removed_vlan_packets) },
        { MLX5E_DECLARE_XSKRQ_STAT(struct mlx5e_rq_stats, xdp_drop) },
        { MLX5E_DECLARE_XSKRQ_STAT(struct mlx5e_rq_stats, xdp_redirect) },
        { MLX5E_DECLARE_XSKRQ_STAT(struct mlx5e_rq_stats, wqe_err) },
        { MLX5E_DECLARE_XSKRQ_STAT(struct mlx5e_rq_stats, mpwqe_filler_cqes) },
        { MLX5E_DECLARE_XSKRQ_STAT(struct mlx5e_rq_stats, mpwqe_filler_strides) },
        { MLX5E_DECLARE_XSKRQ_STAT(struct mlx5e_rq_stats, oversize_pkts_sw_drop) },
        { MLX5E_DECLARE_XSKRQ_STAT(struct mlx5e_rq_stats, buff_alloc_err) },
        { MLX5E_DECLARE_XSKRQ_STAT(struct mlx5e_rq_stats, cqe_compress_blks) },
        { MLX5E_DECLARE_XSKRQ_STAT(struct mlx5e_rq_stats, cqe_compress_pkts) },
        { MLX5E_DECLARE_XSKRQ_STAT(struct mlx5e_rq_stats, congst_umr) },
        { MLX5E_DECLARE_XSKRQ_STAT(struct mlx5e_rq_stats, arfs_err) },
};
1369
/* Counters for the AF_XDP (XSK) transmit SQ; only emitted when
 * priv->xsk.ever_used is set (see the fill functions below).
 */
static const struct counter_desc xsksq_stats_desc[] = {
        { MLX5E_DECLARE_XSKSQ_STAT(struct mlx5e_xdpsq_stats, xmit) },
        { MLX5E_DECLARE_XSKSQ_STAT(struct mlx5e_xdpsq_stats, mpwqe) },
        { MLX5E_DECLARE_XSKSQ_STAT(struct mlx5e_xdpsq_stats, inlnw) },
        { MLX5E_DECLARE_XSKSQ_STAT(struct mlx5e_xdpsq_stats, full) },
        { MLX5E_DECLARE_XSKSQ_STAT(struct mlx5e_xdpsq_stats, err) },
        { MLX5E_DECLARE_XSKSQ_STAT(struct mlx5e_xdpsq_stats, cqes) },
};
1378
/* Per-channel (NAPI/EQ level) event counters. */
static const struct counter_desc ch_stats_desc[] = {
        { MLX5E_DECLARE_CH_STAT(struct mlx5e_ch_stats, events) },
        { MLX5E_DECLARE_CH_STAT(struct mlx5e_ch_stats, poll) },
        { MLX5E_DECLARE_CH_STAT(struct mlx5e_ch_stats, arm) },
        { MLX5E_DECLARE_CH_STAT(struct mlx5e_ch_stats, aff_change) },
        { MLX5E_DECLARE_CH_STAT(struct mlx5e_ch_stats, force_irq) },
        { MLX5E_DECLARE_CH_STAT(struct mlx5e_ch_stats, eq_rearm) },
};
1387
/* Element counts of the descriptor tables above; used both to size the
 * "channels" ethtool group and to walk the tables when filling strings
 * and values.
 */
#define NUM_RQ_STATS                    ARRAY_SIZE(rq_stats_desc)
#define NUM_SQ_STATS                    ARRAY_SIZE(sq_stats_desc)
#define NUM_XDPSQ_STATS                 ARRAY_SIZE(xdpsq_stats_desc)
#define NUM_RQ_XDPSQ_STATS              ARRAY_SIZE(rq_xdpsq_stats_desc)
#define NUM_XSKRQ_STATS                 ARRAY_SIZE(xskrq_stats_desc)
#define NUM_XSKSQ_STATS                 ARRAY_SIZE(xsksq_stats_desc)
#define NUM_CH_STATS                    ARRAY_SIZE(ch_stats_desc)
1395
1396static int mlx5e_grp_channels_get_num_stats(struct mlx5e_priv *priv)
1397{
1398        int max_nch = priv->max_nch;
1399
1400        return (NUM_RQ_STATS * max_nch) +
1401               (NUM_CH_STATS * max_nch) +
1402               (NUM_SQ_STATS * max_nch * priv->max_opened_tc) +
1403               (NUM_RQ_XDPSQ_STATS * max_nch) +
1404               (NUM_XDPSQ_STATS * max_nch) +
1405               (NUM_XSKRQ_STATS * max_nch * priv->xsk.ever_used) +
1406               (NUM_XSKSQ_STATS * max_nch * priv->xsk.ever_used);
1407}
1408
1409static int mlx5e_grp_channels_fill_strings(struct mlx5e_priv *priv, u8 *data,
1410                                           int idx)
1411{
1412        bool is_xsk = priv->xsk.ever_used;
1413        int max_nch = priv->max_nch;
1414        int i, j, tc;
1415
1416        for (i = 0; i < max_nch; i++)
1417                for (j = 0; j < NUM_CH_STATS; j++)
1418                        sprintf(data + (idx++) * ETH_GSTRING_LEN,
1419                                ch_stats_desc[j].format, i);
1420
1421        for (i = 0; i < max_nch; i++) {
1422                for (j = 0; j < NUM_RQ_STATS; j++)
1423                        sprintf(data + (idx++) * ETH_GSTRING_LEN,
1424                                rq_stats_desc[j].format, i);
1425                for (j = 0; j < NUM_XSKRQ_STATS * is_xsk; j++)
1426                        sprintf(data + (idx++) * ETH_GSTRING_LEN,
1427                                xskrq_stats_desc[j].format, i);
1428                for (j = 0; j < NUM_RQ_XDPSQ_STATS; j++)
1429                        sprintf(data + (idx++) * ETH_GSTRING_LEN,
1430                                rq_xdpsq_stats_desc[j].format, i);
1431        }
1432
1433        for (tc = 0; tc < priv->max_opened_tc; tc++)
1434                for (i = 0; i < max_nch; i++)
1435                        for (j = 0; j < NUM_SQ_STATS; j++)
1436                                sprintf(data + (idx++) * ETH_GSTRING_LEN,
1437                                        sq_stats_desc[j].format,
1438                                        priv->channel_tc2txq[i][tc]);
1439
1440        for (i = 0; i < max_nch; i++) {
1441                for (j = 0; j < NUM_XSKSQ_STATS * is_xsk; j++)
1442                        sprintf(data + (idx++) * ETH_GSTRING_LEN,
1443                                xsksq_stats_desc[j].format, i);
1444                for (j = 0; j < NUM_XDPSQ_STATS; j++)
1445                        sprintf(data + (idx++) * ETH_GSTRING_LEN,
1446                                xdpsq_stats_desc[j].format, i);
1447        }
1448
1449        return idx;
1450}
1451
1452static int mlx5e_grp_channels_fill_stats(struct mlx5e_priv *priv, u64 *data,
1453                                         int idx)
1454{
1455        bool is_xsk = priv->xsk.ever_used;
1456        int max_nch = priv->max_nch;
1457        int i, j, tc;
1458
1459        for (i = 0; i < max_nch; i++)
1460                for (j = 0; j < NUM_CH_STATS; j++)
1461                        data[idx++] =
1462                                MLX5E_READ_CTR64_CPU(&priv->channel_stats[i].ch,
1463                                                     ch_stats_desc, j);
1464
1465        for (i = 0; i < max_nch; i++) {
1466                for (j = 0; j < NUM_RQ_STATS; j++)
1467                        data[idx++] =
1468                                MLX5E_READ_CTR64_CPU(&priv->channel_stats[i].rq,
1469                                                     rq_stats_desc, j);
1470                for (j = 0; j < NUM_XSKRQ_STATS * is_xsk; j++)
1471                        data[idx++] =
1472                                MLX5E_READ_CTR64_CPU(&priv->channel_stats[i].xskrq,
1473                                                     xskrq_stats_desc, j);
1474                for (j = 0; j < NUM_RQ_XDPSQ_STATS; j++)
1475                        data[idx++] =
1476                                MLX5E_READ_CTR64_CPU(&priv->channel_stats[i].rq_xdpsq,
1477                                                     rq_xdpsq_stats_desc, j);
1478        }
1479
1480        for (tc = 0; tc < priv->max_opened_tc; tc++)
1481                for (i = 0; i < max_nch; i++)
1482                        for (j = 0; j < NUM_SQ_STATS; j++)
1483                                data[idx++] =
1484                                        MLX5E_READ_CTR64_CPU(&priv->channel_stats[i].sq[tc],
1485                                                             sq_stats_desc, j);
1486
1487        for (i = 0; i < max_nch; i++) {
1488                for (j = 0; j < NUM_XSKSQ_STATS * is_xsk; j++)
1489                        data[idx++] =
1490                                MLX5E_READ_CTR64_CPU(&priv->channel_stats[i].xsksq,
1491                                                     xsksq_stats_desc, j);
1492                for (j = 0; j < NUM_XDPSQ_STATS; j++)
1493                        data[idx++] =
1494                                MLX5E_READ_CTR64_CPU(&priv->channel_stats[i].xdpsq,
1495                                                     xdpsq_stats_desc, j);
1496        }
1497
1498        return idx;
1499}
1500
/* The stats groups order is opposite to the update_stats() order calls */
/* Registry of all ethtool statistics groups.  Each entry supplies the
 * group's count/strings/values callbacks and, optionally, an
 * update_stats callback with an update_stats_mask.
 * NOTE(review): MLX5E_NDO_UPDATE_STATS presumably also selects the group
 * for refresh on the ndo_get_stats64 path — confirm against the mask's
 * consumer in en_main.c.
 */
const struct mlx5e_stats_grp mlx5e_stats_grps[] = {
        {
                .get_num_stats = mlx5e_grp_sw_get_num_stats,
                .fill_strings = mlx5e_grp_sw_fill_strings,
                .fill_stats = mlx5e_grp_sw_fill_stats,
                .update_stats = mlx5e_grp_sw_update_stats,
        },
        {
                .get_num_stats = mlx5e_grp_q_get_num_stats,
                .fill_strings = mlx5e_grp_q_fill_strings,
                .fill_stats = mlx5e_grp_q_fill_stats,
                .update_stats_mask = MLX5E_NDO_UPDATE_STATS,
                .update_stats = mlx5e_grp_q_update_stats,
        },
        {
                .get_num_stats = mlx5e_grp_vnic_env_get_num_stats,
                .fill_strings = mlx5e_grp_vnic_env_fill_strings,
                .fill_stats = mlx5e_grp_vnic_env_fill_stats,
                .update_stats = mlx5e_grp_vnic_env_update_stats,
        },
        {
                .get_num_stats = mlx5e_grp_vport_get_num_stats,
                .fill_strings = mlx5e_grp_vport_fill_strings,
                .fill_stats = mlx5e_grp_vport_fill_stats,
                .update_stats_mask = MLX5E_NDO_UPDATE_STATS,
                .update_stats = mlx5e_grp_vport_update_stats,
        },
        {
                .get_num_stats = mlx5e_grp_802_3_get_num_stats,
                .fill_strings = mlx5e_grp_802_3_fill_strings,
                .fill_stats = mlx5e_grp_802_3_fill_stats,
                .update_stats_mask = MLX5E_NDO_UPDATE_STATS,
                .update_stats = mlx5e_grp_802_3_update_stats,
        },
        {
                .get_num_stats = mlx5e_grp_2863_get_num_stats,
                .fill_strings = mlx5e_grp_2863_fill_strings,
                .fill_stats = mlx5e_grp_2863_fill_stats,
                .update_stats = mlx5e_grp_2863_update_stats,
        },
        {
                .get_num_stats = mlx5e_grp_2819_get_num_stats,
                .fill_strings = mlx5e_grp_2819_fill_strings,
                .fill_stats = mlx5e_grp_2819_fill_stats,
                .update_stats = mlx5e_grp_2819_update_stats,
        },
        {
                .get_num_stats = mlx5e_grp_phy_get_num_stats,
                .fill_strings = mlx5e_grp_phy_fill_strings,
                .fill_stats = mlx5e_grp_phy_fill_stats,
                .update_stats = mlx5e_grp_phy_update_stats,
        },
        {
                .get_num_stats = mlx5e_grp_eth_ext_get_num_stats,
                .fill_strings = mlx5e_grp_eth_ext_fill_strings,
                .fill_stats = mlx5e_grp_eth_ext_fill_stats,
                .update_stats = mlx5e_grp_eth_ext_update_stats,
        },
        {
                .get_num_stats = mlx5e_grp_pcie_get_num_stats,
                .fill_strings = mlx5e_grp_pcie_fill_strings,
                .fill_stats = mlx5e_grp_pcie_fill_stats,
                .update_stats = mlx5e_grp_pcie_update_stats,
        },
        {
                .get_num_stats = mlx5e_grp_per_prio_get_num_stats,
                .fill_strings = mlx5e_grp_per_prio_fill_strings,
                .fill_stats = mlx5e_grp_per_prio_fill_stats,
                .update_stats = mlx5e_grp_per_prio_update_stats,
        },
        {
                /* PME, IPsec, TLS and channels groups below have no
                 * update_stats: their counters are read directly at
                 * fill time.  (IPsec is the exception with one.)
                 */
                .get_num_stats = mlx5e_grp_pme_get_num_stats,
                .fill_strings = mlx5e_grp_pme_fill_strings,
                .fill_stats = mlx5e_grp_pme_fill_stats,
        },
        {
                .get_num_stats = mlx5e_grp_ipsec_get_num_stats,
                .fill_strings = mlx5e_grp_ipsec_fill_strings,
                .fill_stats = mlx5e_grp_ipsec_fill_stats,
                .update_stats = mlx5e_grp_ipsec_update_stats,
        },
        {
                .get_num_stats = mlx5e_grp_tls_get_num_stats,
                .fill_strings = mlx5e_grp_tls_fill_strings,
                .fill_stats = mlx5e_grp_tls_fill_stats,
        },
        {
                .get_num_stats = mlx5e_grp_channels_get_num_stats,
                .fill_strings = mlx5e_grp_channels_fill_strings,
                .fill_stats = mlx5e_grp_channels_fill_stats,
        }
};

/* Number of registered stats groups, iterated by the ethtool callbacks. */
const int mlx5e_num_stats_grps = ARRAY_SIZE(mlx5e_stats_grps);
1596