linux/drivers/net/ethernet/mellanox/mlx5/core/en_stats.c
<<
>>
Prefs
/*
 * Copyright (c) 2017, Mellanox Technologies, Ltd.  All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
  32
#include "lib/mlx5.h"
#include "en.h"
#include "en_accel/ipsec.h"
#include "en_accel/tls.h"
  37
  38static const struct counter_desc sw_stats_desc[] = {
  39        { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_packets) },
  40        { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_bytes) },
  41        { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_packets) },
  42        { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_bytes) },
  43        { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_tso_packets) },
  44        { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_tso_bytes) },
  45        { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_tso_inner_packets) },
  46        { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_tso_inner_bytes) },
  47        { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_added_vlan_packets) },
  48        { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_nop) },
  49
  50#ifdef CONFIG_MLX5_EN_TLS
  51        { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_tls_ooo) },
  52        { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_tls_resync_bytes) },
  53#endif
  54
  55        { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_lro_packets) },
  56        { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_lro_bytes) },
  57        { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_ecn_mark) },
  58        { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_removed_vlan_packets) },
  59        { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_csum_unnecessary) },
  60        { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_csum_none) },
  61        { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_csum_complete) },
  62        { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_csum_complete_tail) },
  63        { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_csum_complete_tail_slow) },
  64        { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_csum_unnecessary_inner) },
  65        { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_xdp_drop) },
  66        { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_xdp_redirect) },
  67        { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_xdp_tx_xmit) },
  68        { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_xdp_tx_full) },
  69        { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_xdp_tx_err) },
  70        { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_xdp_tx_cqe) },
  71        { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_csum_none) },
  72        { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_csum_partial) },
  73        { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_csum_partial_inner) },
  74        { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_queue_stopped) },
  75        { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_queue_dropped) },
  76        { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_xmit_more) },
  77        { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_recover) },
  78        { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_cqes) },
  79        { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_queue_wake) },
  80        { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_cqe_err) },
  81        { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_xdp_xmit) },
  82        { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_xdp_full) },
  83        { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_xdp_err) },
  84        { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_xdp_cqes) },
  85        { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_wqe_err) },
  86        { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_mpwqe_filler_cqes) },
  87        { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_mpwqe_filler_strides) },
  88        { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_oversize_pkts_sw_drop) },
  89        { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_buff_alloc_err) },
  90        { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_cqe_compress_blks) },
  91        { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_cqe_compress_pkts) },
  92        { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_page_reuse) },
  93        { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_cache_reuse) },
  94        { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_cache_full) },
  95        { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_cache_empty) },
  96        { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_cache_busy) },
  97        { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_cache_waive) },
  98        { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_congst_umr) },
  99        { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_arfs_err) },
 100        { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, ch_events) },
 101        { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, ch_poll) },
 102        { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, ch_arm) },
 103        { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, ch_aff_change) },
 104        { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, ch_eq_rearm) },
 105};
 106
 107#define NUM_SW_COUNTERS                 ARRAY_SIZE(sw_stats_desc)
 108
 109static int mlx5e_grp_sw_get_num_stats(struct mlx5e_priv *priv)
 110{
 111        return NUM_SW_COUNTERS;
 112}
 113
 114static int mlx5e_grp_sw_fill_strings(struct mlx5e_priv *priv, u8 *data, int idx)
 115{
 116        int i;
 117
 118        for (i = 0; i < NUM_SW_COUNTERS; i++)
 119                strcpy(data + (idx++) * ETH_GSTRING_LEN, sw_stats_desc[i].format);
 120        return idx;
 121}
 122
 123static int mlx5e_grp_sw_fill_stats(struct mlx5e_priv *priv, u64 *data, int idx)
 124{
 125        int i;
 126
 127        for (i = 0; i < NUM_SW_COUNTERS; i++)
 128                data[idx++] = MLX5E_READ_CTR64_CPU(&priv->stats.sw, sw_stats_desc, i);
 129        return idx;
 130}
 131
 132static void mlx5e_grp_sw_update_stats(struct mlx5e_priv *priv)
 133{
 134        struct mlx5e_sw_stats *s = &priv->stats.sw;
 135        int i;
 136
 137        memset(s, 0, sizeof(*s));
 138
 139        for (i = 0; i < mlx5e_get_netdev_max_channels(priv->netdev); i++) {
 140                struct mlx5e_channel_stats *channel_stats =
 141                        &priv->channel_stats[i];
 142                struct mlx5e_xdpsq_stats *xdpsq_red_stats = &channel_stats->xdpsq;
 143                struct mlx5e_xdpsq_stats *xdpsq_stats = &channel_stats->rq_xdpsq;
 144                struct mlx5e_rq_stats *rq_stats = &channel_stats->rq;
 145                struct mlx5e_ch_stats *ch_stats = &channel_stats->ch;
 146                int j;
 147
 148                s->rx_packets   += rq_stats->packets;
 149                s->rx_bytes     += rq_stats->bytes;
 150                s->rx_lro_packets += rq_stats->lro_packets;
 151                s->rx_lro_bytes += rq_stats->lro_bytes;
 152                s->rx_ecn_mark  += rq_stats->ecn_mark;
 153                s->rx_removed_vlan_packets += rq_stats->removed_vlan_packets;
 154                s->rx_csum_none += rq_stats->csum_none;
 155                s->rx_csum_complete += rq_stats->csum_complete;
 156                s->rx_csum_complete_tail += rq_stats->csum_complete_tail;
 157                s->rx_csum_complete_tail_slow += rq_stats->csum_complete_tail_slow;
 158                s->rx_csum_unnecessary += rq_stats->csum_unnecessary;
 159                s->rx_csum_unnecessary_inner += rq_stats->csum_unnecessary_inner;
 160                s->rx_xdp_drop     += rq_stats->xdp_drop;
 161                s->rx_xdp_redirect += rq_stats->xdp_redirect;
 162                s->rx_xdp_tx_xmit  += xdpsq_stats->xmit;
 163                s->rx_xdp_tx_full  += xdpsq_stats->full;
 164                s->rx_xdp_tx_err   += xdpsq_stats->err;
 165                s->rx_xdp_tx_cqe   += xdpsq_stats->cqes;
 166                s->rx_wqe_err   += rq_stats->wqe_err;
 167                s->rx_mpwqe_filler_cqes    += rq_stats->mpwqe_filler_cqes;
 168                s->rx_mpwqe_filler_strides += rq_stats->mpwqe_filler_strides;
 169                s->rx_oversize_pkts_sw_drop += rq_stats->oversize_pkts_sw_drop;
 170                s->rx_buff_alloc_err += rq_stats->buff_alloc_err;
 171                s->rx_cqe_compress_blks += rq_stats->cqe_compress_blks;
 172                s->rx_cqe_compress_pkts += rq_stats->cqe_compress_pkts;
 173                s->rx_page_reuse  += rq_stats->page_reuse;
 174                s->rx_cache_reuse += rq_stats->cache_reuse;
 175                s->rx_cache_full  += rq_stats->cache_full;
 176                s->rx_cache_empty += rq_stats->cache_empty;
 177                s->rx_cache_busy  += rq_stats->cache_busy;
 178                s->rx_cache_waive += rq_stats->cache_waive;
 179                s->rx_congst_umr  += rq_stats->congst_umr;
 180                s->rx_arfs_err    += rq_stats->arfs_err;
 181                s->ch_events      += ch_stats->events;
 182                s->ch_poll        += ch_stats->poll;
 183                s->ch_arm         += ch_stats->arm;
 184                s->ch_aff_change  += ch_stats->aff_change;
 185                s->ch_eq_rearm    += ch_stats->eq_rearm;
 186                /* xdp redirect */
 187                s->tx_xdp_xmit    += xdpsq_red_stats->xmit;
 188                s->tx_xdp_full    += xdpsq_red_stats->full;
 189                s->tx_xdp_err     += xdpsq_red_stats->err;
 190                s->tx_xdp_cqes    += xdpsq_red_stats->cqes;
 191
 192                for (j = 0; j < priv->max_opened_tc; j++) {
 193                        struct mlx5e_sq_stats *sq_stats = &channel_stats->sq[j];
 194
 195                        s->tx_packets           += sq_stats->packets;
 196                        s->tx_bytes             += sq_stats->bytes;
 197                        s->tx_tso_packets       += sq_stats->tso_packets;
 198                        s->tx_tso_bytes         += sq_stats->tso_bytes;
 199                        s->tx_tso_inner_packets += sq_stats->tso_inner_packets;
 200                        s->tx_tso_inner_bytes   += sq_stats->tso_inner_bytes;
 201                        s->tx_added_vlan_packets += sq_stats->added_vlan_packets;
 202                        s->tx_nop               += sq_stats->nop;
 203                        s->tx_queue_stopped     += sq_stats->stopped;
 204                        s->tx_queue_wake        += sq_stats->wake;
 205                        s->tx_queue_dropped     += sq_stats->dropped;
 206                        s->tx_cqe_err           += sq_stats->cqe_err;
 207                        s->tx_recover           += sq_stats->recover;
 208                        s->tx_xmit_more         += sq_stats->xmit_more;
 209                        s->tx_csum_partial_inner += sq_stats->csum_partial_inner;
 210                        s->tx_csum_none         += sq_stats->csum_none;
 211                        s->tx_csum_partial      += sq_stats->csum_partial;
 212#ifdef CONFIG_MLX5_EN_TLS
 213                        s->tx_tls_ooo           += sq_stats->tls_ooo;
 214                        s->tx_tls_resync_bytes  += sq_stats->tls_resync_bytes;
 215#endif
 216                        s->tx_cqes              += sq_stats->cqes;
 217                }
 218        }
 219}
 220
 221static const struct counter_desc q_stats_desc[] = {
 222        { MLX5E_DECLARE_STAT(struct mlx5e_qcounter_stats, rx_out_of_buffer) },
 223};
 224
 225static const struct counter_desc drop_rq_stats_desc[] = {
 226        { MLX5E_DECLARE_STAT(struct mlx5e_qcounter_stats, rx_if_down_packets) },
 227};
 228
 229#define NUM_Q_COUNTERS                  ARRAY_SIZE(q_stats_desc)
 230#define NUM_DROP_RQ_COUNTERS            ARRAY_SIZE(drop_rq_stats_desc)
 231
 232static int mlx5e_grp_q_get_num_stats(struct mlx5e_priv *priv)
 233{
 234        int num_stats = 0;
 235
 236        if (priv->q_counter)
 237                num_stats += NUM_Q_COUNTERS;
 238
 239        if (priv->drop_rq_q_counter)
 240                num_stats += NUM_DROP_RQ_COUNTERS;
 241
 242        return num_stats;
 243}
 244
 245static int mlx5e_grp_q_fill_strings(struct mlx5e_priv *priv, u8 *data, int idx)
 246{
 247        int i;
 248
 249        for (i = 0; i < NUM_Q_COUNTERS && priv->q_counter; i++)
 250                strcpy(data + (idx++) * ETH_GSTRING_LEN,
 251                       q_stats_desc[i].format);
 252
 253        for (i = 0; i < NUM_DROP_RQ_COUNTERS && priv->drop_rq_q_counter; i++)
 254                strcpy(data + (idx++) * ETH_GSTRING_LEN,
 255                       drop_rq_stats_desc[i].format);
 256
 257        return idx;
 258}
 259
 260static int mlx5e_grp_q_fill_stats(struct mlx5e_priv *priv, u64 *data, int idx)
 261{
 262        int i;
 263
 264        for (i = 0; i < NUM_Q_COUNTERS && priv->q_counter; i++)
 265                data[idx++] = MLX5E_READ_CTR32_CPU(&priv->stats.qcnt,
 266                                                   q_stats_desc, i);
 267        for (i = 0; i < NUM_DROP_RQ_COUNTERS && priv->drop_rq_q_counter; i++)
 268                data[idx++] = MLX5E_READ_CTR32_CPU(&priv->stats.qcnt,
 269                                                   drop_rq_stats_desc, i);
 270        return idx;
 271}
 272
 273static void mlx5e_grp_q_update_stats(struct mlx5e_priv *priv)
 274{
 275        struct mlx5e_qcounter_stats *qcnt = &priv->stats.qcnt;
 276        u32 out[MLX5_ST_SZ_DW(query_q_counter_out)];
 277
 278        if (priv->q_counter &&
 279            !mlx5_core_query_q_counter(priv->mdev, priv->q_counter, 0, out,
 280                                       sizeof(out)))
 281                qcnt->rx_out_of_buffer = MLX5_GET(query_q_counter_out,
 282                                                  out, out_of_buffer);
 283        if (priv->drop_rq_q_counter &&
 284            !mlx5_core_query_q_counter(priv->mdev, priv->drop_rq_q_counter, 0,
 285                                       out, sizeof(out)))
 286                qcnt->rx_if_down_packets = MLX5_GET(query_q_counter_out, out,
 287                                                    out_of_buffer);
 288}
 289
 290#define VNIC_ENV_OFF(c) MLX5_BYTE_OFF(query_vnic_env_out, c)
 291static const struct counter_desc vnic_env_stats_desc[] = {
 292        { "rx_steer_missed_packets",
 293                VNIC_ENV_OFF(vport_env.nic_receive_steering_discard) },
 294};
 295
 296#define NUM_VNIC_ENV_COUNTERS           ARRAY_SIZE(vnic_env_stats_desc)
 297
 298static int mlx5e_grp_vnic_env_get_num_stats(struct mlx5e_priv *priv)
 299{
 300        return MLX5_CAP_GEN(priv->mdev, nic_receive_steering_discard) ?
 301                NUM_VNIC_ENV_COUNTERS : 0;
 302}
 303
 304static int mlx5e_grp_vnic_env_fill_strings(struct mlx5e_priv *priv, u8 *data,
 305                                           int idx)
 306{
 307        int i;
 308
 309        if (!MLX5_CAP_GEN(priv->mdev, nic_receive_steering_discard))
 310                return idx;
 311
 312        for (i = 0; i < NUM_VNIC_ENV_COUNTERS; i++)
 313                strcpy(data + (idx++) * ETH_GSTRING_LEN,
 314                       vnic_env_stats_desc[i].format);
 315        return idx;
 316}
 317
 318static int mlx5e_grp_vnic_env_fill_stats(struct mlx5e_priv *priv, u64 *data,
 319                                         int idx)
 320{
 321        int i;
 322
 323        if (!MLX5_CAP_GEN(priv->mdev, nic_receive_steering_discard))
 324                return idx;
 325
 326        for (i = 0; i < NUM_VNIC_ENV_COUNTERS; i++)
 327                data[idx++] = MLX5E_READ_CTR64_BE(priv->stats.vnic.query_vnic_env_out,
 328                                                  vnic_env_stats_desc, i);
 329        return idx;
 330}
 331
 332static void mlx5e_grp_vnic_env_update_stats(struct mlx5e_priv *priv)
 333{
 334        u32 *out = (u32 *)priv->stats.vnic.query_vnic_env_out;
 335        int outlen = MLX5_ST_SZ_BYTES(query_vnic_env_out);
 336        u32 in[MLX5_ST_SZ_DW(query_vnic_env_in)] = {0};
 337        struct mlx5_core_dev *mdev = priv->mdev;
 338
 339        if (!MLX5_CAP_GEN(priv->mdev, nic_receive_steering_discard))
 340                return;
 341
 342        MLX5_SET(query_vnic_env_in, in, opcode,
 343                 MLX5_CMD_OP_QUERY_VNIC_ENV);
 344        MLX5_SET(query_vnic_env_in, in, op_mod, 0);
 345        MLX5_SET(query_vnic_env_in, in, other_vport, 0);
 346        mlx5_cmd_exec(mdev, in, sizeof(in), out, outlen);
 347}
 348
 349#define VPORT_COUNTER_OFF(c) MLX5_BYTE_OFF(query_vport_counter_out, c)
 350static const struct counter_desc vport_stats_desc[] = {
 351        { "rx_vport_unicast_packets",
 352                VPORT_COUNTER_OFF(received_eth_unicast.packets) },
 353        { "rx_vport_unicast_bytes",
 354                VPORT_COUNTER_OFF(received_eth_unicast.octets) },
 355        { "tx_vport_unicast_packets",
 356                VPORT_COUNTER_OFF(transmitted_eth_unicast.packets) },
 357        { "tx_vport_unicast_bytes",
 358                VPORT_COUNTER_OFF(transmitted_eth_unicast.octets) },
 359        { "rx_vport_multicast_packets",
 360                VPORT_COUNTER_OFF(received_eth_multicast.packets) },
 361        { "rx_vport_multicast_bytes",
 362                VPORT_COUNTER_OFF(received_eth_multicast.octets) },
 363        { "tx_vport_multicast_packets",
 364                VPORT_COUNTER_OFF(transmitted_eth_multicast.packets) },
 365        { "tx_vport_multicast_bytes",
 366                VPORT_COUNTER_OFF(transmitted_eth_multicast.octets) },
 367        { "rx_vport_broadcast_packets",
 368                VPORT_COUNTER_OFF(received_eth_broadcast.packets) },
 369        { "rx_vport_broadcast_bytes",
 370                VPORT_COUNTER_OFF(received_eth_broadcast.octets) },
 371        { "tx_vport_broadcast_packets",
 372                VPORT_COUNTER_OFF(transmitted_eth_broadcast.packets) },
 373        { "tx_vport_broadcast_bytes",
 374                VPORT_COUNTER_OFF(transmitted_eth_broadcast.octets) },
 375        { "rx_vport_rdma_unicast_packets",
 376                VPORT_COUNTER_OFF(received_ib_unicast.packets) },
 377        { "rx_vport_rdma_unicast_bytes",
 378                VPORT_COUNTER_OFF(received_ib_unicast.octets) },
 379        { "tx_vport_rdma_unicast_packets",
 380                VPORT_COUNTER_OFF(transmitted_ib_unicast.packets) },
 381        { "tx_vport_rdma_unicast_bytes",
 382                VPORT_COUNTER_OFF(transmitted_ib_unicast.octets) },
 383        { "rx_vport_rdma_multicast_packets",
 384                VPORT_COUNTER_OFF(received_ib_multicast.packets) },
 385        { "rx_vport_rdma_multicast_bytes",
 386                VPORT_COUNTER_OFF(received_ib_multicast.octets) },
 387        { "tx_vport_rdma_multicast_packets",
 388                VPORT_COUNTER_OFF(transmitted_ib_multicast.packets) },
 389        { "tx_vport_rdma_multicast_bytes",
 390                VPORT_COUNTER_OFF(transmitted_ib_multicast.octets) },
 391};
 392
 393#define NUM_VPORT_COUNTERS              ARRAY_SIZE(vport_stats_desc)
 394
 395static int mlx5e_grp_vport_get_num_stats(struct mlx5e_priv *priv)
 396{
 397        return NUM_VPORT_COUNTERS;
 398}
 399
 400static int mlx5e_grp_vport_fill_strings(struct mlx5e_priv *priv, u8 *data,
 401                                        int idx)
 402{
 403        int i;
 404
 405        for (i = 0; i < NUM_VPORT_COUNTERS; i++)
 406                strcpy(data + (idx++) * ETH_GSTRING_LEN, vport_stats_desc[i].format);
 407        return idx;
 408}
 409
 410static int mlx5e_grp_vport_fill_stats(struct mlx5e_priv *priv, u64 *data,
 411                                      int idx)
 412{
 413        int i;
 414
 415        for (i = 0; i < NUM_VPORT_COUNTERS; i++)
 416                data[idx++] = MLX5E_READ_CTR64_BE(priv->stats.vport.query_vport_out,
 417                                                  vport_stats_desc, i);
 418        return idx;
 419}
 420
 421static void mlx5e_grp_vport_update_stats(struct mlx5e_priv *priv)
 422{
 423        int outlen = MLX5_ST_SZ_BYTES(query_vport_counter_out);
 424        u32 *out = (u32 *)priv->stats.vport.query_vport_out;
 425        u32 in[MLX5_ST_SZ_DW(query_vport_counter_in)] = {0};
 426        struct mlx5_core_dev *mdev = priv->mdev;
 427
 428        MLX5_SET(query_vport_counter_in, in, opcode, MLX5_CMD_OP_QUERY_VPORT_COUNTER);
 429        MLX5_SET(query_vport_counter_in, in, op_mod, 0);
 430        MLX5_SET(query_vport_counter_in, in, other_vport, 0);
 431        mlx5_cmd_exec(mdev, in, sizeof(in), out, outlen);
 432}
 433
 434#define PPORT_802_3_OFF(c) \
 435        MLX5_BYTE_OFF(ppcnt_reg, \
 436                      counter_set.eth_802_3_cntrs_grp_data_layout.c##_high)
 437static const struct counter_desc pport_802_3_stats_desc[] = {
 438        { "tx_packets_phy", PPORT_802_3_OFF(a_frames_transmitted_ok) },
 439        { "rx_packets_phy", PPORT_802_3_OFF(a_frames_received_ok) },
 440        { "rx_crc_errors_phy", PPORT_802_3_OFF(a_frame_check_sequence_errors) },
 441        { "tx_bytes_phy", PPORT_802_3_OFF(a_octets_transmitted_ok) },
 442        { "rx_bytes_phy", PPORT_802_3_OFF(a_octets_received_ok) },
 443        { "tx_multicast_phy", PPORT_802_3_OFF(a_multicast_frames_xmitted_ok) },
 444        { "tx_broadcast_phy", PPORT_802_3_OFF(a_broadcast_frames_xmitted_ok) },
 445        { "rx_multicast_phy", PPORT_802_3_OFF(a_multicast_frames_received_ok) },
 446        { "rx_broadcast_phy", PPORT_802_3_OFF(a_broadcast_frames_received_ok) },
 447        { "rx_in_range_len_errors_phy", PPORT_802_3_OFF(a_in_range_length_errors) },
 448        { "rx_out_of_range_len_phy", PPORT_802_3_OFF(a_out_of_range_length_field) },
 449        { "rx_oversize_pkts_phy", PPORT_802_3_OFF(a_frame_too_long_errors) },
 450        { "rx_symbol_err_phy", PPORT_802_3_OFF(a_symbol_error_during_carrier) },
 451        { "tx_mac_control_phy", PPORT_802_3_OFF(a_mac_control_frames_transmitted) },
 452        { "rx_mac_control_phy", PPORT_802_3_OFF(a_mac_control_frames_received) },
 453        { "rx_unsupported_op_phy", PPORT_802_3_OFF(a_unsupported_opcodes_received) },
 454        { "rx_pause_ctrl_phy", PPORT_802_3_OFF(a_pause_mac_ctrl_frames_received) },
 455        { "tx_pause_ctrl_phy", PPORT_802_3_OFF(a_pause_mac_ctrl_frames_transmitted) },
 456};
 457
 458#define NUM_PPORT_802_3_COUNTERS        ARRAY_SIZE(pport_802_3_stats_desc)
 459
 460static int mlx5e_grp_802_3_get_num_stats(struct mlx5e_priv *priv)
 461{
 462        return NUM_PPORT_802_3_COUNTERS;
 463}
 464
 465static int mlx5e_grp_802_3_fill_strings(struct mlx5e_priv *priv, u8 *data,
 466                                        int idx)
 467{
 468        int i;
 469
 470        for (i = 0; i < NUM_PPORT_802_3_COUNTERS; i++)
 471                strcpy(data + (idx++) * ETH_GSTRING_LEN, pport_802_3_stats_desc[i].format);
 472        return idx;
 473}
 474
 475static int mlx5e_grp_802_3_fill_stats(struct mlx5e_priv *priv, u64 *data,
 476                                      int idx)
 477{
 478        int i;
 479
 480        for (i = 0; i < NUM_PPORT_802_3_COUNTERS; i++)
 481                data[idx++] = MLX5E_READ_CTR64_BE(&priv->stats.pport.IEEE_802_3_counters,
 482                                                  pport_802_3_stats_desc, i);
 483        return idx;
 484}
 485
 486#define MLX5_BASIC_PPCNT_SUPPORTED(mdev) \
 487        (MLX5_CAP_GEN(mdev, pcam_reg) ? MLX5_CAP_PCAM_REG(mdev, ppcnt) : 1)
 488
 489void mlx5e_grp_802_3_update_stats(struct mlx5e_priv *priv)
 490{
 491        struct mlx5e_pport_stats *pstats = &priv->stats.pport;
 492        struct mlx5_core_dev *mdev = priv->mdev;
 493        u32 in[MLX5_ST_SZ_DW(ppcnt_reg)] = {0};
 494        int sz = MLX5_ST_SZ_BYTES(ppcnt_reg);
 495        void *out;
 496
 497        if (!MLX5_BASIC_PPCNT_SUPPORTED(mdev))
 498                return;
 499
 500        MLX5_SET(ppcnt_reg, in, local_port, 1);
 501        out = pstats->IEEE_802_3_counters;
 502        MLX5_SET(ppcnt_reg, in, grp, MLX5_IEEE_802_3_COUNTERS_GROUP);
 503        mlx5_core_access_reg(mdev, in, sz, out, sz, MLX5_REG_PPCNT, 0, 0);
 504}
 505
 506#define PPORT_2863_OFF(c) \
 507        MLX5_BYTE_OFF(ppcnt_reg, \
 508                      counter_set.eth_2863_cntrs_grp_data_layout.c##_high)
 509static const struct counter_desc pport_2863_stats_desc[] = {
 510        { "rx_discards_phy", PPORT_2863_OFF(if_in_discards) },
 511        { "tx_discards_phy", PPORT_2863_OFF(if_out_discards) },
 512        { "tx_errors_phy", PPORT_2863_OFF(if_out_errors) },
 513};
 514
 515#define NUM_PPORT_2863_COUNTERS         ARRAY_SIZE(pport_2863_stats_desc)
 516
 517static int mlx5e_grp_2863_get_num_stats(struct mlx5e_priv *priv)
 518{
 519        return NUM_PPORT_2863_COUNTERS;
 520}
 521
 522static int mlx5e_grp_2863_fill_strings(struct mlx5e_priv *priv, u8 *data,
 523                                       int idx)
 524{
 525        int i;
 526
 527        for (i = 0; i < NUM_PPORT_2863_COUNTERS; i++)
 528                strcpy(data + (idx++) * ETH_GSTRING_LEN, pport_2863_stats_desc[i].format);
 529        return idx;
 530}
 531
 532static int mlx5e_grp_2863_fill_stats(struct mlx5e_priv *priv, u64 *data,
 533                                     int idx)
 534{
 535        int i;
 536
 537        for (i = 0; i < NUM_PPORT_2863_COUNTERS; i++)
 538                data[idx++] = MLX5E_READ_CTR64_BE(&priv->stats.pport.RFC_2863_counters,
 539                                                  pport_2863_stats_desc, i);
 540        return idx;
 541}
 542
 543static void mlx5e_grp_2863_update_stats(struct mlx5e_priv *priv)
 544{
 545        struct mlx5e_pport_stats *pstats = &priv->stats.pport;
 546        struct mlx5_core_dev *mdev = priv->mdev;
 547        u32 in[MLX5_ST_SZ_DW(ppcnt_reg)] = {0};
 548        int sz = MLX5_ST_SZ_BYTES(ppcnt_reg);
 549        void *out;
 550
 551        MLX5_SET(ppcnt_reg, in, local_port, 1);
 552        out = pstats->RFC_2863_counters;
 553        MLX5_SET(ppcnt_reg, in, grp, MLX5_RFC_2863_COUNTERS_GROUP);
 554        mlx5_core_access_reg(mdev, in, sz, out, sz, MLX5_REG_PPCNT, 0, 0);
 555}
 556
 557#define PPORT_2819_OFF(c) \
 558        MLX5_BYTE_OFF(ppcnt_reg, \
 559                      counter_set.eth_2819_cntrs_grp_data_layout.c##_high)
 560static const struct counter_desc pport_2819_stats_desc[] = {
 561        { "rx_undersize_pkts_phy", PPORT_2819_OFF(ether_stats_undersize_pkts) },
 562        { "rx_fragments_phy", PPORT_2819_OFF(ether_stats_fragments) },
 563        { "rx_jabbers_phy", PPORT_2819_OFF(ether_stats_jabbers) },
 564        { "rx_64_bytes_phy", PPORT_2819_OFF(ether_stats_pkts64octets) },
 565        { "rx_65_to_127_bytes_phy", PPORT_2819_OFF(ether_stats_pkts65to127octets) },
 566        { "rx_128_to_255_bytes_phy", PPORT_2819_OFF(ether_stats_pkts128to255octets) },
 567        { "rx_256_to_511_bytes_phy", PPORT_2819_OFF(ether_stats_pkts256to511octets) },
 568        { "rx_512_to_1023_bytes_phy", PPORT_2819_OFF(ether_stats_pkts512to1023octets) },
 569        { "rx_1024_to_1518_bytes_phy", PPORT_2819_OFF(ether_stats_pkts1024to1518octets) },
 570        { "rx_1519_to_2047_bytes_phy", PPORT_2819_OFF(ether_stats_pkts1519to2047octets) },
 571        { "rx_2048_to_4095_bytes_phy", PPORT_2819_OFF(ether_stats_pkts2048to4095octets) },
 572        { "rx_4096_to_8191_bytes_phy", PPORT_2819_OFF(ether_stats_pkts4096to8191octets) },
 573        { "rx_8192_to_10239_bytes_phy", PPORT_2819_OFF(ether_stats_pkts8192to10239octets) },
 574};
 575
 576#define NUM_PPORT_2819_COUNTERS         ARRAY_SIZE(pport_2819_stats_desc)
 577
 578static int mlx5e_grp_2819_get_num_stats(struct mlx5e_priv *priv)
 579{
 580        return NUM_PPORT_2819_COUNTERS;
 581}
 582
 583static int mlx5e_grp_2819_fill_strings(struct mlx5e_priv *priv, u8 *data,
 584                                       int idx)
 585{
 586        int i;
 587
 588        for (i = 0; i < NUM_PPORT_2819_COUNTERS; i++)
 589                strcpy(data + (idx++) * ETH_GSTRING_LEN, pport_2819_stats_desc[i].format);
 590        return idx;
 591}
 592
 593static int mlx5e_grp_2819_fill_stats(struct mlx5e_priv *priv, u64 *data,
 594                                     int idx)
 595{
 596        int i;
 597
 598        for (i = 0; i < NUM_PPORT_2819_COUNTERS; i++)
 599                data[idx++] = MLX5E_READ_CTR64_BE(&priv->stats.pport.RFC_2819_counters,
 600                                                  pport_2819_stats_desc, i);
 601        return idx;
 602}
 603
 604static void mlx5e_grp_2819_update_stats(struct mlx5e_priv *priv)
 605{
 606        struct mlx5e_pport_stats *pstats = &priv->stats.pport;
 607        struct mlx5_core_dev *mdev = priv->mdev;
 608        u32 in[MLX5_ST_SZ_DW(ppcnt_reg)] = {0};
 609        int sz = MLX5_ST_SZ_BYTES(ppcnt_reg);
 610        void *out;
 611
 612        if (!MLX5_BASIC_PPCNT_SUPPORTED(mdev))
 613                return;
 614
 615        MLX5_SET(ppcnt_reg, in, local_port, 1);
 616        out = pstats->RFC_2819_counters;
 617        MLX5_SET(ppcnt_reg, in, grp, MLX5_RFC_2819_COUNTERS_GROUP);
 618        mlx5_core_access_reg(mdev, in, sz, out, sz, MLX5_REG_PPCNT, 0, 0);
 619}
 620
 621#define PPORT_PHY_STATISTICAL_OFF(c) \
 622        MLX5_BYTE_OFF(ppcnt_reg, \
 623                      counter_set.phys_layer_statistical_cntrs.c##_high)
 624static const struct counter_desc pport_phy_statistical_stats_desc[] = {
 625        { "rx_pcs_symbol_err_phy", PPORT_PHY_STATISTICAL_OFF(phy_symbol_errors) },
 626        { "rx_corrected_bits_phy", PPORT_PHY_STATISTICAL_OFF(phy_corrected_bits) },
 627};
 628
 629static const struct counter_desc
 630pport_phy_statistical_err_lanes_stats_desc[] = {
 631        { "rx_err_lane_0_phy", PPORT_PHY_STATISTICAL_OFF(phy_corrected_bits_lane0) },
 632        { "rx_err_lane_1_phy", PPORT_PHY_STATISTICAL_OFF(phy_corrected_bits_lane1) },
 633        { "rx_err_lane_2_phy", PPORT_PHY_STATISTICAL_OFF(phy_corrected_bits_lane2) },
 634        { "rx_err_lane_3_phy", PPORT_PHY_STATISTICAL_OFF(phy_corrected_bits_lane3) },
 635};
 636
 637#define NUM_PPORT_PHY_STATISTICAL_COUNTERS \
 638        ARRAY_SIZE(pport_phy_statistical_stats_desc)
 639#define NUM_PPORT_PHY_STATISTICAL_PER_LANE_COUNTERS \
 640        ARRAY_SIZE(pport_phy_statistical_err_lanes_stats_desc)
 641
 642static int mlx5e_grp_phy_get_num_stats(struct mlx5e_priv *priv)
 643{
 644        struct mlx5_core_dev *mdev = priv->mdev;
 645        int num_stats;
 646
 647        /* "1" for link_down_events special counter */
 648        num_stats = 1;
 649
 650        num_stats += MLX5_CAP_PCAM_FEATURE(mdev, ppcnt_statistical_group) ?
 651                     NUM_PPORT_PHY_STATISTICAL_COUNTERS : 0;
 652
 653        num_stats += MLX5_CAP_PCAM_FEATURE(mdev, per_lane_error_counters) ?
 654                     NUM_PPORT_PHY_STATISTICAL_PER_LANE_COUNTERS : 0;
 655
 656        return num_stats;
 657}
 658
 659static int mlx5e_grp_phy_fill_strings(struct mlx5e_priv *priv, u8 *data,
 660                                      int idx)
 661{
 662        struct mlx5_core_dev *mdev = priv->mdev;
 663        int i;
 664
 665        strcpy(data + (idx++) * ETH_GSTRING_LEN, "link_down_events_phy");
 666
 667        if (!MLX5_CAP_PCAM_FEATURE(mdev, ppcnt_statistical_group))
 668                return idx;
 669
 670        for (i = 0; i < NUM_PPORT_PHY_STATISTICAL_COUNTERS; i++)
 671                strcpy(data + (idx++) * ETH_GSTRING_LEN,
 672                       pport_phy_statistical_stats_desc[i].format);
 673
 674        if (MLX5_CAP_PCAM_FEATURE(mdev, per_lane_error_counters))
 675                for (i = 0; i < NUM_PPORT_PHY_STATISTICAL_PER_LANE_COUNTERS; i++)
 676                        strcpy(data + (idx++) * ETH_GSTRING_LEN,
 677                               pport_phy_statistical_err_lanes_stats_desc[i].format);
 678
 679        return idx;
 680}
 681
 682static int mlx5e_grp_phy_fill_stats(struct mlx5e_priv *priv, u64 *data, int idx)
 683{
 684        struct mlx5_core_dev *mdev = priv->mdev;
 685        int i;
 686
 687        /* link_down_events_phy has special handling since it is not stored in __be64 format */
 688        data[idx++] = MLX5_GET(ppcnt_reg, priv->stats.pport.phy_counters,
 689                               counter_set.phys_layer_cntrs.link_down_events);
 690
 691        if (!MLX5_CAP_PCAM_FEATURE(mdev, ppcnt_statistical_group))
 692                return idx;
 693
 694        for (i = 0; i < NUM_PPORT_PHY_STATISTICAL_COUNTERS; i++)
 695                data[idx++] =
 696                        MLX5E_READ_CTR64_BE(&priv->stats.pport.phy_statistical_counters,
 697                                            pport_phy_statistical_stats_desc, i);
 698
 699        if (MLX5_CAP_PCAM_FEATURE(mdev, per_lane_error_counters))
 700                for (i = 0; i < NUM_PPORT_PHY_STATISTICAL_PER_LANE_COUNTERS; i++)
 701                        data[idx++] =
 702                                MLX5E_READ_CTR64_BE(&priv->stats.pport.phy_statistical_counters,
 703                                                    pport_phy_statistical_err_lanes_stats_desc,
 704                                                    i);
 705        return idx;
 706}
 707
 708static void mlx5e_grp_phy_update_stats(struct mlx5e_priv *priv)
 709{
 710        struct mlx5e_pport_stats *pstats = &priv->stats.pport;
 711        struct mlx5_core_dev *mdev = priv->mdev;
 712        u32 in[MLX5_ST_SZ_DW(ppcnt_reg)] = {0};
 713        int sz = MLX5_ST_SZ_BYTES(ppcnt_reg);
 714        void *out;
 715
 716        MLX5_SET(ppcnt_reg, in, local_port, 1);
 717        out = pstats->phy_counters;
 718        MLX5_SET(ppcnt_reg, in, grp, MLX5_PHYSICAL_LAYER_COUNTERS_GROUP);
 719        mlx5_core_access_reg(mdev, in, sz, out, sz, MLX5_REG_PPCNT, 0, 0);
 720
 721        if (!MLX5_CAP_PCAM_FEATURE(mdev, ppcnt_statistical_group))
 722                return;
 723
 724        out = pstats->phy_statistical_counters;
 725        MLX5_SET(ppcnt_reg, in, grp, MLX5_PHYSICAL_LAYER_STATISTICAL_GROUP);
 726        mlx5_core_access_reg(mdev, in, sz, out, sz, MLX5_REG_PPCNT, 0, 0);
 727}
 728
/* Byte offset of the high dword of a 64-bit counter in the PPCNT
 * ethernet extended counter set.
 */
#define PPORT_ETH_EXT_OFF(c) \
	MLX5_BYTE_OFF(ppcnt_reg, \
		      counter_set.eth_extended_cntrs_grp_data_layout.c##_high)
/* ethtool name -> PPCNT offset for the ethernet extended group. */
static const struct counter_desc pport_eth_ext_stats_desc[] = {
	{ "rx_buffer_passed_thres_phy", PPORT_ETH_EXT_OFF(rx_buffer_almost_full) },
};

#define NUM_PPORT_ETH_EXT_COUNTERS	ARRAY_SIZE(pport_eth_ext_stats_desc)
 737
 738static int mlx5e_grp_eth_ext_get_num_stats(struct mlx5e_priv *priv)
 739{
 740        if (MLX5_CAP_PCAM_FEATURE((priv)->mdev, rx_buffer_fullness_counters))
 741                return NUM_PPORT_ETH_EXT_COUNTERS;
 742
 743        return 0;
 744}
 745
 746static int mlx5e_grp_eth_ext_fill_strings(struct mlx5e_priv *priv, u8 *data,
 747                                          int idx)
 748{
 749        int i;
 750
 751        if (MLX5_CAP_PCAM_FEATURE((priv)->mdev, rx_buffer_fullness_counters))
 752                for (i = 0; i < NUM_PPORT_ETH_EXT_COUNTERS; i++)
 753                        strcpy(data + (idx++) * ETH_GSTRING_LEN,
 754                               pport_eth_ext_stats_desc[i].format);
 755        return idx;
 756}
 757
 758static int mlx5e_grp_eth_ext_fill_stats(struct mlx5e_priv *priv, u64 *data,
 759                                        int idx)
 760{
 761        int i;
 762
 763        if (MLX5_CAP_PCAM_FEATURE((priv)->mdev, rx_buffer_fullness_counters))
 764                for (i = 0; i < NUM_PPORT_ETH_EXT_COUNTERS; i++)
 765                        data[idx++] =
 766                                MLX5E_READ_CTR64_BE(&priv->stats.pport.eth_ext_counters,
 767                                                    pport_eth_ext_stats_desc, i);
 768        return idx;
 769}
 770
 771static void mlx5e_grp_eth_ext_update_stats(struct mlx5e_priv *priv)
 772{
 773        struct mlx5e_pport_stats *pstats = &priv->stats.pport;
 774        struct mlx5_core_dev *mdev = priv->mdev;
 775        u32 in[MLX5_ST_SZ_DW(ppcnt_reg)] = {0};
 776        int sz = MLX5_ST_SZ_BYTES(ppcnt_reg);
 777        void *out;
 778
 779        if (!MLX5_CAP_PCAM_FEATURE(mdev, rx_buffer_fullness_counters))
 780                return;
 781
 782        MLX5_SET(ppcnt_reg, in, local_port, 1);
 783        out = pstats->eth_ext_counters;
 784        MLX5_SET(ppcnt_reg, in, grp, MLX5_ETHERNET_EXTENDED_COUNTERS_GROUP);
 785        mlx5_core_access_reg(mdev, in, sz, out, sz, MLX5_REG_PPCNT, 0, 0);
 786}
 787
/* Byte offset of a 32-bit counter in the MPCNT PCIe performance set. */
#define PCIE_PERF_OFF(c) \
	MLX5_BYTE_OFF(mpcnt_reg, counter_set.pcie_perf_cntrs_grp_data_layout.c)
/* 32-bit PCIe counters (read via MLX5E_READ_CTR32_BE). */
static const struct counter_desc pcie_perf_stats_desc[] = {
	{ "rx_pci_signal_integrity", PCIE_PERF_OFF(rx_errors) },
	{ "tx_pci_signal_integrity", PCIE_PERF_OFF(tx_errors) },
};

/* Byte offset of the high dword of a 64-bit MPCNT PCIe counter. */
#define PCIE_PERF_OFF64(c) \
	MLX5_BYTE_OFF(mpcnt_reg, counter_set.pcie_perf_cntrs_grp_data_layout.c##_high)
/* 64-bit PCIe counters, gated on the tx_overflow_buffer_pkt MCAM bit. */
static const struct counter_desc pcie_perf_stats_desc64[] = {
	{ "outbound_pci_buffer_overflow", PCIE_PERF_OFF64(tx_overflow_buffer_pkt) },
};

/* Outbound-stall counters, gated on the pcie_outbound_stalled MCAM bit. */
static const struct counter_desc pcie_perf_stall_stats_desc[] = {
	{ "outbound_pci_stalled_rd", PCIE_PERF_OFF(outbound_stalled_reads) },
	{ "outbound_pci_stalled_wr", PCIE_PERF_OFF(outbound_stalled_writes) },
	{ "outbound_pci_stalled_rd_events", PCIE_PERF_OFF(outbound_stalled_reads_events) },
	{ "outbound_pci_stalled_wr_events", PCIE_PERF_OFF(outbound_stalled_writes_events) },
};

#define NUM_PCIE_PERF_COUNTERS		ARRAY_SIZE(pcie_perf_stats_desc)
#define NUM_PCIE_PERF_COUNTERS64	ARRAY_SIZE(pcie_perf_stats_desc64)
#define NUM_PCIE_PERF_STALL_COUNTERS	ARRAY_SIZE(pcie_perf_stall_stats_desc)
 811
 812static int mlx5e_grp_pcie_get_num_stats(struct mlx5e_priv *priv)
 813{
 814        int num_stats = 0;
 815
 816        if (MLX5_CAP_MCAM_FEATURE((priv)->mdev, pcie_performance_group))
 817                num_stats += NUM_PCIE_PERF_COUNTERS;
 818
 819        if (MLX5_CAP_MCAM_FEATURE((priv)->mdev, tx_overflow_buffer_pkt))
 820                num_stats += NUM_PCIE_PERF_COUNTERS64;
 821
 822        if (MLX5_CAP_MCAM_FEATURE((priv)->mdev, pcie_outbound_stalled))
 823                num_stats += NUM_PCIE_PERF_STALL_COUNTERS;
 824
 825        return num_stats;
 826}
 827
 828static int mlx5e_grp_pcie_fill_strings(struct mlx5e_priv *priv, u8 *data,
 829                                       int idx)
 830{
 831        int i;
 832
 833        if (MLX5_CAP_MCAM_FEATURE((priv)->mdev, pcie_performance_group))
 834                for (i = 0; i < NUM_PCIE_PERF_COUNTERS; i++)
 835                        strcpy(data + (idx++) * ETH_GSTRING_LEN,
 836                               pcie_perf_stats_desc[i].format);
 837
 838        if (MLX5_CAP_MCAM_FEATURE((priv)->mdev, tx_overflow_buffer_pkt))
 839                for (i = 0; i < NUM_PCIE_PERF_COUNTERS64; i++)
 840                        strcpy(data + (idx++) * ETH_GSTRING_LEN,
 841                               pcie_perf_stats_desc64[i].format);
 842
 843        if (MLX5_CAP_MCAM_FEATURE((priv)->mdev, pcie_outbound_stalled))
 844                for (i = 0; i < NUM_PCIE_PERF_STALL_COUNTERS; i++)
 845                        strcpy(data + (idx++) * ETH_GSTRING_LEN,
 846                               pcie_perf_stall_stats_desc[i].format);
 847        return idx;
 848}
 849
 850static int mlx5e_grp_pcie_fill_stats(struct mlx5e_priv *priv, u64 *data,
 851                                     int idx)
 852{
 853        int i;
 854
 855        if (MLX5_CAP_MCAM_FEATURE((priv)->mdev, pcie_performance_group))
 856                for (i = 0; i < NUM_PCIE_PERF_COUNTERS; i++)
 857                        data[idx++] =
 858                                MLX5E_READ_CTR32_BE(&priv->stats.pcie.pcie_perf_counters,
 859                                                    pcie_perf_stats_desc, i);
 860
 861        if (MLX5_CAP_MCAM_FEATURE((priv)->mdev, tx_overflow_buffer_pkt))
 862                for (i = 0; i < NUM_PCIE_PERF_COUNTERS64; i++)
 863                        data[idx++] =
 864                                MLX5E_READ_CTR64_BE(&priv->stats.pcie.pcie_perf_counters,
 865                                                    pcie_perf_stats_desc64, i);
 866
 867        if (MLX5_CAP_MCAM_FEATURE((priv)->mdev, pcie_outbound_stalled))
 868                for (i = 0; i < NUM_PCIE_PERF_STALL_COUNTERS; i++)
 869                        data[idx++] =
 870                                MLX5E_READ_CTR32_BE(&priv->stats.pcie.pcie_perf_counters,
 871                                                    pcie_perf_stall_stats_desc, i);
 872        return idx;
 873}
 874
 875static void mlx5e_grp_pcie_update_stats(struct mlx5e_priv *priv)
 876{
 877        struct mlx5e_pcie_stats *pcie_stats = &priv->stats.pcie;
 878        struct mlx5_core_dev *mdev = priv->mdev;
 879        u32 in[MLX5_ST_SZ_DW(mpcnt_reg)] = {0};
 880        int sz = MLX5_ST_SZ_BYTES(mpcnt_reg);
 881        void *out;
 882
 883        if (!MLX5_CAP_MCAM_FEATURE(mdev, pcie_performance_group))
 884                return;
 885
 886        out = pcie_stats->pcie_perf_counters;
 887        MLX5_SET(mpcnt_reg, in, grp, MLX5_PCIE_PERFORMANCE_COUNTERS_GROUP);
 888        mlx5_core_access_reg(mdev, in, sz, out, sz, MLX5_REG_MPCNT, 0, 0);
 889}
 890
/* Byte offset of the high dword of a 64-bit counter in the PPCNT
 * per-priority counter set.
 */
#define PPORT_PER_PRIO_OFF(c) \
	MLX5_BYTE_OFF(ppcnt_reg, \
		      counter_set.eth_per_prio_grp_data_layout.c##_high)
/* Per-priority traffic counters; "%d" is replaced by the priority. */
static const struct counter_desc pport_per_prio_traffic_stats_desc[] = {
	{ "rx_prio%d_bytes", PPORT_PER_PRIO_OFF(rx_octets) },
	{ "rx_prio%d_packets", PPORT_PER_PRIO_OFF(rx_frames) },
	{ "tx_prio%d_bytes", PPORT_PER_PRIO_OFF(tx_octets) },
	{ "tx_prio%d_packets", PPORT_PER_PRIO_OFF(tx_frames) },
};

#define NUM_PPORT_PER_PRIO_TRAFFIC_COUNTERS	ARRAY_SIZE(pport_per_prio_traffic_stats_desc)
 902
 903static int mlx5e_grp_per_prio_traffic_get_num_stats(void)
 904{
 905        return NUM_PPORT_PER_PRIO_TRAFFIC_COUNTERS * NUM_PPORT_PRIO;
 906}
 907
 908static int mlx5e_grp_per_prio_traffic_fill_strings(struct mlx5e_priv *priv,
 909                                                   u8 *data,
 910                                                   int idx)
 911{
 912        int i, prio;
 913
 914        for (prio = 0; prio < NUM_PPORT_PRIO; prio++) {
 915                for (i = 0; i < NUM_PPORT_PER_PRIO_TRAFFIC_COUNTERS; i++)
 916                        sprintf(data + (idx++) * ETH_GSTRING_LEN,
 917                                pport_per_prio_traffic_stats_desc[i].format, prio);
 918        }
 919
 920        return idx;
 921}
 922
 923static int mlx5e_grp_per_prio_traffic_fill_stats(struct mlx5e_priv *priv,
 924                                                 u64 *data,
 925                                                 int idx)
 926{
 927        int i, prio;
 928
 929        for (prio = 0; prio < NUM_PPORT_PRIO; prio++) {
 930                for (i = 0; i < NUM_PPORT_PER_PRIO_TRAFFIC_COUNTERS; i++)
 931                        data[idx++] =
 932                                MLX5E_READ_CTR64_BE(&priv->stats.pport.per_prio_counters[prio],
 933                                                    pport_per_prio_traffic_stats_desc, i);
 934        }
 935
 936        return idx;
 937}
 938
static const struct counter_desc pport_per_prio_pfc_stats_desc[] = {
	/* %s is "global" or "prio{i}" */
	{ "rx_%s_pause", PPORT_PER_PRIO_OFF(rx_pause) },
	{ "rx_%s_pause_duration", PPORT_PER_PRIO_OFF(rx_pause_duration) },
	{ "tx_%s_pause", PPORT_PER_PRIO_OFF(tx_pause) },
	{ "tx_%s_pause_duration", PPORT_PER_PRIO_OFF(tx_pause_duration) },
	{ "rx_%s_pause_transition", PPORT_PER_PRIO_OFF(rx_pause_transition) },
};

/* Pause-storm watermark event counters. */
static const struct counter_desc pport_pfc_stall_stats_desc[] = {
	{ "tx_pause_storm_warning_events", PPORT_PER_PRIO_OFF(device_stall_minor_watermark_cnt) },
	{ "tx_pause_storm_error_events", PPORT_PER_PRIO_OFF(device_stall_critical_watermark_cnt) },
};

#define NUM_PPORT_PER_PRIO_PFC_COUNTERS		ARRAY_SIZE(pport_per_prio_pfc_stats_desc)
/* Stall counters are exposed only when both the pfcc_mask PCAM bit and
 * the stall_detect debug capability are set (the product of the two
 * 0/1 capability bits gates the array size).
 */
#define NUM_PPORT_PFC_STALL_COUNTERS(priv)	(ARRAY_SIZE(pport_pfc_stall_stats_desc) * \
						 MLX5_CAP_PCAM_FEATURE((priv)->mdev, pfcc_mask) * \
						 MLX5_CAP_DEBUG((priv)->mdev, stall_detect))
 957
 958static unsigned long mlx5e_query_pfc_combined(struct mlx5e_priv *priv)
 959{
 960        struct mlx5_core_dev *mdev = priv->mdev;
 961        u8 pfc_en_tx;
 962        u8 pfc_en_rx;
 963        int err;
 964
 965        if (MLX5_CAP_GEN(mdev, port_type) != MLX5_CAP_PORT_TYPE_ETH)
 966                return 0;
 967
 968        err = mlx5_query_port_pfc(mdev, &pfc_en_tx, &pfc_en_rx);
 969
 970        return err ? 0 : pfc_en_tx | pfc_en_rx;
 971}
 972
 973static bool mlx5e_query_global_pause_combined(struct mlx5e_priv *priv)
 974{
 975        struct mlx5_core_dev *mdev = priv->mdev;
 976        u32 rx_pause;
 977        u32 tx_pause;
 978        int err;
 979
 980        if (MLX5_CAP_GEN(mdev, port_type) != MLX5_CAP_PORT_TYPE_ETH)
 981                return false;
 982
 983        err = mlx5_query_port_pause(mdev, &rx_pause, &tx_pause);
 984
 985        return err ? false : rx_pause | tx_pause;
 986}
 987
 988static int mlx5e_grp_per_prio_pfc_get_num_stats(struct mlx5e_priv *priv)
 989{
 990        return (mlx5e_query_global_pause_combined(priv) +
 991                hweight8(mlx5e_query_pfc_combined(priv))) *
 992                NUM_PPORT_PER_PRIO_PFC_COUNTERS +
 993                NUM_PPORT_PFC_STALL_COUNTERS(priv);
 994}
 995
 996static int mlx5e_grp_per_prio_pfc_fill_strings(struct mlx5e_priv *priv,
 997                                               u8 *data,
 998                                               int idx)
 999{
1000        unsigned long pfc_combined;
1001        int i, prio;
1002
1003        pfc_combined = mlx5e_query_pfc_combined(priv);
1004        for_each_set_bit(prio, &pfc_combined, NUM_PPORT_PRIO) {
1005                for (i = 0; i < NUM_PPORT_PER_PRIO_PFC_COUNTERS; i++) {
1006                        char pfc_string[ETH_GSTRING_LEN];
1007
1008                        snprintf(pfc_string, sizeof(pfc_string), "prio%d", prio);
1009                        sprintf(data + (idx++) * ETH_GSTRING_LEN,
1010                                pport_per_prio_pfc_stats_desc[i].format, pfc_string);
1011                }
1012        }
1013
1014        if (mlx5e_query_global_pause_combined(priv)) {
1015                for (i = 0; i < NUM_PPORT_PER_PRIO_PFC_COUNTERS; i++) {
1016                        sprintf(data + (idx++) * ETH_GSTRING_LEN,
1017                                pport_per_prio_pfc_stats_desc[i].format, "global");
1018                }
1019        }
1020
1021        for (i = 0; i < NUM_PPORT_PFC_STALL_COUNTERS(priv); i++)
1022                strcpy(data + (idx++) * ETH_GSTRING_LEN,
1023                       pport_pfc_stall_stats_desc[i].format);
1024
1025        return idx;
1026}
1027
1028static int mlx5e_grp_per_prio_pfc_fill_stats(struct mlx5e_priv *priv,
1029                                             u64 *data,
1030                                             int idx)
1031{
1032        unsigned long pfc_combined;
1033        int i, prio;
1034
1035        pfc_combined = mlx5e_query_pfc_combined(priv);
1036        for_each_set_bit(prio, &pfc_combined, NUM_PPORT_PRIO) {
1037                for (i = 0; i < NUM_PPORT_PER_PRIO_PFC_COUNTERS; i++) {
1038                        data[idx++] =
1039                                MLX5E_READ_CTR64_BE(&priv->stats.pport.per_prio_counters[prio],
1040                                                    pport_per_prio_pfc_stats_desc, i);
1041                }
1042        }
1043
1044        if (mlx5e_query_global_pause_combined(priv)) {
1045                for (i = 0; i < NUM_PPORT_PER_PRIO_PFC_COUNTERS; i++) {
1046                        data[idx++] =
1047                                MLX5E_READ_CTR64_BE(&priv->stats.pport.per_prio_counters[0],
1048                                                    pport_per_prio_pfc_stats_desc, i);
1049                }
1050        }
1051
1052        for (i = 0; i < NUM_PPORT_PFC_STALL_COUNTERS(priv); i++)
1053                data[idx++] = MLX5E_READ_CTR64_BE(&priv->stats.pport.per_prio_counters[0],
1054                                                  pport_pfc_stall_stats_desc, i);
1055
1056        return idx;
1057}
1058
/* Total per-priority counters: traffic plus PFC/pause/stall. */
static int mlx5e_grp_per_prio_get_num_stats(struct mlx5e_priv *priv)
{
	int num = mlx5e_grp_per_prio_traffic_get_num_stats();

	return num + mlx5e_grp_per_prio_pfc_get_num_stats(priv);
}
1064
1065static int mlx5e_grp_per_prio_fill_strings(struct mlx5e_priv *priv, u8 *data,
1066                                           int idx)
1067{
1068        idx = mlx5e_grp_per_prio_traffic_fill_strings(priv, data, idx);
1069        idx = mlx5e_grp_per_prio_pfc_fill_strings(priv, data, idx);
1070        return idx;
1071}
1072
1073static int mlx5e_grp_per_prio_fill_stats(struct mlx5e_priv *priv, u64 *data,
1074                                         int idx)
1075{
1076        idx = mlx5e_grp_per_prio_traffic_fill_stats(priv, data, idx);
1077        idx = mlx5e_grp_per_prio_pfc_fill_stats(priv, data, idx);
1078        return idx;
1079}
1080
1081static void mlx5e_grp_per_prio_update_stats(struct mlx5e_priv *priv)
1082{
1083        struct mlx5e_pport_stats *pstats = &priv->stats.pport;
1084        struct mlx5_core_dev *mdev = priv->mdev;
1085        u32 in[MLX5_ST_SZ_DW(ppcnt_reg)] = {0};
1086        int sz = MLX5_ST_SZ_BYTES(ppcnt_reg);
1087        int prio;
1088        void *out;
1089
1090        if (!MLX5_BASIC_PPCNT_SUPPORTED(mdev))
1091                return;
1092
1093        MLX5_SET(ppcnt_reg, in, local_port, 1);
1094        MLX5_SET(ppcnt_reg, in, grp, MLX5_PER_PRIORITY_COUNTERS_GROUP);
1095        for (prio = 0; prio < NUM_PPORT_PRIO; prio++) {
1096                out = pstats->per_prio_counters[prio];
1097                MLX5_SET(ppcnt_reg, in, prio_tc, prio);
1098                mlx5_core_access_reg(mdev, in, sz, out, sz,
1099                                     MLX5_REG_PPCNT, 0, 0);
1100        }
1101}
1102
/* Port module event counters. The offset is an index into an array of
 * u64 counters (sizeof(u64) * event id), read with
 * MLX5E_READ_CTR64_CPU from the snapshot taken in mlx5e_grp_pme_fill_stats.
 */
static const struct counter_desc mlx5e_pme_status_desc[] = {
	{ "module_unplug",       sizeof(u64) * MLX5_MODULE_STATUS_UNPLUGGED },
};

static const struct counter_desc mlx5e_pme_error_desc[] = {
	{ "module_bus_stuck",    sizeof(u64) * MLX5_MODULE_EVENT_ERROR_BUS_STUCK },
	{ "module_high_temp",    sizeof(u64) * MLX5_MODULE_EVENT_ERROR_HIGH_TEMPERATURE },
	{ "module_bad_shorted",  sizeof(u64) * MLX5_MODULE_EVENT_ERROR_BAD_CABLE },
};

#define NUM_PME_STATUS_STATS		ARRAY_SIZE(mlx5e_pme_status_desc)
#define NUM_PME_ERR_STATS		ARRAY_SIZE(mlx5e_pme_error_desc)
1115
1116static int mlx5e_grp_pme_get_num_stats(struct mlx5e_priv *priv)
1117{
1118        return NUM_PME_STATUS_STATS + NUM_PME_ERR_STATS;
1119}
1120
1121static int mlx5e_grp_pme_fill_strings(struct mlx5e_priv *priv, u8 *data,
1122                                      int idx)
1123{
1124        int i;
1125
1126        for (i = 0; i < NUM_PME_STATUS_STATS; i++)
1127                strcpy(data + (idx++) * ETH_GSTRING_LEN, mlx5e_pme_status_desc[i].format);
1128
1129        for (i = 0; i < NUM_PME_ERR_STATS; i++)
1130                strcpy(data + (idx++) * ETH_GSTRING_LEN, mlx5e_pme_error_desc[i].format);
1131
1132        return idx;
1133}
1134
1135static int mlx5e_grp_pme_fill_stats(struct mlx5e_priv *priv, u64 *data,
1136                                    int idx)
1137{
1138        struct mlx5_pme_stats pme_stats;
1139        int i;
1140
1141        mlx5_get_pme_stats(priv->mdev, &pme_stats);
1142
1143        for (i = 0; i < NUM_PME_STATUS_STATS; i++)
1144                data[idx++] = MLX5E_READ_CTR64_CPU(pme_stats.status_counters,
1145                                                   mlx5e_pme_status_desc, i);
1146
1147        for (i = 0; i < NUM_PME_ERR_STATS; i++)
1148                data[idx++] = MLX5E_READ_CTR64_CPU(pme_stats.error_counters,
1149                                                   mlx5e_pme_error_desc, i);
1150
1151        return idx;
1152}
1153
/* Number of IPsec counters, as reported by the IPsec accel layer. */
static int mlx5e_grp_ipsec_get_num_stats(struct mlx5e_priv *priv)
{
	int count = mlx5e_ipsec_get_count(priv);

	return count;
}
1158
1159static int mlx5e_grp_ipsec_fill_strings(struct mlx5e_priv *priv, u8 *data,
1160                                        int idx)
1161{
1162        return idx + mlx5e_ipsec_get_strings(priv,
1163                                             data + idx * ETH_GSTRING_LEN);
1164}
1165
1166static int mlx5e_grp_ipsec_fill_stats(struct mlx5e_priv *priv, u64 *data,
1167                                      int idx)
1168{
1169        return idx + mlx5e_ipsec_get_stats(priv, data + idx);
1170}
1171
/* Refresh the cached IPsec counters via the IPsec accel layer. */
static void mlx5e_grp_ipsec_update_stats(struct mlx5e_priv *priv)
{
	mlx5e_ipsec_update_stats(priv);
}
1176
/* Number of TLS counters, as reported by the TLS accel layer. */
static int mlx5e_grp_tls_get_num_stats(struct mlx5e_priv *priv)
{
	int count = mlx5e_tls_get_count(priv);

	return count;
}
1181
1182static int mlx5e_grp_tls_fill_strings(struct mlx5e_priv *priv, u8 *data,
1183                                      int idx)
1184{
1185        return idx + mlx5e_tls_get_strings(priv, data + idx * ETH_GSTRING_LEN);
1186}
1187
1188static int mlx5e_grp_tls_fill_stats(struct mlx5e_priv *priv, u64 *data, int idx)
1189{
1190        return idx + mlx5e_tls_get_stats(priv, data + idx);
1191}
1192
/* Per-RQ software counters, read per channel from
 * priv->channel_stats[].rq by the channels fill helpers.
 */
static const struct counter_desc rq_stats_desc[] = {
	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, packets) },
	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, bytes) },
	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, csum_complete) },
	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, csum_complete_tail) },
	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, csum_complete_tail_slow) },
	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, csum_unnecessary) },
	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, csum_unnecessary_inner) },
	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, csum_none) },
	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, xdp_drop) },
	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, xdp_redirect) },
	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, lro_packets) },
	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, lro_bytes) },
	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, ecn_mark) },
	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, removed_vlan_packets) },
	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, wqe_err) },
	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, mpwqe_filler_cqes) },
	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, mpwqe_filler_strides) },
	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, oversize_pkts_sw_drop) },
	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, buff_alloc_err) },
	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, cqe_compress_blks) },
	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, cqe_compress_pkts) },
	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, page_reuse) },
	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, cache_reuse) },
	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, cache_full) },
	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, cache_empty) },
	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, cache_busy) },
	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, cache_waive) },
	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, congst_umr) },
	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, arfs_err) },
};
1224
/* Per-SQ software counters, read per channel/TC from
 * priv->channel_stats[].sq[tc] by the channels fill helpers.
 */
static const struct counter_desc sq_stats_desc[] = {
	{ MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, packets) },
	{ MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, bytes) },
	{ MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, tso_packets) },
	{ MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, tso_bytes) },
	{ MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, tso_inner_packets) },
	{ MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, tso_inner_bytes) },
	{ MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, csum_partial) },
	{ MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, csum_partial_inner) },
	{ MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, added_vlan_packets) },
	{ MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, nop) },
	{ MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, csum_none) },
	{ MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, stopped) },
	{ MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, dropped) },
	{ MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, xmit_more) },
	{ MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, recover) },
	{ MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, cqes) },
	{ MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, wake) },
	{ MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, cqe_err) },
};
1245
/* XDP-SQ counters attached to an RQ (XDP_TX path), read from
 * priv->channel_stats[].rq_xdpsq.
 */
static const struct counter_desc rq_xdpsq_stats_desc[] = {
	{ MLX5E_DECLARE_RQ_XDPSQ_STAT(struct mlx5e_xdpsq_stats, xmit) },
	{ MLX5E_DECLARE_RQ_XDPSQ_STAT(struct mlx5e_xdpsq_stats, full) },
	{ MLX5E_DECLARE_RQ_XDPSQ_STAT(struct mlx5e_xdpsq_stats, err) },
	{ MLX5E_DECLARE_RQ_XDPSQ_STAT(struct mlx5e_xdpsq_stats, cqes) },
};

/* Standalone XDP-SQ counters, read from priv->channel_stats[].xdpsq. */
static const struct counter_desc xdpsq_stats_desc[] = {
	{ MLX5E_DECLARE_XDPSQ_STAT(struct mlx5e_xdpsq_stats, xmit) },
	{ MLX5E_DECLARE_XDPSQ_STAT(struct mlx5e_xdpsq_stats, full) },
	{ MLX5E_DECLARE_XDPSQ_STAT(struct mlx5e_xdpsq_stats, err) },
	{ MLX5E_DECLARE_XDPSQ_STAT(struct mlx5e_xdpsq_stats, cqes) },
};
1259
/* Per-channel event counters, read from priv->channel_stats[].ch. */
static const struct counter_desc ch_stats_desc[] = {
	{ MLX5E_DECLARE_CH_STAT(struct mlx5e_ch_stats, events) },
	{ MLX5E_DECLARE_CH_STAT(struct mlx5e_ch_stats, poll) },
	{ MLX5E_DECLARE_CH_STAT(struct mlx5e_ch_stats, arm) },
	{ MLX5E_DECLARE_CH_STAT(struct mlx5e_ch_stats, aff_change) },
	{ MLX5E_DECLARE_CH_STAT(struct mlx5e_ch_stats, eq_rearm) },
};

#define NUM_RQ_STATS			ARRAY_SIZE(rq_stats_desc)
#define NUM_SQ_STATS			ARRAY_SIZE(sq_stats_desc)
#define NUM_XDPSQ_STATS			ARRAY_SIZE(xdpsq_stats_desc)
#define NUM_RQ_XDPSQ_STATS		ARRAY_SIZE(rq_xdpsq_stats_desc)
#define NUM_CH_STATS			ARRAY_SIZE(ch_stats_desc)
1273
1274static int mlx5e_grp_channels_get_num_stats(struct mlx5e_priv *priv)
1275{
1276        int max_nch = mlx5e_get_netdev_max_channels(priv->netdev);
1277
1278        return (NUM_RQ_STATS * max_nch) +
1279               (NUM_CH_STATS * max_nch) +
1280               (NUM_SQ_STATS * max_nch * priv->max_opened_tc) +
1281               (NUM_RQ_XDPSQ_STATS * max_nch) +
1282               (NUM_XDPSQ_STATS * max_nch);
1283}
1284
1285static int mlx5e_grp_channels_fill_strings(struct mlx5e_priv *priv, u8 *data,
1286                                           int idx)
1287{
1288        int max_nch = mlx5e_get_netdev_max_channels(priv->netdev);
1289        int i, j, tc;
1290
1291        for (i = 0; i < max_nch; i++)
1292                for (j = 0; j < NUM_CH_STATS; j++)
1293                        sprintf(data + (idx++) * ETH_GSTRING_LEN,
1294                                ch_stats_desc[j].format, i);
1295
1296        for (i = 0; i < max_nch; i++) {
1297                for (j = 0; j < NUM_RQ_STATS; j++)
1298                        sprintf(data + (idx++) * ETH_GSTRING_LEN,
1299                                rq_stats_desc[j].format, i);
1300                for (j = 0; j < NUM_RQ_XDPSQ_STATS; j++)
1301                        sprintf(data + (idx++) * ETH_GSTRING_LEN,
1302                                rq_xdpsq_stats_desc[j].format, i);
1303        }
1304
1305        for (tc = 0; tc < priv->max_opened_tc; tc++)
1306                for (i = 0; i < max_nch; i++)
1307                        for (j = 0; j < NUM_SQ_STATS; j++)
1308                                sprintf(data + (idx++) * ETH_GSTRING_LEN,
1309                                        sq_stats_desc[j].format,
1310                                        priv->channel_tc2txq[i][tc]);
1311
1312        for (i = 0; i < max_nch; i++)
1313                for (j = 0; j < NUM_XDPSQ_STATS; j++)
1314                        sprintf(data + (idx++) * ETH_GSTRING_LEN,
1315                                xdpsq_stats_desc[j].format, i);
1316
1317        return idx;
1318}
1319
1320static int mlx5e_grp_channels_fill_stats(struct mlx5e_priv *priv, u64 *data,
1321                                         int idx)
1322{
1323        int max_nch = mlx5e_get_netdev_max_channels(priv->netdev);
1324        int i, j, tc;
1325
1326        for (i = 0; i < max_nch; i++)
1327                for (j = 0; j < NUM_CH_STATS; j++)
1328                        data[idx++] =
1329                                MLX5E_READ_CTR64_CPU(&priv->channel_stats[i].ch,
1330                                                     ch_stats_desc, j);
1331
1332        for (i = 0; i < max_nch; i++) {
1333                for (j = 0; j < NUM_RQ_STATS; j++)
1334                        data[idx++] =
1335                                MLX5E_READ_CTR64_CPU(&priv->channel_stats[i].rq,
1336                                                     rq_stats_desc, j);
1337                for (j = 0; j < NUM_RQ_XDPSQ_STATS; j++)
1338                        data[idx++] =
1339                                MLX5E_READ_CTR64_CPU(&priv->channel_stats[i].rq_xdpsq,
1340                                                     rq_xdpsq_stats_desc, j);
1341        }
1342
1343        for (tc = 0; tc < priv->max_opened_tc; tc++)
1344                for (i = 0; i < max_nch; i++)
1345                        for (j = 0; j < NUM_SQ_STATS; j++)
1346                                data[idx++] =
1347                                        MLX5E_READ_CTR64_CPU(&priv->channel_stats[i].sq[tc],
1348                                                             sq_stats_desc, j);
1349
1350        for (i = 0; i < max_nch; i++)
1351                for (j = 0; j < NUM_XDPSQ_STATS; j++)
1352                        data[idx++] =
1353                                MLX5E_READ_CTR64_CPU(&priv->channel_stats[i].xdpsq,
1354                                                     xdpsq_stats_desc, j);
1355
1356        return idx;
1357}
1358
/* The stats groups order is opposite to the update_stats() order calls */
/* Registration table for the ethtool statistics groups. Each entry
 * supplies a counter-count callback, string/value fill callbacks, and
 * optionally an update callback. Entries carrying
 * .update_stats_mask = MLX5E_NDO_UPDATE_STATS are additionally
 * refreshed on the NDO stats path (per the flag's name — confirm
 * against the mask's consumer). Entries with no .update_stats rely on
 * counters maintained elsewhere (e.g. in the datapath).
 */
const struct mlx5e_stats_grp mlx5e_stats_grps[] = {
	/* SW counters */
	{
		.get_num_stats = mlx5e_grp_sw_get_num_stats,
		.fill_strings = mlx5e_grp_sw_fill_strings,
		.fill_stats = mlx5e_grp_sw_fill_stats,
		.update_stats = mlx5e_grp_sw_update_stats,
	},
	/* Q counters */
	{
		.get_num_stats = mlx5e_grp_q_get_num_stats,
		.fill_strings = mlx5e_grp_q_fill_strings,
		.fill_stats = mlx5e_grp_q_fill_stats,
		.update_stats_mask = MLX5E_NDO_UPDATE_STATS,
		.update_stats = mlx5e_grp_q_update_stats,
	},
	/* vNIC environment counters */
	{
		.get_num_stats = mlx5e_grp_vnic_env_get_num_stats,
		.fill_strings = mlx5e_grp_vnic_env_fill_strings,
		.fill_stats = mlx5e_grp_vnic_env_fill_stats,
		.update_stats = mlx5e_grp_vnic_env_update_stats,
	},
	/* vport counters */
	{
		.get_num_stats = mlx5e_grp_vport_get_num_stats,
		.fill_strings = mlx5e_grp_vport_fill_strings,
		.fill_stats = mlx5e_grp_vport_fill_stats,
		.update_stats_mask = MLX5E_NDO_UPDATE_STATS,
		.update_stats = mlx5e_grp_vport_update_stats,
	},
	/* 802.3 counters */
	{
		.get_num_stats = mlx5e_grp_802_3_get_num_stats,
		.fill_strings = mlx5e_grp_802_3_fill_strings,
		.fill_stats = mlx5e_grp_802_3_fill_stats,
		.update_stats_mask = MLX5E_NDO_UPDATE_STATS,
		.update_stats = mlx5e_grp_802_3_update_stats,
	},
	/* 2863 counters (presumably RFC 2863 interface MIB — per naming) */
	{
		.get_num_stats = mlx5e_grp_2863_get_num_stats,
		.fill_strings = mlx5e_grp_2863_fill_strings,
		.fill_stats = mlx5e_grp_2863_fill_stats,
		.update_stats = mlx5e_grp_2863_update_stats,
	},
	/* 2819 counters (presumably RFC 2819 RMON — per naming) */
	{
		.get_num_stats = mlx5e_grp_2819_get_num_stats,
		.fill_strings = mlx5e_grp_2819_fill_strings,
		.fill_stats = mlx5e_grp_2819_fill_stats,
		.update_stats = mlx5e_grp_2819_update_stats,
	},
	/* PHY counters */
	{
		.get_num_stats = mlx5e_grp_phy_get_num_stats,
		.fill_strings = mlx5e_grp_phy_fill_strings,
		.fill_stats = mlx5e_grp_phy_fill_stats,
		.update_stats = mlx5e_grp_phy_update_stats,
	},
	/* Ethernet extended counters */
	{
		.get_num_stats = mlx5e_grp_eth_ext_get_num_stats,
		.fill_strings = mlx5e_grp_eth_ext_fill_strings,
		.fill_stats = mlx5e_grp_eth_ext_fill_stats,
		.update_stats = mlx5e_grp_eth_ext_update_stats,
	},
	/* PCIe counters */
	{
		.get_num_stats = mlx5e_grp_pcie_get_num_stats,
		.fill_strings = mlx5e_grp_pcie_fill_strings,
		.fill_stats = mlx5e_grp_pcie_fill_stats,
		.update_stats = mlx5e_grp_pcie_update_stats,
	},
	/* Per-priority counters */
	{
		.get_num_stats = mlx5e_grp_per_prio_get_num_stats,
		.fill_strings = mlx5e_grp_per_prio_fill_strings,
		.fill_stats = mlx5e_grp_per_prio_fill_stats,
		.update_stats = mlx5e_grp_per_prio_update_stats,
	},
	/* PME counters — no update callback */
	{
		.get_num_stats = mlx5e_grp_pme_get_num_stats,
		.fill_strings = mlx5e_grp_pme_fill_strings,
		.fill_stats = mlx5e_grp_pme_fill_stats,
	},
	/* IPsec offload counters */
	{
		.get_num_stats = mlx5e_grp_ipsec_get_num_stats,
		.fill_strings = mlx5e_grp_ipsec_fill_strings,
		.fill_stats = mlx5e_grp_ipsec_fill_stats,
		.update_stats = mlx5e_grp_ipsec_update_stats,
	},
	/* TLS offload counters — no update callback */
	{
		.get_num_stats = mlx5e_grp_tls_get_num_stats,
		.fill_strings = mlx5e_grp_tls_fill_strings,
		.fill_stats = mlx5e_grp_tls_fill_stats,
	},
	/* Per-channel counters — no update callback */
	{
		.get_num_stats = mlx5e_grp_channels_get_num_stats,
		.fill_strings = mlx5e_grp_channels_fill_strings,
		.fill_stats = mlx5e_grp_channels_fill_stats,
	}
};
1452
/* Number of entries in mlx5e_stats_grps. */
const int mlx5e_num_stats_grps = ARRAY_SIZE(mlx5e_stats_grps);
1454