1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33#include "lib/mlx5.h"
34#include "en.h"
35#include "en_accel/tls.h"
36#include "en_accel/en_accel.h"
37
38static unsigned int stats_grps_num(struct mlx5e_priv *priv)
39{
40 return !priv->profile->stats_grps_num ? 0 :
41 priv->profile->stats_grps_num(priv);
42}
43
44unsigned int mlx5e_stats_total_num(struct mlx5e_priv *priv)
45{
46 mlx5e_stats_grp_t *stats_grps = priv->profile->stats_grps;
47 const unsigned int num_stats_grps = stats_grps_num(priv);
48 unsigned int total = 0;
49 int i;
50
51 for (i = 0; i < num_stats_grps; i++)
52 total += stats_grps[i]->get_num_stats(priv);
53
54 return total;
55}
56
57void mlx5e_stats_update(struct mlx5e_priv *priv)
58{
59 mlx5e_stats_grp_t *stats_grps = priv->profile->stats_grps;
60 const unsigned int num_stats_grps = stats_grps_num(priv);
61 int i;
62
63 for (i = num_stats_grps - 1; i >= 0; i--)
64 if (stats_grps[i]->update_stats)
65 stats_grps[i]->update_stats(priv);
66}
67
68void mlx5e_stats_fill(struct mlx5e_priv *priv, u64 *data, int idx)
69{
70 mlx5e_stats_grp_t *stats_grps = priv->profile->stats_grps;
71 const unsigned int num_stats_grps = stats_grps_num(priv);
72 int i;
73
74 for (i = 0; i < num_stats_grps; i++)
75 idx = stats_grps[i]->fill_stats(priv, data, idx);
76}
77
78void mlx5e_stats_fill_strings(struct mlx5e_priv *priv, u8 *data)
79{
80 mlx5e_stats_grp_t *stats_grps = priv->profile->stats_grps;
81 const unsigned int num_stats_grps = stats_grps_num(priv);
82 int i, idx = 0;
83
84 for (i = 0; i < num_stats_grps; i++)
85 idx = stats_grps[i]->fill_strings(priv, data, idx);
86}
87
88
89
/* Software counters exposed via ethtool -S. Each entry maps an ethtool
 * string to a field offset inside struct mlx5e_sw_stats; values are
 * aggregated from the per-channel ring stats by the sw update callback.
 */
static const struct counter_desc sw_stats_desc[] = {
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_packets) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_bytes) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_packets) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_bytes) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_tso_packets) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_tso_bytes) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_tso_inner_packets) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_tso_inner_bytes) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_added_vlan_packets) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_nop) },

	/* TX TLS offload counters; compiled in only with TLS support. */
#ifdef CONFIG_MLX5_EN_TLS
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_tls_encrypted_packets) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_tls_encrypted_bytes) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_tls_ctx) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_tls_ooo) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_tls_dump_packets) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_tls_dump_bytes) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_tls_resync_bytes) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_tls_skip_no_sync_data) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_tls_drop_no_sync_data) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_tls_drop_bypass_req) },
#endif

	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_lro_packets) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_lro_bytes) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_ecn_mark) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_removed_vlan_packets) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_csum_unnecessary) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_csum_none) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_csum_complete) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_csum_complete_tail) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_csum_complete_tail_slow) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_csum_unnecessary_inner) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_xdp_drop) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_xdp_redirect) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_xdp_tx_xmit) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_xdp_tx_mpwqe) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_xdp_tx_inlnw) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_xdp_tx_nops) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_xdp_tx_full) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_xdp_tx_err) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_xdp_tx_cqe) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_csum_none) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_csum_partial) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_csum_partial_inner) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_queue_stopped) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_queue_dropped) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_xmit_more) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_recover) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_cqes) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_queue_wake) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_cqe_err) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_xdp_xmit) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_xdp_mpwqe) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_xdp_inlnw) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_xdp_nops) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_xdp_full) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_xdp_err) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_xdp_cqes) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_wqe_err) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_mpwqe_filler_cqes) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_mpwqe_filler_strides) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_oversize_pkts_sw_drop) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_buff_alloc_err) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_cqe_compress_blks) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_cqe_compress_pkts) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_cache_reuse) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_cache_full) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_cache_empty) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_cache_busy) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_cache_waive) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_congst_umr) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_arfs_err) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_recover) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, ch_events) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, ch_poll) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, ch_arm) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, ch_aff_change) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, ch_force_irq) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, ch_eq_rearm) },
	/* AF_XDP (XSK) RX/TX counters. */
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_xsk_packets) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_xsk_bytes) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_xsk_csum_complete) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_xsk_csum_unnecessary) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_xsk_csum_unnecessary_inner) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_xsk_csum_none) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_xsk_ecn_mark) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_xsk_removed_vlan_packets) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_xsk_xdp_drop) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_xsk_xdp_redirect) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_xsk_wqe_err) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_xsk_mpwqe_filler_cqes) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_xsk_mpwqe_filler_strides) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_xsk_oversize_pkts_sw_drop) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_xsk_buff_alloc_err) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_xsk_cqe_compress_blks) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_xsk_cqe_compress_pkts) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_xsk_congst_umr) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_xsk_arfs_err) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_xsk_xmit) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_xsk_mpwqe) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_xsk_inlnw) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_xsk_full) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_xsk_err) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_xsk_cqes) },
};

#define NUM_SW_COUNTERS			ARRAY_SIZE(sw_stats_desc)
200
201static MLX5E_DECLARE_STATS_GRP_OP_NUM_STATS(sw)
202{
203 return NUM_SW_COUNTERS;
204}
205
206static MLX5E_DECLARE_STATS_GRP_OP_FILL_STRS(sw)
207{
208 int i;
209
210 for (i = 0; i < NUM_SW_COUNTERS; i++)
211 strcpy(data + (idx++) * ETH_GSTRING_LEN, sw_stats_desc[i].format);
212 return idx;
213}
214
215static MLX5E_DECLARE_STATS_GRP_OP_FILL_STATS(sw)
216{
217 int i;
218
219 for (i = 0; i < NUM_SW_COUNTERS; i++)
220 data[idx++] = MLX5E_READ_CTR64_CPU(&priv->stats.sw, sw_stats_desc, i);
221 return idx;
222}
223
/* Aggregate the per-channel ring counters into priv->stats.sw.
 *
 * The accumulator is zeroed and then rebuilt by summing the RQ, channel,
 * XDP-SQ (both redirect and RX-side), XSK and per-TC SQ counters of every
 * channel slot up to priv->max_nch.
 */
static MLX5E_DECLARE_STATS_GRP_OP_UPDATE_STATS(sw)
{
	struct mlx5e_sw_stats *s = &priv->stats.sw;
	int i;

	memset(s, 0, sizeof(*s));

	for (i = 0; i < priv->max_nch; i++) {
		struct mlx5e_channel_stats *channel_stats =
			&priv->channel_stats[i];
		/* xdpsq: XDP_REDIRECT target SQ; rq_xdpsq: XDP_TX from our RQ. */
		struct mlx5e_xdpsq_stats *xdpsq_red_stats = &channel_stats->xdpsq;
		struct mlx5e_xdpsq_stats *xdpsq_stats = &channel_stats->rq_xdpsq;
		struct mlx5e_xdpsq_stats *xsksq_stats = &channel_stats->xsksq;
		struct mlx5e_rq_stats *xskrq_stats = &channel_stats->xskrq;
		struct mlx5e_rq_stats *rq_stats = &channel_stats->rq;
		struct mlx5e_ch_stats *ch_stats = &channel_stats->ch;
		int j;

		s->rx_packets	+= rq_stats->packets;
		s->rx_bytes	+= rq_stats->bytes;
		s->rx_lro_packets += rq_stats->lro_packets;
		s->rx_lro_bytes	+= rq_stats->lro_bytes;
		s->rx_ecn_mark	+= rq_stats->ecn_mark;
		s->rx_removed_vlan_packets += rq_stats->removed_vlan_packets;
		s->rx_csum_none	+= rq_stats->csum_none;
		s->rx_csum_complete += rq_stats->csum_complete;
		s->rx_csum_complete_tail += rq_stats->csum_complete_tail;
		s->rx_csum_complete_tail_slow += rq_stats->csum_complete_tail_slow;
		s->rx_csum_unnecessary += rq_stats->csum_unnecessary;
		s->rx_csum_unnecessary_inner += rq_stats->csum_unnecessary_inner;
		s->rx_xdp_drop     += rq_stats->xdp_drop;
		s->rx_xdp_redirect += rq_stats->xdp_redirect;
		s->rx_xdp_tx_xmit  += xdpsq_stats->xmit;
		s->rx_xdp_tx_mpwqe += xdpsq_stats->mpwqe;
		s->rx_xdp_tx_inlnw += xdpsq_stats->inlnw;
		s->rx_xdp_tx_nops  += xdpsq_stats->nops;
		s->rx_xdp_tx_full  += xdpsq_stats->full;
		s->rx_xdp_tx_err   += xdpsq_stats->err;
		s->rx_xdp_tx_cqe   += xdpsq_stats->cqes;
		s->rx_wqe_err   += rq_stats->wqe_err;
		s->rx_mpwqe_filler_cqes    += rq_stats->mpwqe_filler_cqes;
		s->rx_mpwqe_filler_strides += rq_stats->mpwqe_filler_strides;
		s->rx_oversize_pkts_sw_drop += rq_stats->oversize_pkts_sw_drop;
		s->rx_buff_alloc_err += rq_stats->buff_alloc_err;
		s->rx_cqe_compress_blks += rq_stats->cqe_compress_blks;
		s->rx_cqe_compress_pkts += rq_stats->cqe_compress_pkts;
		s->rx_cache_reuse += rq_stats->cache_reuse;
		s->rx_cache_full  += rq_stats->cache_full;
		s->rx_cache_empty += rq_stats->cache_empty;
		s->rx_cache_busy  += rq_stats->cache_busy;
		s->rx_cache_waive += rq_stats->cache_waive;
		s->rx_congst_umr  += rq_stats->congst_umr;
		s->rx_arfs_err    += rq_stats->arfs_err;
		s->rx_recover     += rq_stats->recover;
		s->ch_events      += ch_stats->events;
		s->ch_poll        += ch_stats->poll;
		s->ch_arm         += ch_stats->arm;
		s->ch_aff_change  += ch_stats->aff_change;
		s->ch_force_irq   += ch_stats->force_irq;
		s->ch_eq_rearm    += ch_stats->eq_rearm;

		/* XDP_REDIRECT-target SQ counters. */
		s->tx_xdp_xmit    += xdpsq_red_stats->xmit;
		s->tx_xdp_mpwqe   += xdpsq_red_stats->mpwqe;
		s->tx_xdp_inlnw   += xdpsq_red_stats->inlnw;
		s->tx_xdp_nops	  += xdpsq_red_stats->nops;
		s->tx_xdp_full    += xdpsq_red_stats->full;
		s->tx_xdp_err     += xdpsq_red_stats->err;
		s->tx_xdp_cqes    += xdpsq_red_stats->cqes;

		/* AF_XDP (XSK) RX and TX queue counters. */
		s->rx_xsk_packets                += xskrq_stats->packets;
		s->rx_xsk_bytes                  += xskrq_stats->bytes;
		s->rx_xsk_csum_complete          += xskrq_stats->csum_complete;
		s->rx_xsk_csum_unnecessary       += xskrq_stats->csum_unnecessary;
		s->rx_xsk_csum_unnecessary_inner += xskrq_stats->csum_unnecessary_inner;
		s->rx_xsk_csum_none              += xskrq_stats->csum_none;
		s->rx_xsk_ecn_mark               += xskrq_stats->ecn_mark;
		s->rx_xsk_removed_vlan_packets   += xskrq_stats->removed_vlan_packets;
		s->rx_xsk_xdp_drop               += xskrq_stats->xdp_drop;
		s->rx_xsk_xdp_redirect           += xskrq_stats->xdp_redirect;
		s->rx_xsk_wqe_err                += xskrq_stats->wqe_err;
		s->rx_xsk_mpwqe_filler_cqes      += xskrq_stats->mpwqe_filler_cqes;
		s->rx_xsk_mpwqe_filler_strides   += xskrq_stats->mpwqe_filler_strides;
		s->rx_xsk_oversize_pkts_sw_drop  += xskrq_stats->oversize_pkts_sw_drop;
		s->rx_xsk_buff_alloc_err         += xskrq_stats->buff_alloc_err;
		s->rx_xsk_cqe_compress_blks      += xskrq_stats->cqe_compress_blks;
		s->rx_xsk_cqe_compress_pkts      += xskrq_stats->cqe_compress_pkts;
		s->rx_xsk_congst_umr             += xskrq_stats->congst_umr;
		s->rx_xsk_arfs_err               += xskrq_stats->arfs_err;
		s->tx_xsk_xmit                   += xsksq_stats->xmit;
		s->tx_xsk_mpwqe                  += xsksq_stats->mpwqe;
		s->tx_xsk_inlnw                  += xsksq_stats->inlnw;
		s->tx_xsk_full                   += xsksq_stats->full;
		s->tx_xsk_err                    += xsksq_stats->err;
		s->tx_xsk_cqes                   += xsksq_stats->cqes;

		/* Per-TC SQ counters of this channel. */
		for (j = 0; j < priv->max_opened_tc; j++) {
			struct mlx5e_sq_stats *sq_stats = &channel_stats->sq[j];

			s->tx_packets		+= sq_stats->packets;
			s->tx_bytes		+= sq_stats->bytes;
			s->tx_tso_packets	+= sq_stats->tso_packets;
			s->tx_tso_bytes		+= sq_stats->tso_bytes;
			s->tx_tso_inner_packets	+= sq_stats->tso_inner_packets;
			s->tx_tso_inner_bytes	+= sq_stats->tso_inner_bytes;
			s->tx_added_vlan_packets += sq_stats->added_vlan_packets;
			s->tx_nop               += sq_stats->nop;
			s->tx_queue_stopped	+= sq_stats->stopped;
			s->tx_queue_wake	+= sq_stats->wake;
			s->tx_queue_dropped	+= sq_stats->dropped;
			s->tx_cqe_err		+= sq_stats->cqe_err;
			s->tx_recover		+= sq_stats->recover;
			s->tx_xmit_more		+= sq_stats->xmit_more;
			s->tx_csum_partial_inner += sq_stats->csum_partial_inner;
			s->tx_csum_none		+= sq_stats->csum_none;
			s->tx_csum_partial	+= sq_stats->csum_partial;
#ifdef CONFIG_MLX5_EN_TLS
			s->tx_tls_encrypted_packets += sq_stats->tls_encrypted_packets;
			s->tx_tls_encrypted_bytes   += sq_stats->tls_encrypted_bytes;
			s->tx_tls_ctx               += sq_stats->tls_ctx;
			s->tx_tls_ooo               += sq_stats->tls_ooo;
			s->tx_tls_dump_bytes        += sq_stats->tls_dump_bytes;
			s->tx_tls_dump_packets      += sq_stats->tls_dump_packets;
			s->tx_tls_resync_bytes      += sq_stats->tls_resync_bytes;
			s->tx_tls_skip_no_sync_data += sq_stats->tls_skip_no_sync_data;
			s->tx_tls_drop_no_sync_data += sq_stats->tls_drop_no_sync_data;
			s->tx_tls_drop_bypass_req   += sq_stats->tls_drop_bypass_req;
#endif
			s->tx_cqes		+= sq_stats->cqes;

			/* Compiler barrier between TC iterations, kept from the
			 * original code.
			 */
			barrier();
		}
	}
}
358
/* Device queue counter read via the QUERY_Q_COUNTER command. */
static const struct counter_desc q_stats_desc[] = {
	{ MLX5E_DECLARE_STAT(struct mlx5e_qcounter_stats, rx_out_of_buffer) },
};

/* Counter of the dedicated drop RQ (packets received while interface down). */
static const struct counter_desc drop_rq_stats_desc[] = {
	{ MLX5E_DECLARE_STAT(struct mlx5e_qcounter_stats, rx_if_down_packets) },
};

#define NUM_Q_COUNTERS			ARRAY_SIZE(q_stats_desc)
#define NUM_DROP_RQ_COUNTERS		ARRAY_SIZE(drop_rq_stats_desc)
369
370static MLX5E_DECLARE_STATS_GRP_OP_NUM_STATS(qcnt)
371{
372 int num_stats = 0;
373
374 if (priv->q_counter)
375 num_stats += NUM_Q_COUNTERS;
376
377 if (priv->drop_rq_q_counter)
378 num_stats += NUM_DROP_RQ_COUNTERS;
379
380 return num_stats;
381}
382
383static MLX5E_DECLARE_STATS_GRP_OP_FILL_STRS(qcnt)
384{
385 int i;
386
387 for (i = 0; i < NUM_Q_COUNTERS && priv->q_counter; i++)
388 strcpy(data + (idx++) * ETH_GSTRING_LEN,
389 q_stats_desc[i].format);
390
391 for (i = 0; i < NUM_DROP_RQ_COUNTERS && priv->drop_rq_q_counter; i++)
392 strcpy(data + (idx++) * ETH_GSTRING_LEN,
393 drop_rq_stats_desc[i].format);
394
395 return idx;
396}
397
398static MLX5E_DECLARE_STATS_GRP_OP_FILL_STATS(qcnt)
399{
400 int i;
401
402 for (i = 0; i < NUM_Q_COUNTERS && priv->q_counter; i++)
403 data[idx++] = MLX5E_READ_CTR32_CPU(&priv->stats.qcnt,
404 q_stats_desc, i);
405 for (i = 0; i < NUM_DROP_RQ_COUNTERS && priv->drop_rq_q_counter; i++)
406 data[idx++] = MLX5E_READ_CTR32_CPU(&priv->stats.qcnt,
407 drop_rq_stats_desc, i);
408 return idx;
409}
410
/* Refresh the queue counters by issuing QUERY_Q_COUNTER once per allocated
 * counter set. On command failure the cached value is left unchanged.
 * Note: rx_if_down_packets is read from the out_of_buffer field of the
 * drop-RQ counter set.
 */
static MLX5E_DECLARE_STATS_GRP_OP_UPDATE_STATS(qcnt)
{
	struct mlx5e_qcounter_stats *qcnt = &priv->stats.qcnt;
	u32 out[MLX5_ST_SZ_DW(query_q_counter_out)] = {};
	u32 in[MLX5_ST_SZ_DW(query_q_counter_in)] = {};
	int ret;

	MLX5_SET(query_q_counter_in, in, opcode, MLX5_CMD_OP_QUERY_Q_COUNTER);

	if (priv->q_counter) {
		MLX5_SET(query_q_counter_in, in, counter_set_id,
			 priv->q_counter);
		ret = mlx5_cmd_exec_inout(priv->mdev, query_q_counter, in, out);
		if (!ret)
			qcnt->rx_out_of_buffer = MLX5_GET(query_q_counter_out,
							  out, out_of_buffer);
	}

	if (priv->drop_rq_q_counter) {
		MLX5_SET(query_q_counter_in, in, counter_set_id,
			 priv->drop_rq_q_counter);
		ret = mlx5_cmd_exec_inout(priv->mdev, query_q_counter, in, out);
		if (!ret)
			qcnt->rx_if_down_packets = MLX5_GET(query_q_counter_out,
							    out, out_of_buffer);
	}
}
438
/* VNIC environment counters, read from the QUERY_VNIC_ENV command output. */
#define VNIC_ENV_OFF(c) MLX5_BYTE_OFF(query_vnic_env_out, c)
static const struct counter_desc vnic_env_stats_steer_desc[] = {
	{ "rx_steer_missed_packets",
	  VNIC_ENV_OFF(vport_env.nic_receive_steering_discard) },
};

static const struct counter_desc vnic_env_stats_dev_oob_desc[] = {
	{ "dev_internal_queue_oob",
	  VNIC_ENV_OFF(vport_env.internal_rq_out_of_buffer) },
};

/* Each counter is exposed only when its capability bit is set. */
#define NUM_VNIC_ENV_STEER_COUNTERS(dev) \
	(MLX5_CAP_GEN(dev, nic_receive_steering_discard) ? \
	 ARRAY_SIZE(vnic_env_stats_steer_desc) : 0)
#define NUM_VNIC_ENV_DEV_OOB_COUNTERS(dev) \
	(MLX5_CAP_GEN(dev, vnic_env_int_rq_oob) ? \
	 ARRAY_SIZE(vnic_env_stats_dev_oob_desc) : 0)
456
457static MLX5E_DECLARE_STATS_GRP_OP_NUM_STATS(vnic_env)
458{
459 return NUM_VNIC_ENV_STEER_COUNTERS(priv->mdev) +
460 NUM_VNIC_ENV_DEV_OOB_COUNTERS(priv->mdev);
461}
462
463static MLX5E_DECLARE_STATS_GRP_OP_FILL_STRS(vnic_env)
464{
465 int i;
466
467 for (i = 0; i < NUM_VNIC_ENV_STEER_COUNTERS(priv->mdev); i++)
468 strcpy(data + (idx++) * ETH_GSTRING_LEN,
469 vnic_env_stats_steer_desc[i].format);
470
471 for (i = 0; i < NUM_VNIC_ENV_DEV_OOB_COUNTERS(priv->mdev); i++)
472 strcpy(data + (idx++) * ETH_GSTRING_LEN,
473 vnic_env_stats_dev_oob_desc[i].format);
474 return idx;
475}
476
477static MLX5E_DECLARE_STATS_GRP_OP_FILL_STATS(vnic_env)
478{
479 int i;
480
481 for (i = 0; i < NUM_VNIC_ENV_STEER_COUNTERS(priv->mdev); i++)
482 data[idx++] = MLX5E_READ_CTR64_BE(priv->stats.vnic.query_vnic_env_out,
483 vnic_env_stats_steer_desc, i);
484
485 for (i = 0; i < NUM_VNIC_ENV_DEV_OOB_COUNTERS(priv->mdev); i++)
486 data[idx++] = MLX5E_READ_CTR32_BE(priv->stats.vnic.query_vnic_env_out,
487 vnic_env_stats_dev_oob_desc, i);
488 return idx;
489}
490
491static MLX5E_DECLARE_STATS_GRP_OP_UPDATE_STATS(vnic_env)
492{
493 u32 *out = (u32 *)priv->stats.vnic.query_vnic_env_out;
494 u32 in[MLX5_ST_SZ_DW(query_vnic_env_in)] = {};
495 struct mlx5_core_dev *mdev = priv->mdev;
496
497 if (!MLX5_CAP_GEN(priv->mdev, nic_receive_steering_discard))
498 return;
499
500 MLX5_SET(query_vnic_env_in, in, opcode, MLX5_CMD_OP_QUERY_VNIC_ENV);
501 mlx5_cmd_exec_inout(mdev, query_vnic_env, in, out);
502}
503
/* Per-vport counters read from the QUERY_VPORT_COUNTER command output. */
#define VPORT_COUNTER_OFF(c) MLX5_BYTE_OFF(query_vport_counter_out, c)
static const struct counter_desc vport_stats_desc[] = {
	{ "rx_vport_unicast_packets",
		VPORT_COUNTER_OFF(received_eth_unicast.packets) },
	{ "rx_vport_unicast_bytes",
		VPORT_COUNTER_OFF(received_eth_unicast.octets) },
	{ "tx_vport_unicast_packets",
		VPORT_COUNTER_OFF(transmitted_eth_unicast.packets) },
	{ "tx_vport_unicast_bytes",
		VPORT_COUNTER_OFF(transmitted_eth_unicast.octets) },
	{ "rx_vport_multicast_packets",
		VPORT_COUNTER_OFF(received_eth_multicast.packets) },
	{ "rx_vport_multicast_bytes",
		VPORT_COUNTER_OFF(received_eth_multicast.octets) },
	{ "tx_vport_multicast_packets",
		VPORT_COUNTER_OFF(transmitted_eth_multicast.packets) },
	{ "tx_vport_multicast_bytes",
		VPORT_COUNTER_OFF(transmitted_eth_multicast.octets) },
	{ "rx_vport_broadcast_packets",
		VPORT_COUNTER_OFF(received_eth_broadcast.packets) },
	{ "rx_vport_broadcast_bytes",
		VPORT_COUNTER_OFF(received_eth_broadcast.octets) },
	{ "tx_vport_broadcast_packets",
		VPORT_COUNTER_OFF(transmitted_eth_broadcast.packets) },
	{ "tx_vport_broadcast_bytes",
		VPORT_COUNTER_OFF(transmitted_eth_broadcast.octets) },
	/* RDMA (IB) traffic counters of the same vport. */
	{ "rx_vport_rdma_unicast_packets",
		VPORT_COUNTER_OFF(received_ib_unicast.packets) },
	{ "rx_vport_rdma_unicast_bytes",
		VPORT_COUNTER_OFF(received_ib_unicast.octets) },
	{ "tx_vport_rdma_unicast_packets",
		VPORT_COUNTER_OFF(transmitted_ib_unicast.packets) },
	{ "tx_vport_rdma_unicast_bytes",
		VPORT_COUNTER_OFF(transmitted_ib_unicast.octets) },
	{ "rx_vport_rdma_multicast_packets",
		VPORT_COUNTER_OFF(received_ib_multicast.packets) },
	{ "rx_vport_rdma_multicast_bytes",
		VPORT_COUNTER_OFF(received_ib_multicast.octets) },
	{ "tx_vport_rdma_multicast_packets",
		VPORT_COUNTER_OFF(transmitted_ib_multicast.packets) },
	{ "tx_vport_rdma_multicast_bytes",
		VPORT_COUNTER_OFF(transmitted_ib_multicast.octets) },
};

#define NUM_VPORT_COUNTERS		ARRAY_SIZE(vport_stats_desc)
549
550static MLX5E_DECLARE_STATS_GRP_OP_NUM_STATS(vport)
551{
552 return NUM_VPORT_COUNTERS;
553}
554
555static MLX5E_DECLARE_STATS_GRP_OP_FILL_STRS(vport)
556{
557 int i;
558
559 for (i = 0; i < NUM_VPORT_COUNTERS; i++)
560 strcpy(data + (idx++) * ETH_GSTRING_LEN, vport_stats_desc[i].format);
561 return idx;
562}
563
564static MLX5E_DECLARE_STATS_GRP_OP_FILL_STATS(vport)
565{
566 int i;
567
568 for (i = 0; i < NUM_VPORT_COUNTERS; i++)
569 data[idx++] = MLX5E_READ_CTR64_BE(priv->stats.vport.query_vport_out,
570 vport_stats_desc, i);
571 return idx;
572}
573
574static MLX5E_DECLARE_STATS_GRP_OP_UPDATE_STATS(vport)
575{
576 u32 *out = (u32 *)priv->stats.vport.query_vport_out;
577 u32 in[MLX5_ST_SZ_DW(query_vport_counter_in)] = {};
578 struct mlx5_core_dev *mdev = priv->mdev;
579
580 MLX5_SET(query_vport_counter_in, in, opcode, MLX5_CMD_OP_QUERY_VPORT_COUNTER);
581 mlx5_cmd_exec_inout(mdev, query_vport_counter, in, out);
582}
583
/* IEEE 802.3 physical-port counters from the PPCNT register; each field's
 * offset points at the _high half of a 64-bit big-endian counter.
 */
#define PPORT_802_3_OFF(c) \
	MLX5_BYTE_OFF(ppcnt_reg, \
		      counter_set.eth_802_3_cntrs_grp_data_layout.c##_high)
static const struct counter_desc pport_802_3_stats_desc[] = {
	{ "tx_packets_phy", PPORT_802_3_OFF(a_frames_transmitted_ok) },
	{ "rx_packets_phy", PPORT_802_3_OFF(a_frames_received_ok) },
	{ "rx_crc_errors_phy", PPORT_802_3_OFF(a_frame_check_sequence_errors) },
	{ "tx_bytes_phy", PPORT_802_3_OFF(a_octets_transmitted_ok) },
	{ "rx_bytes_phy", PPORT_802_3_OFF(a_octets_received_ok) },
	{ "tx_multicast_phy", PPORT_802_3_OFF(a_multicast_frames_xmitted_ok) },
	{ "tx_broadcast_phy", PPORT_802_3_OFF(a_broadcast_frames_xmitted_ok) },
	{ "rx_multicast_phy", PPORT_802_3_OFF(a_multicast_frames_received_ok) },
	{ "rx_broadcast_phy", PPORT_802_3_OFF(a_broadcast_frames_received_ok) },
	{ "rx_in_range_len_errors_phy", PPORT_802_3_OFF(a_in_range_length_errors) },
	{ "rx_out_of_range_len_phy", PPORT_802_3_OFF(a_out_of_range_length_field) },
	{ "rx_oversize_pkts_phy", PPORT_802_3_OFF(a_frame_too_long_errors) },
	{ "rx_symbol_err_phy", PPORT_802_3_OFF(a_symbol_error_during_carrier) },
	{ "tx_mac_control_phy", PPORT_802_3_OFF(a_mac_control_frames_transmitted) },
	{ "rx_mac_control_phy", PPORT_802_3_OFF(a_mac_control_frames_received) },
	{ "rx_unsupported_op_phy", PPORT_802_3_OFF(a_unsupported_opcodes_received) },
	{ "rx_pause_ctrl_phy", PPORT_802_3_OFF(a_pause_mac_ctrl_frames_received) },
	{ "tx_pause_ctrl_phy", PPORT_802_3_OFF(a_pause_mac_ctrl_frames_transmitted) },
};

#define NUM_PPORT_802_3_COUNTERS	ARRAY_SIZE(pport_802_3_stats_desc)
609
610static MLX5E_DECLARE_STATS_GRP_OP_NUM_STATS(802_3)
611{
612 return NUM_PPORT_802_3_COUNTERS;
613}
614
615static MLX5E_DECLARE_STATS_GRP_OP_FILL_STRS(802_3)
616{
617 int i;
618
619 for (i = 0; i < NUM_PPORT_802_3_COUNTERS; i++)
620 strcpy(data + (idx++) * ETH_GSTRING_LEN, pport_802_3_stats_desc[i].format);
621 return idx;
622}
623
624static MLX5E_DECLARE_STATS_GRP_OP_FILL_STATS(802_3)
625{
626 int i;
627
628 for (i = 0; i < NUM_PPORT_802_3_COUNTERS; i++)
629 data[idx++] = MLX5E_READ_CTR64_BE(&priv->stats.pport.IEEE_802_3_counters,
630 pport_802_3_stats_desc, i);
631 return idx;
632}
633
/* Basic PPCNT groups are assumed supported when the PCAM register is absent;
 * otherwise PCAM's ppcnt bit is authoritative.
 */
#define MLX5_BASIC_PPCNT_SUPPORTED(mdev) \
	(MLX5_CAP_GEN(mdev, pcam_reg) ? MLX5_CAP_PCAM_REG(mdev, ppcnt) : 1)
636
/* Read the IEEE 802.3 PPCNT counter group of local port 1 into the
 * pport stats cache. Skipped when basic PPCNT groups are unsupported.
 */
static MLX5E_DECLARE_STATS_GRP_OP_UPDATE_STATS(802_3)
{
	struct mlx5e_pport_stats *pstats = &priv->stats.pport;
	struct mlx5_core_dev *mdev = priv->mdev;
	u32 in[MLX5_ST_SZ_DW(ppcnt_reg)] = {0};
	int sz = MLX5_ST_SZ_BYTES(ppcnt_reg);
	void *out;

	if (!MLX5_BASIC_PPCNT_SUPPORTED(mdev))
		return;

	MLX5_SET(ppcnt_reg, in, local_port, 1);
	out = pstats->IEEE_802_3_counters;
	MLX5_SET(ppcnt_reg, in, grp, MLX5_IEEE_802_3_COUNTERS_GROUP);
	mlx5_core_access_reg(mdev, in, sz, out, sz, MLX5_REG_PPCNT, 0, 0);
}
653
/* RFC 2863 (interface MIB) physical-port counters from PPCNT. */
#define PPORT_2863_OFF(c) \
	MLX5_BYTE_OFF(ppcnt_reg, \
		      counter_set.eth_2863_cntrs_grp_data_layout.c##_high)
static const struct counter_desc pport_2863_stats_desc[] = {
	{ "rx_discards_phy", PPORT_2863_OFF(if_in_discards) },
	{ "tx_discards_phy", PPORT_2863_OFF(if_out_discards) },
	{ "tx_errors_phy", PPORT_2863_OFF(if_out_errors) },
};

#define NUM_PPORT_2863_COUNTERS	ARRAY_SIZE(pport_2863_stats_desc)
664
665static MLX5E_DECLARE_STATS_GRP_OP_NUM_STATS(2863)
666{
667 return NUM_PPORT_2863_COUNTERS;
668}
669
670static MLX5E_DECLARE_STATS_GRP_OP_FILL_STRS(2863)
671{
672 int i;
673
674 for (i = 0; i < NUM_PPORT_2863_COUNTERS; i++)
675 strcpy(data + (idx++) * ETH_GSTRING_LEN, pport_2863_stats_desc[i].format);
676 return idx;
677}
678
679static MLX5E_DECLARE_STATS_GRP_OP_FILL_STATS(2863)
680{
681 int i;
682
683 for (i = 0; i < NUM_PPORT_2863_COUNTERS; i++)
684 data[idx++] = MLX5E_READ_CTR64_BE(&priv->stats.pport.RFC_2863_counters,
685 pport_2863_stats_desc, i);
686 return idx;
687}
688
/* Read the RFC 2863 PPCNT counter group of local port 1 into the cache.
 * NOTE(review): unlike the 802_3 and 2819 update callbacks this one has no
 * MLX5_BASIC_PPCNT_SUPPORTED gate — confirm whether that is intentional.
 */
static MLX5E_DECLARE_STATS_GRP_OP_UPDATE_STATS(2863)
{
	struct mlx5e_pport_stats *pstats = &priv->stats.pport;
	struct mlx5_core_dev *mdev = priv->mdev;
	u32 in[MLX5_ST_SZ_DW(ppcnt_reg)] = {0};
	int sz = MLX5_ST_SZ_BYTES(ppcnt_reg);
	void *out;

	MLX5_SET(ppcnt_reg, in, local_port, 1);
	out = pstats->RFC_2863_counters;
	MLX5_SET(ppcnt_reg, in, grp, MLX5_RFC_2863_COUNTERS_GROUP);
	mlx5_core_access_reg(mdev, in, sz, out, sz, MLX5_REG_PPCNT, 0, 0);
}
702
/* RFC 2819 (RMON) physical-port counters from PPCNT, including the
 * RX packet-size histogram buckets.
 */
#define PPORT_2819_OFF(c) \
	MLX5_BYTE_OFF(ppcnt_reg, \
		      counter_set.eth_2819_cntrs_grp_data_layout.c##_high)
static const struct counter_desc pport_2819_stats_desc[] = {
	{ "rx_undersize_pkts_phy", PPORT_2819_OFF(ether_stats_undersize_pkts) },
	{ "rx_fragments_phy", PPORT_2819_OFF(ether_stats_fragments) },
	{ "rx_jabbers_phy", PPORT_2819_OFF(ether_stats_jabbers) },
	{ "rx_64_bytes_phy", PPORT_2819_OFF(ether_stats_pkts64octets) },
	{ "rx_65_to_127_bytes_phy", PPORT_2819_OFF(ether_stats_pkts65to127octets) },
	{ "rx_128_to_255_bytes_phy", PPORT_2819_OFF(ether_stats_pkts128to255octets) },
	{ "rx_256_to_511_bytes_phy", PPORT_2819_OFF(ether_stats_pkts256to511octets) },
	{ "rx_512_to_1023_bytes_phy", PPORT_2819_OFF(ether_stats_pkts512to1023octets) },
	{ "rx_1024_to_1518_bytes_phy", PPORT_2819_OFF(ether_stats_pkts1024to1518octets) },
	{ "rx_1519_to_2047_bytes_phy", PPORT_2819_OFF(ether_stats_pkts1519to2047octets) },
	{ "rx_2048_to_4095_bytes_phy", PPORT_2819_OFF(ether_stats_pkts2048to4095octets) },
	{ "rx_4096_to_8191_bytes_phy", PPORT_2819_OFF(ether_stats_pkts4096to8191octets) },
	{ "rx_8192_to_10239_bytes_phy", PPORT_2819_OFF(ether_stats_pkts8192to10239octets) },
};

#define NUM_PPORT_2819_COUNTERS	ARRAY_SIZE(pport_2819_stats_desc)
723
724static MLX5E_DECLARE_STATS_GRP_OP_NUM_STATS(2819)
725{
726 return NUM_PPORT_2819_COUNTERS;
727}
728
729static MLX5E_DECLARE_STATS_GRP_OP_FILL_STRS(2819)
730{
731 int i;
732
733 for (i = 0; i < NUM_PPORT_2819_COUNTERS; i++)
734 strcpy(data + (idx++) * ETH_GSTRING_LEN, pport_2819_stats_desc[i].format);
735 return idx;
736}
737
738static MLX5E_DECLARE_STATS_GRP_OP_FILL_STATS(2819)
739{
740 int i;
741
742 for (i = 0; i < NUM_PPORT_2819_COUNTERS; i++)
743 data[idx++] = MLX5E_READ_CTR64_BE(&priv->stats.pport.RFC_2819_counters,
744 pport_2819_stats_desc, i);
745 return idx;
746}
747
/* Read the RFC 2819 PPCNT counter group of local port 1 into the cache.
 * Skipped when basic PPCNT groups are unsupported.
 */
static MLX5E_DECLARE_STATS_GRP_OP_UPDATE_STATS(2819)
{
	struct mlx5e_pport_stats *pstats = &priv->stats.pport;
	struct mlx5_core_dev *mdev = priv->mdev;
	u32 in[MLX5_ST_SZ_DW(ppcnt_reg)] = {0};
	int sz = MLX5_ST_SZ_BYTES(ppcnt_reg);
	void *out;

	if (!MLX5_BASIC_PPCNT_SUPPORTED(mdev))
		return;

	MLX5_SET(ppcnt_reg, in, local_port, 1);
	out = pstats->RFC_2819_counters;
	MLX5_SET(ppcnt_reg, in, grp, MLX5_RFC_2819_COUNTERS_GROUP);
	mlx5_core_access_reg(mdev, in, sz, out, sz, MLX5_REG_PPCNT, 0, 0);
}
764
/* Physical-layer statistical counters from PPCNT (behind the PCAM
 * ppcnt_statistical_group feature bit).
 */
#define PPORT_PHY_STATISTICAL_OFF(c) \
	MLX5_BYTE_OFF(ppcnt_reg, \
		      counter_set.phys_layer_statistical_cntrs.c##_high)
static const struct counter_desc pport_phy_statistical_stats_desc[] = {
	{ "rx_pcs_symbol_err_phy", PPORT_PHY_STATISTICAL_OFF(phy_symbol_errors) },
	{ "rx_corrected_bits_phy", PPORT_PHY_STATISTICAL_OFF(phy_corrected_bits) },
};

/* Per-lane corrected-bits counters (behind per_lane_error_counters). */
static const struct counter_desc
pport_phy_statistical_err_lanes_stats_desc[] = {
	{ "rx_err_lane_0_phy", PPORT_PHY_STATISTICAL_OFF(phy_corrected_bits_lane0) },
	{ "rx_err_lane_1_phy", PPORT_PHY_STATISTICAL_OFF(phy_corrected_bits_lane1) },
	{ "rx_err_lane_2_phy", PPORT_PHY_STATISTICAL_OFF(phy_corrected_bits_lane2) },
	{ "rx_err_lane_3_phy", PPORT_PHY_STATISTICAL_OFF(phy_corrected_bits_lane3) },
};

#define NUM_PPORT_PHY_STATISTICAL_COUNTERS \
	ARRAY_SIZE(pport_phy_statistical_stats_desc)
#define NUM_PPORT_PHY_STATISTICAL_PER_LANE_COUNTERS \
	ARRAY_SIZE(pport_phy_statistical_err_lanes_stats_desc)
785
786static MLX5E_DECLARE_STATS_GRP_OP_NUM_STATS(phy)
787{
788 struct mlx5_core_dev *mdev = priv->mdev;
789 int num_stats;
790
791
792 num_stats = 1;
793
794 num_stats += MLX5_CAP_PCAM_FEATURE(mdev, ppcnt_statistical_group) ?
795 NUM_PPORT_PHY_STATISTICAL_COUNTERS : 0;
796
797 num_stats += MLX5_CAP_PCAM_FEATURE(mdev, per_lane_error_counters) ?
798 NUM_PPORT_PHY_STATISTICAL_PER_LANE_COUNTERS : 0;
799
800 return num_stats;
801}
802
803static MLX5E_DECLARE_STATS_GRP_OP_FILL_STRS(phy)
804{
805 struct mlx5_core_dev *mdev = priv->mdev;
806 int i;
807
808 strcpy(data + (idx++) * ETH_GSTRING_LEN, "link_down_events_phy");
809
810 if (!MLX5_CAP_PCAM_FEATURE(mdev, ppcnt_statistical_group))
811 return idx;
812
813 for (i = 0; i < NUM_PPORT_PHY_STATISTICAL_COUNTERS; i++)
814 strcpy(data + (idx++) * ETH_GSTRING_LEN,
815 pport_phy_statistical_stats_desc[i].format);
816
817 if (MLX5_CAP_PCAM_FEATURE(mdev, per_lane_error_counters))
818 for (i = 0; i < NUM_PPORT_PHY_STATISTICAL_PER_LANE_COUNTERS; i++)
819 strcpy(data + (idx++) * ETH_GSTRING_LEN,
820 pport_phy_statistical_err_lanes_stats_desc[i].format);
821
822 return idx;
823}
824
/* Copy cached PHY counter values into @data in the same order as the
 * strings written by the fill_strings callback. Returns the next index.
 */
static MLX5E_DECLARE_STATS_GRP_OP_FILL_STATS(phy)
{
	struct mlx5_core_dev *mdev = priv->mdev;
	int i;

	/* link_down_events lives in the plain physical-layer counter set,
	 * not in the statistical group read below
	 */
	data[idx++] = MLX5_GET(ppcnt_reg, priv->stats.pport.phy_counters,
			       counter_set.phys_layer_cntrs.link_down_events);

	if (!MLX5_CAP_PCAM_FEATURE(mdev, ppcnt_statistical_group))
		return idx;

	for (i = 0; i < NUM_PPORT_PHY_STATISTICAL_COUNTERS; i++)
		data[idx++] =
			MLX5E_READ_CTR64_BE(&priv->stats.pport.phy_statistical_counters,
					    pport_phy_statistical_stats_desc, i);

	if (MLX5_CAP_PCAM_FEATURE(mdev, per_lane_error_counters))
		for (i = 0; i < NUM_PPORT_PHY_STATISTICAL_PER_LANE_COUNTERS; i++)
			data[idx++] =
				MLX5E_READ_CTR64_BE(&priv->stats.pport.phy_statistical_counters,
						    pport_phy_statistical_err_lanes_stats_desc,
						    i);
	return idx;
}
850
/* Refresh the cached PHY counters by querying the PPCNT register:
 * first the plain physical-layer group, then (if supported) the
 * physical-layer statistical group.
 */
static MLX5E_DECLARE_STATS_GRP_OP_UPDATE_STATS(phy)
{
	struct mlx5e_pport_stats *pstats = &priv->stats.pport;
	struct mlx5_core_dev *mdev = priv->mdev;
	u32 in[MLX5_ST_SZ_DW(ppcnt_reg)] = {0};
	int sz = MLX5_ST_SZ_BYTES(ppcnt_reg);
	void *out;

	MLX5_SET(ppcnt_reg, in, local_port, 1);
	out = pstats->phy_counters;
	MLX5_SET(ppcnt_reg, in, grp, MLX5_PHYSICAL_LAYER_COUNTERS_GROUP);
	mlx5_core_access_reg(mdev, in, sz, out, sz, MLX5_REG_PPCNT, 0, 0);

	if (!MLX5_CAP_PCAM_FEATURE(mdev, ppcnt_statistical_group))
		return;

	/* second query reuses @in; only the group field changes */
	out = pstats->phy_statistical_counters;
	MLX5_SET(ppcnt_reg, in, grp, MLX5_PHYSICAL_LAYER_STATISTICAL_GROUP);
	mlx5_core_access_reg(mdev, in, sz, out, sz, MLX5_REG_PPCNT, 0, 0);
}
871
/* Byte offset of a 64-bit ("_high") field in the PPCNT extended
 * Ethernet counters group layout.
 */
#define PPORT_ETH_EXT_OFF(c) \
	MLX5_BYTE_OFF(ppcnt_reg, \
		      counter_set.eth_extended_cntrs_grp_data_layout.c##_high)
static const struct counter_desc pport_eth_ext_stats_desc[] = {
	{ "rx_buffer_passed_thres_phy", PPORT_ETH_EXT_OFF(rx_buffer_almost_full) },
};

#define NUM_PPORT_ETH_EXT_COUNTERS	ARRAY_SIZE(pport_eth_ext_stats_desc)
880
881static MLX5E_DECLARE_STATS_GRP_OP_NUM_STATS(eth_ext)
882{
883 if (MLX5_CAP_PCAM_FEATURE((priv)->mdev, rx_buffer_fullness_counters))
884 return NUM_PPORT_ETH_EXT_COUNTERS;
885
886 return 0;
887}
888
889static MLX5E_DECLARE_STATS_GRP_OP_FILL_STRS(eth_ext)
890{
891 int i;
892
893 if (MLX5_CAP_PCAM_FEATURE((priv)->mdev, rx_buffer_fullness_counters))
894 for (i = 0; i < NUM_PPORT_ETH_EXT_COUNTERS; i++)
895 strcpy(data + (idx++) * ETH_GSTRING_LEN,
896 pport_eth_ext_stats_desc[i].format);
897 return idx;
898}
899
900static MLX5E_DECLARE_STATS_GRP_OP_FILL_STATS(eth_ext)
901{
902 int i;
903
904 if (MLX5_CAP_PCAM_FEATURE((priv)->mdev, rx_buffer_fullness_counters))
905 for (i = 0; i < NUM_PPORT_ETH_EXT_COUNTERS; i++)
906 data[idx++] =
907 MLX5E_READ_CTR64_BE(&priv->stats.pport.eth_ext_counters,
908 pport_eth_ext_stats_desc, i);
909 return idx;
910}
911
/* Refresh the cached extended Ethernet counters from the PPCNT register,
 * if the device supports them.
 */
static MLX5E_DECLARE_STATS_GRP_OP_UPDATE_STATS(eth_ext)
{
	struct mlx5e_pport_stats *pstats = &priv->stats.pport;
	struct mlx5_core_dev *mdev = priv->mdev;
	u32 in[MLX5_ST_SZ_DW(ppcnt_reg)] = {0};
	int sz = MLX5_ST_SZ_BYTES(ppcnt_reg);
	void *out;

	if (!MLX5_CAP_PCAM_FEATURE(mdev, rx_buffer_fullness_counters))
		return;

	MLX5_SET(ppcnt_reg, in, local_port, 1);
	out = pstats->eth_ext_counters;
	MLX5_SET(ppcnt_reg, in, grp, MLX5_ETHERNET_EXTENDED_COUNTERS_GROUP);
	mlx5_core_access_reg(mdev, in, sz, out, sz, MLX5_REG_PPCNT, 0, 0);
}
928
/* Byte offset of a 32-bit field in the MPCNT PCIe performance group. */
#define PCIE_PERF_OFF(c) \
	MLX5_BYTE_OFF(mpcnt_reg, counter_set.pcie_perf_cntrs_grp_data_layout.c)
static const struct counter_desc pcie_perf_stats_desc[] = {
	{ "rx_pci_signal_integrity", PCIE_PERF_OFF(rx_errors) },
	{ "tx_pci_signal_integrity", PCIE_PERF_OFF(tx_errors) },
};

/* Byte offset of a 64-bit ("_high") field in the same group. */
#define PCIE_PERF_OFF64(c) \
	MLX5_BYTE_OFF(mpcnt_reg, counter_set.pcie_perf_cntrs_grp_data_layout.c##_high)
static const struct counter_desc pcie_perf_stats_desc64[] = {
	{ "outbound_pci_buffer_overflow", PCIE_PERF_OFF64(tx_overflow_buffer_pkt) },
};

/* Outbound stall counters; gated by the pcie_outbound_stalled cap. */
static const struct counter_desc pcie_perf_stall_stats_desc[] = {
	{ "outbound_pci_stalled_rd", PCIE_PERF_OFF(outbound_stalled_reads) },
	{ "outbound_pci_stalled_wr", PCIE_PERF_OFF(outbound_stalled_writes) },
	{ "outbound_pci_stalled_rd_events", PCIE_PERF_OFF(outbound_stalled_reads_events) },
	{ "outbound_pci_stalled_wr_events", PCIE_PERF_OFF(outbound_stalled_writes_events) },
};

#define NUM_PCIE_PERF_COUNTERS		ARRAY_SIZE(pcie_perf_stats_desc)
#define NUM_PCIE_PERF_COUNTERS64	ARRAY_SIZE(pcie_perf_stats_desc64)
#define NUM_PCIE_PERF_STALL_COUNTERS	ARRAY_SIZE(pcie_perf_stall_stats_desc)
952
953static MLX5E_DECLARE_STATS_GRP_OP_NUM_STATS(pcie)
954{
955 int num_stats = 0;
956
957 if (MLX5_CAP_MCAM_FEATURE((priv)->mdev, pcie_performance_group))
958 num_stats += NUM_PCIE_PERF_COUNTERS;
959
960 if (MLX5_CAP_MCAM_FEATURE((priv)->mdev, tx_overflow_buffer_pkt))
961 num_stats += NUM_PCIE_PERF_COUNTERS64;
962
963 if (MLX5_CAP_MCAM_FEATURE((priv)->mdev, pcie_outbound_stalled))
964 num_stats += NUM_PCIE_PERF_STALL_COUNTERS;
965
966 return num_stats;
967}
968
/* Emit ethtool strings for the supported PCIe counter sub-sets, in the
 * same order as the num_stats callback counts them. Returns next index.
 */
static MLX5E_DECLARE_STATS_GRP_OP_FILL_STRS(pcie)
{
	int i;

	if (MLX5_CAP_MCAM_FEATURE((priv)->mdev, pcie_performance_group))
		for (i = 0; i < NUM_PCIE_PERF_COUNTERS; i++)
			strcpy(data + (idx++) * ETH_GSTRING_LEN,
			       pcie_perf_stats_desc[i].format);

	if (MLX5_CAP_MCAM_FEATURE((priv)->mdev, tx_overflow_buffer_pkt))
		for (i = 0; i < NUM_PCIE_PERF_COUNTERS64; i++)
			strcpy(data + (idx++) * ETH_GSTRING_LEN,
			       pcie_perf_stats_desc64[i].format);

	if (MLX5_CAP_MCAM_FEATURE((priv)->mdev, pcie_outbound_stalled))
		for (i = 0; i < NUM_PCIE_PERF_STALL_COUNTERS; i++)
			strcpy(data + (idx++) * ETH_GSTRING_LEN,
			       pcie_perf_stall_stats_desc[i].format);
	return idx;
}
989
/* Copy cached PCIe counter values into @data. Note the mixed widths:
 * the perf and stall sets are 32-bit reads, the overflow set is 64-bit.
 * Returns the next index.
 */
static MLX5E_DECLARE_STATS_GRP_OP_FILL_STATS(pcie)
{
	int i;

	if (MLX5_CAP_MCAM_FEATURE((priv)->mdev, pcie_performance_group))
		for (i = 0; i < NUM_PCIE_PERF_COUNTERS; i++)
			data[idx++] =
				MLX5E_READ_CTR32_BE(&priv->stats.pcie.pcie_perf_counters,
						    pcie_perf_stats_desc, i);

	if (MLX5_CAP_MCAM_FEATURE((priv)->mdev, tx_overflow_buffer_pkt))
		for (i = 0; i < NUM_PCIE_PERF_COUNTERS64; i++)
			data[idx++] =
				MLX5E_READ_CTR64_BE(&priv->stats.pcie.pcie_perf_counters,
						    pcie_perf_stats_desc64, i);

	if (MLX5_CAP_MCAM_FEATURE((priv)->mdev, pcie_outbound_stalled))
		for (i = 0; i < NUM_PCIE_PERF_STALL_COUNTERS; i++)
			data[idx++] =
				MLX5E_READ_CTR32_BE(&priv->stats.pcie.pcie_perf_counters,
						    pcie_perf_stall_stats_desc, i);
	return idx;
}
1013
/* Refresh the cached PCIe performance counters via the MPCNT register.
 * NOTE(review): only pcie_performance_group gates the query here; the
 * overflow/stall counters read by fill_stats appear to live in the same
 * MPCNT group buffer — confirm against the register layout.
 */
static MLX5E_DECLARE_STATS_GRP_OP_UPDATE_STATS(pcie)
{
	struct mlx5e_pcie_stats *pcie_stats = &priv->stats.pcie;
	struct mlx5_core_dev *mdev = priv->mdev;
	u32 in[MLX5_ST_SZ_DW(mpcnt_reg)] = {0};
	int sz = MLX5_ST_SZ_BYTES(mpcnt_reg);
	void *out;

	if (!MLX5_CAP_MCAM_FEATURE(mdev, pcie_performance_group))
		return;

	out = pcie_stats->pcie_perf_counters;
	MLX5_SET(mpcnt_reg, in, grp, MLX5_PCIE_PERFORMANCE_COUNTERS_GROUP);
	mlx5_core_access_reg(mdev, in, sz, out, sz, MLX5_REG_MPCNT, 0, 0);
}
1029
/* Byte offset of a 64-bit ("_high") field in the PPCNT per-TC prio group. */
#define PPORT_PER_TC_PRIO_OFF(c) \
	MLX5_BYTE_OFF(ppcnt_reg, \
		      counter_set.eth_per_tc_prio_grp_data_layout.c##_high)

/* Format strings contain %d, filled in with the priority number. */
static const struct counter_desc pport_per_tc_prio_stats_desc[] = {
	{ "rx_prio%d_buf_discard", PPORT_PER_TC_PRIO_OFF(no_buffer_discard_uc) },
};

#define NUM_PPORT_PER_TC_PRIO_COUNTERS	ARRAY_SIZE(pport_per_tc_prio_stats_desc)

/* Byte offset of a 64-bit field in the per-TC congestion prio group. */
#define PPORT_PER_TC_CONGEST_PRIO_OFF(c) \
	MLX5_BYTE_OFF(ppcnt_reg, \
		      counter_set.eth_per_tc_congest_prio_grp_data_layout.c##_high)

static const struct counter_desc pport_per_tc_congest_prio_stats_desc[] = {
	{ "rx_prio%d_cong_discard", PPORT_PER_TC_CONGEST_PRIO_OFF(wred_discard) },
	{ "rx_prio%d_marked", PPORT_PER_TC_CONGEST_PRIO_OFF(ecn_marked_tc) },
};

#define NUM_PPORT_PER_TC_CONGEST_PRIO_COUNTERS \
	ARRAY_SIZE(pport_per_tc_congest_prio_stats_desc)
1051
/* Per-TC prio counters are exposed only when the sbcam_reg capability
 * is present; one counter set per priority.
 */
static int mlx5e_grp_per_tc_prio_get_num_stats(struct mlx5e_priv *priv)
{
	struct mlx5_core_dev *mdev = priv->mdev;

	if (!MLX5_CAP_GEN(mdev, sbcam_reg))
		return 0;

	return NUM_PPORT_PER_TC_PRIO_COUNTERS * NUM_PPORT_PRIO;
}
1061
/* Emit per-priority strings for both the per-TC prio and per-TC
 * congestion counter sets, interleaved per priority. Returns next index.
 */
static MLX5E_DECLARE_STATS_GRP_OP_FILL_STRS(per_port_buff_congest)
{
	struct mlx5_core_dev *mdev = priv->mdev;
	int i, prio;

	if (!MLX5_CAP_GEN(mdev, sbcam_reg))
		return idx;

	for (prio = 0; prio < NUM_PPORT_PRIO; prio++) {
		for (i = 0; i < NUM_PPORT_PER_TC_PRIO_COUNTERS; i++)
			sprintf(data + (idx++) * ETH_GSTRING_LEN,
				pport_per_tc_prio_stats_desc[i].format, prio);
		for (i = 0; i < NUM_PPORT_PER_TC_CONGEST_PRIO_COUNTERS; i++)
			sprintf(data + (idx++) * ETH_GSTRING_LEN,
				pport_per_tc_congest_prio_stats_desc[i].format, prio);
	}

	return idx;
}
1081
1082static MLX5E_DECLARE_STATS_GRP_OP_FILL_STATS(per_port_buff_congest)
1083{
1084 struct mlx5e_pport_stats *pport = &priv->stats.pport;
1085 struct mlx5_core_dev *mdev = priv->mdev;
1086 int i, prio;
1087
1088 if (!MLX5_CAP_GEN(mdev, sbcam_reg))
1089 return idx;
1090
1091 for (prio = 0; prio < NUM_PPORT_PRIO; prio++) {
1092 for (i = 0; i < NUM_PPORT_PER_TC_PRIO_COUNTERS; i++)
1093 data[idx++] =
1094 MLX5E_READ_CTR64_BE(&pport->per_tc_prio_counters[prio],
1095 pport_per_tc_prio_stats_desc, i);
1096 for (i = 0; i < NUM_PPORT_PER_TC_CONGEST_PRIO_COUNTERS ; i++)
1097 data[idx++] =
1098 MLX5E_READ_CTR64_BE(&pport->per_tc_congest_prio_counters[prio],
1099 pport_per_tc_congest_prio_stats_desc, i);
1100 }
1101
1102 return idx;
1103}
1104
/* Query PPCNT per-TC counters for every priority into the per-prio
 * caches. pnat=2 selects host-side counters per the register layout.
 */
static void mlx5e_grp_per_tc_prio_update_stats(struct mlx5e_priv *priv)
{
	struct mlx5e_pport_stats *pstats = &priv->stats.pport;
	struct mlx5_core_dev *mdev = priv->mdev;
	u32 in[MLX5_ST_SZ_DW(ppcnt_reg)] = {};
	int sz = MLX5_ST_SZ_BYTES(ppcnt_reg);
	void *out;
	int prio;

	if (!MLX5_CAP_GEN(mdev, sbcam_reg))
		return;

	MLX5_SET(ppcnt_reg, in, pnat, 2);
	MLX5_SET(ppcnt_reg, in, grp, MLX5_PER_TRAFFIC_CLASS_COUNTERS_GROUP);
	for (prio = 0; prio < NUM_PPORT_PRIO; prio++) {
		out = pstats->per_tc_prio_counters[prio];
		MLX5_SET(ppcnt_reg, in, prio_tc, prio);
		mlx5_core_access_reg(mdev, in, sz, out, sz, MLX5_REG_PPCNT, 0, 0);
	}
}
1125
/* Per-TC congestion counters, gated on sbcam_reg like the per-TC prio
 * set; one counter set per priority.
 */
static int mlx5e_grp_per_tc_congest_prio_get_num_stats(struct mlx5e_priv *priv)
{
	struct mlx5_core_dev *mdev = priv->mdev;

	if (!MLX5_CAP_GEN(mdev, sbcam_reg))
		return 0;

	return NUM_PPORT_PER_TC_CONGEST_PRIO_COUNTERS * NUM_PPORT_PRIO;
}
1135
/* Query PPCNT per-TC congestion counters for every priority into the
 * per-prio caches; mirrors mlx5e_grp_per_tc_prio_update_stats().
 */
static void mlx5e_grp_per_tc_congest_prio_update_stats(struct mlx5e_priv *priv)
{
	struct mlx5e_pport_stats *pstats = &priv->stats.pport;
	struct mlx5_core_dev *mdev = priv->mdev;
	u32 in[MLX5_ST_SZ_DW(ppcnt_reg)] = {};
	int sz = MLX5_ST_SZ_BYTES(ppcnt_reg);
	void *out;
	int prio;

	if (!MLX5_CAP_GEN(mdev, sbcam_reg))
		return;

	MLX5_SET(ppcnt_reg, in, pnat, 2);
	MLX5_SET(ppcnt_reg, in, grp, MLX5_PER_TRAFFIC_CLASS_CONGESTION_GROUP);
	for (prio = 0; prio < NUM_PPORT_PRIO; prio++) {
		out = pstats->per_tc_congest_prio_counters[prio];
		MLX5_SET(ppcnt_reg, in, prio_tc, prio);
		mlx5_core_access_reg(mdev, in, sz, out, sz, MLX5_REG_PPCNT, 0, 0);
	}
}
1156
/* Group total: per-TC prio plus per-TC congestion counters. */
static MLX5E_DECLARE_STATS_GRP_OP_NUM_STATS(per_port_buff_congest)
{
	return mlx5e_grp_per_tc_prio_get_num_stats(priv) +
		mlx5e_grp_per_tc_congest_prio_get_num_stats(priv);
}

/* Refresh both per-TC counter caches. */
static MLX5E_DECLARE_STATS_GRP_OP_UPDATE_STATS(per_port_buff_congest)
{
	mlx5e_grp_per_tc_prio_update_stats(priv);
	mlx5e_grp_per_tc_congest_prio_update_stats(priv);
}
1168
/* Byte offset of a 64-bit ("_high") field in the PPCNT per-prio group. */
#define PPORT_PER_PRIO_OFF(c) \
	MLX5_BYTE_OFF(ppcnt_reg, \
		      counter_set.eth_per_prio_grp_data_layout.c##_high)
/* Format strings contain %d, filled in with the priority number. */
static const struct counter_desc pport_per_prio_traffic_stats_desc[] = {
	{ "rx_prio%d_bytes", PPORT_PER_PRIO_OFF(rx_octets) },
	{ "rx_prio%d_packets", PPORT_PER_PRIO_OFF(rx_frames) },
	{ "rx_prio%d_discards", PPORT_PER_PRIO_OFF(rx_discards) },
	{ "tx_prio%d_bytes", PPORT_PER_PRIO_OFF(tx_octets) },
	{ "tx_prio%d_packets", PPORT_PER_PRIO_OFF(tx_frames) },
};

#define NUM_PPORT_PER_PRIO_TRAFFIC_COUNTERS	ARRAY_SIZE(pport_per_prio_traffic_stats_desc)

/* Unconditional: one traffic counter set per priority. */
static int mlx5e_grp_per_prio_traffic_get_num_stats(void)
{
	return NUM_PPORT_PER_PRIO_TRAFFIC_COUNTERS * NUM_PPORT_PRIO;
}
1186
/* Emit "rx_prioN_*" / "tx_prioN_*" strings for every priority.
 * Returns the next free string index.
 */
static int mlx5e_grp_per_prio_traffic_fill_strings(struct mlx5e_priv *priv,
						   u8 *data,
						   int idx)
{
	int i, prio;

	for (prio = 0; prio < NUM_PPORT_PRIO; prio++) {
		for (i = 0; i < NUM_PPORT_PER_PRIO_TRAFFIC_COUNTERS; i++)
			sprintf(data + (idx++) * ETH_GSTRING_LEN,
				pport_per_prio_traffic_stats_desc[i].format, prio);
	}

	return idx;
}
1201
/* Copy cached per-priority traffic counters into @data, in the same
 * order as the strings. Returns the next index.
 */
static int mlx5e_grp_per_prio_traffic_fill_stats(struct mlx5e_priv *priv,
						 u64 *data,
						 int idx)
{
	int i, prio;

	for (prio = 0; prio < NUM_PPORT_PRIO; prio++) {
		for (i = 0; i < NUM_PPORT_PER_PRIO_TRAFFIC_COUNTERS; i++)
			data[idx++] =
				MLX5E_READ_CTR64_BE(&priv->stats.pport.per_prio_counters[prio],
						    pport_per_prio_traffic_stats_desc, i);
	}

	return idx;
}
1217
/* PFC/pause counters; "%s" is filled with "prioN" or "global". */
static const struct counter_desc pport_per_prio_pfc_stats_desc[] = {

	{ "rx_%s_pause", PPORT_PER_PRIO_OFF(rx_pause) },
	{ "rx_%s_pause_duration", PPORT_PER_PRIO_OFF(rx_pause_duration) },
	{ "tx_%s_pause", PPORT_PER_PRIO_OFF(tx_pause) },
	{ "tx_%s_pause_duration", PPORT_PER_PRIO_OFF(tx_pause_duration) },
	{ "rx_%s_pause_transition", PPORT_PER_PRIO_OFF(rx_pause_transition) },
};

/* Pause-storm stall counters; live in the per-prio group layout. */
static const struct counter_desc pport_pfc_stall_stats_desc[] = {
	{ "tx_pause_storm_warning_events", PPORT_PER_PRIO_OFF(device_stall_minor_watermark_cnt) },
	{ "tx_pause_storm_error_events", PPORT_PER_PRIO_OFF(device_stall_critical_watermark_cnt) },
};

#define NUM_PPORT_PER_PRIO_PFC_COUNTERS		ARRAY_SIZE(pport_per_prio_pfc_stats_desc)
/* Zero unless both pfcc_mask and stall_detect caps are set (the cap
 * macros evaluate to 0/1 and multiply the array size).
 */
#define NUM_PPORT_PFC_STALL_COUNTERS(priv)	(ARRAY_SIZE(pport_pfc_stall_stats_desc) * \
						 MLX5_CAP_PCAM_FEATURE((priv)->mdev, pfcc_mask) * \
						 MLX5_CAP_DEBUG((priv)->mdev, stall_detect))
1236
1237static unsigned long mlx5e_query_pfc_combined(struct mlx5e_priv *priv)
1238{
1239 struct mlx5_core_dev *mdev = priv->mdev;
1240 u8 pfc_en_tx;
1241 u8 pfc_en_rx;
1242 int err;
1243
1244 if (MLX5_CAP_GEN(mdev, port_type) != MLX5_CAP_PORT_TYPE_ETH)
1245 return 0;
1246
1247 err = mlx5_query_port_pfc(mdev, &pfc_en_tx, &pfc_en_rx);
1248
1249 return err ? 0 : pfc_en_tx | pfc_en_rx;
1250}
1251
1252static bool mlx5e_query_global_pause_combined(struct mlx5e_priv *priv)
1253{
1254 struct mlx5_core_dev *mdev = priv->mdev;
1255 u32 rx_pause;
1256 u32 tx_pause;
1257 int err;
1258
1259 if (MLX5_CAP_GEN(mdev, port_type) != MLX5_CAP_PORT_TYPE_ETH)
1260 return false;
1261
1262 err = mlx5_query_port_pause(mdev, &rx_pause, &tx_pause);
1263
1264 return err ? false : rx_pause | tx_pause;
1265}
1266
/* PFC counter total: one counter set per PFC-enabled priority, one more
 * when global pause is on (bool adds 0/1), plus the stall counters.
 */
static int mlx5e_grp_per_prio_pfc_get_num_stats(struct mlx5e_priv *priv)
{
	return (mlx5e_query_global_pause_combined(priv) +
		hweight8(mlx5e_query_pfc_combined(priv))) *
		NUM_PPORT_PER_PRIO_PFC_COUNTERS +
		NUM_PPORT_PFC_STALL_COUNTERS(priv);
}
1274
/* Emit PFC counter strings: "prioN" variants for each PFC-enabled
 * priority, "global" variants when plain pause is on, then the stall
 * counters. Returns the next free string index.
 */
static int mlx5e_grp_per_prio_pfc_fill_strings(struct mlx5e_priv *priv,
					       u8 *data,
					       int idx)
{
	unsigned long pfc_combined;
	int i, prio;

	pfc_combined = mlx5e_query_pfc_combined(priv);
	for_each_set_bit(prio, &pfc_combined, NUM_PPORT_PRIO) {
		for (i = 0; i < NUM_PPORT_PER_PRIO_PFC_COUNTERS; i++) {
			char pfc_string[ETH_GSTRING_LEN];

			snprintf(pfc_string, sizeof(pfc_string), "prio%d", prio);
			sprintf(data + (idx++) * ETH_GSTRING_LEN,
				pport_per_prio_pfc_stats_desc[i].format, pfc_string);
		}
	}

	if (mlx5e_query_global_pause_combined(priv)) {
		for (i = 0; i < NUM_PPORT_PER_PRIO_PFC_COUNTERS; i++) {
			sprintf(data + (idx++) * ETH_GSTRING_LEN,
				pport_per_prio_pfc_stats_desc[i].format, "global");
		}
	}

	/* NUM_PPORT_PFC_STALL_COUNTERS() is 0 when the caps are absent */
	for (i = 0; i < NUM_PPORT_PFC_STALL_COUNTERS(priv); i++)
		strcpy(data + (idx++) * ETH_GSTRING_LEN,
		       pport_pfc_stall_stats_desc[i].format);

	return idx;
}
1306
/* Copy cached PFC/pause counter values in the same order as the
 * strings. Global pause and the stall counters are read from priority 0,
 * where the hardware reports them. Returns the next index.
 */
static int mlx5e_grp_per_prio_pfc_fill_stats(struct mlx5e_priv *priv,
					     u64 *data,
					     int idx)
{
	unsigned long pfc_combined;
	int i, prio;

	pfc_combined = mlx5e_query_pfc_combined(priv);
	for_each_set_bit(prio, &pfc_combined, NUM_PPORT_PRIO) {
		for (i = 0; i < NUM_PPORT_PER_PRIO_PFC_COUNTERS; i++) {
			data[idx++] =
				MLX5E_READ_CTR64_BE(&priv->stats.pport.per_prio_counters[prio],
						    pport_per_prio_pfc_stats_desc, i);
		}
	}

	if (mlx5e_query_global_pause_combined(priv)) {
		for (i = 0; i < NUM_PPORT_PER_PRIO_PFC_COUNTERS; i++) {
			data[idx++] =
				MLX5E_READ_CTR64_BE(&priv->stats.pport.per_prio_counters[0],
						    pport_per_prio_pfc_stats_desc, i);
		}
	}

	for (i = 0; i < NUM_PPORT_PFC_STALL_COUNTERS(priv); i++)
		data[idx++] = MLX5E_READ_CTR64_BE(&priv->stats.pport.per_prio_counters[0],
						  pport_pfc_stall_stats_desc, i);

	return idx;
}
1337
/* The per_prio group is the traffic sub-group plus the PFC sub-group. */
static MLX5E_DECLARE_STATS_GRP_OP_NUM_STATS(per_prio)
{
	return mlx5e_grp_per_prio_traffic_get_num_stats() +
		mlx5e_grp_per_prio_pfc_get_num_stats(priv);
}

/* Strings: traffic first, then PFC — must match the fill_stats order. */
static MLX5E_DECLARE_STATS_GRP_OP_FILL_STRS(per_prio)
{
	idx = mlx5e_grp_per_prio_traffic_fill_strings(priv, data, idx);
	idx = mlx5e_grp_per_prio_pfc_fill_strings(priv, data, idx);
	return idx;
}

/* Values: traffic first, then PFC — mirrors the string order. */
static MLX5E_DECLARE_STATS_GRP_OP_FILL_STATS(per_prio)
{
	idx = mlx5e_grp_per_prio_traffic_fill_stats(priv, data, idx);
	idx = mlx5e_grp_per_prio_pfc_fill_stats(priv, data, idx);
	return idx;
}
1357
/* Query PPCNT per-priority counters for every priority into the caches
 * used by both the traffic and PFC fill callbacks.
 */
static MLX5E_DECLARE_STATS_GRP_OP_UPDATE_STATS(per_prio)
{
	struct mlx5e_pport_stats *pstats = &priv->stats.pport;
	struct mlx5_core_dev *mdev = priv->mdev;
	u32 in[MLX5_ST_SZ_DW(ppcnt_reg)] = {0};
	int sz = MLX5_ST_SZ_BYTES(ppcnt_reg);
	int prio;
	void *out;

	if (!MLX5_BASIC_PPCNT_SUPPORTED(mdev))
		return;

	MLX5_SET(ppcnt_reg, in, local_port, 1);
	MLX5_SET(ppcnt_reg, in, grp, MLX5_PER_PRIORITY_COUNTERS_GROUP);
	for (prio = 0; prio < NUM_PPORT_PRIO; prio++) {
		out = pstats->per_prio_counters[prio];
		MLX5_SET(ppcnt_reg, in, prio_tc, prio);
		mlx5_core_access_reg(mdev, in, sz, out, sz,
				     MLX5_REG_PPCNT, 0, 0);
	}
}
1379
/* Port module event counters: offsets index u64 arrays in
 * struct mlx5_pme_stats by event type.
 */
static const struct counter_desc mlx5e_pme_status_desc[] = {
	{ "module_unplug", sizeof(u64) * MLX5_MODULE_STATUS_UNPLUGGED },
};

static const struct counter_desc mlx5e_pme_error_desc[] = {
	{ "module_bus_stuck", sizeof(u64) * MLX5_MODULE_EVENT_ERROR_BUS_STUCK },
	{ "module_high_temp", sizeof(u64) * MLX5_MODULE_EVENT_ERROR_HIGH_TEMPERATURE },
	{ "module_bad_shorted", sizeof(u64) * MLX5_MODULE_EVENT_ERROR_BAD_CABLE },
};

#define NUM_PME_STATUS_STATS		ARRAY_SIZE(mlx5e_pme_status_desc)
#define NUM_PME_ERR_STATS		ARRAY_SIZE(mlx5e_pme_error_desc)

/* PME counters are always exposed. */
static MLX5E_DECLARE_STATS_GRP_OP_NUM_STATS(pme)
{
	return NUM_PME_STATUS_STATS + NUM_PME_ERR_STATS;
}
1397
/* Emit strings for the port module event counters: status set first,
 * then error set. Returns the next free string index.
 */
static MLX5E_DECLARE_STATS_GRP_OP_FILL_STRS(pme)
{
	int i;

	for (i = 0; i < NUM_PME_STATUS_STATS; i++)
		strcpy(data + (idx++) * ETH_GSTRING_LEN, mlx5e_pme_status_desc[i].format);

	for (i = 0; i < NUM_PME_ERR_STATS; i++)
		strcpy(data + (idx++) * ETH_GSTRING_LEN, mlx5e_pme_error_desc[i].format);

	return idx;
}
1410
/* Snapshot the PME counters from the core and copy them into @data in
 * the same order as the strings. Returns the next index.
 */
static MLX5E_DECLARE_STATS_GRP_OP_FILL_STATS(pme)
{
	struct mlx5_pme_stats pme_stats;
	int i;

	mlx5_get_pme_stats(priv->mdev, &pme_stats);

	for (i = 0; i < NUM_PME_STATUS_STATS; i++)
		data[idx++] = MLX5E_READ_CTR64_CPU(pme_stats.status_counters,
						   mlx5e_pme_status_desc, i);

	for (i = 0; i < NUM_PME_ERR_STATS; i++)
		data[idx++] = MLX5E_READ_CTR64_CPU(pme_stats.error_counters,
						   mlx5e_pme_error_desc, i);

	return idx;
}

/* No cached state to refresh; fill_stats queries the core directly. */
static MLX5E_DECLARE_STATS_GRP_OP_UPDATE_STATS(pme) { return; }
1430
/* TLS offload stats group: all work is delegated to the en_accel/tls
 * helpers, which manage their own counter set.
 */
static MLX5E_DECLARE_STATS_GRP_OP_NUM_STATS(tls)
{
	return mlx5e_tls_get_count(priv);
}

static MLX5E_DECLARE_STATS_GRP_OP_FILL_STRS(tls)
{
	/* helper returns how many strings it wrote starting at @idx */
	return idx + mlx5e_tls_get_strings(priv, data + idx * ETH_GSTRING_LEN);
}

static MLX5E_DECLARE_STATS_GRP_OP_FILL_STATS(tls)
{
	/* helper returns how many values it wrote starting at @idx */
	return idx + mlx5e_tls_get_stats(priv, data + idx);
}

/* TLS counters are maintained by the accel layer; nothing to refresh. */
static MLX5E_DECLARE_STATS_GRP_OP_UPDATE_STATS(tls) { return; }
1447
/* Per-channel software counters. The tables below drive the channels
 * stats group; each MLX5E_DECLARE_*_STAT entry pairs an ethtool format
 * string (with %d for the channel number) with a struct field offset.
 */

/* Per-RQ (receive queue) counters. */
static const struct counter_desc rq_stats_desc[] = {
	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, packets) },
	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, bytes) },
	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, csum_complete) },
	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, csum_complete_tail) },
	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, csum_complete_tail_slow) },
	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, csum_unnecessary) },
	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, csum_unnecessary_inner) },
	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, csum_none) },
	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, xdp_drop) },
	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, xdp_redirect) },
	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, lro_packets) },
	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, lro_bytes) },
	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, ecn_mark) },
	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, removed_vlan_packets) },
	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, wqe_err) },
	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, mpwqe_filler_cqes) },
	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, mpwqe_filler_strides) },
	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, oversize_pkts_sw_drop) },
	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, buff_alloc_err) },
	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, cqe_compress_blks) },
	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, cqe_compress_pkts) },
	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, cache_reuse) },
	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, cache_full) },
	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, cache_empty) },
	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, cache_busy) },
	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, cache_waive) },
	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, congst_umr) },
	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, arfs_err) },
	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, recover) },
};

/* Per-SQ (send queue) counters; TLS entries only with kTLS offload. */
static const struct counter_desc sq_stats_desc[] = {
	{ MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, packets) },
	{ MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, bytes) },
	{ MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, tso_packets) },
	{ MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, tso_bytes) },
	{ MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, tso_inner_packets) },
	{ MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, tso_inner_bytes) },
	{ MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, csum_partial) },
	{ MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, csum_partial_inner) },
	{ MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, added_vlan_packets) },
	{ MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, nop) },
#ifdef CONFIG_MLX5_EN_TLS
	{ MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, tls_encrypted_packets) },
	{ MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, tls_encrypted_bytes) },
	{ MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, tls_ctx) },
	{ MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, tls_ooo) },
	{ MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, tls_dump_packets) },
	{ MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, tls_dump_bytes) },
	{ MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, tls_resync_bytes) },
	{ MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, tls_skip_no_sync_data) },
	{ MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, tls_drop_no_sync_data) },
	{ MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, tls_drop_bypass_req) },
#endif
	{ MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, csum_none) },
	{ MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, stopped) },
	{ MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, dropped) },
	{ MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, xmit_more) },
	{ MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, recover) },
	{ MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, cqes) },
	{ MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, wake) },
	{ MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, cqe_err) },
};

/* XDP_TX send queue attached to an RQ. */
static const struct counter_desc rq_xdpsq_stats_desc[] = {
	{ MLX5E_DECLARE_RQ_XDPSQ_STAT(struct mlx5e_xdpsq_stats, xmit) },
	{ MLX5E_DECLARE_RQ_XDPSQ_STAT(struct mlx5e_xdpsq_stats, mpwqe) },
	{ MLX5E_DECLARE_RQ_XDPSQ_STAT(struct mlx5e_xdpsq_stats, inlnw) },
	{ MLX5E_DECLARE_RQ_XDPSQ_STAT(struct mlx5e_xdpsq_stats, nops) },
	{ MLX5E_DECLARE_RQ_XDPSQ_STAT(struct mlx5e_xdpsq_stats, full) },
	{ MLX5E_DECLARE_RQ_XDPSQ_STAT(struct mlx5e_xdpsq_stats, err) },
	{ MLX5E_DECLARE_RQ_XDPSQ_STAT(struct mlx5e_xdpsq_stats, cqes) },
};

/* Standalone XDP redirect send queue. */
static const struct counter_desc xdpsq_stats_desc[] = {
	{ MLX5E_DECLARE_XDPSQ_STAT(struct mlx5e_xdpsq_stats, xmit) },
	{ MLX5E_DECLARE_XDPSQ_STAT(struct mlx5e_xdpsq_stats, mpwqe) },
	{ MLX5E_DECLARE_XDPSQ_STAT(struct mlx5e_xdpsq_stats, inlnw) },
	{ MLX5E_DECLARE_XDPSQ_STAT(struct mlx5e_xdpsq_stats, nops) },
	{ MLX5E_DECLARE_XDPSQ_STAT(struct mlx5e_xdpsq_stats, full) },
	{ MLX5E_DECLARE_XDPSQ_STAT(struct mlx5e_xdpsq_stats, err) },
	{ MLX5E_DECLARE_XDPSQ_STAT(struct mlx5e_xdpsq_stats, cqes) },
};

/* AF_XDP receive queue (subset of the RQ counters). */
static const struct counter_desc xskrq_stats_desc[] = {
	{ MLX5E_DECLARE_XSKRQ_STAT(struct mlx5e_rq_stats, packets) },
	{ MLX5E_DECLARE_XSKRQ_STAT(struct mlx5e_rq_stats, bytes) },
	{ MLX5E_DECLARE_XSKRQ_STAT(struct mlx5e_rq_stats, csum_complete) },
	{ MLX5E_DECLARE_XSKRQ_STAT(struct mlx5e_rq_stats, csum_unnecessary) },
	{ MLX5E_DECLARE_XSKRQ_STAT(struct mlx5e_rq_stats, csum_unnecessary_inner) },
	{ MLX5E_DECLARE_XSKRQ_STAT(struct mlx5e_rq_stats, csum_none) },
	{ MLX5E_DECLARE_XSKRQ_STAT(struct mlx5e_rq_stats, ecn_mark) },
	{ MLX5E_DECLARE_XSKRQ_STAT(struct mlx5e_rq_stats, removed_vlan_packets) },
	{ MLX5E_DECLARE_XSKRQ_STAT(struct mlx5e_rq_stats, xdp_drop) },
	{ MLX5E_DECLARE_XSKRQ_STAT(struct mlx5e_rq_stats, xdp_redirect) },
	{ MLX5E_DECLARE_XSKRQ_STAT(struct mlx5e_rq_stats, wqe_err) },
	{ MLX5E_DECLARE_XSKRQ_STAT(struct mlx5e_rq_stats, mpwqe_filler_cqes) },
	{ MLX5E_DECLARE_XSKRQ_STAT(struct mlx5e_rq_stats, mpwqe_filler_strides) },
	{ MLX5E_DECLARE_XSKRQ_STAT(struct mlx5e_rq_stats, oversize_pkts_sw_drop) },
	{ MLX5E_DECLARE_XSKRQ_STAT(struct mlx5e_rq_stats, buff_alloc_err) },
	{ MLX5E_DECLARE_XSKRQ_STAT(struct mlx5e_rq_stats, cqe_compress_blks) },
	{ MLX5E_DECLARE_XSKRQ_STAT(struct mlx5e_rq_stats, cqe_compress_pkts) },
	{ MLX5E_DECLARE_XSKRQ_STAT(struct mlx5e_rq_stats, congst_umr) },
	{ MLX5E_DECLARE_XSKRQ_STAT(struct mlx5e_rq_stats, arfs_err) },
};

/* AF_XDP send queue. */
static const struct counter_desc xsksq_stats_desc[] = {
	{ MLX5E_DECLARE_XSKSQ_STAT(struct mlx5e_xdpsq_stats, xmit) },
	{ MLX5E_DECLARE_XSKSQ_STAT(struct mlx5e_xdpsq_stats, mpwqe) },
	{ MLX5E_DECLARE_XSKSQ_STAT(struct mlx5e_xdpsq_stats, inlnw) },
	{ MLX5E_DECLARE_XSKSQ_STAT(struct mlx5e_xdpsq_stats, full) },
	{ MLX5E_DECLARE_XSKSQ_STAT(struct mlx5e_xdpsq_stats, err) },
	{ MLX5E_DECLARE_XSKSQ_STAT(struct mlx5e_xdpsq_stats, cqes) },
};

/* Per-channel (NAPI/IRQ) counters. */
static const struct counter_desc ch_stats_desc[] = {
	{ MLX5E_DECLARE_CH_STAT(struct mlx5e_ch_stats, events) },
	{ MLX5E_DECLARE_CH_STAT(struct mlx5e_ch_stats, poll) },
	{ MLX5E_DECLARE_CH_STAT(struct mlx5e_ch_stats, arm) },
	{ MLX5E_DECLARE_CH_STAT(struct mlx5e_ch_stats, aff_change) },
	{ MLX5E_DECLARE_CH_STAT(struct mlx5e_ch_stats, force_irq) },
	{ MLX5E_DECLARE_CH_STAT(struct mlx5e_ch_stats, eq_rearm) },
};

#define NUM_RQ_STATS			ARRAY_SIZE(rq_stats_desc)
#define NUM_SQ_STATS			ARRAY_SIZE(sq_stats_desc)
#define NUM_XDPSQ_STATS			ARRAY_SIZE(xdpsq_stats_desc)
#define NUM_RQ_XDPSQ_STATS		ARRAY_SIZE(rq_xdpsq_stats_desc)
#define NUM_XSKRQ_STATS			ARRAY_SIZE(xskrq_stats_desc)
#define NUM_XSKSQ_STATS			ARRAY_SIZE(xsksq_stats_desc)
#define NUM_CH_STATS			ARRAY_SIZE(ch_stats_desc)
1580
/* Total per-channel counters over all channels that could ever have
 * been opened; XSK sets count only if an XSK socket was ever used
 * (ever_used is 0/1 and multiplies the set size).
 */
static MLX5E_DECLARE_STATS_GRP_OP_NUM_STATS(channels)
{
	int max_nch = priv->max_nch;

	return (NUM_RQ_STATS * max_nch) +
	       (NUM_CH_STATS * max_nch) +
	       (NUM_SQ_STATS * max_nch * priv->max_opened_tc) +
	       (NUM_RQ_XDPSQ_STATS * max_nch) +
	       (NUM_XDPSQ_STATS * max_nch) +
	       (NUM_XSKRQ_STATS * max_nch * priv->xsk.ever_used) +
	       (NUM_XSKSQ_STATS * max_nch * priv->xsk.ever_used);
}
1593
/* Emit per-channel strings: ch, then rq/xskrq/rq_xdpsq per channel,
 * then sq per TC (numbered i + tc * max_nch), then xsksq/xdpsq.
 * Order must match fill_stats. Returns the next free string index.
 */
static MLX5E_DECLARE_STATS_GRP_OP_FILL_STRS(channels)
{
	bool is_xsk = priv->xsk.ever_used;
	int max_nch = priv->max_nch;
	int i, j, tc;

	for (i = 0; i < max_nch; i++)
		for (j = 0; j < NUM_CH_STATS; j++)
			sprintf(data + (idx++) * ETH_GSTRING_LEN,
				ch_stats_desc[j].format, i);

	for (i = 0; i < max_nch; i++) {
		for (j = 0; j < NUM_RQ_STATS; j++)
			sprintf(data + (idx++) * ETH_GSTRING_LEN,
				rq_stats_desc[j].format, i);
		/* is_xsk is 0/1: the xsk loops vanish when never used */
		for (j = 0; j < NUM_XSKRQ_STATS * is_xsk; j++)
			sprintf(data + (idx++) * ETH_GSTRING_LEN,
				xskrq_stats_desc[j].format, i);
		for (j = 0; j < NUM_RQ_XDPSQ_STATS; j++)
			sprintf(data + (idx++) * ETH_GSTRING_LEN,
				rq_xdpsq_stats_desc[j].format, i);
	}

	for (tc = 0; tc < priv->max_opened_tc; tc++)
		for (i = 0; i < max_nch; i++)
			for (j = 0; j < NUM_SQ_STATS; j++)
				sprintf(data + (idx++) * ETH_GSTRING_LEN,
					sq_stats_desc[j].format,
					i + tc * max_nch);

	for (i = 0; i < max_nch; i++) {
		for (j = 0; j < NUM_XSKSQ_STATS * is_xsk; j++)
			sprintf(data + (idx++) * ETH_GSTRING_LEN,
				xsksq_stats_desc[j].format, i);
		for (j = 0; j < NUM_XDPSQ_STATS; j++)
			sprintf(data + (idx++) * ETH_GSTRING_LEN,
				xdpsq_stats_desc[j].format, i);
	}

	return idx;
}
1635
/* Copy per-channel counter values from priv->channel_stats in exactly
 * the order the strings were emitted. Returns the next index.
 */
static MLX5E_DECLARE_STATS_GRP_OP_FILL_STATS(channels)
{
	bool is_xsk = priv->xsk.ever_used;
	int max_nch = priv->max_nch;
	int i, j, tc;

	for (i = 0; i < max_nch; i++)
		for (j = 0; j < NUM_CH_STATS; j++)
			data[idx++] =
				MLX5E_READ_CTR64_CPU(&priv->channel_stats[i].ch,
						     ch_stats_desc, j);

	for (i = 0; i < max_nch; i++) {
		for (j = 0; j < NUM_RQ_STATS; j++)
			data[idx++] =
				MLX5E_READ_CTR64_CPU(&priv->channel_stats[i].rq,
						     rq_stats_desc, j);
		/* is_xsk is 0/1: the xsk loops vanish when never used */
		for (j = 0; j < NUM_XSKRQ_STATS * is_xsk; j++)
			data[idx++] =
				MLX5E_READ_CTR64_CPU(&priv->channel_stats[i].xskrq,
						     xskrq_stats_desc, j);
		for (j = 0; j < NUM_RQ_XDPSQ_STATS; j++)
			data[idx++] =
				MLX5E_READ_CTR64_CPU(&priv->channel_stats[i].rq_xdpsq,
						     rq_xdpsq_stats_desc, j);
	}

	for (tc = 0; tc < priv->max_opened_tc; tc++)
		for (i = 0; i < max_nch; i++)
			for (j = 0; j < NUM_SQ_STATS; j++)
				data[idx++] =
					MLX5E_READ_CTR64_CPU(&priv->channel_stats[i].sq[tc],
							     sq_stats_desc, j);

	for (i = 0; i < max_nch; i++) {
		for (j = 0; j < NUM_XSKSQ_STATS * is_xsk; j++)
			data[idx++] =
				MLX5E_READ_CTR64_CPU(&priv->channel_stats[i].xsksq,
						     xsksq_stats_desc, j);
		for (j = 0; j < NUM_XDPSQ_STATS; j++)
			data[idx++] =
				MLX5E_READ_CTR64_CPU(&priv->channel_stats[i].xdpsq,
						     xdpsq_stats_desc, j);
	}

	return idx;
}
1683
/* Channel counters are updated by the datapath itself; nothing to do. */
static MLX5E_DECLARE_STATS_GRP_OP_UPDATE_STATS(channels) { return; }

/* Instantiate the group descriptor objects; the second argument flags
 * groups whose update_stats must also run from ndo_get_stats64.
 */
MLX5E_DEFINE_STATS_GRP(sw, 0);
MLX5E_DEFINE_STATS_GRP(qcnt, MLX5E_NDO_UPDATE_STATS);
MLX5E_DEFINE_STATS_GRP(vnic_env, 0);
MLX5E_DEFINE_STATS_GRP(vport, MLX5E_NDO_UPDATE_STATS);
MLX5E_DEFINE_STATS_GRP(802_3, MLX5E_NDO_UPDATE_STATS);
MLX5E_DEFINE_STATS_GRP(2863, 0);
MLX5E_DEFINE_STATS_GRP(2819, 0);
MLX5E_DEFINE_STATS_GRP(phy, 0);
MLX5E_DEFINE_STATS_GRP(pcie, 0);
MLX5E_DEFINE_STATS_GRP(per_prio, 0);
MLX5E_DEFINE_STATS_GRP(pme, 0);
MLX5E_DEFINE_STATS_GRP(channels, 0);
MLX5E_DEFINE_STATS_GRP(per_port_buff_congest, 0);
MLX5E_DEFINE_STATS_GRP(eth_ext, 0);
/* tls group is referenced only by the table below, hence static */
static MLX5E_DEFINE_STATS_GRP(tls, 0);
1701
1702
/* The set of stats groups the NIC profile exposes, in ethtool output
 * order. Update-order dependencies matter: mlx5e_stats_update() walks
 * this array in reverse.
 */
mlx5e_stats_grp_t mlx5e_nic_stats_grps[] = {
	&MLX5E_STATS_GRP(sw),
	&MLX5E_STATS_GRP(qcnt),
	&MLX5E_STATS_GRP(vnic_env),
	&MLX5E_STATS_GRP(vport),
	&MLX5E_STATS_GRP(802_3),
	&MLX5E_STATS_GRP(2863),
	&MLX5E_STATS_GRP(2819),
	&MLX5E_STATS_GRP(phy),
	&MLX5E_STATS_GRP(eth_ext),
	&MLX5E_STATS_GRP(pcie),
	&MLX5E_STATS_GRP(per_prio),
	&MLX5E_STATS_GRP(pme),
#ifdef CONFIG_MLX5_EN_IPSEC
	&MLX5E_STATS_GRP(ipsec_sw),
	&MLX5E_STATS_GRP(ipsec_hw),
#endif
	&MLX5E_STATS_GRP(tls),
	&MLX5E_STATS_GRP(channels),
	&MLX5E_STATS_GRP(per_port_buff_congest),
};

/* stats_grps_num callback for the NIC profile. */
unsigned int mlx5e_nic_stats_grps_num(struct mlx5e_priv *priv)
{
	return ARRAY_SIZE(mlx5e_nic_stats_grps);
}
1729