1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33#include "lib/mlx5.h"
34#include "en.h"
35#include "en_accel/tls.h"
36#include "en_accel/en_accel.h"
37
38static unsigned int stats_grps_num(struct mlx5e_priv *priv)
39{
40 return !priv->profile->stats_grps_num ? 0 :
41 priv->profile->stats_grps_num(priv);
42}
43
44unsigned int mlx5e_stats_total_num(struct mlx5e_priv *priv)
45{
46 mlx5e_stats_grp_t *stats_grps = priv->profile->stats_grps;
47 const unsigned int num_stats_grps = stats_grps_num(priv);
48 unsigned int total = 0;
49 int i;
50
51 for (i = 0; i < num_stats_grps; i++)
52 total += stats_grps[i]->get_num_stats(priv);
53
54 return total;
55}
56
57void mlx5e_stats_update_ndo_stats(struct mlx5e_priv *priv)
58{
59 mlx5e_stats_grp_t *stats_grps = priv->profile->stats_grps;
60 const unsigned int num_stats_grps = stats_grps_num(priv);
61 int i;
62
63 for (i = num_stats_grps - 1; i >= 0; i--)
64 if (stats_grps[i]->update_stats &&
65 stats_grps[i]->update_stats_mask & MLX5E_NDO_UPDATE_STATS)
66 stats_grps[i]->update_stats(priv);
67}
68
69void mlx5e_stats_update(struct mlx5e_priv *priv)
70{
71 mlx5e_stats_grp_t *stats_grps = priv->profile->stats_grps;
72 const unsigned int num_stats_grps = stats_grps_num(priv);
73 int i;
74
75 for (i = num_stats_grps - 1; i >= 0; i--)
76 if (stats_grps[i]->update_stats)
77 stats_grps[i]->update_stats(priv);
78}
79
80void mlx5e_stats_fill(struct mlx5e_priv *priv, u64 *data, int idx)
81{
82 mlx5e_stats_grp_t *stats_grps = priv->profile->stats_grps;
83 const unsigned int num_stats_grps = stats_grps_num(priv);
84 int i;
85
86 for (i = 0; i < num_stats_grps; i++)
87 idx = stats_grps[i]->fill_stats(priv, data, idx);
88}
89
90void mlx5e_stats_fill_strings(struct mlx5e_priv *priv, u8 *data)
91{
92 mlx5e_stats_grp_t *stats_grps = priv->profile->stats_grps;
93 const unsigned int num_stats_grps = stats_grps_num(priv);
94 int i, idx = 0;
95
96 for (i = 0; i < num_stats_grps; i++)
97 idx = stats_grps[i]->fill_strings(priv, data, idx);
98}
99
100
101
/* Descriptors (name + offset within struct mlx5e_sw_stats) for the
 * software counters aggregated across all channels. TLS entries are
 * compiled in only with CONFIG_MLX5_EN_TLS.
 */
static const struct counter_desc sw_stats_desc[] = {
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_packets) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_bytes) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_packets) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_bytes) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_tso_packets) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_tso_bytes) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_tso_inner_packets) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_tso_inner_bytes) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_added_vlan_packets) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_nop) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_mpwqe_blks) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_mpwqe_pkts) },

#ifdef CONFIG_MLX5_EN_TLS
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_tls_encrypted_packets) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_tls_encrypted_bytes) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_tls_ooo) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_tls_dump_packets) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_tls_dump_bytes) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_tls_resync_bytes) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_tls_skip_no_sync_data) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_tls_drop_no_sync_data) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_tls_drop_bypass_req) },
#endif

	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_lro_packets) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_lro_bytes) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_ecn_mark) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_removed_vlan_packets) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_csum_unnecessary) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_csum_none) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_csum_complete) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_csum_complete_tail) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_csum_complete_tail_slow) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_csum_unnecessary_inner) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_xdp_drop) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_xdp_redirect) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_xdp_tx_xmit) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_xdp_tx_mpwqe) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_xdp_tx_inlnw) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_xdp_tx_nops) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_xdp_tx_full) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_xdp_tx_err) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_xdp_tx_cqe) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_csum_none) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_csum_partial) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_csum_partial_inner) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_queue_stopped) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_queue_dropped) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_xmit_more) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_recover) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_cqes) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_queue_wake) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_cqe_err) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_xdp_xmit) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_xdp_mpwqe) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_xdp_inlnw) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_xdp_nops) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_xdp_full) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_xdp_err) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_xdp_cqes) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_wqe_err) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_mpwqe_filler_cqes) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_mpwqe_filler_strides) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_oversize_pkts_sw_drop) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_buff_alloc_err) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_cqe_compress_blks) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_cqe_compress_pkts) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_cache_reuse) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_cache_full) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_cache_empty) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_cache_busy) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_cache_waive) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_congst_umr) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_arfs_err) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_recover) },
#ifdef CONFIG_MLX5_EN_TLS
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_tls_decrypted_packets) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_tls_decrypted_bytes) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_tls_resync_req_pkt) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_tls_resync_req_start) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_tls_resync_req_end) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_tls_resync_req_skip) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_tls_resync_res_ok) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_tls_resync_res_skip) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_tls_err) },
#endif
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, ch_events) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, ch_poll) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, ch_arm) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, ch_aff_change) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, ch_force_irq) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, ch_eq_rearm) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_xsk_packets) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_xsk_bytes) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_xsk_csum_complete) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_xsk_csum_unnecessary) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_xsk_csum_unnecessary_inner) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_xsk_csum_none) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_xsk_ecn_mark) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_xsk_removed_vlan_packets) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_xsk_xdp_drop) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_xsk_xdp_redirect) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_xsk_wqe_err) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_xsk_mpwqe_filler_cqes) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_xsk_mpwqe_filler_strides) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_xsk_oversize_pkts_sw_drop) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_xsk_buff_alloc_err) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_xsk_cqe_compress_blks) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_xsk_cqe_compress_pkts) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_xsk_congst_umr) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_xsk_arfs_err) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_xsk_xmit) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_xsk_mpwqe) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_xsk_inlnw) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_xsk_full) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_xsk_err) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_xsk_cqes) },
};
222
223#define NUM_SW_COUNTERS ARRAY_SIZE(sw_stats_desc)
224
/* Number of counters in the sw group: fixed at compile time. */
static MLX5E_DECLARE_STATS_GRP_OP_NUM_STATS(sw)
{
	return NUM_SW_COUNTERS;
}
229
230static MLX5E_DECLARE_STATS_GRP_OP_FILL_STRS(sw)
231{
232 int i;
233
234 for (i = 0; i < NUM_SW_COUNTERS; i++)
235 strcpy(data + (idx++) * ETH_GSTRING_LEN, sw_stats_desc[i].format);
236 return idx;
237}
238
239static MLX5E_DECLARE_STATS_GRP_OP_FILL_STATS(sw)
240{
241 int i;
242
243 for (i = 0; i < NUM_SW_COUNTERS; i++)
244 data[idx++] = MLX5E_READ_CTR64_CPU(&priv->stats.sw, sw_stats_desc, i);
245 return idx;
246}
247
248static void mlx5e_stats_grp_sw_update_stats_xdp_red(struct mlx5e_sw_stats *s,
249 struct mlx5e_xdpsq_stats *xdpsq_red_stats)
250{
251 s->tx_xdp_xmit += xdpsq_red_stats->xmit;
252 s->tx_xdp_mpwqe += xdpsq_red_stats->mpwqe;
253 s->tx_xdp_inlnw += xdpsq_red_stats->inlnw;
254 s->tx_xdp_nops += xdpsq_red_stats->nops;
255 s->tx_xdp_full += xdpsq_red_stats->full;
256 s->tx_xdp_err += xdpsq_red_stats->err;
257 s->tx_xdp_cqes += xdpsq_red_stats->cqes;
258}
259
260static void mlx5e_stats_grp_sw_update_stats_xdpsq(struct mlx5e_sw_stats *s,
261 struct mlx5e_xdpsq_stats *xdpsq_stats)
262{
263 s->rx_xdp_tx_xmit += xdpsq_stats->xmit;
264 s->rx_xdp_tx_mpwqe += xdpsq_stats->mpwqe;
265 s->rx_xdp_tx_inlnw += xdpsq_stats->inlnw;
266 s->rx_xdp_tx_nops += xdpsq_stats->nops;
267 s->rx_xdp_tx_full += xdpsq_stats->full;
268 s->rx_xdp_tx_err += xdpsq_stats->err;
269 s->rx_xdp_tx_cqe += xdpsq_stats->cqes;
270}
271
272static void mlx5e_stats_grp_sw_update_stats_xsksq(struct mlx5e_sw_stats *s,
273 struct mlx5e_xdpsq_stats *xsksq_stats)
274{
275 s->tx_xsk_xmit += xsksq_stats->xmit;
276 s->tx_xsk_mpwqe += xsksq_stats->mpwqe;
277 s->tx_xsk_inlnw += xsksq_stats->inlnw;
278 s->tx_xsk_full += xsksq_stats->full;
279 s->tx_xsk_err += xsksq_stats->err;
280 s->tx_xsk_cqes += xsksq_stats->cqes;
281}
282
/* Fold one AF_XDP receive queue's counters into the aggregated SW stats. */
static void mlx5e_stats_grp_sw_update_stats_xskrq(struct mlx5e_sw_stats *s,
						  struct mlx5e_rq_stats *xskrq_stats)
{
	s->rx_xsk_packets                += xskrq_stats->packets;
	s->rx_xsk_bytes                  += xskrq_stats->bytes;
	s->rx_xsk_csum_complete          += xskrq_stats->csum_complete;
	s->rx_xsk_csum_unnecessary       += xskrq_stats->csum_unnecessary;
	s->rx_xsk_csum_unnecessary_inner += xskrq_stats->csum_unnecessary_inner;
	s->rx_xsk_csum_none              += xskrq_stats->csum_none;
	s->rx_xsk_ecn_mark               += xskrq_stats->ecn_mark;
	s->rx_xsk_removed_vlan_packets   += xskrq_stats->removed_vlan_packets;
	s->rx_xsk_xdp_drop               += xskrq_stats->xdp_drop;
	s->rx_xsk_xdp_redirect           += xskrq_stats->xdp_redirect;
	s->rx_xsk_wqe_err                += xskrq_stats->wqe_err;
	s->rx_xsk_mpwqe_filler_cqes      += xskrq_stats->mpwqe_filler_cqes;
	s->rx_xsk_mpwqe_filler_strides   += xskrq_stats->mpwqe_filler_strides;
	s->rx_xsk_oversize_pkts_sw_drop  += xskrq_stats->oversize_pkts_sw_drop;
	s->rx_xsk_buff_alloc_err         += xskrq_stats->buff_alloc_err;
	s->rx_xsk_cqe_compress_blks      += xskrq_stats->cqe_compress_blks;
	s->rx_xsk_cqe_compress_pkts      += xskrq_stats->cqe_compress_pkts;
	s->rx_xsk_congst_umr             += xskrq_stats->congst_umr;
	s->rx_xsk_arfs_err               += xskrq_stats->arfs_err;
}
306
/* Fold one regular receive queue's counters into the aggregated SW stats. */
static void mlx5e_stats_grp_sw_update_stats_rq_stats(struct mlx5e_sw_stats *s,
						     struct mlx5e_rq_stats *rq_stats)
{
	s->rx_packets                 += rq_stats->packets;
	s->rx_bytes                   += rq_stats->bytes;
	s->rx_lro_packets             += rq_stats->lro_packets;
	s->rx_lro_bytes               += rq_stats->lro_bytes;
	s->rx_ecn_mark                += rq_stats->ecn_mark;
	s->rx_removed_vlan_packets    += rq_stats->removed_vlan_packets;
	s->rx_csum_none               += rq_stats->csum_none;
	s->rx_csum_complete           += rq_stats->csum_complete;
	s->rx_csum_complete_tail      += rq_stats->csum_complete_tail;
	s->rx_csum_complete_tail_slow += rq_stats->csum_complete_tail_slow;
	s->rx_csum_unnecessary        += rq_stats->csum_unnecessary;
	s->rx_csum_unnecessary_inner  += rq_stats->csum_unnecessary_inner;
	s->rx_xdp_drop                += rq_stats->xdp_drop;
	s->rx_xdp_redirect            += rq_stats->xdp_redirect;
	s->rx_wqe_err                 += rq_stats->wqe_err;
	s->rx_mpwqe_filler_cqes       += rq_stats->mpwqe_filler_cqes;
	s->rx_mpwqe_filler_strides    += rq_stats->mpwqe_filler_strides;
	s->rx_oversize_pkts_sw_drop   += rq_stats->oversize_pkts_sw_drop;
	s->rx_buff_alloc_err          += rq_stats->buff_alloc_err;
	s->rx_cqe_compress_blks       += rq_stats->cqe_compress_blks;
	s->rx_cqe_compress_pkts       += rq_stats->cqe_compress_pkts;
	s->rx_cache_reuse             += rq_stats->cache_reuse;
	s->rx_cache_full              += rq_stats->cache_full;
	s->rx_cache_empty             += rq_stats->cache_empty;
	s->rx_cache_busy              += rq_stats->cache_busy;
	s->rx_cache_waive             += rq_stats->cache_waive;
	s->rx_congst_umr              += rq_stats->congst_umr;
	s->rx_arfs_err                += rq_stats->arfs_err;
	s->rx_recover                 += rq_stats->recover;
#ifdef CONFIG_MLX5_EN_TLS
	/* kTLS RX offload counters, present only when TLS is compiled in. */
	s->rx_tls_decrypted_packets   += rq_stats->tls_decrypted_packets;
	s->rx_tls_decrypted_bytes     += rq_stats->tls_decrypted_bytes;
	s->rx_tls_resync_req_pkt      += rq_stats->tls_resync_req_pkt;
	s->rx_tls_resync_req_start    += rq_stats->tls_resync_req_start;
	s->rx_tls_resync_req_end      += rq_stats->tls_resync_req_end;
	s->rx_tls_resync_req_skip     += rq_stats->tls_resync_req_skip;
	s->rx_tls_resync_res_ok       += rq_stats->tls_resync_res_ok;
	s->rx_tls_resync_res_skip     += rq_stats->tls_resync_res_skip;
	s->rx_tls_err                 += rq_stats->tls_err;
#endif
}
351
352static void mlx5e_stats_grp_sw_update_stats_ch_stats(struct mlx5e_sw_stats *s,
353 struct mlx5e_ch_stats *ch_stats)
354{
355 s->ch_events += ch_stats->events;
356 s->ch_poll += ch_stats->poll;
357 s->ch_arm += ch_stats->arm;
358 s->ch_aff_change += ch_stats->aff_change;
359 s->ch_force_irq += ch_stats->force_irq;
360 s->ch_eq_rearm += ch_stats->eq_rearm;
361}
362
/* Fold one send queue's counters into the aggregated SW stats. */
static void mlx5e_stats_grp_sw_update_stats_sq(struct mlx5e_sw_stats *s,
					       struct mlx5e_sq_stats *sq_stats)
{
	s->tx_packets               += sq_stats->packets;
	s->tx_bytes                 += sq_stats->bytes;
	s->tx_tso_packets           += sq_stats->tso_packets;
	s->tx_tso_bytes             += sq_stats->tso_bytes;
	s->tx_tso_inner_packets     += sq_stats->tso_inner_packets;
	s->tx_tso_inner_bytes       += sq_stats->tso_inner_bytes;
	s->tx_added_vlan_packets    += sq_stats->added_vlan_packets;
	s->tx_nop                   += sq_stats->nop;
	s->tx_mpwqe_blks            += sq_stats->mpwqe_blks;
	s->tx_mpwqe_pkts            += sq_stats->mpwqe_pkts;
	s->tx_queue_stopped         += sq_stats->stopped;
	s->tx_queue_wake            += sq_stats->wake;
	s->tx_queue_dropped         += sq_stats->dropped;
	s->tx_cqe_err               += sq_stats->cqe_err;
	s->tx_recover               += sq_stats->recover;
	s->tx_xmit_more             += sq_stats->xmit_more;
	s->tx_csum_partial_inner    += sq_stats->csum_partial_inner;
	s->tx_csum_none             += sq_stats->csum_none;
	s->tx_csum_partial          += sq_stats->csum_partial;
#ifdef CONFIG_MLX5_EN_TLS
	/* kTLS TX offload counters, present only when TLS is compiled in. */
	s->tx_tls_encrypted_packets += sq_stats->tls_encrypted_packets;
	s->tx_tls_encrypted_bytes   += sq_stats->tls_encrypted_bytes;
	s->tx_tls_ooo               += sq_stats->tls_ooo;
	s->tx_tls_dump_bytes        += sq_stats->tls_dump_bytes;
	s->tx_tls_dump_packets      += sq_stats->tls_dump_packets;
	s->tx_tls_resync_bytes      += sq_stats->tls_resync_bytes;
	s->tx_tls_skip_no_sync_data += sq_stats->tls_skip_no_sync_data;
	s->tx_tls_drop_no_sync_data += sq_stats->tls_drop_no_sync_data;
	s->tx_tls_drop_bypass_req   += sq_stats->tls_drop_bypass_req;
#endif
	s->tx_cqes                  += sq_stats->cqes;
}
398
/* Fold the PTP port channel's stats into the aggregated SW stats.
 * No-op unless the PTP channel was ever opened (its stats persist after
 * the channel closes, so once opened they keep being accumulated).
 */
static void mlx5e_stats_grp_sw_update_stats_ptp(struct mlx5e_priv *priv,
						struct mlx5e_sw_stats *s)
{
	int i;

	if (!priv->port_ptp_opened)
		return;

	mlx5e_stats_grp_sw_update_stats_ch_stats(s, &priv->port_ptp_stats.ch);

	for (i = 0; i < priv->max_opened_tc; i++) {
		mlx5e_stats_grp_sw_update_stats_sq(s, &priv->port_ptp_stats.sq[i]);

		/* NOTE(review): barrier() presumably keeps the compiler from
		 * merging/reordering the per-TC reads of live counters —
		 * confirm against the writer side. */
		barrier();
	}
}
416
/* Fold the HTB QoS send queues' stats into the aggregated SW stats.
 * The acquire load of max_qos_sqs pairs with the writer's release store,
 * guaranteeing that qos_sq_stats holds at least max_qos_sqs valid
 * pointers when read below.
 */
static void mlx5e_stats_grp_sw_update_stats_qos(struct mlx5e_priv *priv,
						struct mlx5e_sw_stats *s)
{
	struct mlx5e_sq_stats **stats;
	u16 max_qos_sqs;
	int i;

	/* Read the count first (acquire), then the array pointer. */
	max_qos_sqs = smp_load_acquire(&priv->htb.max_qos_sqs);
	stats = READ_ONCE(priv->htb.qos_sq_stats);

	for (i = 0; i < max_qos_sqs; i++) {
		mlx5e_stats_grp_sw_update_stats_sq(s, READ_ONCE(stats[i]));

		/* NOTE(review): barrier() presumably prevents the compiler
		 * from coalescing reads of live counters — confirm intent. */
		barrier();
	}
}
435
/* Recompute the aggregated software stats from scratch by summing the
 * per-channel RQ/SQ/XDP/XSK/channel counters, plus the PTP and QoS
 * queues. Counters are written concurrently by the datapath, so the
 * result is a best-effort snapshot.
 */
static MLX5E_DECLARE_STATS_GRP_OP_UPDATE_STATS(sw)
{
	struct mlx5e_sw_stats *s = &priv->stats.sw;
	int i;

	memset(s, 0, sizeof(*s));

	for (i = 0; i < priv->max_nch; i++) {
		struct mlx5e_channel_stats *channel_stats =
			&priv->channel_stats[i];
		int j;

		mlx5e_stats_grp_sw_update_stats_rq_stats(s, &channel_stats->rq);
		mlx5e_stats_grp_sw_update_stats_xdpsq(s, &channel_stats->rq_xdpsq);
		mlx5e_stats_grp_sw_update_stats_ch_stats(s, &channel_stats->ch);
		/* xdpsq here is the XDP_REDIRECT queue of the channel. */
		mlx5e_stats_grp_sw_update_stats_xdp_red(s, &channel_stats->xdpsq);

		mlx5e_stats_grp_sw_update_stats_xskrq(s, &channel_stats->xskrq);
		mlx5e_stats_grp_sw_update_stats_xsksq(s, &channel_stats->xsksq);

		for (j = 0; j < priv->max_opened_tc; j++) {
			mlx5e_stats_grp_sw_update_stats_sq(s, &channel_stats->sq[j]);

			/* NOTE(review): barrier() presumably prevents the
			 * compiler from merging per-TC counter reads. */
			barrier();
		}
	}
	mlx5e_stats_grp_sw_update_stats_ptp(priv, s);
	mlx5e_stats_grp_sw_update_stats_qos(priv, s);
}
467
/* Q-counter stats attached to the regular RQs (valid when priv->q_counter). */
static const struct counter_desc q_stats_desc[] = {
	{ MLX5E_DECLARE_STAT(struct mlx5e_qcounter_stats, rx_out_of_buffer) },
};
471
/* Q-counter stats of the drop RQ (valid when priv->drop_rq_q_counter). */
static const struct counter_desc drop_rq_stats_desc[] = {
	{ MLX5E_DECLARE_STAT(struct mlx5e_qcounter_stats, rx_if_down_packets) },
};
475
476#define NUM_Q_COUNTERS ARRAY_SIZE(q_stats_desc)
477#define NUM_DROP_RQ_COUNTERS ARRAY_SIZE(drop_rq_stats_desc)
478
479static MLX5E_DECLARE_STATS_GRP_OP_NUM_STATS(qcnt)
480{
481 int num_stats = 0;
482
483 if (priv->q_counter)
484 num_stats += NUM_Q_COUNTERS;
485
486 if (priv->drop_rq_q_counter)
487 num_stats += NUM_DROP_RQ_COUNTERS;
488
489 return num_stats;
490}
491
492static MLX5E_DECLARE_STATS_GRP_OP_FILL_STRS(qcnt)
493{
494 int i;
495
496 for (i = 0; i < NUM_Q_COUNTERS && priv->q_counter; i++)
497 strcpy(data + (idx++) * ETH_GSTRING_LEN,
498 q_stats_desc[i].format);
499
500 for (i = 0; i < NUM_DROP_RQ_COUNTERS && priv->drop_rq_q_counter; i++)
501 strcpy(data + (idx++) * ETH_GSTRING_LEN,
502 drop_rq_stats_desc[i].format);
503
504 return idx;
505}
506
507static MLX5E_DECLARE_STATS_GRP_OP_FILL_STATS(qcnt)
508{
509 int i;
510
511 for (i = 0; i < NUM_Q_COUNTERS && priv->q_counter; i++)
512 data[idx++] = MLX5E_READ_CTR32_CPU(&priv->stats.qcnt,
513 q_stats_desc, i);
514 for (i = 0; i < NUM_DROP_RQ_COUNTERS && priv->drop_rq_q_counter; i++)
515 data[idx++] = MLX5E_READ_CTR32_CPU(&priv->stats.qcnt,
516 drop_rq_stats_desc, i);
517 return idx;
518}
519
/* Query both q-counters from firmware. Each counter set id is queried
 * separately with QUERY_Q_COUNTER; a failed command leaves the cached
 * value untouched.
 */
static MLX5E_DECLARE_STATS_GRP_OP_UPDATE_STATS(qcnt)
{
	struct mlx5e_qcounter_stats *qcnt = &priv->stats.qcnt;
	u32 out[MLX5_ST_SZ_DW(query_q_counter_out)] = {};
	u32 in[MLX5_ST_SZ_DW(query_q_counter_in)] = {};
	int ret;

	MLX5_SET(query_q_counter_in, in, opcode, MLX5_CMD_OP_QUERY_Q_COUNTER);

	if (priv->q_counter) {
		MLX5_SET(query_q_counter_in, in, counter_set_id,
			 priv->q_counter);
		ret = mlx5_cmd_exec_inout(priv->mdev, query_q_counter, in, out);
		if (!ret)
			qcnt->rx_out_of_buffer = MLX5_GET(query_q_counter_out,
							  out, out_of_buffer);
	}

	if (priv->drop_rq_q_counter) {
		MLX5_SET(query_q_counter_in, in, counter_set_id,
			 priv->drop_rq_q_counter);
		ret = mlx5_cmd_exec_inout(priv->mdev, query_q_counter, in, out);
		/* The drop RQ's out_of_buffer field reports packets that
		 * arrived while the interface was down. */
		if (!ret)
			qcnt->rx_if_down_packets = MLX5_GET(query_q_counter_out,
							    out, out_of_buffer);
	}
}
547
548#define VNIC_ENV_OFF(c) MLX5_BYTE_OFF(query_vnic_env_out, c)
/* VNIC-env counter: packets discarded by NIC receive steering. */
static const struct counter_desc vnic_env_stats_steer_desc[] = {
	{ "rx_steer_missed_packets",
		VNIC_ENV_OFF(vport_env.nic_receive_steering_discard) },
};
553
/* VNIC-env counter: internal RQ out-of-buffer events. */
static const struct counter_desc vnic_env_stats_dev_oob_desc[] = {
	{ "dev_internal_queue_oob",
		VNIC_ENV_OFF(vport_env.internal_rq_out_of_buffer) },
};
558
559#define NUM_VNIC_ENV_STEER_COUNTERS(dev) \
560 (MLX5_CAP_GEN(dev, nic_receive_steering_discard) ? \
561 ARRAY_SIZE(vnic_env_stats_steer_desc) : 0)
562#define NUM_VNIC_ENV_DEV_OOB_COUNTERS(dev) \
563 (MLX5_CAP_GEN(dev, vnic_env_int_rq_oob) ? \
564 ARRAY_SIZE(vnic_env_stats_dev_oob_desc) : 0)
565
566static MLX5E_DECLARE_STATS_GRP_OP_NUM_STATS(vnic_env)
567{
568 return NUM_VNIC_ENV_STEER_COUNTERS(priv->mdev) +
569 NUM_VNIC_ENV_DEV_OOB_COUNTERS(priv->mdev);
570}
571
572static MLX5E_DECLARE_STATS_GRP_OP_FILL_STRS(vnic_env)
573{
574 int i;
575
576 for (i = 0; i < NUM_VNIC_ENV_STEER_COUNTERS(priv->mdev); i++)
577 strcpy(data + (idx++) * ETH_GSTRING_LEN,
578 vnic_env_stats_steer_desc[i].format);
579
580 for (i = 0; i < NUM_VNIC_ENV_DEV_OOB_COUNTERS(priv->mdev); i++)
581 strcpy(data + (idx++) * ETH_GSTRING_LEN,
582 vnic_env_stats_dev_oob_desc[i].format);
583 return idx;
584}
585
586static MLX5E_DECLARE_STATS_GRP_OP_FILL_STATS(vnic_env)
587{
588 int i;
589
590 for (i = 0; i < NUM_VNIC_ENV_STEER_COUNTERS(priv->mdev); i++)
591 data[idx++] = MLX5E_READ_CTR64_BE(priv->stats.vnic.query_vnic_env_out,
592 vnic_env_stats_steer_desc, i);
593
594 for (i = 0; i < NUM_VNIC_ENV_DEV_OOB_COUNTERS(priv->mdev); i++)
595 data[idx++] = MLX5E_READ_CTR32_BE(priv->stats.vnic.query_vnic_env_out,
596 vnic_env_stats_dev_oob_desc, i);
597 return idx;
598}
599
/* Refresh the cached QUERY_VNIC_ENV output from firmware.
 * NOTE(review): the early return keys only on the receive-steering
 * capability, even though the group can also expose the internal-RQ OOB
 * counter (vnic_env_int_rq_oob) — verify devices never have the latter
 * without the former, otherwise the OOB value goes stale.
 */
static MLX5E_DECLARE_STATS_GRP_OP_UPDATE_STATS(vnic_env)
{
	u32 *out = (u32 *)priv->stats.vnic.query_vnic_env_out;
	u32 in[MLX5_ST_SZ_DW(query_vnic_env_in)] = {};
	struct mlx5_core_dev *mdev = priv->mdev;

	if (!MLX5_CAP_GEN(priv->mdev, nic_receive_steering_discard))
		return;

	MLX5_SET(query_vnic_env_in, in, opcode, MLX5_CMD_OP_QUERY_VNIC_ENV);
	mlx5_cmd_exec_inout(mdev, query_vnic_env, in, out);
}
612
613#define VPORT_COUNTER_OFF(c) MLX5_BYTE_OFF(query_vport_counter_out, c)
/* Descriptors for the vport counters (QUERY_VPORT_COUNTER output):
 * Ethernet and RDMA (IB) unicast/multicast/broadcast packet and octet
 * counts in both directions.
 */
static const struct counter_desc vport_stats_desc[] = {
	{ "rx_vport_unicast_packets",
		VPORT_COUNTER_OFF(received_eth_unicast.packets) },
	{ "rx_vport_unicast_bytes",
		VPORT_COUNTER_OFF(received_eth_unicast.octets) },
	{ "tx_vport_unicast_packets",
		VPORT_COUNTER_OFF(transmitted_eth_unicast.packets) },
	{ "tx_vport_unicast_bytes",
		VPORT_COUNTER_OFF(transmitted_eth_unicast.octets) },
	{ "rx_vport_multicast_packets",
		VPORT_COUNTER_OFF(received_eth_multicast.packets) },
	{ "rx_vport_multicast_bytes",
		VPORT_COUNTER_OFF(received_eth_multicast.octets) },
	{ "tx_vport_multicast_packets",
		VPORT_COUNTER_OFF(transmitted_eth_multicast.packets) },
	{ "tx_vport_multicast_bytes",
		VPORT_COUNTER_OFF(transmitted_eth_multicast.octets) },
	{ "rx_vport_broadcast_packets",
		VPORT_COUNTER_OFF(received_eth_broadcast.packets) },
	{ "rx_vport_broadcast_bytes",
		VPORT_COUNTER_OFF(received_eth_broadcast.octets) },
	{ "tx_vport_broadcast_packets",
		VPORT_COUNTER_OFF(transmitted_eth_broadcast.packets) },
	{ "tx_vport_broadcast_bytes",
		VPORT_COUNTER_OFF(transmitted_eth_broadcast.octets) },
	{ "rx_vport_rdma_unicast_packets",
		VPORT_COUNTER_OFF(received_ib_unicast.packets) },
	{ "rx_vport_rdma_unicast_bytes",
		VPORT_COUNTER_OFF(received_ib_unicast.octets) },
	{ "tx_vport_rdma_unicast_packets",
		VPORT_COUNTER_OFF(transmitted_ib_unicast.packets) },
	{ "tx_vport_rdma_unicast_bytes",
		VPORT_COUNTER_OFF(transmitted_ib_unicast.octets) },
	{ "rx_vport_rdma_multicast_packets",
		VPORT_COUNTER_OFF(received_ib_multicast.packets) },
	{ "rx_vport_rdma_multicast_bytes",
		VPORT_COUNTER_OFF(received_ib_multicast.octets) },
	{ "tx_vport_rdma_multicast_packets",
		VPORT_COUNTER_OFF(transmitted_ib_multicast.packets) },
	{ "tx_vport_rdma_multicast_bytes",
		VPORT_COUNTER_OFF(transmitted_ib_multicast.octets) },
};
656
657#define NUM_VPORT_COUNTERS ARRAY_SIZE(vport_stats_desc)
658
/* Number of vport counters: fixed at compile time. */
static MLX5E_DECLARE_STATS_GRP_OP_NUM_STATS(vport)
{
	return NUM_VPORT_COUNTERS;
}
663
664static MLX5E_DECLARE_STATS_GRP_OP_FILL_STRS(vport)
665{
666 int i;
667
668 for (i = 0; i < NUM_VPORT_COUNTERS; i++)
669 strcpy(data + (idx++) * ETH_GSTRING_LEN, vport_stats_desc[i].format);
670 return idx;
671}
672
673static MLX5E_DECLARE_STATS_GRP_OP_FILL_STATS(vport)
674{
675 int i;
676
677 for (i = 0; i < NUM_VPORT_COUNTERS; i++)
678 data[idx++] = MLX5E_READ_CTR64_BE(priv->stats.vport.query_vport_out,
679 vport_stats_desc, i);
680 return idx;
681}
682
/* Refresh the cached QUERY_VPORT_COUNTER output from firmware. */
static MLX5E_DECLARE_STATS_GRP_OP_UPDATE_STATS(vport)
{
	u32 *out = (u32 *)priv->stats.vport.query_vport_out;
	u32 in[MLX5_ST_SZ_DW(query_vport_counter_in)] = {};
	struct mlx5_core_dev *mdev = priv->mdev;

	MLX5_SET(query_vport_counter_in, in, opcode, MLX5_CMD_OP_QUERY_VPORT_COUNTER);
	mlx5_cmd_exec_inout(mdev, query_vport_counter, in, out);
}
692
693#define PPORT_802_3_OFF(c) \
694 MLX5_BYTE_OFF(ppcnt_reg, \
695 counter_set.eth_802_3_cntrs_grp_data_layout.c##_high)
/* Descriptors for the IEEE 802.3 counter group of the PPCNT register. */
static const struct counter_desc pport_802_3_stats_desc[] = {
	{ "tx_packets_phy", PPORT_802_3_OFF(a_frames_transmitted_ok) },
	{ "rx_packets_phy", PPORT_802_3_OFF(a_frames_received_ok) },
	{ "rx_crc_errors_phy", PPORT_802_3_OFF(a_frame_check_sequence_errors) },
	{ "tx_bytes_phy", PPORT_802_3_OFF(a_octets_transmitted_ok) },
	{ "rx_bytes_phy", PPORT_802_3_OFF(a_octets_received_ok) },
	{ "tx_multicast_phy", PPORT_802_3_OFF(a_multicast_frames_xmitted_ok) },
	{ "tx_broadcast_phy", PPORT_802_3_OFF(a_broadcast_frames_xmitted_ok) },
	{ "rx_multicast_phy", PPORT_802_3_OFF(a_multicast_frames_received_ok) },
	{ "rx_broadcast_phy", PPORT_802_3_OFF(a_broadcast_frames_received_ok) },
	{ "rx_in_range_len_errors_phy", PPORT_802_3_OFF(a_in_range_length_errors) },
	{ "rx_out_of_range_len_phy", PPORT_802_3_OFF(a_out_of_range_length_field) },
	{ "rx_oversize_pkts_phy", PPORT_802_3_OFF(a_frame_too_long_errors) },
	{ "rx_symbol_err_phy", PPORT_802_3_OFF(a_symbol_error_during_carrier) },
	{ "tx_mac_control_phy", PPORT_802_3_OFF(a_mac_control_frames_transmitted) },
	{ "rx_mac_control_phy", PPORT_802_3_OFF(a_mac_control_frames_received) },
	{ "rx_unsupported_op_phy", PPORT_802_3_OFF(a_unsupported_opcodes_received) },
	{ "rx_pause_ctrl_phy", PPORT_802_3_OFF(a_pause_mac_ctrl_frames_received) },
	{ "tx_pause_ctrl_phy", PPORT_802_3_OFF(a_pause_mac_ctrl_frames_transmitted) },
};
716
717#define NUM_PPORT_802_3_COUNTERS ARRAY_SIZE(pport_802_3_stats_desc)
718
/* Number of IEEE 802.3 counters: fixed at compile time. */
static MLX5E_DECLARE_STATS_GRP_OP_NUM_STATS(802_3)
{
	return NUM_PPORT_802_3_COUNTERS;
}
723
724static MLX5E_DECLARE_STATS_GRP_OP_FILL_STRS(802_3)
725{
726 int i;
727
728 for (i = 0; i < NUM_PPORT_802_3_COUNTERS; i++)
729 strcpy(data + (idx++) * ETH_GSTRING_LEN, pport_802_3_stats_desc[i].format);
730 return idx;
731}
732
733static MLX5E_DECLARE_STATS_GRP_OP_FILL_STATS(802_3)
734{
735 int i;
736
737 for (i = 0; i < NUM_PPORT_802_3_COUNTERS; i++)
738 data[idx++] = MLX5E_READ_CTR64_BE(&priv->stats.pport.IEEE_802_3_counters,
739 pport_802_3_stats_desc, i);
740 return idx;
741}
742
743#define MLX5_BASIC_PPCNT_SUPPORTED(mdev) \
744 (MLX5_CAP_GEN(mdev, pcam_reg) ? MLX5_CAP_PCAM_REG(mdev, ppcnt) : 1)
745
/* Refresh the IEEE 802.3 counter group from the PPCNT register.
 * Skipped when the device does not support the basic PPCNT groups.
 */
static MLX5E_DECLARE_STATS_GRP_OP_UPDATE_STATS(802_3)
{
	struct mlx5e_pport_stats *pstats = &priv->stats.pport;
	struct mlx5_core_dev *mdev = priv->mdev;
	u32 in[MLX5_ST_SZ_DW(ppcnt_reg)] = {0};
	int sz = MLX5_ST_SZ_BYTES(ppcnt_reg);
	void *out;

	if (!MLX5_BASIC_PPCNT_SUPPORTED(mdev))
		return;

	MLX5_SET(ppcnt_reg, in, local_port, 1);
	out = pstats->IEEE_802_3_counters;
	MLX5_SET(ppcnt_reg, in, grp, MLX5_IEEE_802_3_COUNTERS_GROUP);
	mlx5_core_access_reg(mdev, in, sz, out, sz, MLX5_REG_PPCNT, 0, 0);
}
762
763#define MLX5E_READ_CTR64_BE_F(ptr, c) \
764 be64_to_cpu(*(__be64 *)((char *)ptr + \
765 MLX5_BYTE_OFF(ppcnt_reg, \
766 counter_set.eth_802_3_cntrs_grp_data_layout.c##_high)))
767
/* ethtool --show-pause statistics callback: query the IEEE 802.3 PPCNT
 * group directly (on-stack buffer, does not touch the cached copy) and
 * report the pause MAC control frame counters. Leaves @pause_stats
 * untouched when basic PPCNT groups are unsupported.
 */
void mlx5e_stats_pause_get(struct mlx5e_priv *priv,
			   struct ethtool_pause_stats *pause_stats)
{
	u32 ppcnt_ieee_802_3[MLX5_ST_SZ_DW(ppcnt_reg)];
	struct mlx5_core_dev *mdev = priv->mdev;
	u32 in[MLX5_ST_SZ_DW(ppcnt_reg)] = {};
	int sz = MLX5_ST_SZ_BYTES(ppcnt_reg);

	if (!MLX5_BASIC_PPCNT_SUPPORTED(mdev))
		return;

	MLX5_SET(ppcnt_reg, in, local_port, 1);
	MLX5_SET(ppcnt_reg, in, grp, MLX5_IEEE_802_3_COUNTERS_GROUP);
	mlx5_core_access_reg(mdev, in, sz, ppcnt_ieee_802_3,
			     sz, MLX5_REG_PPCNT, 0, 0);

	pause_stats->tx_pause_frames =
		MLX5E_READ_CTR64_BE_F(ppcnt_ieee_802_3,
				      a_pause_mac_ctrl_frames_transmitted);
	pause_stats->rx_pause_frames =
		MLX5E_READ_CTR64_BE_F(ppcnt_ieee_802_3,
				      a_pause_mac_ctrl_frames_received);
}
791
792#define PPORT_2863_OFF(c) \
793 MLX5_BYTE_OFF(ppcnt_reg, \
794 counter_set.eth_2863_cntrs_grp_data_layout.c##_high)
/* Descriptors for the RFC 2863 counter group of the PPCNT register. */
static const struct counter_desc pport_2863_stats_desc[] = {
	{ "rx_discards_phy", PPORT_2863_OFF(if_in_discards) },
	{ "tx_discards_phy", PPORT_2863_OFF(if_out_discards) },
	{ "tx_errors_phy", PPORT_2863_OFF(if_out_errors) },
};
800
801#define NUM_PPORT_2863_COUNTERS ARRAY_SIZE(pport_2863_stats_desc)
802
/* Number of RFC 2863 counters: fixed at compile time. */
static MLX5E_DECLARE_STATS_GRP_OP_NUM_STATS(2863)
{
	return NUM_PPORT_2863_COUNTERS;
}
807
808static MLX5E_DECLARE_STATS_GRP_OP_FILL_STRS(2863)
809{
810 int i;
811
812 for (i = 0; i < NUM_PPORT_2863_COUNTERS; i++)
813 strcpy(data + (idx++) * ETH_GSTRING_LEN, pport_2863_stats_desc[i].format);
814 return idx;
815}
816
817static MLX5E_DECLARE_STATS_GRP_OP_FILL_STATS(2863)
818{
819 int i;
820
821 for (i = 0; i < NUM_PPORT_2863_COUNTERS; i++)
822 data[idx++] = MLX5E_READ_CTR64_BE(&priv->stats.pport.RFC_2863_counters,
823 pport_2863_stats_desc, i);
824 return idx;
825}
826
/* Refresh the RFC 2863 counter group from the PPCNT register.
 * NOTE(review): unlike the 802.3 group, there is no
 * MLX5_BASIC_PPCNT_SUPPORTED() gate here — presumably this group is
 * always available; confirm against the PCAM capability bits.
 */
static MLX5E_DECLARE_STATS_GRP_OP_UPDATE_STATS(2863)
{
	struct mlx5e_pport_stats *pstats = &priv->stats.pport;
	struct mlx5_core_dev *mdev = priv->mdev;
	u32 in[MLX5_ST_SZ_DW(ppcnt_reg)] = {0};
	int sz = MLX5_ST_SZ_BYTES(ppcnt_reg);
	void *out;

	MLX5_SET(ppcnt_reg, in, local_port, 1);
	out = pstats->RFC_2863_counters;
	MLX5_SET(ppcnt_reg, in, grp, MLX5_RFC_2863_COUNTERS_GROUP);
	mlx5_core_access_reg(mdev, in, sz, out, sz, MLX5_REG_PPCNT, 0, 0);
}
840
/* Byte offset of the high 32 bits of 64-bit counter @c inside the PPCNT
 * RFC 2819 counter-set layout; consumed by MLX5E_READ_CTR64_BE.
 */
#define PPORT_2819_OFF(c) \
	MLX5_BYTE_OFF(ppcnt_reg, \
		      counter_set.eth_2819_cntrs_grp_data_layout.c##_high)
/* ethtool string -> PPCNT offset map for the RFC 2819 (RMON) group. */
static const struct counter_desc pport_2819_stats_desc[] = {
	{ "rx_undersize_pkts_phy", PPORT_2819_OFF(ether_stats_undersize_pkts) },
	{ "rx_fragments_phy", PPORT_2819_OFF(ether_stats_fragments) },
	{ "rx_jabbers_phy", PPORT_2819_OFF(ether_stats_jabbers) },
	{ "rx_64_bytes_phy", PPORT_2819_OFF(ether_stats_pkts64octets) },
	{ "rx_65_to_127_bytes_phy", PPORT_2819_OFF(ether_stats_pkts65to127octets) },
	{ "rx_128_to_255_bytes_phy", PPORT_2819_OFF(ether_stats_pkts128to255octets) },
	{ "rx_256_to_511_bytes_phy", PPORT_2819_OFF(ether_stats_pkts256to511octets) },
	{ "rx_512_to_1023_bytes_phy", PPORT_2819_OFF(ether_stats_pkts512to1023octets) },
	{ "rx_1024_to_1518_bytes_phy", PPORT_2819_OFF(ether_stats_pkts1024to1518octets) },
	{ "rx_1519_to_2047_bytes_phy", PPORT_2819_OFF(ether_stats_pkts1519to2047octets) },
	{ "rx_2048_to_4095_bytes_phy", PPORT_2819_OFF(ether_stats_pkts2048to4095octets) },
	{ "rx_4096_to_8191_bytes_phy", PPORT_2819_OFF(ether_stats_pkts4096to8191octets) },
	{ "rx_8192_to_10239_bytes_phy", PPORT_2819_OFF(ether_stats_pkts8192to10239octets) },
};

#define NUM_PPORT_2819_COUNTERS ARRAY_SIZE(pport_2819_stats_desc)
861
862static MLX5E_DECLARE_STATS_GRP_OP_NUM_STATS(2819)
863{
864 return NUM_PPORT_2819_COUNTERS;
865}
866
867static MLX5E_DECLARE_STATS_GRP_OP_FILL_STRS(2819)
868{
869 int i;
870
871 for (i = 0; i < NUM_PPORT_2819_COUNTERS; i++)
872 strcpy(data + (idx++) * ETH_GSTRING_LEN, pport_2819_stats_desc[i].format);
873 return idx;
874}
875
876static MLX5E_DECLARE_STATS_GRP_OP_FILL_STATS(2819)
877{
878 int i;
879
880 for (i = 0; i < NUM_PPORT_2819_COUNTERS; i++)
881 data[idx++] = MLX5E_READ_CTR64_BE(&priv->stats.pport.RFC_2819_counters,
882 pport_2819_stats_desc, i);
883 return idx;
884}
885
/* Snapshot the RFC 2819 (RMON) port counters from firmware into
 * pstats->RFC_2819_counters via one PPCNT register query. Skipped when
 * the device does not support the basic PPCNT groups.
 */
static MLX5E_DECLARE_STATS_GRP_OP_UPDATE_STATS(2819)
{
	struct mlx5e_pport_stats *pstats = &priv->stats.pport;
	struct mlx5_core_dev *mdev = priv->mdev;
	u32 in[MLX5_ST_SZ_DW(ppcnt_reg)] = {0};
	int sz = MLX5_ST_SZ_BYTES(ppcnt_reg);
	void *out;

	if (!MLX5_BASIC_PPCNT_SUPPORTED(mdev))
		return;

	MLX5_SET(ppcnt_reg, in, local_port, 1);
	out = pstats->RFC_2819_counters;
	MLX5_SET(ppcnt_reg, in, grp, MLX5_RFC_2819_COUNTERS_GROUP);
	mlx5_core_access_reg(mdev, in, sz, out, sz, MLX5_REG_PPCNT, 0, 0);
}
902
/* Byte offset of the high 32 bits of 64-bit counter @c inside the PPCNT
 * physical-layer statistical counter-set layout.
 */
#define PPORT_PHY_STATISTICAL_OFF(c) \
	MLX5_BYTE_OFF(ppcnt_reg, \
		      counter_set.phys_layer_statistical_cntrs.c##_high)
/* PHY statistical counters, exposed when ppcnt_statistical_group is set. */
static const struct counter_desc pport_phy_statistical_stats_desc[] = {
	{ "rx_pcs_symbol_err_phy", PPORT_PHY_STATISTICAL_OFF(phy_symbol_errors) },
	{ "rx_corrected_bits_phy", PPORT_PHY_STATISTICAL_OFF(phy_corrected_bits) },
};

/* Per-lane corrected-bits counters, gated on per_lane_error_counters. */
static const struct counter_desc
pport_phy_statistical_err_lanes_stats_desc[] = {
	{ "rx_err_lane_0_phy", PPORT_PHY_STATISTICAL_OFF(phy_corrected_bits_lane0) },
	{ "rx_err_lane_1_phy", PPORT_PHY_STATISTICAL_OFF(phy_corrected_bits_lane1) },
	{ "rx_err_lane_2_phy", PPORT_PHY_STATISTICAL_OFF(phy_corrected_bits_lane2) },
	{ "rx_err_lane_3_phy", PPORT_PHY_STATISTICAL_OFF(phy_corrected_bits_lane3) },
};

#define NUM_PPORT_PHY_STATISTICAL_COUNTERS \
	ARRAY_SIZE(pport_phy_statistical_stats_desc)
#define NUM_PPORT_PHY_STATISTICAL_PER_LANE_COUNTERS \
	ARRAY_SIZE(pport_phy_statistical_err_lanes_stats_desc)
923
924static MLX5E_DECLARE_STATS_GRP_OP_NUM_STATS(phy)
925{
926 struct mlx5_core_dev *mdev = priv->mdev;
927 int num_stats;
928
929
930 num_stats = 1;
931
932 num_stats += MLX5_CAP_PCAM_FEATURE(mdev, ppcnt_statistical_group) ?
933 NUM_PPORT_PHY_STATISTICAL_COUNTERS : 0;
934
935 num_stats += MLX5_CAP_PCAM_FEATURE(mdev, per_lane_error_counters) ?
936 NUM_PPORT_PHY_STATISTICAL_PER_LANE_COUNTERS : 0;
937
938 return num_stats;
939}
940
941static MLX5E_DECLARE_STATS_GRP_OP_FILL_STRS(phy)
942{
943 struct mlx5_core_dev *mdev = priv->mdev;
944 int i;
945
946 strcpy(data + (idx++) * ETH_GSTRING_LEN, "link_down_events_phy");
947
948 if (!MLX5_CAP_PCAM_FEATURE(mdev, ppcnt_statistical_group))
949 return idx;
950
951 for (i = 0; i < NUM_PPORT_PHY_STATISTICAL_COUNTERS; i++)
952 strcpy(data + (idx++) * ETH_GSTRING_LEN,
953 pport_phy_statistical_stats_desc[i].format);
954
955 if (MLX5_CAP_PCAM_FEATURE(mdev, per_lane_error_counters))
956 for (i = 0; i < NUM_PPORT_PHY_STATISTICAL_PER_LANE_COUNTERS; i++)
957 strcpy(data + (idx++) * ETH_GSTRING_LEN,
958 pport_phy_statistical_err_lanes_stats_desc[i].format);
959
960 return idx;
961}
962
963static MLX5E_DECLARE_STATS_GRP_OP_FILL_STATS(phy)
964{
965 struct mlx5_core_dev *mdev = priv->mdev;
966 int i;
967
968
969 data[idx++] = MLX5_GET(ppcnt_reg, priv->stats.pport.phy_counters,
970 counter_set.phys_layer_cntrs.link_down_events);
971
972 if (!MLX5_CAP_PCAM_FEATURE(mdev, ppcnt_statistical_group))
973 return idx;
974
975 for (i = 0; i < NUM_PPORT_PHY_STATISTICAL_COUNTERS; i++)
976 data[idx++] =
977 MLX5E_READ_CTR64_BE(&priv->stats.pport.phy_statistical_counters,
978 pport_phy_statistical_stats_desc, i);
979
980 if (MLX5_CAP_PCAM_FEATURE(mdev, per_lane_error_counters))
981 for (i = 0; i < NUM_PPORT_PHY_STATISTICAL_PER_LANE_COUNTERS; i++)
982 data[idx++] =
983 MLX5E_READ_CTR64_BE(&priv->stats.pport.phy_statistical_counters,
984 pport_phy_statistical_err_lanes_stats_desc,
985 i);
986 return idx;
987}
988
/* Snapshot the physical-layer counter group, and — when the statistical
 * group capability is present — the physical-layer statistical group,
 * via two PPCNT register queries.
 */
static MLX5E_DECLARE_STATS_GRP_OP_UPDATE_STATS(phy)
{
	struct mlx5e_pport_stats *pstats = &priv->stats.pport;
	struct mlx5_core_dev *mdev = priv->mdev;
	u32 in[MLX5_ST_SZ_DW(ppcnt_reg)] = {0};
	int sz = MLX5_ST_SZ_BYTES(ppcnt_reg);
	void *out;

	MLX5_SET(ppcnt_reg, in, local_port, 1);
	out = pstats->phy_counters;
	MLX5_SET(ppcnt_reg, in, grp, MLX5_PHYSICAL_LAYER_COUNTERS_GROUP);
	mlx5_core_access_reg(mdev, in, sz, out, sz, MLX5_REG_PPCNT, 0, 0);

	if (!MLX5_CAP_PCAM_FEATURE(mdev, ppcnt_statistical_group))
		return;

	/* Reuse @in: only the group field changes for the second query. */
	out = pstats->phy_statistical_counters;
	MLX5_SET(ppcnt_reg, in, grp, MLX5_PHYSICAL_LAYER_STATISTICAL_GROUP);
	mlx5_core_access_reg(mdev, in, sz, out, sz, MLX5_REG_PPCNT, 0, 0);
}
1009
/* Byte offset of the high 32 bits of 64-bit counter @c inside the PPCNT
 * extended Ethernet counter-set layout.
 */
#define PPORT_ETH_EXT_OFF(c) \
	MLX5_BYTE_OFF(ppcnt_reg, \
		      counter_set.eth_extended_cntrs_grp_data_layout.c##_high)
/* Extended Ethernet counters, gated on rx_buffer_fullness_counters. */
static const struct counter_desc pport_eth_ext_stats_desc[] = {
	{ "rx_buffer_passed_thres_phy", PPORT_ETH_EXT_OFF(rx_buffer_almost_full) },
};

#define NUM_PPORT_ETH_EXT_COUNTERS ARRAY_SIZE(pport_eth_ext_stats_desc)
1018
1019static MLX5E_DECLARE_STATS_GRP_OP_NUM_STATS(eth_ext)
1020{
1021 if (MLX5_CAP_PCAM_FEATURE((priv)->mdev, rx_buffer_fullness_counters))
1022 return NUM_PPORT_ETH_EXT_COUNTERS;
1023
1024 return 0;
1025}
1026
1027static MLX5E_DECLARE_STATS_GRP_OP_FILL_STRS(eth_ext)
1028{
1029 int i;
1030
1031 if (MLX5_CAP_PCAM_FEATURE((priv)->mdev, rx_buffer_fullness_counters))
1032 for (i = 0; i < NUM_PPORT_ETH_EXT_COUNTERS; i++)
1033 strcpy(data + (idx++) * ETH_GSTRING_LEN,
1034 pport_eth_ext_stats_desc[i].format);
1035 return idx;
1036}
1037
1038static MLX5E_DECLARE_STATS_GRP_OP_FILL_STATS(eth_ext)
1039{
1040 int i;
1041
1042 if (MLX5_CAP_PCAM_FEATURE((priv)->mdev, rx_buffer_fullness_counters))
1043 for (i = 0; i < NUM_PPORT_ETH_EXT_COUNTERS; i++)
1044 data[idx++] =
1045 MLX5E_READ_CTR64_BE(&priv->stats.pport.eth_ext_counters,
1046 pport_eth_ext_stats_desc, i);
1047 return idx;
1048}
1049
/* Snapshot the extended Ethernet counter group into
 * pstats->eth_ext_counters; no-op without the capability.
 */
static MLX5E_DECLARE_STATS_GRP_OP_UPDATE_STATS(eth_ext)
{
	struct mlx5e_pport_stats *pstats = &priv->stats.pport;
	struct mlx5_core_dev *mdev = priv->mdev;
	u32 in[MLX5_ST_SZ_DW(ppcnt_reg)] = {0};
	int sz = MLX5_ST_SZ_BYTES(ppcnt_reg);
	void *out;

	if (!MLX5_CAP_PCAM_FEATURE(mdev, rx_buffer_fullness_counters))
		return;

	MLX5_SET(ppcnt_reg, in, local_port, 1);
	out = pstats->eth_ext_counters;
	MLX5_SET(ppcnt_reg, in, grp, MLX5_ETHERNET_EXTENDED_COUNTERS_GROUP);
	mlx5_core_access_reg(mdev, in, sz, out, sz, MLX5_REG_PPCNT, 0, 0);
}
1066
/* Byte offset of 32-bit counter @c inside the MPCNT PCIe performance
 * counter-set layout (read with MLX5E_READ_CTR32_BE).
 */
#define PCIE_PERF_OFF(c) \
	MLX5_BYTE_OFF(mpcnt_reg, counter_set.pcie_perf_cntrs_grp_data_layout.c)
static const struct counter_desc pcie_perf_stats_desc[] = {
	{ "rx_pci_signal_integrity", PCIE_PERF_OFF(rx_errors) },
	{ "tx_pci_signal_integrity", PCIE_PERF_OFF(tx_errors) },
};

/* 64-bit variant: offset of the high 32 bits of counter @c. */
#define PCIE_PERF_OFF64(c) \
	MLX5_BYTE_OFF(mpcnt_reg, counter_set.pcie_perf_cntrs_grp_data_layout.c##_high)
static const struct counter_desc pcie_perf_stats_desc64[] = {
	{ "outbound_pci_buffer_overflow", PCIE_PERF_OFF64(tx_overflow_buffer_pkt) },
};

/* PCIe outbound-stall counters, gated on pcie_outbound_stalled. */
static const struct counter_desc pcie_perf_stall_stats_desc[] = {
	{ "outbound_pci_stalled_rd", PCIE_PERF_OFF(outbound_stalled_reads) },
	{ "outbound_pci_stalled_wr", PCIE_PERF_OFF(outbound_stalled_writes) },
	{ "outbound_pci_stalled_rd_events", PCIE_PERF_OFF(outbound_stalled_reads_events) },
	{ "outbound_pci_stalled_wr_events", PCIE_PERF_OFF(outbound_stalled_writes_events) },
};

#define NUM_PCIE_PERF_COUNTERS ARRAY_SIZE(pcie_perf_stats_desc)
#define NUM_PCIE_PERF_COUNTERS64 ARRAY_SIZE(pcie_perf_stats_desc64)
#define NUM_PCIE_PERF_STALL_COUNTERS ARRAY_SIZE(pcie_perf_stall_stats_desc)
1090
1091static MLX5E_DECLARE_STATS_GRP_OP_NUM_STATS(pcie)
1092{
1093 int num_stats = 0;
1094
1095 if (MLX5_CAP_MCAM_FEATURE((priv)->mdev, pcie_performance_group))
1096 num_stats += NUM_PCIE_PERF_COUNTERS;
1097
1098 if (MLX5_CAP_MCAM_FEATURE((priv)->mdev, tx_overflow_buffer_pkt))
1099 num_stats += NUM_PCIE_PERF_COUNTERS64;
1100
1101 if (MLX5_CAP_MCAM_FEATURE((priv)->mdev, pcie_outbound_stalled))
1102 num_stats += NUM_PCIE_PERF_STALL_COUNTERS;
1103
1104 return num_stats;
1105}
1106
1107static MLX5E_DECLARE_STATS_GRP_OP_FILL_STRS(pcie)
1108{
1109 int i;
1110
1111 if (MLX5_CAP_MCAM_FEATURE((priv)->mdev, pcie_performance_group))
1112 for (i = 0; i < NUM_PCIE_PERF_COUNTERS; i++)
1113 strcpy(data + (idx++) * ETH_GSTRING_LEN,
1114 pcie_perf_stats_desc[i].format);
1115
1116 if (MLX5_CAP_MCAM_FEATURE((priv)->mdev, tx_overflow_buffer_pkt))
1117 for (i = 0; i < NUM_PCIE_PERF_COUNTERS64; i++)
1118 strcpy(data + (idx++) * ETH_GSTRING_LEN,
1119 pcie_perf_stats_desc64[i].format);
1120
1121 if (MLX5_CAP_MCAM_FEATURE((priv)->mdev, pcie_outbound_stalled))
1122 for (i = 0; i < NUM_PCIE_PERF_STALL_COUNTERS; i++)
1123 strcpy(data + (idx++) * ETH_GSTRING_LEN,
1124 pcie_perf_stall_stats_desc[i].format);
1125 return idx;
1126}
1127
1128static MLX5E_DECLARE_STATS_GRP_OP_FILL_STATS(pcie)
1129{
1130 int i;
1131
1132 if (MLX5_CAP_MCAM_FEATURE((priv)->mdev, pcie_performance_group))
1133 for (i = 0; i < NUM_PCIE_PERF_COUNTERS; i++)
1134 data[idx++] =
1135 MLX5E_READ_CTR32_BE(&priv->stats.pcie.pcie_perf_counters,
1136 pcie_perf_stats_desc, i);
1137
1138 if (MLX5_CAP_MCAM_FEATURE((priv)->mdev, tx_overflow_buffer_pkt))
1139 for (i = 0; i < NUM_PCIE_PERF_COUNTERS64; i++)
1140 data[idx++] =
1141 MLX5E_READ_CTR64_BE(&priv->stats.pcie.pcie_perf_counters,
1142 pcie_perf_stats_desc64, i);
1143
1144 if (MLX5_CAP_MCAM_FEATURE((priv)->mdev, pcie_outbound_stalled))
1145 for (i = 0; i < NUM_PCIE_PERF_STALL_COUNTERS; i++)
1146 data[idx++] =
1147 MLX5E_READ_CTR32_BE(&priv->stats.pcie.pcie_perf_counters,
1148 pcie_perf_stall_stats_desc, i);
1149 return idx;
1150}
1151
/* Snapshot the PCIe performance counter group from firmware via the
 * MPCNT register; no-op without the pcie_performance_group capability.
 */
static MLX5E_DECLARE_STATS_GRP_OP_UPDATE_STATS(pcie)
{
	struct mlx5e_pcie_stats *pcie_stats = &priv->stats.pcie;
	struct mlx5_core_dev *mdev = priv->mdev;
	u32 in[MLX5_ST_SZ_DW(mpcnt_reg)] = {0};
	int sz = MLX5_ST_SZ_BYTES(mpcnt_reg);
	void *out;

	if (!MLX5_CAP_MCAM_FEATURE(mdev, pcie_performance_group))
		return;

	out = pcie_stats->pcie_perf_counters;
	MLX5_SET(mpcnt_reg, in, grp, MLX5_PCIE_PERFORMANCE_COUNTERS_GROUP);
	mlx5_core_access_reg(mdev, in, sz, out, sz, MLX5_REG_MPCNT, 0, 0);
}
1167
/* Byte offset of the high 32 bits of 64-bit counter @c inside the PPCNT
 * per-TC priority counter-set layout.
 */
#define PPORT_PER_TC_PRIO_OFF(c) \
	MLX5_BYTE_OFF(ppcnt_reg, \
		      counter_set.eth_per_tc_prio_grp_data_layout.c##_high)

/* Format strings carry a %d that is filled with the priority number. */
static const struct counter_desc pport_per_tc_prio_stats_desc[] = {
	{ "rx_prio%d_buf_discard", PPORT_PER_TC_PRIO_OFF(no_buffer_discard_uc) },
};

#define NUM_PPORT_PER_TC_PRIO_COUNTERS ARRAY_SIZE(pport_per_tc_prio_stats_desc)

/* Byte offset of the high 32 bits of 64-bit counter @c inside the PPCNT
 * per-TC congestion priority counter-set layout.
 */
#define PPORT_PER_TC_CONGEST_PRIO_OFF(c) \
	MLX5_BYTE_OFF(ppcnt_reg, \
		      counter_set.eth_per_tc_congest_prio_grp_data_layout.c##_high)

static const struct counter_desc pport_per_tc_congest_prio_stats_desc[] = {
	{ "rx_prio%d_cong_discard", PPORT_PER_TC_CONGEST_PRIO_OFF(wred_discard) },
	{ "rx_prio%d_marked", PPORT_PER_TC_CONGEST_PRIO_OFF(ecn_marked_tc) },
};

#define NUM_PPORT_PER_TC_CONGEST_PRIO_COUNTERS \
	ARRAY_SIZE(pport_per_tc_congest_prio_stats_desc)
1189
1190static int mlx5e_grp_per_tc_prio_get_num_stats(struct mlx5e_priv *priv)
1191{
1192 struct mlx5_core_dev *mdev = priv->mdev;
1193
1194 if (!MLX5_CAP_GEN(mdev, sbcam_reg))
1195 return 0;
1196
1197 return NUM_PPORT_PER_TC_PRIO_COUNTERS * NUM_PPORT_PRIO;
1198}
1199
1200static MLX5E_DECLARE_STATS_GRP_OP_FILL_STRS(per_port_buff_congest)
1201{
1202 struct mlx5_core_dev *mdev = priv->mdev;
1203 int i, prio;
1204
1205 if (!MLX5_CAP_GEN(mdev, sbcam_reg))
1206 return idx;
1207
1208 for (prio = 0; prio < NUM_PPORT_PRIO; prio++) {
1209 for (i = 0; i < NUM_PPORT_PER_TC_PRIO_COUNTERS; i++)
1210 sprintf(data + (idx++) * ETH_GSTRING_LEN,
1211 pport_per_tc_prio_stats_desc[i].format, prio);
1212 for (i = 0; i < NUM_PPORT_PER_TC_CONGEST_PRIO_COUNTERS; i++)
1213 sprintf(data + (idx++) * ETH_GSTRING_LEN,
1214 pport_per_tc_congest_prio_stats_desc[i].format, prio);
1215 }
1216
1217 return idx;
1218}
1219
1220static MLX5E_DECLARE_STATS_GRP_OP_FILL_STATS(per_port_buff_congest)
1221{
1222 struct mlx5e_pport_stats *pport = &priv->stats.pport;
1223 struct mlx5_core_dev *mdev = priv->mdev;
1224 int i, prio;
1225
1226 if (!MLX5_CAP_GEN(mdev, sbcam_reg))
1227 return idx;
1228
1229 for (prio = 0; prio < NUM_PPORT_PRIO; prio++) {
1230 for (i = 0; i < NUM_PPORT_PER_TC_PRIO_COUNTERS; i++)
1231 data[idx++] =
1232 MLX5E_READ_CTR64_BE(&pport->per_tc_prio_counters[prio],
1233 pport_per_tc_prio_stats_desc, i);
1234 for (i = 0; i < NUM_PPORT_PER_TC_CONGEST_PRIO_COUNTERS ; i++)
1235 data[idx++] =
1236 MLX5E_READ_CTR64_BE(&pport->per_tc_congest_prio_counters[prio],
1237 pport_per_tc_congest_prio_stats_desc, i);
1238 }
1239
1240 return idx;
1241}
1242
/* Snapshot the per-traffic-class counter group, one PPCNT query per
 * priority, into pstats->per_tc_prio_counters[].
 */
static void mlx5e_grp_per_tc_prio_update_stats(struct mlx5e_priv *priv)
{
	struct mlx5e_pport_stats *pstats = &priv->stats.pport;
	struct mlx5_core_dev *mdev = priv->mdev;
	u32 in[MLX5_ST_SZ_DW(ppcnt_reg)] = {};
	int sz = MLX5_ST_SZ_BYTES(ppcnt_reg);
	void *out;
	int prio;

	if (!MLX5_CAP_GEN(mdev, sbcam_reg))
		return;

	/* pnat=2 — port access mode for this query; see PRM PPCNT
	 * definition (NOTE(review): semantics not visible here).
	 */
	MLX5_SET(ppcnt_reg, in, pnat, 2);
	MLX5_SET(ppcnt_reg, in, grp, MLX5_PER_TRAFFIC_CLASS_COUNTERS_GROUP);
	for (prio = 0; prio < NUM_PPORT_PRIO; prio++) {
		out = pstats->per_tc_prio_counters[prio];
		MLX5_SET(ppcnt_reg, in, prio_tc, prio);
		mlx5_core_access_reg(mdev, in, sz, out, sz, MLX5_REG_PPCNT, 0, 0);
	}
}
1263
1264static int mlx5e_grp_per_tc_congest_prio_get_num_stats(struct mlx5e_priv *priv)
1265{
1266 struct mlx5_core_dev *mdev = priv->mdev;
1267
1268 if (!MLX5_CAP_GEN(mdev, sbcam_reg))
1269 return 0;
1270
1271 return NUM_PPORT_PER_TC_CONGEST_PRIO_COUNTERS * NUM_PPORT_PRIO;
1272}
1273
/* Snapshot the per-traffic-class congestion counter group, one PPCNT
 * query per priority, into pstats->per_tc_congest_prio_counters[].
 */
static void mlx5e_grp_per_tc_congest_prio_update_stats(struct mlx5e_priv *priv)
{
	struct mlx5e_pport_stats *pstats = &priv->stats.pport;
	struct mlx5_core_dev *mdev = priv->mdev;
	u32 in[MLX5_ST_SZ_DW(ppcnt_reg)] = {};
	int sz = MLX5_ST_SZ_BYTES(ppcnt_reg);
	void *out;
	int prio;

	if (!MLX5_CAP_GEN(mdev, sbcam_reg))
		return;

	MLX5_SET(ppcnt_reg, in, pnat, 2);
	MLX5_SET(ppcnt_reg, in, grp, MLX5_PER_TRAFFIC_CLASS_CONGESTION_GROUP);
	for (prio = 0; prio < NUM_PPORT_PRIO; prio++) {
		out = pstats->per_tc_congest_prio_counters[prio];
		MLX5_SET(ppcnt_reg, in, prio_tc, prio);
		mlx5_core_access_reg(mdev, in, sz, out, sz, MLX5_REG_PPCNT, 0, 0);
	}
}
1294
1295static MLX5E_DECLARE_STATS_GRP_OP_NUM_STATS(per_port_buff_congest)
1296{
1297 return mlx5e_grp_per_tc_prio_get_num_stats(priv) +
1298 mlx5e_grp_per_tc_congest_prio_get_num_stats(priv);
1299}
1300
/* Refresh both cached per-TC counter groups from firmware. */
static MLX5E_DECLARE_STATS_GRP_OP_UPDATE_STATS(per_port_buff_congest)
{
	mlx5e_grp_per_tc_prio_update_stats(priv);
	mlx5e_grp_per_tc_congest_prio_update_stats(priv);
}
1306
/* Byte offset of the high 32 bits of 64-bit counter @c inside the PPCNT
 * per-priority counter-set layout.
 */
#define PPORT_PER_PRIO_OFF(c) \
	MLX5_BYTE_OFF(ppcnt_reg, \
		      counter_set.eth_per_prio_grp_data_layout.c##_high)
/* Per-priority traffic counters; %d in the format is the priority. */
static const struct counter_desc pport_per_prio_traffic_stats_desc[] = {
	{ "rx_prio%d_bytes", PPORT_PER_PRIO_OFF(rx_octets) },
	{ "rx_prio%d_packets", PPORT_PER_PRIO_OFF(rx_frames) },
	{ "rx_prio%d_discards", PPORT_PER_PRIO_OFF(rx_discards) },
	{ "tx_prio%d_bytes", PPORT_PER_PRIO_OFF(tx_octets) },
	{ "tx_prio%d_packets", PPORT_PER_PRIO_OFF(tx_frames) },
};

#define NUM_PPORT_PER_PRIO_TRAFFIC_COUNTERS ARRAY_SIZE(pport_per_prio_traffic_stats_desc)
1319
1320static int mlx5e_grp_per_prio_traffic_get_num_stats(void)
1321{
1322 return NUM_PPORT_PER_PRIO_TRAFFIC_COUNTERS * NUM_PPORT_PRIO;
1323}
1324
1325static int mlx5e_grp_per_prio_traffic_fill_strings(struct mlx5e_priv *priv,
1326 u8 *data,
1327 int idx)
1328{
1329 int i, prio;
1330
1331 for (prio = 0; prio < NUM_PPORT_PRIO; prio++) {
1332 for (i = 0; i < NUM_PPORT_PER_PRIO_TRAFFIC_COUNTERS; i++)
1333 sprintf(data + (idx++) * ETH_GSTRING_LEN,
1334 pport_per_prio_traffic_stats_desc[i].format, prio);
1335 }
1336
1337 return idx;
1338}
1339
1340static int mlx5e_grp_per_prio_traffic_fill_stats(struct mlx5e_priv *priv,
1341 u64 *data,
1342 int idx)
1343{
1344 int i, prio;
1345
1346 for (prio = 0; prio < NUM_PPORT_PRIO; prio++) {
1347 for (i = 0; i < NUM_PPORT_PER_PRIO_TRAFFIC_COUNTERS; i++)
1348 data[idx++] =
1349 MLX5E_READ_CTR64_BE(&priv->stats.pport.per_prio_counters[prio],
1350 pport_per_prio_traffic_stats_desc, i);
1351 }
1352
1353 return idx;
1354}
1355
/* PFC counters; %s in the format is "prio<N>" or "global". */
static const struct counter_desc pport_per_prio_pfc_stats_desc[] = {

	{ "rx_%s_pause", PPORT_PER_PRIO_OFF(rx_pause) },
	{ "rx_%s_pause_duration", PPORT_PER_PRIO_OFF(rx_pause_duration) },
	{ "tx_%s_pause", PPORT_PER_PRIO_OFF(tx_pause) },
	{ "tx_%s_pause_duration", PPORT_PER_PRIO_OFF(tx_pause_duration) },
	{ "rx_%s_pause_transition", PPORT_PER_PRIO_OFF(rx_pause_transition) },
};

/* Pause-storm stall watermark counters, gated on pfcc_mask and
 * stall_detect capabilities (see NUM_PPORT_PFC_STALL_COUNTERS).
 */
static const struct counter_desc pport_pfc_stall_stats_desc[] = {
	{ "tx_pause_storm_warning_events", PPORT_PER_PRIO_OFF(device_stall_minor_watermark_cnt) },
	{ "tx_pause_storm_error_events", PPORT_PER_PRIO_OFF(device_stall_critical_watermark_cnt) },
};

#define NUM_PPORT_PER_PRIO_PFC_COUNTERS ARRAY_SIZE(pport_per_prio_pfc_stats_desc)
/* Evaluates to 0 when either capability bit is clear. */
#define NUM_PPORT_PFC_STALL_COUNTERS(priv) (ARRAY_SIZE(pport_pfc_stall_stats_desc) * \
					    MLX5_CAP_PCAM_FEATURE((priv)->mdev, pfcc_mask) * \
					    MLX5_CAP_DEBUG((priv)->mdev, stall_detect))
1374
1375static unsigned long mlx5e_query_pfc_combined(struct mlx5e_priv *priv)
1376{
1377 struct mlx5_core_dev *mdev = priv->mdev;
1378 u8 pfc_en_tx;
1379 u8 pfc_en_rx;
1380 int err;
1381
1382 if (MLX5_CAP_GEN(mdev, port_type) != MLX5_CAP_PORT_TYPE_ETH)
1383 return 0;
1384
1385 err = mlx5_query_port_pfc(mdev, &pfc_en_tx, &pfc_en_rx);
1386
1387 return err ? 0 : pfc_en_tx | pfc_en_rx;
1388}
1389
1390static bool mlx5e_query_global_pause_combined(struct mlx5e_priv *priv)
1391{
1392 struct mlx5_core_dev *mdev = priv->mdev;
1393 u32 rx_pause;
1394 u32 tx_pause;
1395 int err;
1396
1397 if (MLX5_CAP_GEN(mdev, port_type) != MLX5_CAP_PORT_TYPE_ETH)
1398 return false;
1399
1400 err = mlx5_query_port_pause(mdev, &rx_pause, &tx_pause);
1401
1402 return err ? false : rx_pause | tx_pause;
1403}
1404
1405static int mlx5e_grp_per_prio_pfc_get_num_stats(struct mlx5e_priv *priv)
1406{
1407 return (mlx5e_query_global_pause_combined(priv) +
1408 hweight8(mlx5e_query_pfc_combined(priv))) *
1409 NUM_PPORT_PER_PRIO_PFC_COUNTERS +
1410 NUM_PPORT_PFC_STALL_COUNTERS(priv);
1411}
1412
1413static int mlx5e_grp_per_prio_pfc_fill_strings(struct mlx5e_priv *priv,
1414 u8 *data,
1415 int idx)
1416{
1417 unsigned long pfc_combined;
1418 int i, prio;
1419
1420 pfc_combined = mlx5e_query_pfc_combined(priv);
1421 for_each_set_bit(prio, &pfc_combined, NUM_PPORT_PRIO) {
1422 for (i = 0; i < NUM_PPORT_PER_PRIO_PFC_COUNTERS; i++) {
1423 char pfc_string[ETH_GSTRING_LEN];
1424
1425 snprintf(pfc_string, sizeof(pfc_string), "prio%d", prio);
1426 sprintf(data + (idx++) * ETH_GSTRING_LEN,
1427 pport_per_prio_pfc_stats_desc[i].format, pfc_string);
1428 }
1429 }
1430
1431 if (mlx5e_query_global_pause_combined(priv)) {
1432 for (i = 0; i < NUM_PPORT_PER_PRIO_PFC_COUNTERS; i++) {
1433 sprintf(data + (idx++) * ETH_GSTRING_LEN,
1434 pport_per_prio_pfc_stats_desc[i].format, "global");
1435 }
1436 }
1437
1438 for (i = 0; i < NUM_PPORT_PFC_STALL_COUNTERS(priv); i++)
1439 strcpy(data + (idx++) * ETH_GSTRING_LEN,
1440 pport_pfc_stall_stats_desc[i].format);
1441
1442 return idx;
1443}
1444
1445static int mlx5e_grp_per_prio_pfc_fill_stats(struct mlx5e_priv *priv,
1446 u64 *data,
1447 int idx)
1448{
1449 unsigned long pfc_combined;
1450 int i, prio;
1451
1452 pfc_combined = mlx5e_query_pfc_combined(priv);
1453 for_each_set_bit(prio, &pfc_combined, NUM_PPORT_PRIO) {
1454 for (i = 0; i < NUM_PPORT_PER_PRIO_PFC_COUNTERS; i++) {
1455 data[idx++] =
1456 MLX5E_READ_CTR64_BE(&priv->stats.pport.per_prio_counters[prio],
1457 pport_per_prio_pfc_stats_desc, i);
1458 }
1459 }
1460
1461 if (mlx5e_query_global_pause_combined(priv)) {
1462 for (i = 0; i < NUM_PPORT_PER_PRIO_PFC_COUNTERS; i++) {
1463 data[idx++] =
1464 MLX5E_READ_CTR64_BE(&priv->stats.pport.per_prio_counters[0],
1465 pport_per_prio_pfc_stats_desc, i);
1466 }
1467 }
1468
1469 for (i = 0; i < NUM_PPORT_PFC_STALL_COUNTERS(priv); i++)
1470 data[idx++] = MLX5E_READ_CTR64_BE(&priv->stats.pport.per_prio_counters[0],
1471 pport_pfc_stall_stats_desc, i);
1472
1473 return idx;
1474}
1475
1476static MLX5E_DECLARE_STATS_GRP_OP_NUM_STATS(per_prio)
1477{
1478 return mlx5e_grp_per_prio_traffic_get_num_stats() +
1479 mlx5e_grp_per_prio_pfc_get_num_stats(priv);
1480}
1481
1482static MLX5E_DECLARE_STATS_GRP_OP_FILL_STRS(per_prio)
1483{
1484 idx = mlx5e_grp_per_prio_traffic_fill_strings(priv, data, idx);
1485 idx = mlx5e_grp_per_prio_pfc_fill_strings(priv, data, idx);
1486 return idx;
1487}
1488
1489static MLX5E_DECLARE_STATS_GRP_OP_FILL_STATS(per_prio)
1490{
1491 idx = mlx5e_grp_per_prio_traffic_fill_stats(priv, data, idx);
1492 idx = mlx5e_grp_per_prio_pfc_fill_stats(priv, data, idx);
1493 return idx;
1494}
1495
/* Snapshot the per-priority counter group, one PPCNT query per priority,
 * into pstats->per_prio_counters[]. Skipped when the device lacks the
 * basic PPCNT groups.
 */
static MLX5E_DECLARE_STATS_GRP_OP_UPDATE_STATS(per_prio)
{
	struct mlx5e_pport_stats *pstats = &priv->stats.pport;
	struct mlx5_core_dev *mdev = priv->mdev;
	u32 in[MLX5_ST_SZ_DW(ppcnt_reg)] = {0};
	int sz = MLX5_ST_SZ_BYTES(ppcnt_reg);
	int prio;
	void *out;

	if (!MLX5_BASIC_PPCNT_SUPPORTED(mdev))
		return;

	MLX5_SET(ppcnt_reg, in, local_port, 1);
	MLX5_SET(ppcnt_reg, in, grp, MLX5_PER_PRIORITY_COUNTERS_GROUP);
	for (prio = 0; prio < NUM_PPORT_PRIO; prio++) {
		out = pstats->per_prio_counters[prio];
		MLX5_SET(ppcnt_reg, in, prio_tc, prio);
		mlx5_core_access_reg(mdev, in, sz, out, sz,
				     MLX5_REG_PPCNT, 0, 0);
	}
}
1517
/* Port-module-event status counters: offsets index into the u64 arrays
 * returned by mlx5_get_pme_stats (see MLX5E_READ_CTR64_CPU usage below).
 */
static const struct counter_desc mlx5e_pme_status_desc[] = {
	{ "module_unplug", sizeof(u64) * MLX5_MODULE_STATUS_UNPLUGGED },
};

/* Port-module-event error counters, same indexing scheme. */
static const struct counter_desc mlx5e_pme_error_desc[] = {
	{ "module_bus_stuck", sizeof(u64) * MLX5_MODULE_EVENT_ERROR_BUS_STUCK },
	{ "module_high_temp", sizeof(u64) * MLX5_MODULE_EVENT_ERROR_HIGH_TEMPERATURE },
	{ "module_bad_shorted", sizeof(u64) * MLX5_MODULE_EVENT_ERROR_BAD_CABLE },
};

#define NUM_PME_STATUS_STATS ARRAY_SIZE(mlx5e_pme_status_desc)
#define NUM_PME_ERR_STATS ARRAY_SIZE(mlx5e_pme_error_desc)
1530
1531static MLX5E_DECLARE_STATS_GRP_OP_NUM_STATS(pme)
1532{
1533 return NUM_PME_STATUS_STATS + NUM_PME_ERR_STATS;
1534}
1535
1536static MLX5E_DECLARE_STATS_GRP_OP_FILL_STRS(pme)
1537{
1538 int i;
1539
1540 for (i = 0; i < NUM_PME_STATUS_STATS; i++)
1541 strcpy(data + (idx++) * ETH_GSTRING_LEN, mlx5e_pme_status_desc[i].format);
1542
1543 for (i = 0; i < NUM_PME_ERR_STATS; i++)
1544 strcpy(data + (idx++) * ETH_GSTRING_LEN, mlx5e_pme_error_desc[i].format);
1545
1546 return idx;
1547}
1548
1549static MLX5E_DECLARE_STATS_GRP_OP_FILL_STATS(pme)
1550{
1551 struct mlx5_pme_stats pme_stats;
1552 int i;
1553
1554 mlx5_get_pme_stats(priv->mdev, &pme_stats);
1555
1556 for (i = 0; i < NUM_PME_STATUS_STATS; i++)
1557 data[idx++] = MLX5E_READ_CTR64_CPU(pme_stats.status_counters,
1558 mlx5e_pme_status_desc, i);
1559
1560 for (i = 0; i < NUM_PME_ERR_STATS; i++)
1561 data[idx++] = MLX5E_READ_CTR64_CPU(pme_stats.error_counters,
1562 mlx5e_pme_error_desc, i);
1563
1564 return idx;
1565}
1566
/* No cached state: pme fill_stats queries mlx5_get_pme_stats directly. */
static MLX5E_DECLARE_STATS_GRP_OP_UPDATE_STATS(pme)
{
}
1568
/* Delegate the TLS stat count to the en_accel/tls layer. */
static MLX5E_DECLARE_STATS_GRP_OP_NUM_STATS(tls)
{
	return mlx5e_tls_get_count(priv);
}
1573
/* Let the TLS layer write its strings at the current offset; it returns
 * how many strings it wrote, which advances idx.
 */
static MLX5E_DECLARE_STATS_GRP_OP_FILL_STRS(tls)
{
	return idx + mlx5e_tls_get_strings(priv, data + idx * ETH_GSTRING_LEN);
}
1578
/* Let the TLS layer write its values at the current offset; it returns
 * how many values it wrote, which advances idx.
 */
static MLX5E_DECLARE_STATS_GRP_OP_FILL_STATS(tls)
{
	return idx + mlx5e_tls_get_stats(priv, data + idx);
}
1583
/* No cached state: tls fill_stats queries mlx5e_tls_get_stats directly. */
static MLX5E_DECLARE_STATS_GRP_OP_UPDATE_STATS(tls) { return; }
1585
/* Per-RQ software counters exposed through ethtool -S. */
static const struct counter_desc rq_stats_desc[] = {
	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, packets) },
	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, bytes) },
	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, csum_complete) },
	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, csum_complete_tail) },
	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, csum_complete_tail_slow) },
	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, csum_unnecessary) },
	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, csum_unnecessary_inner) },
	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, csum_none) },
	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, xdp_drop) },
	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, xdp_redirect) },
	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, lro_packets) },
	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, lro_bytes) },
	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, ecn_mark) },
	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, removed_vlan_packets) },
	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, wqe_err) },
	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, mpwqe_filler_cqes) },
	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, mpwqe_filler_strides) },
	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, oversize_pkts_sw_drop) },
	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, buff_alloc_err) },
	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, cqe_compress_blks) },
	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, cqe_compress_pkts) },
	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, cache_reuse) },
	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, cache_full) },
	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, cache_empty) },
	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, cache_busy) },
	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, cache_waive) },
	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, congst_umr) },
	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, arfs_err) },
	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, recover) },
#ifdef CONFIG_MLX5_EN_TLS
	/* RX kTLS offload counters, compiled in only with TLS support. */
	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, tls_decrypted_packets) },
	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, tls_decrypted_bytes) },
	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, tls_resync_req_pkt) },
	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, tls_resync_req_start) },
	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, tls_resync_req_end) },
	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, tls_resync_req_skip) },
	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, tls_resync_res_ok) },
	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, tls_resync_res_skip) },
	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, tls_err) },
#endif
};
1628
/* Per-SQ software counters exposed through ethtool -S. */
static const struct counter_desc sq_stats_desc[] = {
	{ MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, packets) },
	{ MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, bytes) },
	{ MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, tso_packets) },
	{ MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, tso_bytes) },
	{ MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, tso_inner_packets) },
	{ MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, tso_inner_bytes) },
	{ MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, csum_partial) },
	{ MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, csum_partial_inner) },
	{ MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, added_vlan_packets) },
	{ MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, nop) },
	{ MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, mpwqe_blks) },
	{ MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, mpwqe_pkts) },
#ifdef CONFIG_MLX5_EN_TLS
	/* TX kTLS offload counters, compiled in only with TLS support. */
	{ MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, tls_encrypted_packets) },
	{ MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, tls_encrypted_bytes) },
	{ MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, tls_ooo) },
	{ MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, tls_dump_packets) },
	{ MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, tls_dump_bytes) },
	{ MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, tls_resync_bytes) },
	{ MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, tls_skip_no_sync_data) },
	{ MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, tls_drop_no_sync_data) },
	{ MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, tls_drop_bypass_req) },
#endif
	{ MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, csum_none) },
	{ MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, stopped) },
	{ MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, dropped) },
	{ MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, xmit_more) },
	{ MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, recover) },
	{ MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, cqes) },
	{ MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, wake) },
	{ MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, cqe_err) },
};
1662
/* Counters for the XDP-TX SQ attached to an RQ (XDP_TX path). */
static const struct counter_desc rq_xdpsq_stats_desc[] = {
	{ MLX5E_DECLARE_RQ_XDPSQ_STAT(struct mlx5e_xdpsq_stats, xmit) },
	{ MLX5E_DECLARE_RQ_XDPSQ_STAT(struct mlx5e_xdpsq_stats, mpwqe) },
	{ MLX5E_DECLARE_RQ_XDPSQ_STAT(struct mlx5e_xdpsq_stats, inlnw) },
	{ MLX5E_DECLARE_RQ_XDPSQ_STAT(struct mlx5e_xdpsq_stats, nops) },
	{ MLX5E_DECLARE_RQ_XDPSQ_STAT(struct mlx5e_xdpsq_stats, full) },
	{ MLX5E_DECLARE_RQ_XDPSQ_STAT(struct mlx5e_xdpsq_stats, err) },
	{ MLX5E_DECLARE_RQ_XDPSQ_STAT(struct mlx5e_xdpsq_stats, cqes) },
};
1672
/* Counters for the standalone XDP SQ (same fields, different prefix). */
static const struct counter_desc xdpsq_stats_desc[] = {
	{ MLX5E_DECLARE_XDPSQ_STAT(struct mlx5e_xdpsq_stats, xmit) },
	{ MLX5E_DECLARE_XDPSQ_STAT(struct mlx5e_xdpsq_stats, mpwqe) },
	{ MLX5E_DECLARE_XDPSQ_STAT(struct mlx5e_xdpsq_stats, inlnw) },
	{ MLX5E_DECLARE_XDPSQ_STAT(struct mlx5e_xdpsq_stats, nops) },
	{ MLX5E_DECLARE_XDPSQ_STAT(struct mlx5e_xdpsq_stats, full) },
	{ MLX5E_DECLARE_XDPSQ_STAT(struct mlx5e_xdpsq_stats, err) },
	{ MLX5E_DECLARE_XDPSQ_STAT(struct mlx5e_xdpsq_stats, cqes) },
};
1682
/* Counters for AF_XDP receive queues (subset of the RQ counter set). */
static const struct counter_desc xskrq_stats_desc[] = {
	{ MLX5E_DECLARE_XSKRQ_STAT(struct mlx5e_rq_stats, packets) },
	{ MLX5E_DECLARE_XSKRQ_STAT(struct mlx5e_rq_stats, bytes) },
	{ MLX5E_DECLARE_XSKRQ_STAT(struct mlx5e_rq_stats, csum_complete) },
	{ MLX5E_DECLARE_XSKRQ_STAT(struct mlx5e_rq_stats, csum_unnecessary) },
	{ MLX5E_DECLARE_XSKRQ_STAT(struct mlx5e_rq_stats, csum_unnecessary_inner) },
	{ MLX5E_DECLARE_XSKRQ_STAT(struct mlx5e_rq_stats, csum_none) },
	{ MLX5E_DECLARE_XSKRQ_STAT(struct mlx5e_rq_stats, ecn_mark) },
	{ MLX5E_DECLARE_XSKRQ_STAT(struct mlx5e_rq_stats, removed_vlan_packets) },
	{ MLX5E_DECLARE_XSKRQ_STAT(struct mlx5e_rq_stats, xdp_drop) },
	{ MLX5E_DECLARE_XSKRQ_STAT(struct mlx5e_rq_stats, xdp_redirect) },
	{ MLX5E_DECLARE_XSKRQ_STAT(struct mlx5e_rq_stats, wqe_err) },
	{ MLX5E_DECLARE_XSKRQ_STAT(struct mlx5e_rq_stats, mpwqe_filler_cqes) },
	{ MLX5E_DECLARE_XSKRQ_STAT(struct mlx5e_rq_stats, mpwqe_filler_strides) },
	{ MLX5E_DECLARE_XSKRQ_STAT(struct mlx5e_rq_stats, oversize_pkts_sw_drop) },
	{ MLX5E_DECLARE_XSKRQ_STAT(struct mlx5e_rq_stats, buff_alloc_err) },
	{ MLX5E_DECLARE_XSKRQ_STAT(struct mlx5e_rq_stats, cqe_compress_blks) },
	{ MLX5E_DECLARE_XSKRQ_STAT(struct mlx5e_rq_stats, cqe_compress_pkts) },
	{ MLX5E_DECLARE_XSKRQ_STAT(struct mlx5e_rq_stats, congst_umr) },
	{ MLX5E_DECLARE_XSKRQ_STAT(struct mlx5e_rq_stats, arfs_err) },
};
1704
/* AF_XDP TX-SQ counters, read from struct mlx5e_xdpsq_stats.
 * NOTE(review): unlike xdpsq_stats_desc this table omits the "nops"
 * field — presumably intentional, but worth confirming. Order defines
 * ethtool output order.
 */
static const struct counter_desc xsksq_stats_desc[] = {
	{ MLX5E_DECLARE_XSKSQ_STAT(struct mlx5e_xdpsq_stats, xmit) },
	{ MLX5E_DECLARE_XSKSQ_STAT(struct mlx5e_xdpsq_stats, mpwqe) },
	{ MLX5E_DECLARE_XSKSQ_STAT(struct mlx5e_xdpsq_stats, inlnw) },
	{ MLX5E_DECLARE_XSKSQ_STAT(struct mlx5e_xdpsq_stats, full) },
	{ MLX5E_DECLARE_XSKSQ_STAT(struct mlx5e_xdpsq_stats, err) },
	{ MLX5E_DECLARE_XSKSQ_STAT(struct mlx5e_xdpsq_stats, cqes) },
};
1713
/* Per-channel (NAPI/EQ) counters from struct mlx5e_ch_stats.
 * Order defines ethtool output order.
 */
static const struct counter_desc ch_stats_desc[] = {
	{ MLX5E_DECLARE_CH_STAT(struct mlx5e_ch_stats, events) },
	{ MLX5E_DECLARE_CH_STAT(struct mlx5e_ch_stats, poll) },
	{ MLX5E_DECLARE_CH_STAT(struct mlx5e_ch_stats, arm) },
	{ MLX5E_DECLARE_CH_STAT(struct mlx5e_ch_stats, aff_change) },
	{ MLX5E_DECLARE_CH_STAT(struct mlx5e_ch_stats, force_irq) },
	{ MLX5E_DECLARE_CH_STAT(struct mlx5e_ch_stats, eq_rearm) },
};
1722
/* Per-TC SQ counters for the dedicated PTP channel, read from
 * struct mlx5e_sq_stats (subset of the regular SQ table: no TSO/TLS
 * fields). Order defines ethtool output order.
 */
static const struct counter_desc ptp_sq_stats_desc[] = {
	{ MLX5E_DECLARE_PTP_TX_STAT(struct mlx5e_sq_stats, packets) },
	{ MLX5E_DECLARE_PTP_TX_STAT(struct mlx5e_sq_stats, bytes) },
	{ MLX5E_DECLARE_PTP_TX_STAT(struct mlx5e_sq_stats, csum_partial) },
	{ MLX5E_DECLARE_PTP_TX_STAT(struct mlx5e_sq_stats, csum_partial_inner) },
	{ MLX5E_DECLARE_PTP_TX_STAT(struct mlx5e_sq_stats, added_vlan_packets) },
	{ MLX5E_DECLARE_PTP_TX_STAT(struct mlx5e_sq_stats, nop) },
	{ MLX5E_DECLARE_PTP_TX_STAT(struct mlx5e_sq_stats, csum_none) },
	{ MLX5E_DECLARE_PTP_TX_STAT(struct mlx5e_sq_stats, stopped) },
	{ MLX5E_DECLARE_PTP_TX_STAT(struct mlx5e_sq_stats, dropped) },
	{ MLX5E_DECLARE_PTP_TX_STAT(struct mlx5e_sq_stats, xmit_more) },
	{ MLX5E_DECLARE_PTP_TX_STAT(struct mlx5e_sq_stats, recover) },
	{ MLX5E_DECLARE_PTP_TX_STAT(struct mlx5e_sq_stats, cqes) },
	{ MLX5E_DECLARE_PTP_TX_STAT(struct mlx5e_sq_stats, wake) },
	{ MLX5E_DECLARE_PTP_TX_STAT(struct mlx5e_sq_stats, cqe_err) },
};
1739
/* Channel counters for the single PTP channel, read from
 * struct mlx5e_ch_stats. Order defines ethtool output order.
 */
static const struct counter_desc ptp_ch_stats_desc[] = {
	{ MLX5E_DECLARE_PTP_CH_STAT(struct mlx5e_ch_stats, events) },
	{ MLX5E_DECLARE_PTP_CH_STAT(struct mlx5e_ch_stats, poll) },
	{ MLX5E_DECLARE_PTP_CH_STAT(struct mlx5e_ch_stats, arm) },
	{ MLX5E_DECLARE_PTP_CH_STAT(struct mlx5e_ch_stats, eq_rearm) },
};
1746
/* Per-TC PTP CQE-timestamping counters from struct mlx5e_ptp_cq_stats.
 * Order defines ethtool output order.
 */
static const struct counter_desc ptp_cq_stats_desc[] = {
	{ MLX5E_DECLARE_PTP_CQ_STAT(struct mlx5e_ptp_cq_stats, cqe) },
	{ MLX5E_DECLARE_PTP_CQ_STAT(struct mlx5e_ptp_cq_stats, err_cqe) },
	{ MLX5E_DECLARE_PTP_CQ_STAT(struct mlx5e_ptp_cq_stats, abort) },
	{ MLX5E_DECLARE_PTP_CQ_STAT(struct mlx5e_ptp_cq_stats, abort_abs_diff_ns) },
};
1753
/* Per-QoS (HTB) SQ counters, read from struct mlx5e_sq_stats. The TLS
 * fields are compiled in only with CONFIG_MLX5_EN_TLS, matching the
 * layout of the stats struct. Order defines ethtool output order.
 */
static const struct counter_desc qos_sq_stats_desc[] = {
	{ MLX5E_DECLARE_QOS_TX_STAT(struct mlx5e_sq_stats, packets) },
	{ MLX5E_DECLARE_QOS_TX_STAT(struct mlx5e_sq_stats, bytes) },
	{ MLX5E_DECLARE_QOS_TX_STAT(struct mlx5e_sq_stats, tso_packets) },
	{ MLX5E_DECLARE_QOS_TX_STAT(struct mlx5e_sq_stats, tso_bytes) },
	{ MLX5E_DECLARE_QOS_TX_STAT(struct mlx5e_sq_stats, tso_inner_packets) },
	{ MLX5E_DECLARE_QOS_TX_STAT(struct mlx5e_sq_stats, tso_inner_bytes) },
	{ MLX5E_DECLARE_QOS_TX_STAT(struct mlx5e_sq_stats, csum_partial) },
	{ MLX5E_DECLARE_QOS_TX_STAT(struct mlx5e_sq_stats, csum_partial_inner) },
	{ MLX5E_DECLARE_QOS_TX_STAT(struct mlx5e_sq_stats, added_vlan_packets) },
	{ MLX5E_DECLARE_QOS_TX_STAT(struct mlx5e_sq_stats, nop) },
	{ MLX5E_DECLARE_QOS_TX_STAT(struct mlx5e_sq_stats, mpwqe_blks) },
	{ MLX5E_DECLARE_QOS_TX_STAT(struct mlx5e_sq_stats, mpwqe_pkts) },
#ifdef CONFIG_MLX5_EN_TLS
	{ MLX5E_DECLARE_QOS_TX_STAT(struct mlx5e_sq_stats, tls_encrypted_packets) },
	{ MLX5E_DECLARE_QOS_TX_STAT(struct mlx5e_sq_stats, tls_encrypted_bytes) },
	{ MLX5E_DECLARE_QOS_TX_STAT(struct mlx5e_sq_stats, tls_ooo) },
	{ MLX5E_DECLARE_QOS_TX_STAT(struct mlx5e_sq_stats, tls_dump_packets) },
	{ MLX5E_DECLARE_QOS_TX_STAT(struct mlx5e_sq_stats, tls_dump_bytes) },
	{ MLX5E_DECLARE_QOS_TX_STAT(struct mlx5e_sq_stats, tls_resync_bytes) },
	{ MLX5E_DECLARE_QOS_TX_STAT(struct mlx5e_sq_stats, tls_skip_no_sync_data) },
	{ MLX5E_DECLARE_QOS_TX_STAT(struct mlx5e_sq_stats, tls_drop_no_sync_data) },
	{ MLX5E_DECLARE_QOS_TX_STAT(struct mlx5e_sq_stats, tls_drop_bypass_req) },
#endif
	{ MLX5E_DECLARE_QOS_TX_STAT(struct mlx5e_sq_stats, csum_none) },
	{ MLX5E_DECLARE_QOS_TX_STAT(struct mlx5e_sq_stats, stopped) },
	{ MLX5E_DECLARE_QOS_TX_STAT(struct mlx5e_sq_stats, dropped) },
	{ MLX5E_DECLARE_QOS_TX_STAT(struct mlx5e_sq_stats, xmit_more) },
	{ MLX5E_DECLARE_QOS_TX_STAT(struct mlx5e_sq_stats, recover) },
	{ MLX5E_DECLARE_QOS_TX_STAT(struct mlx5e_sq_stats, cqes) },
	{ MLX5E_DECLARE_QOS_TX_STAT(struct mlx5e_sq_stats, wake) },
	{ MLX5E_DECLARE_QOS_TX_STAT(struct mlx5e_sq_stats, cqe_err) },
};
1787
/* Element counts of the descriptor tables above; used by the num_stats/
 * fill callbacks to size and walk the ethtool output.
 */
#define NUM_RQ_STATS ARRAY_SIZE(rq_stats_desc)
#define NUM_SQ_STATS ARRAY_SIZE(sq_stats_desc)
#define NUM_XDPSQ_STATS ARRAY_SIZE(xdpsq_stats_desc)
#define NUM_RQ_XDPSQ_STATS ARRAY_SIZE(rq_xdpsq_stats_desc)
#define NUM_XSKRQ_STATS ARRAY_SIZE(xskrq_stats_desc)
#define NUM_XSKSQ_STATS ARRAY_SIZE(xsksq_stats_desc)
#define NUM_CH_STATS ARRAY_SIZE(ch_stats_desc)
#define NUM_PTP_SQ_STATS ARRAY_SIZE(ptp_sq_stats_desc)
#define NUM_PTP_CH_STATS ARRAY_SIZE(ptp_ch_stats_desc)
#define NUM_PTP_CQ_STATS ARRAY_SIZE(ptp_cq_stats_desc)
#define NUM_QOS_SQ_STATS ARRAY_SIZE(qos_sq_stats_desc)
1799
1800static MLX5E_DECLARE_STATS_GRP_OP_NUM_STATS(qos)
1801{
1802
1803 return NUM_QOS_SQ_STATS * smp_load_acquire(&priv->htb.max_qos_sqs);
1804}
1805
1806static MLX5E_DECLARE_STATS_GRP_OP_FILL_STRS(qos)
1807{
1808
1809 u16 max_qos_sqs = smp_load_acquire(&priv->htb.max_qos_sqs);
1810 int i, qid;
1811
1812 for (qid = 0; qid < max_qos_sqs; qid++)
1813 for (i = 0; i < NUM_QOS_SQ_STATS; i++)
1814 sprintf(data + (idx++) * ETH_GSTRING_LEN,
1815 qos_sq_stats_desc[i].format, qid);
1816
1817 return idx;
1818}
1819
/* Copy QoS SQ counter values into the ethtool data array, in the same
 * order as the strings emitted by the fill_strings callback.
 */
static MLX5E_DECLARE_STATS_GRP_OP_FILL_STATS(qos)
{
	struct mlx5e_sq_stats **stats;
	u16 max_qos_sqs;
	int i, qid;


	/* Load order matters: the acquire read of max_qos_sqs must happen
	 * before reading the qos_sq_stats pointer array — presumably it
	 * pairs with a release store when the array is grown, so all
	 * max_qos_sqs entries are initialized (TODO confirm against the
	 * htb update path).
	 */
	max_qos_sqs = smp_load_acquire(&priv->htb.max_qos_sqs);
	stats = READ_ONCE(priv->htb.qos_sq_stats);

	for (qid = 0; qid < max_qos_sqs; qid++) {
		/* READ_ONCE: entries may be published concurrently. */
		struct mlx5e_sq_stats *s = READ_ONCE(stats[qid]);

		for (i = 0; i < NUM_QOS_SQ_STATS; i++)
			data[idx++] = MLX5E_READ_CTR64_CPU(s, qos_sq_stats_desc, i);
	}

	return idx;
}
1839
/* No cached state to refresh; fill_stats reads the live SQ counters. */
static MLX5E_DECLARE_STATS_GRP_OP_UPDATE_STATS(qos) { return; }
1841
1842static MLX5E_DECLARE_STATS_GRP_OP_NUM_STATS(ptp)
1843{
1844 return priv->port_ptp_opened ?
1845 NUM_PTP_CH_STATS +
1846 ((NUM_PTP_SQ_STATS + NUM_PTP_CQ_STATS) * priv->max_opened_tc) :
1847 0;
1848}
1849
1850static MLX5E_DECLARE_STATS_GRP_OP_FILL_STRS(ptp)
1851{
1852 int i, tc;
1853
1854 if (!priv->port_ptp_opened)
1855 return idx;
1856
1857 for (i = 0; i < NUM_PTP_CH_STATS; i++)
1858 sprintf(data + (idx++) * ETH_GSTRING_LEN,
1859 ptp_ch_stats_desc[i].format);
1860
1861 for (tc = 0; tc < priv->max_opened_tc; tc++)
1862 for (i = 0; i < NUM_PTP_SQ_STATS; i++)
1863 sprintf(data + (idx++) * ETH_GSTRING_LEN,
1864 ptp_sq_stats_desc[i].format, tc);
1865
1866 for (tc = 0; tc < priv->max_opened_tc; tc++)
1867 for (i = 0; i < NUM_PTP_CQ_STATS; i++)
1868 sprintf(data + (idx++) * ETH_GSTRING_LEN,
1869 ptp_cq_stats_desc[i].format, tc);
1870 return idx;
1871}
1872
1873static MLX5E_DECLARE_STATS_GRP_OP_FILL_STATS(ptp)
1874{
1875 int i, tc;
1876
1877 if (!priv->port_ptp_opened)
1878 return idx;
1879
1880 for (i = 0; i < NUM_PTP_CH_STATS; i++)
1881 data[idx++] =
1882 MLX5E_READ_CTR64_CPU(&priv->port_ptp_stats.ch,
1883 ptp_ch_stats_desc, i);
1884
1885 for (tc = 0; tc < priv->max_opened_tc; tc++)
1886 for (i = 0; i < NUM_PTP_SQ_STATS; i++)
1887 data[idx++] =
1888 MLX5E_READ_CTR64_CPU(&priv->port_ptp_stats.sq[tc],
1889 ptp_sq_stats_desc, i);
1890
1891 for (tc = 0; tc < priv->max_opened_tc; tc++)
1892 for (i = 0; i < NUM_PTP_CQ_STATS; i++)
1893 data[idx++] =
1894 MLX5E_READ_CTR64_CPU(&priv->port_ptp_stats.cq[tc],
1895 ptp_cq_stats_desc, i);
1896
1897 return idx;
1898}
1899
/* No cached state to refresh; fill_stats reads the live PTP counters. */
static MLX5E_DECLARE_STATS_GRP_OP_UPDATE_STATS(ptp) { return; }
1901
1902static MLX5E_DECLARE_STATS_GRP_OP_NUM_STATS(channels)
1903{
1904 int max_nch = priv->max_nch;
1905
1906 return (NUM_RQ_STATS * max_nch) +
1907 (NUM_CH_STATS * max_nch) +
1908 (NUM_SQ_STATS * max_nch * priv->max_opened_tc) +
1909 (NUM_RQ_XDPSQ_STATS * max_nch) +
1910 (NUM_XDPSQ_STATS * max_nch) +
1911 (NUM_XSKRQ_STATS * max_nch * priv->xsk.ever_used) +
1912 (NUM_XSKSQ_STATS * max_nch * priv->xsk.ever_used);
1913}
1914
1915static MLX5E_DECLARE_STATS_GRP_OP_FILL_STRS(channels)
1916{
1917 bool is_xsk = priv->xsk.ever_used;
1918 int max_nch = priv->max_nch;
1919 int i, j, tc;
1920
1921 for (i = 0; i < max_nch; i++)
1922 for (j = 0; j < NUM_CH_STATS; j++)
1923 sprintf(data + (idx++) * ETH_GSTRING_LEN,
1924 ch_stats_desc[j].format, i);
1925
1926 for (i = 0; i < max_nch; i++) {
1927 for (j = 0; j < NUM_RQ_STATS; j++)
1928 sprintf(data + (idx++) * ETH_GSTRING_LEN,
1929 rq_stats_desc[j].format, i);
1930 for (j = 0; j < NUM_XSKRQ_STATS * is_xsk; j++)
1931 sprintf(data + (idx++) * ETH_GSTRING_LEN,
1932 xskrq_stats_desc[j].format, i);
1933 for (j = 0; j < NUM_RQ_XDPSQ_STATS; j++)
1934 sprintf(data + (idx++) * ETH_GSTRING_LEN,
1935 rq_xdpsq_stats_desc[j].format, i);
1936 }
1937
1938 for (tc = 0; tc < priv->max_opened_tc; tc++)
1939 for (i = 0; i < max_nch; i++)
1940 for (j = 0; j < NUM_SQ_STATS; j++)
1941 sprintf(data + (idx++) * ETH_GSTRING_LEN,
1942 sq_stats_desc[j].format,
1943 i + tc * max_nch);
1944
1945 for (i = 0; i < max_nch; i++) {
1946 for (j = 0; j < NUM_XSKSQ_STATS * is_xsk; j++)
1947 sprintf(data + (idx++) * ETH_GSTRING_LEN,
1948 xsksq_stats_desc[j].format, i);
1949 for (j = 0; j < NUM_XDPSQ_STATS; j++)
1950 sprintf(data + (idx++) * ETH_GSTRING_LEN,
1951 xdpsq_stats_desc[j].format, i);
1952 }
1953
1954 return idx;
1955}
1956
1957static MLX5E_DECLARE_STATS_GRP_OP_FILL_STATS(channels)
1958{
1959 bool is_xsk = priv->xsk.ever_used;
1960 int max_nch = priv->max_nch;
1961 int i, j, tc;
1962
1963 for (i = 0; i < max_nch; i++)
1964 for (j = 0; j < NUM_CH_STATS; j++)
1965 data[idx++] =
1966 MLX5E_READ_CTR64_CPU(&priv->channel_stats[i].ch,
1967 ch_stats_desc, j);
1968
1969 for (i = 0; i < max_nch; i++) {
1970 for (j = 0; j < NUM_RQ_STATS; j++)
1971 data[idx++] =
1972 MLX5E_READ_CTR64_CPU(&priv->channel_stats[i].rq,
1973 rq_stats_desc, j);
1974 for (j = 0; j < NUM_XSKRQ_STATS * is_xsk; j++)
1975 data[idx++] =
1976 MLX5E_READ_CTR64_CPU(&priv->channel_stats[i].xskrq,
1977 xskrq_stats_desc, j);
1978 for (j = 0; j < NUM_RQ_XDPSQ_STATS; j++)
1979 data[idx++] =
1980 MLX5E_READ_CTR64_CPU(&priv->channel_stats[i].rq_xdpsq,
1981 rq_xdpsq_stats_desc, j);
1982 }
1983
1984 for (tc = 0; tc < priv->max_opened_tc; tc++)
1985 for (i = 0; i < max_nch; i++)
1986 for (j = 0; j < NUM_SQ_STATS; j++)
1987 data[idx++] =
1988 MLX5E_READ_CTR64_CPU(&priv->channel_stats[i].sq[tc],
1989 sq_stats_desc, j);
1990
1991 for (i = 0; i < max_nch; i++) {
1992 for (j = 0; j < NUM_XSKSQ_STATS * is_xsk; j++)
1993 data[idx++] =
1994 MLX5E_READ_CTR64_CPU(&priv->channel_stats[i].xsksq,
1995 xsksq_stats_desc, j);
1996 for (j = 0; j < NUM_XDPSQ_STATS; j++)
1997 data[idx++] =
1998 MLX5E_READ_CTR64_CPU(&priv->channel_stats[i].xdpsq,
1999 xdpsq_stats_desc, j);
2000 }
2001
2002 return idx;
2003}
2004
/* No cached state to refresh; fill_stats reads the live channel counters. */
static MLX5E_DECLARE_STATS_GRP_OP_UPDATE_STATS(channels) { return; }
2006
/* Instantiate the stats group objects. Groups flagged with
 * MLX5E_NDO_UPDATE_STATS additionally have their update_stats callback
 * invoked from mlx5e_stats_update_ndo_stats(); the rest update only via
 * mlx5e_stats_update(). tls/ptp/qos are static — referenced only through
 * mlx5e_nic_stats_grps below.
 */
MLX5E_DEFINE_STATS_GRP(sw, 0);
MLX5E_DEFINE_STATS_GRP(qcnt, MLX5E_NDO_UPDATE_STATS);
MLX5E_DEFINE_STATS_GRP(vnic_env, 0);
MLX5E_DEFINE_STATS_GRP(vport, MLX5E_NDO_UPDATE_STATS);
MLX5E_DEFINE_STATS_GRP(802_3, MLX5E_NDO_UPDATE_STATS);
MLX5E_DEFINE_STATS_GRP(2863, 0);
MLX5E_DEFINE_STATS_GRP(2819, 0);
MLX5E_DEFINE_STATS_GRP(phy, 0);
MLX5E_DEFINE_STATS_GRP(pcie, 0);
MLX5E_DEFINE_STATS_GRP(per_prio, 0);
MLX5E_DEFINE_STATS_GRP(pme, 0);
MLX5E_DEFINE_STATS_GRP(channels, 0);
MLX5E_DEFINE_STATS_GRP(per_port_buff_congest, 0);
MLX5E_DEFINE_STATS_GRP(eth_ext, 0);
static MLX5E_DEFINE_STATS_GRP(tls, 0);
static MLX5E_DEFINE_STATS_GRP(ptp, 0);
static MLX5E_DEFINE_STATS_GRP(qos, 0);
2024
2025
/* Stats groups of the NIC profile. The table order determines the order
 * in which groups appear in ethtool output (the fill helpers iterate
 * this array front to back) — do not reorder casually.
 */
mlx5e_stats_grp_t mlx5e_nic_stats_grps[] = {
	&MLX5E_STATS_GRP(sw),
	&MLX5E_STATS_GRP(qcnt),
	&MLX5E_STATS_GRP(vnic_env),
	&MLX5E_STATS_GRP(vport),
	&MLX5E_STATS_GRP(802_3),
	&MLX5E_STATS_GRP(2863),
	&MLX5E_STATS_GRP(2819),
	&MLX5E_STATS_GRP(phy),
	&MLX5E_STATS_GRP(eth_ext),
	&MLX5E_STATS_GRP(pcie),
	&MLX5E_STATS_GRP(per_prio),
	&MLX5E_STATS_GRP(pme),
#ifdef CONFIG_MLX5_EN_IPSEC
	&MLX5E_STATS_GRP(ipsec_sw),
	&MLX5E_STATS_GRP(ipsec_hw),
#endif
	&MLX5E_STATS_GRP(tls),
	&MLX5E_STATS_GRP(channels),
	&MLX5E_STATS_GRP(per_port_buff_congest),
	&MLX5E_STATS_GRP(ptp),
	&MLX5E_STATS_GRP(qos),
};
2049
/* stats_grps_num callback of the NIC profile: number of entries in
 * mlx5e_nic_stats_grps (priv is unused; kept for the callback signature).
 */
unsigned int mlx5e_nic_stats_grps_num(struct mlx5e_priv *priv)
{
	return ARRAY_SIZE(mlx5e_nic_stats_grps);
}
2054