/*
 * Copyright (c) 2015-2016, Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#ifndef __MLX5_EN_STATS_H__
#define __MLX5_EN_STATS_H__

#define MLX5E_READ_CTR64_CPU(ptr, dsc, i) \
	(*(u64 *)((char *)ptr + dsc[i].offset))
#define MLX5E_READ_CTR64_BE(ptr, dsc, i) \
	be64_to_cpu(*(__be64 *)((char *)ptr + dsc[i].offset))
#define MLX5E_READ_CTR32_CPU(ptr, dsc, i) \
	(*(u32 *)((char *)ptr + dsc[i].offset))
#define MLX5E_READ_CTR32_BE(ptr, dsc, i) \
	be32_to_cpu(*(__be32 *)((char *)ptr + dsc[i].offset))

#define MLX5E_DECLARE_STAT(type, fld) #fld, offsetof(type, fld)
#define MLX5E_DECLARE_RX_STAT(type, fld) "rx%d_"#fld, offsetof(type, fld)
#define MLX5E_DECLARE_TX_STAT(type, fld) "tx%d_"#fld, offsetof(type, fld)
#define MLX5E_DECLARE_XDPSQ_STAT(type, fld) "tx%d_xdp_"#fld, offsetof(type, fld)
#define MLX5E_DECLARE_RQ_XDPSQ_STAT(type, fld) "rx%d_xdp_tx_"#fld, offsetof(type, fld)
#define MLX5E_DECLARE_XSKRQ_STAT(type, fld) "rx%d_xsk_"#fld, offsetof(type, fld)
#define MLX5E_DECLARE_XSKSQ_STAT(type, fld) "tx%d_xsk_"#fld, offsetof(type, fld)
#define MLX5E_DECLARE_CH_STAT(type, fld) "ch%d_"#fld, offsetof(type, fld)

struct counter_desc {
	char		format[ETH_GSTRING_LEN];
	size_t		offset;
};
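
/*
 * Usage sketch (illustrative only; the real descriptor tables live in
 * en_stats.c, and my_rq_stats_desc below is a hypothetical name).
 * A counter_desc array pairs each counter's ethtool string with its
 * byte offset inside the stats struct, and the MLX5E_READ_CTR* macros
 * then fetch a counter through that offset. The "%d" in the per-ring
 * prefixes ("rx%d_", "tx%d_", ...) is filled in with the ring index
 * when the strings are emitted:
 *
 *	static const struct counter_desc my_rq_stats_desc[] = {
 *		{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, packets) },
 *		{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, bytes) },
 *	};
 *
 *	// rq_stats is a struct mlx5e_rq_stats *
 *	data[idx++] = MLX5E_READ_CTR64_CPU(rq_stats, my_rq_stats_desc, i);
 */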

enum {
	MLX5E_NDO_UPDATE_STATS = BIT(0x1),
};

struct mlx5e_priv;
struct mlx5e_stats_grp {
	u16 update_stats_mask;
	int (*get_num_stats)(struct mlx5e_priv *priv);
	int (*fill_strings)(struct mlx5e_priv *priv, u8 *data, int idx);
	int (*fill_stats)(struct mlx5e_priv *priv, u64 *data, int idx);
	void (*update_stats)(struct mlx5e_priv *priv);
};

typedef const struct mlx5e_stats_grp *const mlx5e_stats_grp_t;

#define MLX5E_STATS_GRP_OP(grp, name) mlx5e_stats_grp_ ## grp ## _ ## name

#define MLX5E_DECLARE_STATS_GRP_OP_NUM_STATS(grp) \
	int MLX5E_STATS_GRP_OP(grp, num_stats)(struct mlx5e_priv *priv)

#define MLX5E_DECLARE_STATS_GRP_OP_UPDATE_STATS(grp) \
	void MLX5E_STATS_GRP_OP(grp, update_stats)(struct mlx5e_priv *priv)

#define MLX5E_DECLARE_STATS_GRP_OP_FILL_STRS(grp) \
	int MLX5E_STATS_GRP_OP(grp, fill_strings)(struct mlx5e_priv *priv, u8 *data, int idx)

#define MLX5E_DECLARE_STATS_GRP_OP_FILL_STATS(grp) \
	int MLX5E_STATS_GRP_OP(grp, fill_stats)(struct mlx5e_priv *priv, u64 *data, int idx)

#define MLX5E_STATS_GRP(grp) mlx5e_stats_grp_ ## grp

#define MLX5E_DECLARE_STATS_GRP(grp) \
	const struct mlx5e_stats_grp MLX5E_STATS_GRP(grp)

#define MLX5E_DEFINE_STATS_GRP(grp, mask) \
MLX5E_DECLARE_STATS_GRP(grp) = { \
	.get_num_stats = MLX5E_STATS_GRP_OP(grp, num_stats), \
	.fill_stats = MLX5E_STATS_GRP_OP(grp, fill_stats), \
	.fill_strings = MLX5E_STATS_GRP_OP(grp, fill_strings), \
	.update_stats = MLX5E_STATS_GRP_OP(grp, update_stats), \
	.update_stats_mask = mask, \
}
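
/*
 * Sketch of how a stats group is typically instantiated in a .c file.
 * The "foo" group and foo_stats_desc table are illustrative, not real
 * identifiers from this driver:
 *
 *	static MLX5E_DECLARE_STATS_GRP_OP_NUM_STATS(foo)
 *	{
 *		return ARRAY_SIZE(foo_stats_desc);
 *	}
 *
 *	static MLX5E_DECLARE_STATS_GRP_OP_FILL_STRS(foo) { ... }
 *	static MLX5E_DECLARE_STATS_GRP_OP_FILL_STATS(foo) { ... }
 *	static MLX5E_DECLARE_STATS_GRP_OP_UPDATE_STATS(foo) { ... }
 *
 *	MLX5E_DEFINE_STATS_GRP(foo, MLX5E_NDO_UPDATE_STATS);
 *
 * Passing 0 instead of MLX5E_NDO_UPDATE_STATS keeps the group out of
 * the ndo_get_stats64 refresh path.
 */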

unsigned int mlx5e_stats_total_num(struct mlx5e_priv *priv);
void mlx5e_stats_update(struct mlx5e_priv *priv);
void mlx5e_stats_fill(struct mlx5e_priv *priv, u64 *data, int idx);
void mlx5e_stats_fill_strings(struct mlx5e_priv *priv, u8 *data);
void mlx5e_stats_update_ndo_stats(struct mlx5e_priv *priv);
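
/*
 * These helpers back the ethtool statistics callbacks: .get_sset_count
 * maps to mlx5e_stats_total_num(), .get_strings to
 * mlx5e_stats_fill_strings(), and .get_ethtool_stats to
 * mlx5e_stats_update() followed by mlx5e_stats_fill().
 * mlx5e_stats_update_ndo_stats() refreshes only the groups whose
 * update_stats_mask contains MLX5E_NDO_UPDATE_STATS, for the
 * ndo_get_stats64 path.
 */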

struct mlx5e_sw_stats {
	u64 rx_packets;
	u64 rx_bytes;
	u64 tx_packets;
	u64 tx_bytes;
	u64 tx_tso_packets;
	u64 tx_tso_bytes;
	u64 tx_tso_inner_packets;
	u64 tx_tso_inner_bytes;
	u64 tx_added_vlan_packets;
	u64 tx_nop;
	u64 rx_lro_packets;
	u64 rx_lro_bytes;
	u64 rx_mcast_packets;
	u64 rx_ecn_mark;
	u64 rx_removed_vlan_packets;
	u64 rx_csum_unnecessary;
	u64 rx_csum_none;
	u64 rx_csum_complete;
	u64 rx_csum_complete_tail;
	u64 rx_csum_complete_tail_slow;
	u64 rx_csum_unnecessary_inner;
	u64 rx_xdp_drop;
	u64 rx_xdp_redirect;
	u64 rx_xdp_tx_xmit;
	u64 rx_xdp_tx_mpwqe;
	u64 rx_xdp_tx_inlnw;
	u64 rx_xdp_tx_nops;
	u64 rx_xdp_tx_full;
	u64 rx_xdp_tx_err;
	u64 rx_xdp_tx_cqe;
	u64 tx_csum_none;
	u64 tx_csum_partial;
	u64 tx_csum_partial_inner;
	u64 tx_queue_stopped;
	u64 tx_queue_dropped;
	u64 tx_xmit_more;
	u64 tx_recover;
	u64 tx_cqes;
	u64 tx_queue_wake;
	u64 tx_cqe_err;
	u64 tx_xdp_xmit;
	u64 tx_xdp_mpwqe;
	u64 tx_xdp_inlnw;
	u64 tx_xdp_nops;
	u64 tx_xdp_full;
	u64 tx_xdp_err;
	u64 tx_xdp_cqes;
	u64 rx_wqe_err;
	u64 rx_mpwqe_filler_cqes;
	u64 rx_mpwqe_filler_strides;
	u64 rx_oversize_pkts_sw_drop;
	u64 rx_buff_alloc_err;
	u64 rx_cqe_compress_blks;
	u64 rx_cqe_compress_pkts;
	u64 rx_cache_reuse;
	u64 rx_cache_full;
	u64 rx_cache_empty;
	u64 rx_cache_busy;
	u64 rx_cache_waive;
	u64 rx_congst_umr;
	u64 rx_arfs_err;
	u64 rx_recover;
	u64 ch_events;
	u64 ch_poll;
	u64 ch_arm;
	u64 ch_aff_change;
	u64 ch_force_irq;
	u64 ch_eq_rearm;

#ifdef CONFIG_MLX5_EN_TLS
	u64 tx_tls_encrypted_packets;
	u64 tx_tls_encrypted_bytes;
	u64 tx_tls_ctx;
	u64 tx_tls_ooo;
	u64 tx_tls_dump_packets;
	u64 tx_tls_dump_bytes;
	u64 tx_tls_resync_bytes;
	u64 tx_tls_skip_no_sync_data;
	u64 tx_tls_drop_no_sync_data;
	u64 tx_tls_drop_bypass_req;

	u64 rx_tls_decrypted_packets;
	u64 rx_tls_decrypted_bytes;
	u64 rx_tls_ctx;
	u64 rx_tls_del;
	u64 rx_tls_resync_req_pkt;
	u64 rx_tls_resync_req_start;
	u64 rx_tls_resync_req_end;
	u64 rx_tls_resync_req_skip;
	u64 rx_tls_resync_res_ok;
	u64 rx_tls_resync_res_skip;
	u64 rx_tls_err;
#endif

	u64 rx_xsk_packets;
	u64 rx_xsk_bytes;
	u64 rx_xsk_csum_complete;
	u64 rx_xsk_csum_unnecessary;
	u64 rx_xsk_csum_unnecessary_inner;
	u64 rx_xsk_csum_none;
	u64 rx_xsk_ecn_mark;
	u64 rx_xsk_removed_vlan_packets;
	u64 rx_xsk_xdp_drop;
	u64 rx_xsk_xdp_redirect;
	u64 rx_xsk_wqe_err;
	u64 rx_xsk_mpwqe_filler_cqes;
	u64 rx_xsk_mpwqe_filler_strides;
	u64 rx_xsk_oversize_pkts_sw_drop;
	u64 rx_xsk_buff_alloc_err;
	u64 rx_xsk_cqe_compress_blks;
	u64 rx_xsk_cqe_compress_pkts;
	u64 rx_xsk_congst_umr;
	u64 rx_xsk_arfs_err;
	u64 tx_xsk_xmit;
	u64 tx_xsk_mpwqe;
	u64 tx_xsk_inlnw;
	u64 tx_xsk_full;
	u64 tx_xsk_err;
	u64 tx_xsk_cqes;
};
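
/*
 * The fields above are aggregates: the sw group's update_stats op folds
 * the per-ring structures below (mlx5e_rq_stats, mlx5e_sq_stats,
 * mlx5e_xdpsq_stats, mlx5e_ch_stats) into this single struct, along the
 * lines of (simplified sketch, not the verbatim loop from en_stats.c):
 *
 *	for (i = 0; i < priv->max_nch; i++) {
 *		struct mlx5e_channel_stats *cs = &priv->channel_stats[i];
 *
 *		s->rx_packets += cs->rq.packets;
 *		s->rx_bytes   += cs->rq.bytes;
 *	}
 */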

struct mlx5e_qcounter_stats {
	u32 rx_out_of_buffer;
	u32 rx_if_down_packets;
};

struct mlx5e_vnic_env_stats {
	__be64 query_vnic_env_out[MLX5_ST_SZ_QW(query_vnic_env_out)];
};

#define VPORT_COUNTER_GET(vstats, c) MLX5_GET64(query_vport_counter_out, \
						(vstats)->query_vport_out, c)

struct mlx5e_vport_stats {
	__be64 query_vport_out[MLX5_ST_SZ_QW(query_vport_counter_out)];
};
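
/*
 * Example (illustrative): pulling a 64-bit counter out of the cached
 * QUERY_VPORT_COUNTER output by its ifc field name:
 *
 *	u64 ucast_pkts = VPORT_COUNTER_GET(&priv->stats.vport,
 *					   received_eth_unicast.packets);
 */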

#define PPORT_802_3_GET(pstats, c) \
	MLX5_GET64(ppcnt_reg, (pstats)->IEEE_802_3_counters, \
		   counter_set.eth_802_3_cntrs_grp_data_layout.c##_high)
#define PPORT_2863_GET(pstats, c) \
	MLX5_GET64(ppcnt_reg, (pstats)->RFC_2863_counters, \
		   counter_set.eth_2863_cntrs_grp_data_layout.c##_high)
#define PPORT_2819_GET(pstats, c) \
	MLX5_GET64(ppcnt_reg, (pstats)->RFC_2819_counters, \
		   counter_set.eth_2819_cntrs_grp_data_layout.c##_high)
#define PPORT_PHY_STATISTICAL_GET(pstats, c) \
	MLX5_GET64(ppcnt_reg, (pstats)->phy_statistical_counters, \
		   counter_set.phys_layer_statistical_cntrs.c##_high)
#define PPORT_PER_PRIO_GET(pstats, prio, c) \
	MLX5_GET64(ppcnt_reg, (pstats)->per_prio_counters[prio], \
		   counter_set.eth_per_prio_grp_data_layout.c##_high)
#define NUM_PPORT_PRIO 8
#define PPORT_ETH_EXT_GET(pstats, c) \
	MLX5_GET64(ppcnt_reg, (pstats)->eth_ext_counters, \
		   counter_set.eth_extended_cntrs_grp_data_layout.c##_high)
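
/*
 * Example (illustrative): reading the IEEE 802.3 "frames transmitted OK"
 * counter from the cached PPCNT register dump:
 *
 *	u64 tx_ok = PPORT_802_3_GET(&priv->stats.pport,
 *				    a_frames_transmitted_ok);
 */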

struct mlx5e_pport_stats {
	__be64 IEEE_802_3_counters[MLX5_ST_SZ_QW(ppcnt_reg)];
	__be64 RFC_2863_counters[MLX5_ST_SZ_QW(ppcnt_reg)];
	__be64 RFC_2819_counters[MLX5_ST_SZ_QW(ppcnt_reg)];
	__be64 per_prio_counters[NUM_PPORT_PRIO][MLX5_ST_SZ_QW(ppcnt_reg)];
	__be64 phy_counters[MLX5_ST_SZ_QW(ppcnt_reg)];
	__be64 phy_statistical_counters[MLX5_ST_SZ_QW(ppcnt_reg)];
	__be64 eth_ext_counters[MLX5_ST_SZ_QW(ppcnt_reg)];
	__be64 per_tc_prio_counters[NUM_PPORT_PRIO][MLX5_ST_SZ_QW(ppcnt_reg)];
	__be64 per_tc_congest_prio_counters[NUM_PPORT_PRIO][MLX5_ST_SZ_QW(ppcnt_reg)];
};

#define PCIE_PERF_GET(pcie_stats, c) \
	MLX5_GET(mpcnt_reg, (pcie_stats)->pcie_perf_counters, \
		 counter_set.pcie_perf_cntrs_grp_data_layout.c)

#define PCIE_PERF_GET64(pcie_stats, c) \
	MLX5_GET64(mpcnt_reg, (pcie_stats)->pcie_perf_counters, \
		   counter_set.pcie_perf_cntrs_grp_data_layout.c##_high)

struct mlx5e_pcie_stats {
	__be64 pcie_perf_counters[MLX5_ST_SZ_QW(mpcnt_reg)];
};
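
/*
 * Example (illustrative): 32-bit PCIe performance counters are read
 * directly, while 64-bit ones go through the ifc *_high/_low pair:
 *
 *	u32 rx_err   = PCIE_PERF_GET(&priv->stats.pcie, rx_errors);
 *	u64 overflow = PCIE_PERF_GET64(&priv->stats.pcie,
 *				       tx_overflow_buffer_pkt);
 */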

struct mlx5e_rq_stats {
	u64 packets;
	u64 bytes;
	u64 csum_complete;
	u64 csum_complete_tail;
	u64 csum_complete_tail_slow;
	u64 csum_unnecessary;
	u64 csum_unnecessary_inner;
	u64 csum_none;
	u64 lro_packets;
	u64 lro_bytes;
	u64 mcast_packets;
	u64 ecn_mark;
	u64 removed_vlan_packets;
	u64 xdp_drop;
	u64 xdp_redirect;
	u64 wqe_err;
	u64 mpwqe_filler_cqes;
	u64 mpwqe_filler_strides;
	u64 oversize_pkts_sw_drop;
	u64 buff_alloc_err;
	u64 cqe_compress_blks;
	u64 cqe_compress_pkts;
	u64 cache_reuse;
	u64 cache_full;
	u64 cache_empty;
	u64 cache_busy;
	u64 cache_waive;
	u64 congst_umr;
	u64 arfs_err;
	u64 recover;
#ifdef CONFIG_MLX5_EN_TLS
	u64 tls_decrypted_packets;
	u64 tls_decrypted_bytes;
	u64 tls_ctx;
	u64 tls_del;
	u64 tls_resync_req_pkt;
	u64 tls_resync_req_start;
	u64 tls_resync_req_end;
	u64 tls_resync_req_skip;
	u64 tls_resync_res_ok;
	u64 tls_resync_res_skip;
	u64 tls_err;
#endif
};

struct mlx5e_sq_stats {
	/* commonly accessed in data path */
	u64 packets;
	u64 bytes;
	u64 xmit_more;
	u64 tso_packets;
	u64 tso_bytes;
	u64 tso_inner_packets;
	u64 tso_inner_bytes;
	u64 csum_partial;
	u64 csum_partial_inner;
	u64 added_vlan_packets;
	u64 nop;
#ifdef CONFIG_MLX5_EN_TLS
	u64 tls_encrypted_packets;
	u64 tls_encrypted_bytes;
	u64 tls_ctx;
	u64 tls_ooo;
	u64 tls_dump_packets;
	u64 tls_dump_bytes;
	u64 tls_resync_bytes;
	u64 tls_skip_no_sync_data;
	u64 tls_drop_no_sync_data;
	u64 tls_drop_bypass_req;
#endif
	/* less likely accessed in data path */
	u64 csum_none;
	u64 stopped;
	u64 dropped;
	u64 recover;
	/* dirtied @completion */
	u64 cqes ____cacheline_aligned_in_smp;
	u64 wake;
	u64 cqe_err;
};

struct mlx5e_xdpsq_stats {
	u64 xmit;
	u64 mpwqe;
	u64 inlnw;
	u64 nops;
	u64 full;
	u64 err;
	/* dirtied @completion */
	u64 cqes ____cacheline_aligned_in_smp;
};

struct mlx5e_ch_stats {
	u64 events;
	u64 poll;
	u64 arm;
	u64 aff_change;
	u64 force_irq;
	u64 eq_rearm;
};

struct mlx5e_stats {
	struct mlx5e_sw_stats sw;
	struct mlx5e_qcounter_stats qcnt;
	struct mlx5e_vnic_env_stats vnic;
	struct mlx5e_vport_stats vport;
	struct mlx5e_pport_stats pport;
	struct rtnl_link_stats64 vf_vport;
	struct mlx5e_pcie_stats pcie;
};

extern mlx5e_stats_grp_t mlx5e_nic_stats_grps[];
unsigned int mlx5e_nic_stats_grps_num(struct mlx5e_priv *priv);
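
/*
 * Sketch of how the generic helpers above walk a group table such as
 * mlx5e_nic_stats_grps (simplified; the real loops in en_stats.c go
 * through the profile's group table):
 *
 *	int i, num = mlx5e_nic_stats_grps_num(priv);
 *
 *	for (i = 0; i < num; i++)
 *		idx = mlx5e_nic_stats_grps[i]->fill_stats(priv, data, idx);
 */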

extern MLX5E_DECLARE_STATS_GRP(sw);
extern MLX5E_DECLARE_STATS_GRP(qcnt);
extern MLX5E_DECLARE_STATS_GRP(vnic_env);
extern MLX5E_DECLARE_STATS_GRP(vport);
extern MLX5E_DECLARE_STATS_GRP(802_3);
extern MLX5E_DECLARE_STATS_GRP(2863);
extern MLX5E_DECLARE_STATS_GRP(2819);
extern MLX5E_DECLARE_STATS_GRP(phy);
extern MLX5E_DECLARE_STATS_GRP(eth_ext);
extern MLX5E_DECLARE_STATS_GRP(pcie);
extern MLX5E_DECLARE_STATS_GRP(per_prio);
extern MLX5E_DECLARE_STATS_GRP(pme);
extern MLX5E_DECLARE_STATS_GRP(channels);
extern MLX5E_DECLARE_STATS_GRP(per_port_buff_congest);
extern MLX5E_DECLARE_STATS_GRP(ipsec_hw);
extern MLX5E_DECLARE_STATS_GRP(ipsec_sw);

#endif /* __MLX5_EN_STATS_H__ */