#ifndef __MLX5_EN_STATS_H__
#define __MLX5_EN_STATS_H__

#define MLX5E_READ_CTR64_CPU(ptr, dsc, i) \
	(*(u64 *)((char *)ptr + dsc[i].offset))
#define MLX5E_READ_CTR64_BE(ptr, dsc, i) \
	be64_to_cpu(*(__be64 *)((char *)ptr + dsc[i].offset))
#define MLX5E_READ_CTR32_CPU(ptr, dsc, i) \
	(*(u32 *)((char *)ptr + dsc[i].offset))
#define MLX5E_READ_CTR32_BE(ptr, dsc, i) \
	be32_to_cpu(*(__be32 *)((char *)ptr + dsc[i].offset))

#define MLX5E_DECLARE_STAT(type, fld) #fld, offsetof(type, fld)
#define MLX5E_DECLARE_RX_STAT(type, fld) "rx%d_"#fld, offsetof(type, fld)
#define MLX5E_DECLARE_TX_STAT(type, fld) "tx%d_"#fld, offsetof(type, fld)
#define MLX5E_DECLARE_XDPSQ_STAT(type, fld) "tx%d_xdp_"#fld, offsetof(type, fld)
#define MLX5E_DECLARE_RQ_XDPSQ_STAT(type, fld) "rx%d_xdp_tx_"#fld, offsetof(type, fld)
#define MLX5E_DECLARE_CH_STAT(type, fld) "ch%d_"#fld, offsetof(type, fld)

struct counter_desc {
	char		format[ETH_GSTRING_LEN];
	size_t		offset; /* Byte offset */
};
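
/*
 * Illustrative sketch (not part of this header; table and field names are
 * assumptions about the matching .c file): a counter_desc table built with
 * the DECLARE macros is read back through the READ_CTR accessors above:
 *
 *	static const struct counter_desc sw_stats_desc[] = {
 *		{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_packets) },
 *		{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_bytes) },
 *	};
 *
 *	u64 val = MLX5E_READ_CTR64_CPU(&priv->stats.sw, sw_stats_desc, 0);
 */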

struct mlx5e_sw_stats {
	u64 rx_packets;
	u64 rx_bytes;
	u64 tx_packets;
	u64 tx_bytes;
	u64 tx_tso_packets;
	u64 tx_tso_bytes;
	u64 tx_tso_inner_packets;
	u64 tx_tso_inner_bytes;
	u64 tx_added_vlan_packets;
	u64 tx_nop;
	u64 rx_lro_packets;
	u64 rx_lro_bytes;
	u64 rx_ecn_mark;
	u64 rx_removed_vlan_packets;
	u64 rx_csum_unnecessary;
	u64 rx_csum_none;
	u64 rx_csum_complete;
	u64 rx_csum_unnecessary_inner;
	u64 rx_xdp_drop;
	u64 rx_xdp_redirect;
	u64 rx_xdp_tx_xmit;
	u64 rx_xdp_tx_full;
	u64 rx_xdp_tx_err;
	u64 rx_xdp_tx_cqe;
	u64 tx_csum_none;
	u64 tx_csum_partial;
	u64 tx_csum_partial_inner;
	u64 tx_queue_stopped;
	u64 tx_queue_dropped;
	u64 tx_xmit_more;
	u64 tx_recover;
	u64 tx_cqes;
	u64 tx_queue_wake;
	u64 tx_cqe_err;
	u64 tx_xdp_xmit;
	u64 tx_xdp_full;
	u64 tx_xdp_err;
	u64 tx_xdp_cqes;
	u64 rx_wqe_err;
	u64 rx_mpwqe_filler_cqes;
	u64 rx_mpwqe_filler_strides;
	u64 rx_oversize_pkts_sw_drop;
	u64 rx_buff_alloc_err;
	u64 rx_cqe_compress_blks;
	u64 rx_cqe_compress_pkts;
	u64 rx_page_reuse;
	u64 rx_cache_reuse;
	u64 rx_cache_full;
	u64 rx_cache_empty;
	u64 rx_cache_busy;
	u64 rx_cache_waive;
	u64 rx_congst_umr;
	u64 rx_arfs_err;
	u64 ch_events;
	u64 ch_poll;
	u64 ch_arm;
	u64 ch_aff_change;
	u64 ch_eq_rearm;

#ifdef CONFIG_MLX5_EN_TLS
	u64 tx_tls_ooo;
	u64 tx_tls_resync_bytes;
#endif
};
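
/*
 * Illustrative sketch (not part of this header; names assumed): the "%d"
 * embedded in the per-ring DECLARE macros is resolved against the ring index
 * when the ethtool strings are emitted:
 *
 *	sprintf(data + (idx++) * ETH_GSTRING_LEN,
 *		rq_stats_desc[j].format, channel_ix);
 *
 * so "rx%d_packets" becomes "rx0_packets", "rx1_packets", and so on.
 */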

struct mlx5e_qcounter_stats {
	u32 rx_out_of_buffer;
	u32 rx_if_down_packets;
};

struct mlx5e_vnic_env_stats {
	__be64 query_vnic_env_out[MLX5_ST_SZ_QW(query_vnic_env_out)];
};

#define VPORT_COUNTER_GET(vstats, c) MLX5_GET64(query_vport_counter_out, \
						(vstats)->query_vport_out, c)

struct mlx5e_vport_stats {
	__be64 query_vport_out[MLX5_ST_SZ_QW(query_vport_counter_out)];
};
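
/*
 * Illustrative sketch (not part of this header): query_vport_out holds the
 * raw mailbox output of a QUERY_VPORT_COUNTER command, and individual fields
 * are then pulled out by name, e.g.:
 *
 *	u64 bcast_pkts =
 *		VPORT_COUNTER_GET(vstats, received_eth_broadcast.packets);
 */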

#define PPORT_802_3_GET(pstats, c) \
	MLX5_GET64(ppcnt_reg, (pstats)->IEEE_802_3_counters, \
		   counter_set.eth_802_3_cntrs_grp_data_layout.c##_high)
#define PPORT_2863_GET(pstats, c) \
	MLX5_GET64(ppcnt_reg, (pstats)->RFC_2863_counters, \
		   counter_set.eth_2863_cntrs_grp_data_layout.c##_high)
#define PPORT_2819_GET(pstats, c) \
	MLX5_GET64(ppcnt_reg, (pstats)->RFC_2819_counters, \
		   counter_set.eth_2819_cntrs_grp_data_layout.c##_high)
#define PPORT_PHY_STATISTICAL_GET(pstats, c) \
	MLX5_GET64(ppcnt_reg, (pstats)->phy_statistical_counters, \
		   counter_set.phys_layer_statistical_cntrs.c##_high)
#define PPORT_PER_PRIO_GET(pstats, prio, c) \
	MLX5_GET64(ppcnt_reg, (pstats)->per_prio_counters[prio], \
		   counter_set.eth_per_prio_grp_data_layout.c##_high)
#define NUM_PPORT_PRIO	8
#define PPORT_ETH_EXT_GET(pstats, c) \
	MLX5_GET64(ppcnt_reg, (pstats)->eth_ext_counters, \
		   counter_set.eth_extended_cntrs_grp_data_layout.c##_high)

struct mlx5e_pport_stats {
	__be64 IEEE_802_3_counters[MLX5_ST_SZ_QW(ppcnt_reg)];
	__be64 RFC_2863_counters[MLX5_ST_SZ_QW(ppcnt_reg)];
	__be64 RFC_2819_counters[MLX5_ST_SZ_QW(ppcnt_reg)];
	__be64 per_prio_counters[NUM_PPORT_PRIO][MLX5_ST_SZ_QW(ppcnt_reg)];
	__be64 phy_counters[MLX5_ST_SZ_QW(ppcnt_reg)];
	__be64 phy_statistical_counters[MLX5_ST_SZ_QW(ppcnt_reg)];
	__be64 eth_ext_counters[MLX5_ST_SZ_QW(ppcnt_reg)];
};
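
/*
 * Illustrative sketch (not part of this header): each PPCNT counter group is
 * read through its own accessor; the trailing ##_high relies on the firmware
 * layout splitting 64-bit counters into _high/_low halves, e.g.:
 *
 *	u64 tx_frames = PPORT_802_3_GET(pstats, a_frames_transmitted_ok);
 */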

#define PCIE_PERF_GET(pcie_stats, c) \
	MLX5_GET(mpcnt_reg, (pcie_stats)->pcie_perf_counters, \
		 counter_set.pcie_perf_cntrs_grp_data_layout.c)

#define PCIE_PERF_GET64(pcie_stats, c) \
	MLX5_GET64(mpcnt_reg, (pcie_stats)->pcie_perf_counters, \
		   counter_set.pcie_perf_cntrs_grp_data_layout.c##_high)

struct mlx5e_pcie_stats {
	__be64 pcie_perf_counters[MLX5_ST_SZ_QW(mpcnt_reg)];
};
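
/*
 * Illustrative sketch (not part of this header): 32-bit and 64-bit PCIe
 * performance fields use different accessors, e.g.:
 *
 *	u32 rx_err  = PCIE_PERF_GET(pcie_stats, rx_errors);
 *	u64 of_pkts = PCIE_PERF_GET64(pcie_stats, tx_overflow_buffer_pkt);
 */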

struct mlx5e_rq_stats {
	u64 packets;
	u64 bytes;
	u64 csum_complete;
	u64 csum_unnecessary;
	u64 csum_unnecessary_inner;
	u64 csum_none;
	u64 lro_packets;
	u64 lro_bytes;
	u64 ecn_mark;
	u64 removed_vlan_packets;
	u64 xdp_drop;
	u64 xdp_redirect;
	u64 wqe_err;
	u64 mpwqe_filler_cqes;
	u64 mpwqe_filler_strides;
	u64 oversize_pkts_sw_drop;
	u64 buff_alloc_err;
	u64 cqe_compress_blks;
	u64 cqe_compress_pkts;
	u64 page_reuse;
	u64 cache_reuse;
	u64 cache_full;
	u64 cache_empty;
	u64 cache_busy;
	u64 cache_waive;
	u64 congst_umr;
	u64 arfs_err;
};

struct mlx5e_sq_stats {
	/* commonly accessed in data path */
	u64 packets;
	u64 bytes;
	u64 xmit_more;
	u64 tso_packets;
	u64 tso_bytes;
	u64 tso_inner_packets;
	u64 tso_inner_bytes;
	u64 csum_partial;
	u64 csum_partial_inner;
	u64 added_vlan_packets;
	u64 nop;
#ifdef CONFIG_MLX5_EN_TLS
	u64 tls_ooo;
	u64 tls_resync_bytes;
#endif
	/* less likely accessed in data path */
	u64 csum_none;
	u64 stopped;
	u64 dropped;
	u64 recover;
	/* dirtied @completion */
	u64 cqes ____cacheline_aligned_in_smp;
	u64 wake;
	u64 cqe_err;
};

struct mlx5e_xdpsq_stats {
	u64 xmit;
	u64 full;
	u64 err;
	/* dirtied @completion */
	u64 cqes ____cacheline_aligned_in_smp;
};
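
/*
 * Layout note: ____cacheline_aligned_in_smp starts the completion-dirtied
 * counters on their own cacheline, so the transmit path and the completion
 * path, which may run on different CPUs, do not false-share a line.
 */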

struct mlx5e_ch_stats {
	u64 events;
	u64 poll;
	u64 arm;
	u64 aff_change;
	u64 eq_rearm;
};

struct mlx5e_stats {
	struct mlx5e_sw_stats sw;
	struct mlx5e_qcounter_stats qcnt;
	struct mlx5e_vnic_env_stats vnic;
	struct mlx5e_vport_stats vport;
	struct mlx5e_pport_stats pport;
	struct rtnl_link_stats64 vf_vport;
	struct mlx5e_pcie_stats pcie;
};

/* Groups with this bit set in update_stats_mask are refreshed on the
 * ndo_get_stats64 path.
 */
enum {
	MLX5E_NDO_UPDATE_STATS = BIT(0x1),
};

struct mlx5e_priv;
struct mlx5e_stats_grp {
	u16 update_stats_mask;
	int (*get_num_stats)(struct mlx5e_priv *priv);
	int (*fill_strings)(struct mlx5e_priv *priv, u8 *data, int idx);
	int (*fill_stats)(struct mlx5e_priv *priv, u64 *data, int idx);
	void (*update_stats)(struct mlx5e_priv *priv);
};
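
/*
 * Illustrative sketch (not part of this header; handler names assumed): the
 * matching .c file registers one mlx5e_stats_grp per counter family, e.g.:
 *
 *	const struct mlx5e_stats_grp mlx5e_stats_grps[] = {
 *		{
 *			.get_num_stats	   = mlx5e_grp_sw_get_num_stats,
 *			.fill_strings	   = mlx5e_grp_sw_fill_strings,
 *			.fill_stats	   = mlx5e_grp_sw_fill_stats,
 *			.update_stats_mask = MLX5E_NDO_UPDATE_STATS,
 *			.update_stats	   = mlx5e_grp_sw_update_stats,
 *		},
 *	};
 *	const int mlx5e_num_stats_grps = ARRAY_SIZE(mlx5e_stats_grps);
 */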

extern const struct mlx5e_stats_grp mlx5e_stats_grps[];
extern const int mlx5e_num_stats_grps;

void mlx5e_grp_sw_update_stats(struct mlx5e_priv *priv);

#endif /* __MLX5_EN_STATS_H__ */