1
2
3
4
5#include <rte_ethdev.h>
6#include <rte_io.h>
7#include <rte_malloc.h>
8
9#include "hns3_ethdev.h"
10#include "hns3_rxtx.h"
11#include "hns3_logs.h"
12#include "hns3_regs.h"
13
14
/* Per-Rx-queue basic xstats: name -> byte offset into hns3_rx_basic_stats. */
static const struct hns3_xstats_name_offset hns3_rxq_basic_stats_strings[] = {
	{"packets",
		HNS3_RXQ_BASIC_STATS_FIELD_OFFSET(packets)},
	{"bytes",
		HNS3_RXQ_BASIC_STATS_FIELD_OFFSET(bytes)},
	{"errors",
		HNS3_RXQ_BASIC_STATS_FIELD_OFFSET(errors)}
};
23
24
/* Per-Tx-queue basic xstats: name -> byte offset into hns3_tx_basic_stats. */
static const struct hns3_xstats_name_offset hns3_txq_basic_stats_strings[] = {
	{"packets",
		HNS3_TXQ_BASIC_STATS_FIELD_OFFSET(packets)},
	{"bytes",
		HNS3_TXQ_BASIC_STATS_FIELD_OFFSET(bytes)}
};
31
32
/*
 * MAC-level xstats (PF only): exported name -> byte offset into
 * struct hns3_mac_stats. Note that a few exported names differ slightly
 * from the field names (e.g. "control" vs "ctrl", "pfc_pkt_num" vs
 * "pfc_pause_pkt_num", "1519_max_good_pkt_num" vs
 * "1519_max_good_oct_pkt_num") — the mapping below is intentional.
 */
static const struct hns3_xstats_name_offset hns3_mac_strings[] = {
	{"mac_tx_mac_pause_num",
		HNS3_MAC_STATS_OFFSET(mac_tx_mac_pause_num)},
	{"mac_rx_mac_pause_num",
		HNS3_MAC_STATS_OFFSET(mac_rx_mac_pause_num)},
	{"mac_tx_control_pkt_num",
		HNS3_MAC_STATS_OFFSET(mac_tx_ctrl_pkt_num)},
	{"mac_rx_control_pkt_num",
		HNS3_MAC_STATS_OFFSET(mac_rx_ctrl_pkt_num)},
	{"mac_tx_pfc_pkt_num",
		HNS3_MAC_STATS_OFFSET(mac_tx_pfc_pause_pkt_num)},
	{"mac_tx_pfc_pri0_pkt_num",
		HNS3_MAC_STATS_OFFSET(mac_tx_pfc_pri0_pkt_num)},
	{"mac_tx_pfc_pri1_pkt_num",
		HNS3_MAC_STATS_OFFSET(mac_tx_pfc_pri1_pkt_num)},
	{"mac_tx_pfc_pri2_pkt_num",
		HNS3_MAC_STATS_OFFSET(mac_tx_pfc_pri2_pkt_num)},
	{"mac_tx_pfc_pri3_pkt_num",
		HNS3_MAC_STATS_OFFSET(mac_tx_pfc_pri3_pkt_num)},
	{"mac_tx_pfc_pri4_pkt_num",
		HNS3_MAC_STATS_OFFSET(mac_tx_pfc_pri4_pkt_num)},
	{"mac_tx_pfc_pri5_pkt_num",
		HNS3_MAC_STATS_OFFSET(mac_tx_pfc_pri5_pkt_num)},
	{"mac_tx_pfc_pri6_pkt_num",
		HNS3_MAC_STATS_OFFSET(mac_tx_pfc_pri6_pkt_num)},
	{"mac_tx_pfc_pri7_pkt_num",
		HNS3_MAC_STATS_OFFSET(mac_tx_pfc_pri7_pkt_num)},
	{"mac_rx_pfc_pkt_num",
		HNS3_MAC_STATS_OFFSET(mac_rx_pfc_pause_pkt_num)},
	{"mac_rx_pfc_pri0_pkt_num",
		HNS3_MAC_STATS_OFFSET(mac_rx_pfc_pri0_pkt_num)},
	{"mac_rx_pfc_pri1_pkt_num",
		HNS3_MAC_STATS_OFFSET(mac_rx_pfc_pri1_pkt_num)},
	{"mac_rx_pfc_pri2_pkt_num",
		HNS3_MAC_STATS_OFFSET(mac_rx_pfc_pri2_pkt_num)},
	{"mac_rx_pfc_pri3_pkt_num",
		HNS3_MAC_STATS_OFFSET(mac_rx_pfc_pri3_pkt_num)},
	{"mac_rx_pfc_pri4_pkt_num",
		HNS3_MAC_STATS_OFFSET(mac_rx_pfc_pri4_pkt_num)},
	{"mac_rx_pfc_pri5_pkt_num",
		HNS3_MAC_STATS_OFFSET(mac_rx_pfc_pri5_pkt_num)},
	{"mac_rx_pfc_pri6_pkt_num",
		HNS3_MAC_STATS_OFFSET(mac_rx_pfc_pri6_pkt_num)},
	{"mac_rx_pfc_pri7_pkt_num",
		HNS3_MAC_STATS_OFFSET(mac_rx_pfc_pri7_pkt_num)},
	{"mac_tx_total_pkt_num",
		HNS3_MAC_STATS_OFFSET(mac_tx_total_pkt_num)},
	{"mac_tx_total_oct_num",
		HNS3_MAC_STATS_OFFSET(mac_tx_total_oct_num)},
	{"mac_tx_good_pkt_num",
		HNS3_MAC_STATS_OFFSET(mac_tx_good_pkt_num)},
	{"mac_tx_bad_pkt_num",
		HNS3_MAC_STATS_OFFSET(mac_tx_bad_pkt_num)},
	{"mac_tx_good_oct_num",
		HNS3_MAC_STATS_OFFSET(mac_tx_good_oct_num)},
	{"mac_tx_bad_oct_num",
		HNS3_MAC_STATS_OFFSET(mac_tx_bad_oct_num)},
	{"mac_tx_uni_pkt_num",
		HNS3_MAC_STATS_OFFSET(mac_tx_uni_pkt_num)},
	{"mac_tx_multi_pkt_num",
		HNS3_MAC_STATS_OFFSET(mac_tx_multi_pkt_num)},
	{"mac_tx_broad_pkt_num",
		HNS3_MAC_STATS_OFFSET(mac_tx_broad_pkt_num)},
	{"mac_tx_undersize_pkt_num",
		HNS3_MAC_STATS_OFFSET(mac_tx_undersize_pkt_num)},
	{"mac_tx_oversize_pkt_num",
		HNS3_MAC_STATS_OFFSET(mac_tx_oversize_pkt_num)},
	{"mac_tx_64_oct_pkt_num",
		HNS3_MAC_STATS_OFFSET(mac_tx_64_oct_pkt_num)},
	{"mac_tx_65_127_oct_pkt_num",
		HNS3_MAC_STATS_OFFSET(mac_tx_65_127_oct_pkt_num)},
	{"mac_tx_128_255_oct_pkt_num",
		HNS3_MAC_STATS_OFFSET(mac_tx_128_255_oct_pkt_num)},
	{"mac_tx_256_511_oct_pkt_num",
		HNS3_MAC_STATS_OFFSET(mac_tx_256_511_oct_pkt_num)},
	{"mac_tx_512_1023_oct_pkt_num",
		HNS3_MAC_STATS_OFFSET(mac_tx_512_1023_oct_pkt_num)},
	{"mac_tx_1024_1518_oct_pkt_num",
		HNS3_MAC_STATS_OFFSET(mac_tx_1024_1518_oct_pkt_num)},
	{"mac_tx_1519_2047_oct_pkt_num",
		HNS3_MAC_STATS_OFFSET(mac_tx_1519_2047_oct_pkt_num)},
	{"mac_tx_2048_4095_oct_pkt_num",
		HNS3_MAC_STATS_OFFSET(mac_tx_2048_4095_oct_pkt_num)},
	{"mac_tx_4096_8191_oct_pkt_num",
		HNS3_MAC_STATS_OFFSET(mac_tx_4096_8191_oct_pkt_num)},
	{"mac_tx_8192_9216_oct_pkt_num",
		HNS3_MAC_STATS_OFFSET(mac_tx_8192_9216_oct_pkt_num)},
	{"mac_tx_9217_12287_oct_pkt_num",
		HNS3_MAC_STATS_OFFSET(mac_tx_9217_12287_oct_pkt_num)},
	{"mac_tx_12288_16383_oct_pkt_num",
		HNS3_MAC_STATS_OFFSET(mac_tx_12288_16383_oct_pkt_num)},
	{"mac_tx_1519_max_good_pkt_num",
		HNS3_MAC_STATS_OFFSET(mac_tx_1519_max_good_oct_pkt_num)},
	{"mac_tx_1519_max_bad_pkt_num",
		HNS3_MAC_STATS_OFFSET(mac_tx_1519_max_bad_oct_pkt_num)},
	{"mac_rx_total_pkt_num",
		HNS3_MAC_STATS_OFFSET(mac_rx_total_pkt_num)},
	{"mac_rx_total_oct_num",
		HNS3_MAC_STATS_OFFSET(mac_rx_total_oct_num)},
	{"mac_rx_good_pkt_num",
		HNS3_MAC_STATS_OFFSET(mac_rx_good_pkt_num)},
	{"mac_rx_bad_pkt_num",
		HNS3_MAC_STATS_OFFSET(mac_rx_bad_pkt_num)},
	{"mac_rx_good_oct_num",
		HNS3_MAC_STATS_OFFSET(mac_rx_good_oct_num)},
	{"mac_rx_bad_oct_num",
		HNS3_MAC_STATS_OFFSET(mac_rx_bad_oct_num)},
	{"mac_rx_uni_pkt_num",
		HNS3_MAC_STATS_OFFSET(mac_rx_uni_pkt_num)},
	{"mac_rx_multi_pkt_num",
		HNS3_MAC_STATS_OFFSET(mac_rx_multi_pkt_num)},
	{"mac_rx_broad_pkt_num",
		HNS3_MAC_STATS_OFFSET(mac_rx_broad_pkt_num)},
	{"mac_rx_undersize_pkt_num",
		HNS3_MAC_STATS_OFFSET(mac_rx_undersize_pkt_num)},
	{"mac_rx_oversize_pkt_num",
		HNS3_MAC_STATS_OFFSET(mac_rx_oversize_pkt_num)},
	{"mac_rx_64_oct_pkt_num",
		HNS3_MAC_STATS_OFFSET(mac_rx_64_oct_pkt_num)},
	{"mac_rx_65_127_oct_pkt_num",
		HNS3_MAC_STATS_OFFSET(mac_rx_65_127_oct_pkt_num)},
	{"mac_rx_128_255_oct_pkt_num",
		HNS3_MAC_STATS_OFFSET(mac_rx_128_255_oct_pkt_num)},
	{"mac_rx_256_511_oct_pkt_num",
		HNS3_MAC_STATS_OFFSET(mac_rx_256_511_oct_pkt_num)},
	{"mac_rx_512_1023_oct_pkt_num",
		HNS3_MAC_STATS_OFFSET(mac_rx_512_1023_oct_pkt_num)},
	{"mac_rx_1024_1518_oct_pkt_num",
		HNS3_MAC_STATS_OFFSET(mac_rx_1024_1518_oct_pkt_num)},
	{"mac_rx_1519_2047_oct_pkt_num",
		HNS3_MAC_STATS_OFFSET(mac_rx_1519_2047_oct_pkt_num)},
	{"mac_rx_2048_4095_oct_pkt_num",
		HNS3_MAC_STATS_OFFSET(mac_rx_2048_4095_oct_pkt_num)},
	{"mac_rx_4096_8191_oct_pkt_num",
		HNS3_MAC_STATS_OFFSET(mac_rx_4096_8191_oct_pkt_num)},
	{"mac_rx_8192_9216_oct_pkt_num",
		HNS3_MAC_STATS_OFFSET(mac_rx_8192_9216_oct_pkt_num)},
	{"mac_rx_9217_12287_oct_pkt_num",
		HNS3_MAC_STATS_OFFSET(mac_rx_9217_12287_oct_pkt_num)},
	{"mac_rx_12288_16383_oct_pkt_num",
		HNS3_MAC_STATS_OFFSET(mac_rx_12288_16383_oct_pkt_num)},
	{"mac_rx_1519_max_good_pkt_num",
		HNS3_MAC_STATS_OFFSET(mac_rx_1519_max_good_oct_pkt_num)},
	{"mac_rx_1519_max_bad_pkt_num",
		HNS3_MAC_STATS_OFFSET(mac_rx_1519_max_bad_oct_pkt_num)},
	{"mac_tx_fragment_pkt_num",
		HNS3_MAC_STATS_OFFSET(mac_tx_fragment_pkt_num)},
	{"mac_tx_undermin_pkt_num",
		HNS3_MAC_STATS_OFFSET(mac_tx_undermin_pkt_num)},
	{"mac_tx_jabber_pkt_num",
		HNS3_MAC_STATS_OFFSET(mac_tx_jabber_pkt_num)},
	{"mac_tx_err_all_pkt_num",
		HNS3_MAC_STATS_OFFSET(mac_tx_err_all_pkt_num)},
	{"mac_tx_from_app_good_pkt_num",
		HNS3_MAC_STATS_OFFSET(mac_tx_from_app_good_pkt_num)},
	{"mac_tx_from_app_bad_pkt_num",
		HNS3_MAC_STATS_OFFSET(mac_tx_from_app_bad_pkt_num)},
	{"mac_rx_fragment_pkt_num",
		HNS3_MAC_STATS_OFFSET(mac_rx_fragment_pkt_num)},
	{"mac_rx_undermin_pkt_num",
		HNS3_MAC_STATS_OFFSET(mac_rx_undermin_pkt_num)},
	{"mac_rx_jabber_pkt_num",
		HNS3_MAC_STATS_OFFSET(mac_rx_jabber_pkt_num)},
	{"mac_rx_fcs_err_pkt_num",
		HNS3_MAC_STATS_OFFSET(mac_rx_fcs_err_pkt_num)},
	{"mac_rx_send_app_good_pkt_num",
		HNS3_MAC_STATS_OFFSET(mac_rx_send_app_good_pkt_num)},
	{"mac_rx_send_app_bad_pkt_num",
		HNS3_MAC_STATS_OFFSET(mac_rx_send_app_bad_pkt_num)}
};
203
204
/* Reset-related xstats: name -> byte offset into the reset stats struct. */
static const struct hns3_xstats_name_offset hns3_reset_stats_strings[] = {
	{"REQ_RESET_CNT",
		HNS3_RESET_STATS_FIELD_OFFSET(request_cnt)},
	{"GLOBAL_RESET_CNT",
		HNS3_RESET_STATS_FIELD_OFFSET(global_cnt)},
	{"IMP_RESET_CNT",
		HNS3_RESET_STATS_FIELD_OFFSET(imp_cnt)},
	{"RESET_EXEC_CNT",
		HNS3_RESET_STATS_FIELD_OFFSET(exec_cnt)},
	{"RESET_SUCCESS_CNT",
		HNS3_RESET_STATS_FIELD_OFFSET(success_cnt)},
	{"RESET_FAIL_CNT",
		HNS3_RESET_STATS_FIELD_OFFSET(fail_cnt)},
	{"RESET_MERGE_CNT",
		HNS3_RESET_STATS_FIELD_OFFSET(merge_cnt)}
};
221
222
/* Rx buffer-descriptor error xstats: name -> offset into rxq->err_stats. */
static const struct hns3_xstats_name_offset hns3_rx_bd_error_strings[] = {
	{"PKT_LEN_ERRORS",
		HNS3_RX_BD_ERROR_STATS_FIELD_OFFSET(pkt_len_errors)},
	{"L2_ERRORS",
		HNS3_RX_BD_ERROR_STATS_FIELD_OFFSET(l2_errors)}
};
229
230
/* Per-Rx-queue DFX (diagnostic) xstats: name -> offset into rxq->dfx_stats. */
static const struct hns3_xstats_name_offset hns3_rxq_dfx_stats_strings[] = {
	{"L3_CHECKSUM_ERRORS",
		HNS3_RXQ_DFX_STATS_FIELD_OFFSET(l3_csum_errors)},
	{"L4_CHECKSUM_ERRORS",
		HNS3_RXQ_DFX_STATS_FIELD_OFFSET(l4_csum_errors)},
	{"OL3_CHECKSUM_ERRORS",
		HNS3_RXQ_DFX_STATS_FIELD_OFFSET(ol3_csum_errors)},
	{"OL4_CHECKSUM_ERRORS",
		HNS3_RXQ_DFX_STATS_FIELD_OFFSET(ol4_csum_errors)}
};
241
242
/* Per-Tx-queue DFX (diagnostic) xstats: name -> offset into txq->dfx_stats. */
static const struct hns3_xstats_name_offset hns3_txq_dfx_stats_strings[] = {
	{"OVER_LENGTH_PKT_CNT",
		HNS3_TXQ_DFX_STATS_FIELD_OFFSET(over_length_pkt_cnt)},
	{"EXCEED_LIMITED_BD_PKT_CNT",
		HNS3_TXQ_DFX_STATS_FIELD_OFFSET(exceed_limit_bd_pkt_cnt)},
	{"EXCEED_LIMITED_BD_PKT_REASSEMBLE_FAIL_CNT",
		HNS3_TXQ_DFX_STATS_FIELD_OFFSET(exceed_limit_bd_reassem_fail)},
	{"UNSUPPORTED_TUNNEL_PKT_CNT",
		HNS3_TXQ_DFX_STATS_FIELD_OFFSET(unsupported_tunnel_pkt_cnt)},
	{"QUEUE_FULL_CNT",
		HNS3_TXQ_DFX_STATS_FIELD_OFFSET(queue_full_cnt)},
	{"SHORT_PKT_PAD_FAIL_CNT",
		HNS3_TXQ_DFX_STATS_FIELD_OFFSET(pkt_padding_fail_cnt)}
};
257
258
/*
 * Per-queue register xstats. Unlike the tables above, the 'offset' member
 * here is a register offset (added to the queue's TQP register base), not
 * a struct field offset.
 */
static const struct hns3_xstats_name_offset hns3_rx_queue_strings[] = {
	{"RX_QUEUE_FBD", HNS3_RING_RX_FBDNUM_REG}
};

static const struct hns3_xstats_name_offset hns3_tx_queue_strings[] = {
	{"TX_QUEUE_FBD", HNS3_RING_TX_FBDNUM_REG}
};
267
268
/*
 * imissed xstats: name -> offset into hw->imissed_stats. The RPU entry must
 * stay first: hns3_get_imissed_stats_num() may report only one entry, in
 * which case only the leading RPU counter is exported.
 */
static const struct hns3_xstats_name_offset hns3_imissed_stats_strings[] = {
	{"RPU_DROP_CNT",
		HNS3_IMISSED_STATS_FIELD_OFFSET(rpu_rx_drop_cnt)},
	{"SSU_DROP_CNT",
		HNS3_IMISSED_STATS_FIELD_OFFSET(ssu_rx_drop_cnt)},
};
275
/* Element counts of the xstats name/offset tables above. */
#define HNS3_NUM_MAC_STATS (sizeof(hns3_mac_strings) / \
	sizeof(hns3_mac_strings[0]))

#define HNS3_NUM_RESET_XSTATS (sizeof(hns3_reset_stats_strings) / \
	sizeof(hns3_reset_stats_strings[0]))

#define HNS3_NUM_RX_BD_ERROR_XSTATS (sizeof(hns3_rx_bd_error_strings) / \
	sizeof(hns3_rx_bd_error_strings[0]))

#define HNS3_NUM_RXQ_DFX_XSTATS (sizeof(hns3_rxq_dfx_stats_strings) / \
	sizeof(hns3_rxq_dfx_stats_strings[0]))

#define HNS3_NUM_TXQ_DFX_XSTATS (sizeof(hns3_txq_dfx_stats_strings) / \
	sizeof(hns3_txq_dfx_stats_strings[0]))

#define HNS3_NUM_RX_QUEUE_STATS (sizeof(hns3_rx_queue_strings) / \
	sizeof(hns3_rx_queue_strings[0]))

#define HNS3_NUM_TX_QUEUE_STATS (sizeof(hns3_tx_queue_strings) / \
	sizeof(hns3_tx_queue_strings[0]))

#define HNS3_NUM_RXQ_BASIC_STATS (sizeof(hns3_rxq_basic_stats_strings) / \
	sizeof(hns3_rxq_basic_stats_strings[0]))

#define HNS3_NUM_TXQ_BASIC_STATS (sizeof(hns3_txq_basic_stats_strings) / \
	sizeof(hns3_txq_basic_stats_strings[0]))

#define HNS3_NUM_IMISSED_XSTATS (sizeof(hns3_imissed_stats_strings) / \
	sizeof(hns3_imissed_stats_strings[0]))

/* Fixed (non-per-queue) xstats exported by the PF: MAC + reset counters. */
#define HNS3_FIX_NUM_STATS (HNS3_NUM_MAC_STATS + HNS3_NUM_RESET_XSTATS)

static void hns3_tqp_stats_clear(struct hns3_hw *hw);
309
310
311
312
313
314
315
316
317
318
319static int
320hns3_update_mac_stats(struct hns3_hw *hw, const uint32_t desc_num)
321{
322 uint64_t *data = (uint64_t *)(&hw->mac_stats);
323 struct hns3_cmd_desc *desc;
324 uint64_t *desc_data;
325 uint16_t i, k, n;
326 int ret;
327
328 desc = rte_malloc("hns3_mac_desc",
329 desc_num * sizeof(struct hns3_cmd_desc), 0);
330 if (desc == NULL) {
331 hns3_err(hw, "Mac_update_stats alloced desc malloc fail");
332 return -ENOMEM;
333 }
334
335 hns3_cmd_setup_basic_desc(desc, HNS3_OPC_STATS_MAC_ALL, true);
336 ret = hns3_cmd_send(hw, desc, desc_num);
337 if (ret) {
338 hns3_err(hw, "Update complete MAC pkt stats fail : %d", ret);
339 rte_free(desc);
340 return ret;
341 }
342
343 for (i = 0; i < desc_num; i++) {
344
345 if (i == 0) {
346 desc_data = (uint64_t *)(&desc[i].data[0]);
347 n = HNS3_RD_FIRST_STATS_NUM;
348 } else {
349 desc_data = (uint64_t *)(&desc[i]);
350 n = HNS3_RD_OTHER_STATS_NUM;
351 }
352
353 for (k = 0; k < n; k++) {
354 *data += rte_le_to_cpu_64(*desc_data);
355 data++;
356 desc_data++;
357 }
358 }
359 rte_free(desc);
360
361 return 0;
362}
363
364
365
366
367
368
369
370
371
372
373static int
374hns3_mac_query_reg_num(struct rte_eth_dev *dev, uint32_t *desc_num)
375{
376 struct hns3_adapter *hns = dev->data->dev_private;
377 struct hns3_hw *hw = &hns->hw;
378 struct hns3_cmd_desc desc;
379 uint32_t *desc_data;
380 uint32_t reg_num;
381 int ret;
382
383 hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_QUERY_MAC_REG_NUM, true);
384 ret = hns3_cmd_send(hw, &desc, 1);
385 if (ret)
386 return ret;
387
388
389
390
391
392 desc_data = (uint32_t *)(&desc.data[0]);
393 reg_num = rte_le_to_cpu_32(*desc_data);
394
395
396
397
398
399
400 *desc_num = 1 + ((reg_num - 3) >> 2) +
401 (uint32_t)(((reg_num - 3) & 0x3) ? 1 : 0);
402
403 return 0;
404}
405
406static int
407hns3_query_update_mac_stats(struct rte_eth_dev *dev)
408{
409 struct hns3_adapter *hns = dev->data->dev_private;
410 struct hns3_hw *hw = &hns->hw;
411 uint32_t desc_num;
412 int ret;
413
414 ret = hns3_mac_query_reg_num(dev, &desc_num);
415 if (ret == 0)
416 ret = hns3_update_mac_stats(hw, desc_num);
417 else
418 hns3_err(hw, "Query mac reg num fail : %d", ret);
419 return ret;
420}
421
422static int
423hns3_update_port_rpu_drop_stats(struct hns3_hw *hw)
424{
425 struct hns3_rx_missed_stats *stats = &hw->imissed_stats;
426 struct hns3_query_rpu_cmd *req;
427 struct hns3_cmd_desc desc;
428 uint64_t cnt;
429 uint32_t tc_num;
430 int ret;
431
432 hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_DFX_RPU_REG_0, true);
433 req = (struct hns3_query_rpu_cmd *)desc.data;
434
435
436
437
438
439 tc_num = 0;
440 req->tc_queue_num = rte_cpu_to_le_32(tc_num);
441 ret = hns3_cmd_send(hw, &desc, 1);
442 if (ret) {
443 hns3_err(hw, "failed to query RPU stats: %d", ret);
444 return ret;
445 }
446
447 cnt = rte_le_to_cpu_32(req->rpu_rx_pkt_drop_cnt);
448 stats->rpu_rx_drop_cnt += cnt;
449
450 return 0;
451}
452
453static void
454hns3_update_function_rpu_drop_stats(struct hns3_hw *hw)
455{
456 struct hns3_rx_missed_stats *stats = &hw->imissed_stats;
457
458 stats->rpu_rx_drop_cnt += hns3_read_dev(hw, HNS3_RPU_DROP_CNT_REG);
459}
460
461static int
462hns3_update_rpu_drop_stats(struct hns3_hw *hw)
463{
464 struct hns3_adapter *hns = HNS3_DEV_HW_TO_ADAPTER(hw);
465 int ret = 0;
466
467 if (hw->drop_stats_mode == HNS3_PKTS_DROP_STATS_MODE1 && !hns->is_vf)
468 ret = hns3_update_port_rpu_drop_stats(hw);
469 else if (hw->drop_stats_mode == HNS3_PKTS_DROP_STATS_MODE2)
470 hns3_update_function_rpu_drop_stats(hw);
471
472 return ret;
473}
474
475static int
476hns3_get_ssu_drop_stats(struct hns3_hw *hw, struct hns3_cmd_desc *desc,
477 int bd_num, bool is_rx)
478{
479 struct hns3_query_ssu_cmd *req;
480 int ret;
481 int i;
482
483 for (i = 0; i < bd_num - 1; i++) {
484 hns3_cmd_setup_basic_desc(&desc[i],
485 HNS3_OPC_SSU_DROP_REG, true);
486 desc[i].flag |= rte_cpu_to_le_16(HNS3_CMD_FLAG_NEXT);
487 }
488 hns3_cmd_setup_basic_desc(&desc[i], HNS3_OPC_SSU_DROP_REG, true);
489 req = (struct hns3_query_ssu_cmd *)desc[0].data;
490 req->rxtx = is_rx ? 0 : 1;
491 ret = hns3_cmd_send(hw, desc, bd_num);
492
493 return ret;
494}
495
496static int
497hns3_update_port_rx_ssu_drop_stats(struct hns3_hw *hw)
498{
499 struct hns3_rx_missed_stats *stats = &hw->imissed_stats;
500 struct hns3_cmd_desc desc[HNS3_OPC_SSU_DROP_REG_NUM];
501 struct hns3_query_ssu_cmd *req;
502 uint64_t cnt;
503 int ret;
504
505 ret = hns3_get_ssu_drop_stats(hw, desc, HNS3_OPC_SSU_DROP_REG_NUM,
506 true);
507 if (ret) {
508 hns3_err(hw, "failed to get Rx SSU drop stats, ret = %d", ret);
509 return ret;
510 }
511
512 req = (struct hns3_query_ssu_cmd *)desc[0].data;
513 cnt = rte_le_to_cpu_32(req->oq_drop_cnt) +
514 rte_le_to_cpu_32(req->full_drop_cnt) +
515 rte_le_to_cpu_32(req->part_drop_cnt);
516
517 stats->ssu_rx_drop_cnt += cnt;
518
519 return 0;
520}
521
522static int
523hns3_update_port_tx_ssu_drop_stats(struct hns3_hw *hw)
524{
525 struct hns3_cmd_desc desc[HNS3_OPC_SSU_DROP_REG_NUM];
526 struct hns3_query_ssu_cmd *req;
527 uint64_t cnt;
528 int ret;
529
530 ret = hns3_get_ssu_drop_stats(hw, desc, HNS3_OPC_SSU_DROP_REG_NUM,
531 false);
532 if (ret) {
533 hns3_err(hw, "failed to get Tx SSU drop stats, ret = %d", ret);
534 return ret;
535 }
536
537 req = (struct hns3_query_ssu_cmd *)desc[0].data;
538 cnt = rte_le_to_cpu_32(req->oq_drop_cnt) +
539 rte_le_to_cpu_32(req->full_drop_cnt) +
540 rte_le_to_cpu_32(req->part_drop_cnt);
541
542 hw->oerror_stats += cnt;
543
544 return 0;
545}
546
547int
548hns3_update_imissed_stats(struct hns3_hw *hw, bool is_clear)
549{
550 struct hns3_adapter *hns = HNS3_DEV_HW_TO_ADAPTER(hw);
551 int ret;
552
553 if (hw->drop_stats_mode == HNS3_PKTS_DROP_STATS_MODE1 && hns->is_vf)
554 return 0;
555
556 if (hw->drop_stats_mode == HNS3_PKTS_DROP_STATS_MODE2 && !hns->is_vf) {
557 ret = hns3_update_port_rx_ssu_drop_stats(hw);
558 if (ret)
559 return ret;
560 }
561
562 ret = hns3_update_rpu_drop_stats(hw);
563 if (ret)
564 return ret;
565
566 if (is_clear)
567 memset(&hw->imissed_stats, 0, sizeof(hw->imissed_stats));
568
569 return 0;
570}
571
572static int
573hns3_update_oerror_stats(struct hns3_hw *hw, bool is_clear)
574{
575 struct hns3_adapter *hns = HNS3_DEV_HW_TO_ADAPTER(hw);
576 int ret;
577
578 if (hw->drop_stats_mode == HNS3_PKTS_DROP_STATS_MODE1 || hns->is_vf)
579 return 0;
580
581 ret = hns3_update_port_tx_ssu_drop_stats(hw);
582 if (ret)
583 return ret;
584
585 if (is_clear)
586 hw->oerror_stats = 0;
587
588 return 0;
589}
590
591
592
593
594
595
596
597
598
599
600
601
/*
 * ethdev stats_get callback: fill 'rte_stats' with the device's basic
 * statistics.
 *
 * The per-queue packet counters (HNS3_RING_*_PKTNUM_RECORD_REG) are
 * accumulated into the software copy in hw->tqp_stats on every call —
 * the reset path reads these registers to discard their content, which
 * suggests they are read-clear (TODO confirm against the register spec).
 * ipackets/opackets are then derived from the accumulated totals.
 *
 * Returns 0 on success or a negative error code if a hardware query fails.
 */
int
hns3_stats_get(struct rte_eth_dev *eth_dev, struct rte_eth_stats *rte_stats)
{
	struct hns3_adapter *hns = eth_dev->data->dev_private;
	struct hns3_hw *hw = &hns->hw;
	struct hns3_rx_missed_stats *imissed_stats = &hw->imissed_stats;
	struct hns3_tqp_stats *stats = &hw->tqp_stats;
	struct hns3_rx_queue *rxq;
	struct hns3_tx_queue *txq;
	uint64_t cnt;
	uint16_t i;
	int ret;

	/* Refresh RPU/SSU drop counters before reporting imissed. */
	ret = hns3_update_imissed_stats(hw, false);
	if (ret) {
		hns3_err(hw, "update imissed stats failed, ret = %d",
			 ret);
		return ret;
	}
	rte_stats->imissed = imissed_stats->rpu_rx_drop_cnt +
				imissed_stats->ssu_rx_drop_cnt;

	/* Accumulate Rx counters of every configured queue. */
	for (i = 0; i < eth_dev->data->nb_rx_queues; i++) {
		rxq = eth_dev->data->rx_queues[i];
		if (rxq == NULL)
			continue;

		cnt = hns3_read_dev(rxq, HNS3_RING_RX_PKTNUM_RECORD_REG);
		/*
		 * Software-detected Rx errors (bad length / L2) count as
		 * ierrors; they are kept per-queue in rxq->err_stats.
		 */
		rte_stats->ierrors += rxq->err_stats.l2_errors +
					rxq->err_stats.pkt_len_errors;
		stats->rcb_rx_ring_pktnum_rcd += cnt;
		stats->rcb_rx_ring_pktnum[i] += cnt;
		rte_stats->ibytes += rxq->basic_stats.bytes;
	}

	/* Accumulate Tx counters of every configured queue. */
	for (i = 0; i < eth_dev->data->nb_tx_queues; i++) {
		txq = eth_dev->data->tx_queues[i];
		if (txq == NULL)
			continue;

		cnt = hns3_read_dev(txq, HNS3_RING_TX_PKTNUM_RECORD_REG);
		stats->rcb_tx_ring_pktnum_rcd += cnt;
		stats->rcb_tx_ring_pktnum[i] += cnt;
		rte_stats->obytes += txq->basic_stats.bytes;
	}

	ret = hns3_update_oerror_stats(hw, false);
	if (ret) {
		hns3_err(hw, "update oerror stats failed, ret = %d",
			 ret);
		return ret;
	}
	rte_stats->oerrors = hw->oerror_stats;

	/*
	 * The hardware Rx packet counter includes packets later dropped as
	 * errors, so subtract ierrors; clamp at 0 in case the error count
	 * exceeds the accumulated packet count.
	 */
	rte_stats->ipackets =
		stats->rcb_rx_ring_pktnum_rcd > rte_stats->ierrors ?
		stats->rcb_rx_ring_pktnum_rcd - rte_stats->ierrors : 0;
	rte_stats->opackets  = stats->rcb_tx_ring_pktnum_rcd -
		rte_stats->oerrors;
	rte_stats->rx_nombuf = eth_dev->data->rx_mbuf_alloc_failed;

	return 0;
}
677
678int
679hns3_stats_reset(struct rte_eth_dev *eth_dev)
680{
681 struct hns3_adapter *hns = eth_dev->data->dev_private;
682 struct hns3_hw *hw = &hns->hw;
683 struct hns3_rx_queue *rxq;
684 struct hns3_tx_queue *txq;
685 uint16_t i;
686 int ret;
687
688
689
690
691
692 ret = hns3_update_imissed_stats(hw, true);
693 if (ret) {
694 hns3_err(hw, "clear imissed stats failed, ret = %d", ret);
695 return ret;
696 }
697
698
699
700
701
702 ret = hns3_update_oerror_stats(hw, true);
703 if (ret) {
704 hns3_err(hw, "clear oerror stats failed, ret = %d",
705 ret);
706 return ret;
707 }
708
709 for (i = 0; i < eth_dev->data->nb_rx_queues; i++) {
710 rxq = eth_dev->data->rx_queues[i];
711 if (rxq == NULL)
712 continue;
713
714 rxq->err_stats.pkt_len_errors = 0;
715 rxq->err_stats.l2_errors = 0;
716 }
717
718
719 for (i = 0; i < eth_dev->data->nb_rx_queues; i++) {
720 rxq = eth_dev->data->rx_queues[i];
721 if (rxq == NULL)
722 continue;
723
724 memset(&rxq->basic_stats, 0,
725 sizeof(struct hns3_rx_basic_stats));
726
727
728 (void)hns3_read_dev(rxq, HNS3_RING_RX_PKTNUM_RECORD_REG);
729 rxq->err_stats.pkt_len_errors = 0;
730 rxq->err_stats.l2_errors = 0;
731 }
732
733
734 for (i = 0; i < eth_dev->data->nb_tx_queues; i++) {
735 txq = eth_dev->data->tx_queues[i];
736 if (txq == NULL)
737 continue;
738
739 memset(&txq->basic_stats, 0,
740 sizeof(struct hns3_tx_basic_stats));
741
742
743 (void)hns3_read_dev(txq, HNS3_RING_TX_PKTNUM_RECORD_REG);
744 }
745
746 hns3_tqp_stats_clear(hw);
747
748 return 0;
749}
750
751static int
752hns3_mac_stats_reset(__rte_unused struct rte_eth_dev *dev)
753{
754 struct hns3_adapter *hns = dev->data->dev_private;
755 struct hns3_hw *hw = &hns->hw;
756 struct hns3_mac_stats *mac_stats = &hw->mac_stats;
757 int ret;
758
759 ret = hns3_query_update_mac_stats(dev);
760 if (ret) {
761 hns3_err(hw, "Clear Mac stats fail : %d", ret);
762 return ret;
763 }
764
765 memset(mac_stats, 0, sizeof(struct hns3_mac_stats));
766
767 return 0;
768}
769
770static int
771hns3_get_imissed_stats_num(struct hns3_adapter *hns)
772{
773#define NO_IMISSED_STATS_NUM 0
774#define RPU_STATS_ITEM_NUM 1
775 struct hns3_hw *hw = &hns->hw;
776
777 if (hw->drop_stats_mode == HNS3_PKTS_DROP_STATS_MODE1 && hns->is_vf)
778 return NO_IMISSED_STATS_NUM;
779
780 if (hw->drop_stats_mode == HNS3_PKTS_DROP_STATS_MODE2 && !hns->is_vf)
781 return HNS3_NUM_IMISSED_XSTATS;
782
783 return RPU_STATS_ITEM_NUM;
784}
785
786
787static int
788hns3_xstats_calc_num(struct rte_eth_dev *dev)
789{
790#define HNS3_PF_VF_RX_COMM_STATS_NUM (HNS3_NUM_RX_BD_ERROR_XSTATS + \
791 HNS3_NUM_RXQ_DFX_XSTATS + \
792 HNS3_NUM_RX_QUEUE_STATS + \
793 HNS3_NUM_RXQ_BASIC_STATS)
794#define HNS3_PF_VF_TX_COMM_STATS_NUM (HNS3_NUM_TXQ_DFX_XSTATS + \
795 HNS3_NUM_TX_QUEUE_STATS + \
796 HNS3_NUM_TXQ_BASIC_STATS)
797
798 struct hns3_adapter *hns = dev->data->dev_private;
799 uint16_t nb_rx_q = dev->data->nb_rx_queues;
800 uint16_t nb_tx_q = dev->data->nb_tx_queues;
801 int rx_comm_stats_num = nb_rx_q * HNS3_PF_VF_RX_COMM_STATS_NUM;
802 int tx_comm_stats_num = nb_tx_q * HNS3_PF_VF_TX_COMM_STATS_NUM;
803 int stats_num;
804
805 stats_num = rx_comm_stats_num + tx_comm_stats_num;
806 stats_num += hns3_get_imissed_stats_num(hns);
807
808 if (hns->is_vf)
809 stats_num += HNS3_NUM_RESET_XSTATS;
810 else
811 stats_num += HNS3_FIX_NUM_STATS;
812
813 return stats_num;
814}
815
816static void
817hns3_queue_stats_get(struct rte_eth_dev *dev, struct rte_eth_xstat *xstats,
818 int *count)
819{
820 struct hns3_adapter *hns = dev->data->dev_private;
821 struct hns3_hw *hw = &hns->hw;
822 uint32_t reg_offset;
823 uint16_t i, j;
824
825
826 for (j = 0; j < dev->data->nb_rx_queues; j++) {
827 for (i = 0; i < HNS3_NUM_RX_QUEUE_STATS; i++) {
828 reg_offset = hns3_get_tqp_reg_offset(j);
829 xstats[*count].value = hns3_read_dev(hw,
830 reg_offset + hns3_rx_queue_strings[i].offset);
831 xstats[*count].id = *count;
832 (*count)++;
833 }
834 }
835
836
837 for (j = 0; j < dev->data->nb_tx_queues; j++) {
838 for (i = 0; i < HNS3_NUM_TX_QUEUE_STATS; i++) {
839 reg_offset = hns3_get_tqp_reg_offset(j);
840 xstats[*count].value = hns3_read_dev(hw,
841 reg_offset + hns3_tx_queue_strings[i].offset);
842 xstats[*count].id = *count;
843 (*count)++;
844 }
845 }
846}
847
848static void
849hns3_rxq_dfx_stats_get(struct rte_eth_dev *dev, struct rte_eth_xstat *xstats,
850 int *count)
851{
852 struct hns3_rx_dfx_stats *dfx_stats;
853 struct hns3_rx_queue *rxq;
854 uint16_t i, j;
855 char *val;
856
857 for (i = 0; i < dev->data->nb_rx_queues; i++) {
858 rxq = (struct hns3_rx_queue *)dev->data->rx_queues[i];
859 if (rxq == NULL)
860 continue;
861
862 dfx_stats = &rxq->dfx_stats;
863 for (j = 0; j < HNS3_NUM_RXQ_DFX_XSTATS; j++) {
864 val = (char *)dfx_stats +
865 hns3_rxq_dfx_stats_strings[j].offset;
866 xstats[*count].value = *(uint64_t *)val;
867 xstats[*count].id = *count;
868 (*count)++;
869 }
870 }
871}
872
873static void
874hns3_txq_dfx_stats_get(struct rte_eth_dev *dev, struct rte_eth_xstat *xstats,
875 int *count)
876{
877 struct hns3_tx_dfx_stats *dfx_stats;
878 struct hns3_tx_queue *txq;
879 uint16_t i, j;
880 char *val;
881
882 for (i = 0; i < dev->data->nb_tx_queues; i++) {
883 txq = (struct hns3_tx_queue *)dev->data->tx_queues[i];
884 if (txq == NULL)
885 continue;
886
887 dfx_stats = &txq->dfx_stats;
888 for (j = 0; j < HNS3_NUM_TXQ_DFX_XSTATS; j++) {
889 val = (char *)dfx_stats +
890 hns3_txq_dfx_stats_strings[j].offset;
891 xstats[*count].value = *(uint64_t *)val;
892 xstats[*count].id = *count;
893 (*count)++;
894 }
895 }
896}
897
/*
 * Append all DFX xstats to 'xstats': Rx queues first, then Tx queues.
 * The order must match hns3_tqp_dfx_stats_name_get().
 */
static void
hns3_tqp_dfx_stats_get(struct rte_eth_dev *dev, struct rte_eth_xstat *xstats,
		       int *count)
{
	hns3_rxq_dfx_stats_get(dev, xstats, count);
	hns3_txq_dfx_stats_get(dev, xstats, count);
}
905
/*
 * Append each Rx queue's basic xstats (packets/bytes/errors) to 'xstats'.
 *
 * As a side effect this refreshes the software accumulators: the per-queue
 * hardware packet counter is read and folded into hw->tqp_stats (the reset
 * path reads the same register to discard its value, which suggests it is
 * read-clear — TODO confirm), and rxq->basic_stats.errors/packets are
 * recomputed before being copied out via the name/offset table.
 */
static void
hns3_rxq_basic_stats_get(struct rte_eth_dev *dev, struct rte_eth_xstat *xstats,
			 int *count)
{
	struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct hns3_tqp_stats *stats = &hw->tqp_stats;
	struct hns3_rx_basic_stats *rxq_stats;
	struct hns3_rx_queue *rxq;
	uint16_t i, j;
	uint32_t cnt;
	char *val;

	for (i = 0; i < dev->data->nb_rx_queues; i++) {
		rxq = dev->data->rx_queues[i];
		if (rxq == NULL)
			continue;

		cnt = hns3_read_dev(rxq, HNS3_RING_RX_PKTNUM_RECORD_REG);

		/* Fold software error counters and the hardware packet
		 * counter into the per-queue accumulators.
		 */
		rxq_stats = &rxq->basic_stats;
		rxq_stats->errors = rxq->err_stats.l2_errors +
					rxq->err_stats.pkt_len_errors;
		stats->rcb_rx_ring_pktnum_rcd += cnt;
		stats->rcb_rx_ring_pktnum[i] += cnt;

		/*
		 * The hardware counter includes packets later dropped as
		 * errors, so subtract them; clamp at 0 in case errors
		 * exceed the accumulated packet count.
		 */
		rxq_stats->packets =
			stats->rcb_rx_ring_pktnum[i] > rxq_stats->errors ?
			stats->rcb_rx_ring_pktnum[i] - rxq_stats->errors : 0;
		for (j = 0; j < HNS3_NUM_RXQ_BASIC_STATS; j++) {
			val = (char *)rxq_stats +
				hns3_rxq_basic_stats_strings[j].offset;
			xstats[*count].value = *(uint64_t *)val;
			xstats[*count].id = *count;
			(*count)++;
		}
	}
}
952
953static void
954hns3_txq_basic_stats_get(struct rte_eth_dev *dev, struct rte_eth_xstat *xstats,
955 int *count)
956{
957 struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private);
958 struct hns3_tqp_stats *stats = &hw->tqp_stats;
959 struct hns3_tx_basic_stats *txq_stats;
960 struct hns3_tx_queue *txq;
961 uint16_t i, j;
962 uint32_t cnt;
963 char *val;
964
965 for (i = 0; i < dev->data->nb_tx_queues; i++) {
966 txq = dev->data->tx_queues[i];
967 if (txq == NULL)
968 continue;
969
970 cnt = hns3_read_dev(txq, HNS3_RING_TX_PKTNUM_RECORD_REG);
971 stats->rcb_tx_ring_pktnum_rcd += cnt;
972 stats->rcb_tx_ring_pktnum[i] += cnt;
973
974 txq_stats = &txq->basic_stats;
975 txq_stats->packets = stats->rcb_tx_ring_pktnum[i];
976
977 for (j = 0; j < HNS3_NUM_TXQ_BASIC_STATS; j++) {
978 val = (char *)txq_stats +
979 hns3_txq_basic_stats_strings[j].offset;
980 xstats[*count].value = *(uint64_t *)val;
981 xstats[*count].id = *count;
982 (*count)++;
983 }
984 }
985}
986
/*
 * Append all basic queue xstats to 'xstats': Rx queues first, then Tx
 * queues. The order must match hns3_tqp_basic_stats_name_get().
 */
static void
hns3_tqp_basic_stats_get(struct rte_eth_dev *dev, struct rte_eth_xstat *xstats,
			 int *count)
{
	hns3_rxq_basic_stats_get(dev, xstats, count);
	hns3_txq_basic_stats_get(dev, xstats, count);
}
994
995static void
996hns3_imissed_stats_get(struct rte_eth_dev *dev, struct rte_eth_xstat *xstats,
997 int *count)
998{
999 struct hns3_adapter *hns = dev->data->dev_private;
1000 struct hns3_hw *hw = &hns->hw;
1001 struct hns3_rx_missed_stats *imissed_stats = &hw->imissed_stats;
1002 int imissed_stats_num;
1003 int cnt = *count;
1004 char *addr;
1005 uint16_t i;
1006
1007 imissed_stats_num = hns3_get_imissed_stats_num(hns);
1008
1009 for (i = 0; i < imissed_stats_num; i++) {
1010 addr = (char *)imissed_stats +
1011 hns3_imissed_stats_strings[i].offset;
1012 xstats[cnt].value = *(uint64_t *)addr;
1013 xstats[cnt].id = cnt;
1014 cnt++;
1015 }
1016
1017 *count = cnt;
1018}
1019
1020
1021
1022
1023
1024
1025
1026
1027
1028
1029
1030
1031
1032
1033int
1034hns3_dev_xstats_get(struct rte_eth_dev *dev, struct rte_eth_xstat *xstats,
1035 unsigned int n)
1036{
1037 struct hns3_adapter *hns = dev->data->dev_private;
1038 struct hns3_hw *hw = &hns->hw;
1039 struct hns3_mac_stats *mac_stats = &hw->mac_stats;
1040 struct hns3_reset_stats *reset_stats = &hw->reset.stats;
1041 struct hns3_rx_bd_errors_stats *rx_err_stats;
1042 struct hns3_rx_queue *rxq;
1043 uint16_t i, j;
1044 char *addr;
1045 int count;
1046 int ret;
1047
1048 if (xstats == NULL)
1049 return 0;
1050
1051 count = hns3_xstats_calc_num(dev);
1052 if ((int)n < count)
1053 return count;
1054
1055 count = 0;
1056
1057 hns3_tqp_basic_stats_get(dev, xstats, &count);
1058
1059 if (!hns->is_vf) {
1060
1061 ret = hns3_query_update_mac_stats(dev);
1062 if (ret < 0) {
1063 hns3_err(hw, "Update Mac stats fail : %d", ret);
1064 return ret;
1065 }
1066
1067
1068 for (i = 0; i < HNS3_NUM_MAC_STATS; i++) {
1069 addr = (char *)mac_stats + hns3_mac_strings[i].offset;
1070 xstats[count].value = *(uint64_t *)addr;
1071 xstats[count].id = count;
1072 count++;
1073 }
1074 }
1075
1076 ret = hns3_update_imissed_stats(hw, false);
1077 if (ret) {
1078 hns3_err(hw, "update imissed stats failed, ret = %d",
1079 ret);
1080 return ret;
1081 }
1082
1083 hns3_imissed_stats_get(dev, xstats, &count);
1084
1085
1086 for (i = 0; i < HNS3_NUM_RESET_XSTATS; i++) {
1087 addr = (char *)reset_stats + hns3_reset_stats_strings[i].offset;
1088 xstats[count].value = *(uint64_t *)addr;
1089 xstats[count].id = count;
1090 count++;
1091 }
1092
1093
1094 for (j = 0; j < dev->data->nb_rx_queues; j++) {
1095 for (i = 0; i < HNS3_NUM_RX_BD_ERROR_XSTATS; i++) {
1096 rxq = dev->data->rx_queues[j];
1097 if (rxq) {
1098 rx_err_stats = &rxq->err_stats;
1099 addr = (char *)rx_err_stats +
1100 hns3_rx_bd_error_strings[i].offset;
1101 xstats[count].value = *(uint64_t *)addr;
1102 xstats[count].id = count;
1103 count++;
1104 }
1105 }
1106 }
1107
1108 hns3_tqp_dfx_stats_get(dev, xstats, &count);
1109 hns3_queue_stats_get(dev, xstats, &count);
1110
1111 return count;
1112}
1113
1114static void
1115hns3_tqp_basic_stats_name_get(struct rte_eth_dev *dev,
1116 struct rte_eth_xstat_name *xstats_names,
1117 uint32_t *count)
1118{
1119 uint16_t i, j;
1120
1121 for (i = 0; i < dev->data->nb_rx_queues; i++) {
1122 for (j = 0; j < HNS3_NUM_RXQ_BASIC_STATS; j++) {
1123 snprintf(xstats_names[*count].name,
1124 sizeof(xstats_names[*count].name),
1125 "rx_q%u_%s", i,
1126 hns3_rxq_basic_stats_strings[j].name);
1127 (*count)++;
1128 }
1129 }
1130 for (i = 0; i < dev->data->nb_tx_queues; i++) {
1131 for (j = 0; j < HNS3_NUM_TXQ_BASIC_STATS; j++) {
1132 snprintf(xstats_names[*count].name,
1133 sizeof(xstats_names[*count].name),
1134 "tx_q%u_%s", i,
1135 hns3_txq_basic_stats_strings[j].name);
1136 (*count)++;
1137 }
1138 }
1139}
1140
1141static void
1142hns3_tqp_dfx_stats_name_get(struct rte_eth_dev *dev,
1143 struct rte_eth_xstat_name *xstats_names,
1144 uint32_t *count)
1145{
1146 uint16_t i, j;
1147
1148 for (i = 0; i < dev->data->nb_rx_queues; i++) {
1149 for (j = 0; j < HNS3_NUM_RXQ_DFX_XSTATS; j++) {
1150 snprintf(xstats_names[*count].name,
1151 sizeof(xstats_names[*count].name),
1152 "rx_q%u_%s", i,
1153 hns3_rxq_dfx_stats_strings[j].name);
1154 (*count)++;
1155 }
1156 }
1157
1158 for (i = 0; i < dev->data->nb_tx_queues; i++) {
1159 for (j = 0; j < HNS3_NUM_TXQ_DFX_XSTATS; j++) {
1160 snprintf(xstats_names[*count].name,
1161 sizeof(xstats_names[*count].name),
1162 "tx_q%u_%s", i,
1163 hns3_txq_dfx_stats_strings[j].name);
1164 (*count)++;
1165 }
1166 }
1167}
1168
1169static void
1170hns3_imissed_stats_name_get(struct rte_eth_dev *dev,
1171 struct rte_eth_xstat_name *xstats_names,
1172 uint32_t *count)
1173{
1174 struct hns3_adapter *hns = dev->data->dev_private;
1175 uint32_t cnt = *count;
1176 int imissed_stats_num;
1177 uint16_t i;
1178
1179 imissed_stats_num = hns3_get_imissed_stats_num(hns);
1180
1181 for (i = 0; i < imissed_stats_num; i++) {
1182 snprintf(xstats_names[cnt].name,
1183 sizeof(xstats_names[cnt].name),
1184 "%s", hns3_imissed_stats_strings[i].name);
1185 cnt++;
1186 }
1187
1188 *count = cnt;
1189}
1190
1191
1192
1193
1194
1195
1196
1197
1198
1199
1200
1201
1202
1203
1204
1205
1206
1207
1208
1209
1210
1211
1212
1213
1214
/*
 * Retrieve the names of all extended statistics of the device.
 *
 * NOTE(review): the order in which name groups are emitted here must stay
 * in sync with the order in which the value-retrieval path emits values
 * (tqp basic, MAC (PF only), imissed, reset, Rx BD error, DFX, Rx queue,
 * Tx queue) — the ethdev layer matches names to values by index.
 *
 * Returns the total number of statistics when xstats_names is NULL,
 * otherwise the number of names written.
 */
int
hns3_dev_xstats_get_names(struct rte_eth_dev *dev,
			  struct rte_eth_xstat_name *xstats_names,
			  __rte_unused unsigned int size)
{
	struct hns3_adapter *hns = dev->data->dev_private;
	int cnt_stats = hns3_xstats_calc_num(dev);
	uint32_t count = 0;
	uint16_t i, j;

	/* Caller only wants the number of available statistics. */
	if (xstats_names == NULL)
		return cnt_stats;

	hns3_tqp_basic_stats_name_get(dev, xstats_names, &count);

	/* MAC-level statistics exist only on the PF. */
	if (!hns->is_vf) {
		/* One name per entry of the MAC statistics table. */
		for (i = 0; i < HNS3_NUM_MAC_STATS; i++) {
			snprintf(xstats_names[count].name,
				 sizeof(xstats_names[count].name),
				 "%s", hns3_mac_strings[i].name);
			count++;
		}
	}

	hns3_imissed_stats_name_get(dev, xstats_names, &count);

	/* Reset-event counters. */
	for (i = 0; i < HNS3_NUM_RESET_XSTATS; i++) {
		snprintf(xstats_names[count].name,
			 sizeof(xstats_names[count].name),
			 "%s", hns3_reset_stats_strings[i].name);
		count++;
	}

	/* Per-Rx-queue buffer-descriptor error counters. */
	for (j = 0; j < dev->data->nb_rx_queues; j++) {
		for (i = 0; i < HNS3_NUM_RX_BD_ERROR_XSTATS; i++) {
			snprintf(xstats_names[count].name,
				 sizeof(xstats_names[count].name),
				 "rx_q%u_%s", j,
				 hns3_rx_bd_error_strings[i].name);
			count++;
		}
	}

	hns3_tqp_dfx_stats_name_get(dev, xstats_names, &count);

	/* Per-Rx-queue hardware queue counters. */
	for (j = 0; j < dev->data->nb_rx_queues; j++) {
		for (i = 0; i < HNS3_NUM_RX_QUEUE_STATS; i++) {
			snprintf(xstats_names[count].name,
				 sizeof(xstats_names[count].name),
				 "rx_q%u_%s", j, hns3_rx_queue_strings[i].name);
			count++;
		}
	}

	/* Per-Tx-queue hardware queue counters. */
	for (j = 0; j < dev->data->nb_tx_queues; j++) {
		for (i = 0; i < HNS3_NUM_TX_QUEUE_STATS; i++) {
			snprintf(xstats_names[count].name,
				 sizeof(xstats_names[count].name),
				 "tx_q%u_%s", j, hns3_tx_queue_strings[i].name);
			count++;
		}
	}

	return count;
}
1282
1283
1284
1285
1286
1287
1288
1289
1290
1291
1292
1293
1294
1295
1296
1297
1298
1299
1300
1301
1302
1303
1304
1305
1306int
1307hns3_dev_xstats_get_by_id(struct rte_eth_dev *dev, const uint64_t *ids,
1308 uint64_t *values, uint32_t size)
1309{
1310 const uint32_t cnt_stats = hns3_xstats_calc_num(dev);
1311 struct hns3_adapter *hns = dev->data->dev_private;
1312 struct rte_eth_xstat *values_copy;
1313 struct hns3_hw *hw = &hns->hw;
1314 uint32_t count_value;
1315 uint64_t len;
1316 uint32_t i;
1317
1318 if (ids == NULL && values == NULL)
1319 return cnt_stats;
1320
1321 if (ids == NULL)
1322 if (size < cnt_stats)
1323 return cnt_stats;
1324
1325 len = cnt_stats * sizeof(struct rte_eth_xstat);
1326 values_copy = rte_zmalloc("hns3_xstats_values", len, 0);
1327 if (values_copy == NULL) {
1328 hns3_err(hw, "Failed to allocate 0x%" PRIx64 " bytes needed "
1329 "to store statistics values", len);
1330 return -ENOMEM;
1331 }
1332
1333 count_value = hns3_dev_xstats_get(dev, values_copy, cnt_stats);
1334 if (count_value != cnt_stats) {
1335 rte_free(values_copy);
1336 return -EINVAL;
1337 }
1338
1339 if (ids == NULL && values != NULL) {
1340 for (i = 0; i < cnt_stats; i++)
1341 memcpy(&values[i], &values_copy[i].value,
1342 sizeof(values[i]));
1343
1344 rte_free(values_copy);
1345 return cnt_stats;
1346 }
1347
1348 for (i = 0; i < size; i++) {
1349 if (ids[i] >= cnt_stats) {
1350 hns3_err(hw, "ids[%u] (%" PRIu64 ") is invalid, "
1351 "should < %u", i, ids[i], cnt_stats);
1352 rte_free(values_copy);
1353 return -EINVAL;
1354 }
1355 memcpy(&values[i], &values_copy[ids[i]].value,
1356 sizeof(values[i]));
1357 }
1358
1359 rte_free(values_copy);
1360 return size;
1361}
1362
1363
1364
1365
1366
1367
1368
1369
1370
1371
1372
1373
1374
1375
1376
1377
1378
1379
1380
1381
1382
1383
1384int
1385hns3_dev_xstats_get_names_by_id(struct rte_eth_dev *dev,
1386 struct rte_eth_xstat_name *xstats_names,
1387 const uint64_t *ids, uint32_t size)
1388{
1389 const uint32_t cnt_stats = hns3_xstats_calc_num(dev);
1390 struct hns3_adapter *hns = dev->data->dev_private;
1391 struct rte_eth_xstat_name *names_copy;
1392 struct hns3_hw *hw = &hns->hw;
1393 uint64_t len;
1394 uint32_t i;
1395
1396 if (xstats_names == NULL)
1397 return cnt_stats;
1398
1399 if (ids == NULL) {
1400 if (size < cnt_stats)
1401 return cnt_stats;
1402
1403 return hns3_dev_xstats_get_names(dev, xstats_names, cnt_stats);
1404 }
1405
1406 len = cnt_stats * sizeof(struct rte_eth_xstat_name);
1407 names_copy = rte_zmalloc("hns3_xstats_names", len, 0);
1408 if (names_copy == NULL) {
1409 hns3_err(hw, "Failed to allocate 0x%" PRIx64 " bytes needed "
1410 "to store statistics names", len);
1411 return -ENOMEM;
1412 }
1413
1414 (void)hns3_dev_xstats_get_names(dev, names_copy, cnt_stats);
1415
1416 for (i = 0; i < size; i++) {
1417 if (ids[i] >= cnt_stats) {
1418 hns3_err(hw, "ids[%u] (%" PRIu64 ") is invalid, "
1419 "should < %u", i, ids[i], cnt_stats);
1420 rte_free(names_copy);
1421 return -EINVAL;
1422 }
1423 snprintf(xstats_names[i].name, sizeof(xstats_names[i].name),
1424 "%s", names_copy[ids[i]].name);
1425 }
1426
1427 rte_free(names_copy);
1428 return size;
1429}
1430
1431static void
1432hns3_tqp_dfx_stats_clear(struct rte_eth_dev *dev)
1433{
1434 struct hns3_rx_queue *rxq;
1435 struct hns3_tx_queue *txq;
1436 uint16_t i;
1437
1438
1439 for (i = 0; i < dev->data->nb_rx_queues; i++) {
1440 rxq = dev->data->rx_queues[i];
1441 if (rxq)
1442 memset(&rxq->dfx_stats, 0,
1443 sizeof(struct hns3_rx_dfx_stats));
1444 }
1445
1446
1447 for (i = 0; i < dev->data->nb_tx_queues; i++) {
1448 txq = dev->data->tx_queues[i];
1449 if (txq)
1450 memset(&txq->dfx_stats, 0,
1451 sizeof(struct hns3_tx_dfx_stats));
1452 }
1453}
1454
1455int
1456hns3_dev_xstats_reset(struct rte_eth_dev *dev)
1457{
1458 struct hns3_adapter *hns = dev->data->dev_private;
1459 int ret;
1460
1461
1462 ret = hns3_stats_reset(dev);
1463 if (ret)
1464 return ret;
1465
1466 hns3_tqp_dfx_stats_clear(dev);
1467
1468
1469 memset(&hns->hw.reset.stats, 0, sizeof(struct hns3_reset_stats));
1470
1471 if (hns->is_vf)
1472 return 0;
1473
1474
1475 ret = hns3_mac_stats_reset(dev);
1476 if (ret)
1477 return ret;
1478
1479 return 0;
1480}
1481
1482int
1483hns3_tqp_stats_init(struct hns3_hw *hw)
1484{
1485 struct hns3_tqp_stats *tqp_stats = &hw->tqp_stats;
1486
1487 tqp_stats->rcb_rx_ring_pktnum = rte_zmalloc("hns3_rx_ring_pkt_num",
1488 sizeof(uint64_t) * hw->tqps_num, 0);
1489 if (tqp_stats->rcb_rx_ring_pktnum == NULL) {
1490 hns3_err(hw, "failed to allocate rx_ring pkt_num.");
1491 return -ENOMEM;
1492 }
1493
1494 tqp_stats->rcb_tx_ring_pktnum = rte_zmalloc("hns3_tx_ring_pkt_num",
1495 sizeof(uint64_t) * hw->tqps_num, 0);
1496 if (tqp_stats->rcb_tx_ring_pktnum == NULL) {
1497 hns3_err(hw, "failed to allocate tx_ring pkt_num.");
1498 rte_free(tqp_stats->rcb_rx_ring_pktnum);
1499 tqp_stats->rcb_rx_ring_pktnum = NULL;
1500 return -ENOMEM;
1501 }
1502
1503 return 0;
1504}
1505
1506void
1507hns3_tqp_stats_uninit(struct hns3_hw *hw)
1508{
1509 struct hns3_tqp_stats *tqp_stats = &hw->tqp_stats;
1510
1511 rte_free(tqp_stats->rcb_rx_ring_pktnum);
1512 tqp_stats->rcb_rx_ring_pktnum = NULL;
1513 rte_free(tqp_stats->rcb_tx_ring_pktnum);
1514 tqp_stats->rcb_tx_ring_pktnum = NULL;
1515}
1516
1517static void
1518hns3_tqp_stats_clear(struct hns3_hw *hw)
1519{
1520 struct hns3_tqp_stats *stats = &hw->tqp_stats;
1521
1522 stats->rcb_rx_ring_pktnum_rcd = 0;
1523 stats->rcb_tx_ring_pktnum_rcd = 0;
1524 memset(stats->rcb_rx_ring_pktnum, 0, sizeof(uint64_t) * hw->tqps_num);
1525 memset(stats->rcb_tx_ring_pktnum, 0, sizeof(uint64_t) * hw->tqps_num);
1526}
1527