1
2
3
4
5#ifndef _HNS3_RXTX_H_
6#define _HNS3_RXTX_H_
7
8#include <stdint.h>
9#include <rte_mbuf_core.h>
10
/* Descriptor-ring geometry: ring sizes must be a multiple of
 * HNS3_ALIGN_RING_DESC and lie in [HNS3_MIN_RING_DESC, HNS3_MAX_RING_DESC].
 */
#define HNS3_MIN_RING_DESC 64
#define HNS3_MAX_RING_DESC 32768
#define HNS3_DEFAULT_RING_DESC 1024
#define HNS3_ALIGN_RING_DESC 32
#define HNS3_RING_BASE_ALIGN 128
/* Number of mbufs fetched at once into the Rx bulk cache (bulk_mbuf[]). */
#define HNS3_BULK_ALLOC_MBUF_NUM 32

/* Default Rx/Tx free and RS thresholds for the fast paths. */
#define HNS3_DEFAULT_RX_FREE_THRESH 32
#define HNS3_DEFAULT_TX_FREE_THRESH 32
#define HNS3_DEFAULT_TX_RS_THRESH 32
/* Number of Tx BDs freed ahead of time when fast mbuf free is in effect. */
#define HNS3_TX_FAST_FREE_AHEAD 64

/* Burst size of the Rx fast path; hard-limited to 64 descriptors. */
#define HNS3_DEFAULT_RX_BURST 64
#if (HNS3_DEFAULT_RX_BURST > 64)
#error "PMD HNS3: HNS3_DEFAULT_RX_BURST must <= 64\n"
#endif
/* Descriptors handled per inner iteration of the vector Rx routines. */
#define HNS3_DEFAULT_DESCS_PER_LOOP 4
#define HNS3_SVE_DEFAULT_DESCS_PER_LOOP 8
/* The Rx offset table must cover the larger of the two per-loop counts. */
#if (HNS3_DEFAULT_DESCS_PER_LOOP > HNS3_SVE_DEFAULT_DESCS_PER_LOOP)
#define HNS3_VECTOR_RX_OFFSET_TABLE_LEN HNS3_DEFAULT_DESCS_PER_LOOP
#else
#define HNS3_VECTOR_RX_OFFSET_TABLE_LEN HNS3_SVE_DEFAULT_DESCS_PER_LOOP
#endif
#define HNS3_DEFAULT_RXQ_REARM_THRESH 64
/* Bit widths of the basic integer types, for bit-field manipulation. */
#define HNS3_UINT8_BIT 8
#define HNS3_UINT16_BIT 16
#define HNS3_UINT32_BIT 32
38
/* Supported Rx BD buffer sizes... */
#define HNS3_512_BD_BUF_SIZE 512
#define HNS3_1K_BD_BUF_SIZE 1024
#define HNS3_2K_BD_BUF_SIZE 2048
#define HNS3_4K_BD_BUF_SIZE 4096

#define HNS3_MIN_BD_BUF_SIZE HNS3_512_BD_BUF_SIZE
#define HNS3_MAX_BD_BUF_SIZE HNS3_4K_BD_BUF_SIZE

/* ...and the type codes programmed into hardware for each size. */
#define HNS3_BD_SIZE_512_TYPE 0
#define HNS3_BD_SIZE_1024_TYPE 1
#define HNS3_BD_SIZE_2048_TYPE 2
#define HNS3_BD_SIZE_4096_TYPE 3

/* Values of the small per-packet Rx indicator fields (VLAN presence and
 * the L3/L4 protocol id encodings).
 */
#define HNS3_RX_FLAG_VLAN_PRESENT 0x1
#define HNS3_RX_FLAG_L3ID_IPV4 0x0
#define HNS3_RX_FLAG_L3ID_IPV6 0x1
#define HNS3_RX_FLAG_L4ID_UDP 0x0
#define HNS3_RX_FLAG_L4ID_TCP 0x1
57
/* Rx BD "l234_info" word layout (_S = field shift, _M = field mask,
 * _B = single-bit position). Bits 0-15 carry L2/L3/L4 classification.
 */
#define HNS3_RXD_DMAC_S 0
#define HNS3_RXD_DMAC_M (0x3 << HNS3_RXD_DMAC_S)
#define HNS3_RXD_VLAN_S 2
#define HNS3_RXD_VLAN_M (0x3 << HNS3_RXD_VLAN_S)
#define HNS3_RXD_L3ID_S 4
#define HNS3_RXD_L3ID_M (0xf << HNS3_RXD_L3ID_S)
#define HNS3_RXD_L4ID_S 8
#define HNS3_RXD_L4ID_M (0xf << HNS3_RXD_L4ID_S)
#define HNS3_RXD_FRAG_B 12
#define HNS3_RXD_STRP_TAGP_S 13
#define HNS3_RXD_STRP_TAGP_M (0x3 << HNS3_RXD_STRP_TAGP_S)

/* l234_info bits 16-23: error indications (L2/L3/L4 and outer L3/L4
 * checksum errors, truncation), consumed by hns3_rx_set_cksum_flag()
 * and hns3_handle_bdinfo(). Bits 24-31: GRO information.
 */
#define HNS3_RXD_L2E_B 16
#define HNS3_RXD_L3E_B 17
#define HNS3_RXD_L4E_B 18
#define HNS3_RXD_TRUNCATE_B 19
#define HNS3_RXD_HOI_B 20
#define HNS3_RXD_DOI_B 21
#define HNS3_RXD_OL3E_B 22
#define HNS3_RXD_OL4E_B 23
#define HNS3_RXD_GRO_COUNT_S 24
#define HNS3_RXD_GRO_COUNT_M (0x3f << HNS3_RXD_GRO_COUNT_S)
#define HNS3_RXD_GRO_FIXID_B 30
#define HNS3_RXD_GRO_ECN_B 31

/* Rx BD "ol_info" word layout: outer MAC/VLAN/L3/L4 ids. When the RXD
 * advanced layout is enabled (rxq->ptype_en), bits 4-11 instead hold a
 * full packet-type index (see hns3_rx_calc_ptype()).
 */
#define HNS3_RXD_ODMAC_S 0
#define HNS3_RXD_ODMAC_M (0x3 << HNS3_RXD_ODMAC_S)
#define HNS3_RXD_OVLAN_S 2
#define HNS3_RXD_OVLAN_M (0x3 << HNS3_RXD_OVLAN_S)
#define HNS3_RXD_OL3ID_S 4
#define HNS3_RXD_OL3ID_M (0xf << HNS3_RXD_OL3ID_S)
#define HNS3_RXD_OL4ID_S 8
#define HNS3_RXD_OL4ID_M (0xf << HNS3_RXD_OL4ID_S)
#define HNS3_RXD_PTYPE_S 4
#define HNS3_RXD_PTYPE_M (0xff << HNS3_RXD_PTYPE_S)
#define HNS3_RXD_FBHI_S 12
#define HNS3_RXD_FBHI_M (0x3 << HNS3_RXD_FBHI_S)
#define HNS3_RXD_FBLI_S 14
#define HNS3_RXD_FBLI_M (0x3 << HNS3_RXD_FBLI_S)

/* Rx BD "bd_base_info" word layout: BD type, valid (VLD) and frame-end
 * (FE) bits, checksum-performed indication (L3L4P), timestamp-valid bit
 * and the GRO size field.
 */
#define HNS3_RXD_BDTYPE_S 0
#define HNS3_RXD_BDTYPE_M (0xf << HNS3_RXD_BDTYPE_S)
#define HNS3_RXD_VLD_B 4
#define HNS3_RXD_UDP0_B 5
#define HNS3_RXD_EXTEND_B 7
#define HNS3_RXD_FE_B 8
#define HNS3_RXD_LUM_B 9
#define HNS3_RXD_CRCP_B 10
#define HNS3_RXD_L3L4P_B 11

#define HNS3_RXD_TS_VLD_B 14
#define HNS3_RXD_GRO_SIZE_S 16
#define HNS3_RXD_GRO_SIZE_M (0x3fff << HNS3_RXD_GRO_SIZE_S)
111
/* Tx BD "type_cs_vlan_tso_len" word layout: inner L3/L4 type, checksum
 * enable, VLAN insertion and TSO enable bits, followed by the L2/L3/L4
 * header lengths (one byte each, matching l2_len/l3_len/l4_len).
 */
#define HNS3_TXD_L3T_S 0
#define HNS3_TXD_L3T_M (0x3 << HNS3_TXD_L3T_S)
#define HNS3_TXD_L4T_S 2
#define HNS3_TXD_L4T_M (0x3 << HNS3_TXD_L4T_S)
#define HNS3_TXD_L3CS_B 4
#define HNS3_TXD_L4CS_B 5
#define HNS3_TXD_VLAN_B 6
#define HNS3_TXD_TSO_B 7

#define HNS3_TXD_L2LEN_S 8
#define HNS3_TXD_L2LEN_M (0xff << HNS3_TXD_L2LEN_S)
#define HNS3_TXD_L3LEN_S 16
#define HNS3_TXD_L3LEN_M (0xff << HNS3_TXD_L3LEN_S)
#define HNS3_TXD_L4LEN_S 24
/* UL literal keeps the shift from touching the sign bit of a 32-bit int. */
#define HNS3_TXD_L4LEN_M (0xffUL << HNS3_TXD_L4LEN_S)

/* Tx BD "ol_type_vlan_len_msec" word layout: outer L3 type, outer VLAN
 * insertion, MACsec and tunnel type fields.
 */
#define HNS3_TXD_OL3T_S 0
#define HNS3_TXD_OL3T_M (0x3 << HNS3_TXD_OL3T_S)
#define HNS3_TXD_OVLAN_B 2
#define HNS3_TXD_MACSEC_B 3
#define HNS3_TXD_TUNTYPE_S 4
#define HNS3_TXD_TUNTYPE_M (0xf << HNS3_TXD_TUNTYPE_S)

/* Tx BD "tp_fe_sc_vld_ra_ri" control field layout plus the MSS field. */
#define HNS3_TXD_BDTYPE_S 0
#define HNS3_TXD_BDTYPE_M (0xf << HNS3_TXD_BDTYPE_S)
#define HNS3_TXD_FE_B 4
#define HNS3_TXD_SC_S 5
#define HNS3_TXD_SC_M (0x3 << HNS3_TXD_SC_S)
#define HNS3_TXD_EXTEND_B 7
#define HNS3_TXD_VLD_B 8
#define HNS3_TXD_RI_B 9
#define HNS3_TXD_RA_B 10
#define HNS3_TXD_TSYN_B 11
#define HNS3_TXD_DECTTL_S 12
#define HNS3_TXD_DECTTL_M (0xf << HNS3_TXD_DECTTL_S)

#define HNS3_TXD_MSS_S 0
#define HNS3_TXD_MSS_M (0x3fff << HNS3_TXD_MSS_S)

/* Outer L4 checksum enable bit; NOTE(review): presumably located in the
 * "paylen_fd_dop_ol4cs" word (inferred from the field name) - confirm.
 */
#define HNS3_TXD_OL4CS_B 22
/* Header lengths are programmed in units of (1 << unit) bytes;
 * NOTE(review): confirm against the datapath code that fills l2_len etc.
 */
#define HNS3_L2_LEN_UNIT 1UL
#define HNS3_L3_LEN_UNIT 2UL
#define HNS3_L4_LEN_UNIT 2UL

/* Ready-made control value for a plain data BD: valid + frame-end +
 * default BD type, OR-ed together.
 */
#define HNS3_TXD_DEFAULT_BDTYPE 0
#define HNS3_TXD_VLD_CMD (0x1 << HNS3_TXD_VLD_B)
#define HNS3_TXD_FE_CMD (0x1 << HNS3_TXD_FE_B)
#define HNS3_TXD_DEFAULT_VLD_FE_BDTYPE \
	(HNS3_TXD_VLD_CMD | HNS3_TXD_FE_CMD | HNS3_TXD_DEFAULT_BDTYPE)
#define HNS3_TXD_SEND_SIZE_SHIFT 16
162
/* L2 (MAC) address type of a packet, as a 2-bit hardware encoding. */
enum hns3_pkt_l2t_type {
	HNS3_L2_TYPE_UNICAST = 0,
	HNS3_L2_TYPE_MULTICAST = 1,
	HNS3_L2_TYPE_BROADCAST = 2,
	HNS3_L2_TYPE_INVALID = 3,
};
169
/*
 * L3 protocol type encoding (see the HNS3_TXD_L3T_* field macros).
 * Note the hardware ordering: IPv6 (1) precedes IPv4 (2).
 */
enum hns3_pkt_l3t_type {
	HNS3_L3T_NONE = 0,
	HNS3_L3T_IPV6 = 1,
	HNS3_L3T_IPV4 = 2,
	HNS3_L3T_RESERVED = 3,
};
176
/* L4 protocol type encoding (see the HNS3_TXD_L4T_* field macros). */
enum hns3_pkt_l4t_type {
	HNS3_L4T_UNKNOWN = 0,
	HNS3_L4T_TCP = 1,
	HNS3_L4T_UDP = 2,
	HNS3_L4T_SCTP = 3,
};
183
/* Outer L3 type encoding for tunnel Tx (see HNS3_TXD_OL3T_*); IPv4 has
 * separate values with and without outer header checksum offload.
 */
enum hns3_pkt_ol3t_type {
	HNS3_OL3T_NONE = 0,
	HNS3_OL3T_IPV6 = 1,
	HNS3_OL3T_IPV4_NO_CSUM = 2,
	HNS3_OL3T_IPV4_CSUM = 3,
};
190
/* Tunnel encapsulation type (see the HNS3_TXD_TUNTYPE_* field macros). */
enum hns3_pkt_tun_type {
	HNS3_TUN_NONE = 0,
	HNS3_TUN_MAC_IN_UDP = 1, /* MAC-in-UDP encapsulation, e.g. VXLAN */
	HNS3_TUN_NVGRE = 2,
	HNS3_TUN_OTHER = 3,
};
197
198
/*
 * Hardware buffer descriptor (BD), shared by the Rx and Tx rings.
 * Bytes 0-7 hold the buffer DMA address (also viewable as two 32-bit
 * halves, or reused on Rx for a timestamp); bytes 8-31 are a union of
 * the Tx and Rx layouts. __rte_packed keeps the struct an exact match
 * for the 32-byte device format.
 */
struct hns3_desc {
	union {
		uint64_t addr; /* buffer IOVA/DMA address */
		/* Rx timestamp; NOTE(review): presumably valid only when
		 * HNS3_RXD_TS_VLD_B is set in bd_base_info - confirm.
		 */
		uint64_t timestamp;

		struct {
			uint32_t addr0; /* low 32 bits of addr */
			uint32_t addr1; /* high 32 bits of addr */
		};
	};
	union {
		struct {
			uint16_t vlan_tag;  /* VLAN tag to insert */
			uint16_t send_size; /* data length of this BD */
			union {
				/* inner L3/L4 type, csum/VLAN/TSO enables
				 * and L2/L3/L4 header lengths as one word
				 * (bit layout: HNS3_TXD_L3T_* etc.).
				 */
				uint32_t type_cs_vlan_tso_len;
				struct {
					uint8_t type_cs_vlan_tso;
					uint8_t l2_len;
					uint8_t l3_len;
					uint8_t l4_len;
				};
			};
			uint16_t outer_vlan_tag; /* outer VLAN tag to insert */
			uint16_t tv;
			union {
				/* outer L3/tunnel types and outer header
				 * lengths (bit layout: HNS3_TXD_OL3T_*).
				 */
				uint32_t ol_type_vlan_len_msec;
				struct {
					uint8_t ol_type_vlan_msec;
					uint8_t ol2_len;
					uint8_t ol3_len;
					uint8_t ol4_len;
				};
			};

			/* payload length plus misc bits (see HNS3_TXD_OL4CS_B) */
			uint32_t paylen_fd_dop_ol4cs;
			/* BD type / FE / VLD control bits (HNS3_TXD_BDTYPE_* etc.) */
			uint16_t tp_fe_sc_vld_ra_ri;
			uint16_t mss; /* MSS for TSO */
		} tx;

		struct {
			/* L2/L3/L4 ids and error bits (HNS3_RXD_* macros) */
			uint32_t l234_info;
			uint16_t pkt_len; /* total packet length */
			uint16_t size;    /* data length in this BD */
			uint32_t rss_hash;
			uint16_t fd_id;   /* flow-director match id */
			uint16_t vlan_tag; /* stripped VLAN tag */
			union {
				/* outer ids / packet-type index (HNS3_RXD_O*) */
				uint32_t ol_info;
				struct {
					uint16_t o_dm_vlan_id_fb;
					uint16_t ot_vlan_tag;
				};
			};
			union {
				/* BD type / VLD / FE / L3L4P status bits */
				uint32_t bd_base_info;
				struct {
					uint16_t bdtype_vld_udp0;
					uint16_t fe_lum_crcp_l3l4p;
				};
			};
		} rx;
	};
} __rte_packed;
268
/* Software ring entry: the mbuf currently attached to one BD slot. */
struct hns3_entry {
	struct rte_mbuf *mbuf;
};
272
/* Basic per-queue Rx counters. */
struct hns3_rx_basic_stats {
	uint64_t packets;
	uint64_t bytes;
	uint64_t errors;
};
278
/* Per-queue Rx checksum-error counters, incremented by
 * hns3_rx_set_cksum_flag() (DFX = diagnostic statistics).
 */
struct hns3_rx_dfx_stats {
	uint64_t l3_csum_errors;
	uint64_t l4_csum_errors;
	uint64_t ol3_csum_errors;
	uint64_t ol4_csum_errors;
};
285
/* Per-queue counters for packets dropped by hns3_handle_bdinfo(). */
struct hns3_rx_bd_errors_stats {
	uint64_t l2_errors;       /* BDs with the L2 error bit set */
	uint64_t pkt_len_errors;  /* truncated or zero-length packets */
};
290
/*
 * Per-queue Rx context. The leading fields are those touched on the
 * data path; fields from io_base down are control/configuration state.
 */
struct hns3_rx_queue {
	volatile void *io_head_reg;         /* HW head register, written on refill */
	struct hns3_ptype_table *ptype_tbl; /* packet-type lookup tables */
	struct rte_mempool *mb_pool;        /* mempool backing the ring mbufs */
	struct hns3_desc *rx_ring;          /* BD ring virtual address */
	struct hns3_entry *sw_ring;         /* per-BD mbuf bookkeeping */

	uint16_t port_id;
	uint16_t nb_rx_desc;                /* number of BDs in the ring */

	/*
	 * Number of consumed BDs accumulated (rx_free_hold) before the HW
	 * head register is updated; updated once it reaches rx_free_thresh.
	 */
	uint16_t rx_free_thresh;
	uint16_t next_to_use;               /* next BD index to process */
	uint16_t rx_free_hold;              /* BDs not yet returned to HW */
	uint16_t rx_rearm_start;            /* vector path: first BD to rearm */
	uint16_t rx_rearm_nb;               /* vector path: BDs pending rearm */

	/* CRC bytes kept in the packet; NOTE(review): presumably 0, or 4
	 * when the KEEP_CRC offload is on - value is set elsewhere, confirm.
	 */
	uint8_t crc_len;

	/*
	 * pvid_sw_discard_en: discard packets still carrying the port VLAN
	 * in software (NOTE(review): enabling condition set elsewhere -
	 * confirm).
	 * ptype_en: hardware reports a full packet-type index in ol_info
	 * (RXD advanced layout); see hns3_rx_calc_ptype().
	 */
	uint8_t pvid_sw_discard_en:1;
	uint8_t ptype_en:1;

	uint64_t mbuf_initializer;          /* vector path: mbuf rearm template */

	/* vector path: per-descriptor offset table (+1 sentinel slot) */
	uint8_t offset_table[HNS3_VECTOR_RX_OFFSET_TABLE_LEN + 1];

	uint16_t bulk_mbuf_num;             /* mbufs cached in bulk_mbuf[] */

	struct hns3_rx_basic_stats basic_stats;

	struct rte_mbuf *pkt_first_seg;     /* first seg of packet being reassembled */
	struct rte_mbuf *pkt_last_seg;      /* last seg of packet being reassembled */

	struct rte_mbuf *bulk_mbuf[HNS3_BULK_ALLOC_MBUF_NUM]; /* bulk-alloc cache */

	struct hns3_rx_dfx_stats dfx_stats; /* checksum-error counters */

	struct hns3_rx_bd_errors_stats err_stats; /* BD-level error counters */

	/* placeholder mbuf; NOTE(review): purpose not visible in this
	 * header (presumably a vector-path sentinel) - confirm.
	 */
	struct rte_mbuf fake_mbuf;

	/* Fields below are not accessed on the hot path. */
	void *io_base;                      /* queue register base address */
	struct hns3_adapter *hns;           /* owning adapter */
	uint64_t rx_ring_phys_addr;         /* BD ring IOVA/DMA address */
	const struct rte_memzone *mz;       /* memzone backing rx_ring */

	uint16_t queue_id;
	uint16_t rx_buf_len;                /* data buffer size per BD */

	bool configured;                    /* queue has been set up */
	bool rx_deferred_start;             /* don't start with device start */
	bool enabled;                       /* queue currently enabled in HW */
};
363
/* Basic per-queue Tx counters. */
struct hns3_tx_basic_stats {
	uint64_t packets;
	uint64_t bytes;
};
368
369
370
371
372
373
374
375
376
377
378
379
380
381
382
383
384
385
386
387
388
389
390
391
392
393
394
395
396
397
398
399
400
401
402
403
404
405
406
407
408
409
410
/* Per-queue Tx datapath diagnostic counters. */
struct hns3_tx_dfx_stats {
	uint64_t over_length_pkt_cnt;          /* packets over the length limit */
	uint64_t exceed_limit_bd_pkt_cnt;      /* packets needing too many BDs */
	uint64_t exceed_limit_bd_reassem_fail; /* reassembly into fewer BDs failed */
	uint64_t unsupported_tunnel_pkt_cnt;   /* tunnel types HW can't offload */
	uint64_t queue_full_cnt;               /* transmit attempts on a full ring */
	uint64_t pkt_padding_fail_cnt;         /* padding to minimum length failed */
};
419
/*
 * Per-queue Tx context. The leading fields are those touched on the
 * data path; fields from io_base down are control/configuration state.
 */
struct hns3_tx_queue {
	/* HW tail (doorbell) register, see hns3_write_txq_tail_reg() */
	volatile void *io_tail_reg;
	struct hns3_desc *tx_ring;  /* BD ring virtual address */
	struct hns3_entry *sw_ring; /* per-BD mbuf bookkeeping */

	uint16_t nb_tx_desc;        /* number of BDs in the ring */

	/* next BD index to reclaim after HW completion */
	uint16_t next_to_clean;

	uint16_t next_to_use;       /* next BD index to fill */

	/* number of BDs currently available for new packets; see
	 * HNS3_GET_TX_QUEUE_PEND_BD_NUM()
	 */
	uint16_t tx_bd_ready;

	/* start reclaiming done BDs when free BDs drop to this level */
	uint16_t tx_free_thresh;

	/* packets shorter than this are padded before transmit;
	 * NOTE(review): value source (HW minimum frame size?) is not
	 * visible in this header - confirm.
	 */
	uint8_t min_tx_pkt_len;

	uint8_t max_non_tso_bd_num; /* max BDs allowed per non-TSO packet */

	/* tso_mode: NOTE(review): presumably selects how TSO header
	 * fix-up is handled for this HW revision - set elsewhere, confirm.
	 */
	uint16_t tso_mode:1;

	/* udp_cksum_mode: NOTE(review): presumably selects a software
	 * workaround mode for a UDP checksum offload limitation - confirm.
	 */
	uint16_t udp_cksum_mode:1;

	uint16_t simple_bd_enable:1; /* simple BD layout in use */
	uint16_t tx_push_enable:1;   /* 64-bit push doorbell in use */

	/* shift/insert the port VLAN (PVID) in software before transmit;
	 * NOTE(review): enabling condition set elsewhere - confirm.
	 */
	uint16_t pvid_sw_shift_en:1;

	/* mbuf fast-free offload is in effect for this queue */
	uint16_t mbuf_fast_free_en:1;

	uint16_t tx_rs_thresh;      /* BDs processed per clean batch */
	struct rte_mbuf **free;     /* scratch array for bulk mbuf free */

	struct hns3_tx_basic_stats basic_stats;
	struct hns3_tx_dfx_stats dfx_stats; /* datapath drop/error counters */

	/* Fields below are not accessed on the hot path. */
	void *io_base;                /* queue register base address */
	struct hns3_adapter *hns;     /* owning adapter */
	uint64_t tx_ring_phys_addr;   /* BD ring IOVA/DMA address */
	const struct rte_memzone *mz; /* memzone backing tx_ring */

	uint16_t port_id;
	uint16_t queue_id;

	bool configured;              /* queue has been set up */
	bool tx_deferred_start;       /* don't start with device start */
	bool enabled;                 /* queue currently enabled in HW */
};
536
/* Number of BDs handed to HW but not yet cleaned on a Tx queue. One BD
 * is always kept unused (hence the "- 1") so a full ring can be told
 * apart from an empty one.
 */
#define HNS3_GET_TX_QUEUE_PEND_BD_NUM(txq) \
	((txq)->nb_tx_desc - 1 - (txq)->tx_bd_ready)
539
/* Parameters bundled together when allocating one queue's ring memory. */
struct hns3_queue_info {
	const char *type;       /* descriptive queue type string */
	const char *ring_name;  /* name used for the ring memzone */
	uint16_t idx;           /* queue index */
	uint16_t nb_desc;       /* ring size in BDs */
	unsigned int socket_id; /* NUMA socket to allocate from */
};
547
/* Tx offload flags examined when filling the checksum/TSO fields of a BD. */
#define HNS3_TX_CKSUM_OFFLOAD_MASK (RTE_MBUF_F_TX_OUTER_UDP_CKSUM | \
				    RTE_MBUF_F_TX_OUTER_IP_CKSUM | \
				    RTE_MBUF_F_TX_IP_CKSUM | \
				    RTE_MBUF_F_TX_TCP_SEG | \
				    RTE_MBUF_F_TX_L4_MASK)
553
/* Checksum validation outcome flags; individual bits that can be OR-ed. */
enum hns3_cksum_status {
	HNS3_CKSUM_NONE = 0,         /* no checksum error detected */
	HNS3_L3_CKSUM_ERR = 1,       /* L3 (IP) checksum error */
	HNS3_L4_CKSUM_ERR = 2,       /* L4 checksum error */
	HNS3_OUTER_L3_CKSUM_ERR = 4, /* outer L3 checksum error */
	HNS3_OUTER_L4_CKSUM_ERR = 8  /* outer L4 checksum error */
};
561
/* Dynamic mbuf flag and field offset used to deliver Rx timestamps;
 * registered at runtime (defined in the driver's .c code).
 */
extern uint64_t hns3_timestamp_rx_dynflag;
extern int hns3_timestamp_dynfield_offset;
564
/*
 * Translate the checksum error bits of an Rx BD's 'l234_info' word into
 * mbuf ol_flags, counting each error type in the queue's DFX stats.
 *
 * Fast path: when none of the four error bits is set, both IP and L4
 * checksums are marked good in one step and the function returns.
 */
static inline void
hns3_rx_set_cksum_flag(struct hns3_rx_queue *rxq,
		       struct rte_mbuf *rxm,
		       uint32_t l234_info)
{
#define HNS3_RXD_CKSUM_ERR_MASK	(BIT(HNS3_RXD_L3E_B) | \
				 BIT(HNS3_RXD_L4E_B) | \
				 BIT(HNS3_RXD_OL3E_B) | \
				 BIT(HNS3_RXD_OL4E_B))

	if (likely((l234_info & HNS3_RXD_CKSUM_ERR_MASK) == 0)) {
		rxm->ol_flags |= (RTE_MBUF_F_RX_IP_CKSUM_GOOD | RTE_MBUF_F_RX_L4_CKSUM_GOOD);
		return;
	}

	if (unlikely(l234_info & BIT(HNS3_RXD_L3E_B))) {
		rxm->ol_flags |= RTE_MBUF_F_RX_IP_CKSUM_BAD;
		rxq->dfx_stats.l3_csum_errors++;
	} else {
		rxm->ol_flags |= RTE_MBUF_F_RX_IP_CKSUM_GOOD;
	}

	if (unlikely(l234_info & BIT(HNS3_RXD_L4E_B))) {
		rxm->ol_flags |= RTE_MBUF_F_RX_L4_CKSUM_BAD;
		rxq->dfx_stats.l4_csum_errors++;
	} else {
		rxm->ol_flags |= RTE_MBUF_F_RX_L4_CKSUM_GOOD;
	}

	/* An outer-L3 error is only counted here; no outer-IP "bad" flag
	 * is set on the mbuf (NOTE(review): confirm this is intentional).
	 */
	if (unlikely(l234_info & BIT(HNS3_RXD_OL3E_B)))
		rxq->dfx_stats.ol3_csum_errors++;

	if (unlikely(l234_info & BIT(HNS3_RXD_OL4E_B))) {
		rxm->ol_flags |= RTE_MBUF_F_RX_OUTER_L4_CKSUM_BAD;
		rxq->dfx_stats.ol4_csum_errors++;
	}
}
602
/*
 * Validate a completed Rx BD and, when valid, set checksum mbuf flags.
 *
 * Returns 0 when the packet can be delivered, -EINVAL when it must be
 * dropped: frame-end (FE) bit missing, L2 error, truncation, or a zero
 * packet length. NOTE(review): the missing-FE case presumably covers a
 * packet larger than the Rx buffer on the non-scattered path - confirm.
 */
static inline int
hns3_handle_bdinfo(struct hns3_rx_queue *rxq, struct rte_mbuf *rxm,
		   uint32_t bd_base_info, uint32_t l234_info)
{
#define L2E_TRUNC_ERR_FLAG	(BIT(HNS3_RXD_L2E_B) | \
				 BIT(HNS3_RXD_TRUNCATE_B))

	/* A BD without the frame-end bit cannot complete a packet here. */
	if (unlikely((bd_base_info & BIT(HNS3_RXD_FE_B)) == 0))
		return -EINVAL;

	if (unlikely((l234_info & L2E_TRUNC_ERR_FLAG) || rxm->pkt_len == 0)) {
		if (l234_info & BIT(HNS3_RXD_L2E_B))
			rxq->err_stats.l2_errors++;
		else
			rxq->err_stats.pkt_len_errors++;
		return -EINVAL;
	}

	/* L3L4P indicates hardware actually performed checksum validation */
	if (bd_base_info & BIT(HNS3_RXD_L3L4P_B))
		hns3_rx_set_cksum_flag(rxq, rxm, l234_info);

	return 0;
}
632
633static inline uint32_t
634hns3_rx_calc_ptype(struct hns3_rx_queue *rxq, const uint32_t l234_info,
635 const uint32_t ol_info)
636{
637 const struct hns3_ptype_table * const ptype_tbl = rxq->ptype_tbl;
638 uint32_t ol3id, ol4id;
639 uint32_t l3id, l4id;
640 uint32_t ptype;
641
642 if (rxq->ptype_en) {
643 ptype = hns3_get_field(ol_info, HNS3_RXD_PTYPE_M,
644 HNS3_RXD_PTYPE_S);
645 return ptype_tbl->ptype[ptype];
646 }
647
648 ol4id = hns3_get_field(ol_info, HNS3_RXD_OL4ID_M, HNS3_RXD_OL4ID_S);
649 ol3id = hns3_get_field(ol_info, HNS3_RXD_OL3ID_M, HNS3_RXD_OL3ID_S);
650 l3id = hns3_get_field(l234_info, HNS3_RXD_L3ID_M, HNS3_RXD_L3ID_S);
651 l4id = hns3_get_field(l234_info, HNS3_RXD_L4ID_M, HNS3_RXD_L4ID_S);
652
653 if (unlikely(ptype_tbl->ol4table[ol4id]))
654 return ptype_tbl->inner_l3table[l3id] |
655 ptype_tbl->inner_l4table[l4id] |
656 ptype_tbl->ol3table[ol3id] |
657 ptype_tbl->ol4table[ol4id];
658 else
659 return ptype_tbl->l3table[l3id] | ptype_tbl->l4table[l4id];
660}
661
662
663
664
665
666
667
668
/*
 * Ring the Tx doorbell: order all prior BD stores with rte_io_wmb(),
 * then write 'value' to the queue's tail register — a 64-bit relaxed
 * write when TX push is enabled, a 32-bit one otherwise.
 *
 * NOTE(review): the push path stores rte_cpu_to_le_32(value) through a
 * 64-bit write, leaving the upper 32 bits zero - confirm this matches
 * the push doorbell format expected by the hardware.
 */
static inline void
hns3_write_txq_tail_reg(struct hns3_tx_queue *txq, uint32_t value)
{
	rte_io_wmb();
	if (txq->tx_push_enable)
		rte_write64_relaxed(rte_cpu_to_le_32(value), txq->io_tail_reg);
	else
		rte_write32_relaxed(rte_cpu_to_le_32(value), txq->io_tail_reg);
}
678
/* Queue release, reset and interrupt control. */
void hns3_dev_rx_queue_release(struct rte_eth_dev *dev, uint16_t queue_id);
void hns3_dev_tx_queue_release(struct rte_eth_dev *dev, uint16_t queue_id);
void hns3_free_all_queues(struct rte_eth_dev *dev);
int hns3_reset_all_tqps(struct hns3_adapter *hns);
void hns3_dev_all_rx_queue_intr_enable(struct hns3_hw *hw, bool en);
int hns3_dev_rx_queue_intr_enable(struct rte_eth_dev *dev, uint16_t queue_id);
int hns3_dev_rx_queue_intr_disable(struct rte_eth_dev *dev, uint16_t queue_id);
/* Queue setup and start/stop. */
void hns3_enable_all_queues(struct hns3_hw *hw, bool en);
int hns3_init_queues(struct hns3_adapter *hns, bool reset_queue);
void hns3_start_tqps(struct hns3_hw *hw);
void hns3_stop_tqps(struct hns3_hw *hw);
int hns3_rxq_iterate(struct rte_eth_dev *dev,
		     int (*callback)(struct hns3_rx_queue *, void *), void *arg);
void hns3_dev_release_mbufs(struct hns3_adapter *hns);
int hns3_rx_queue_setup(struct rte_eth_dev *dev, uint16_t idx, uint16_t nb_desc,
			unsigned int socket, const struct rte_eth_rxconf *conf,
			struct rte_mempool *mp);
int hns3_tx_queue_setup(struct rte_eth_dev *dev, uint16_t idx, uint16_t nb_desc,
			unsigned int socket, const struct rte_eth_txconf *conf);
uint32_t hns3_rx_queue_count(void *rx_queue);
int hns3_dev_rx_queue_start(struct rte_eth_dev *dev, uint16_t rx_queue_id);
int hns3_dev_rx_queue_stop(struct rte_eth_dev *dev, uint16_t rx_queue_id);
int hns3_dev_tx_queue_start(struct rte_eth_dev *dev, uint16_t tx_queue_id);
int hns3_dev_tx_queue_stop(struct rte_eth_dev *dev, uint16_t tx_queue_id);
/* Rx/Tx burst functions: scalar and vector variants. */
uint16_t hns3_recv_pkts_simple(void *rx_queue, struct rte_mbuf **rx_pkts,
			       uint16_t nb_pkts);
uint16_t hns3_recv_scattered_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
				  uint16_t nb_pkts);
uint16_t hns3_recv_pkts_vec(void *rx_queue, struct rte_mbuf **rx_pkts,
			    uint16_t nb_pkts);
uint16_t hns3_recv_pkts_vec_sve(void *rx_queue, struct rte_mbuf **rx_pkts,
				uint16_t nb_pkts);
int hns3_rx_burst_mode_get(struct rte_eth_dev *dev,
			   __rte_unused uint16_t queue_id,
			   struct rte_eth_burst_mode *mode);
uint16_t hns3_prep_pkts(__rte_unused void *tx_queue, struct rte_mbuf **tx_pkts,
			uint16_t nb_pkts);
uint16_t hns3_xmit_pkts_simple(void *tx_queue, struct rte_mbuf **tx_pkts,
			       uint16_t nb_pkts);
uint16_t hns3_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
			uint16_t nb_pkts);
uint16_t hns3_xmit_pkts_vec(void *tx_queue, struct rte_mbuf **tx_pkts,
			    uint16_t nb_pkts);
uint16_t hns3_xmit_pkts_vec_sve(void *tx_queue, struct rte_mbuf **tx_pkts,
				uint16_t nb_pkts);
int hns3_tx_burst_mode_get(struct rte_eth_dev *dev,
			   __rte_unused uint16_t queue_id,
			   struct rte_eth_burst_mode *mode);
const uint32_t *hns3_dev_supported_ptypes_get(struct rte_eth_dev *dev);
void hns3_init_rx_ptype_tble(struct rte_eth_dev *dev);
void hns3_set_rxtx_function(struct rte_eth_dev *eth_dev);
eth_tx_burst_t hns3_get_tx_function(struct rte_eth_dev *dev,
				    eth_tx_prep_t *prep);

/* Queue interrupt coalescing and misc configuration. */
uint32_t hns3_get_tqp_intr_reg_offset(uint16_t tqp_intr_id);
void hns3_set_queue_intr_gl(struct hns3_hw *hw, uint16_t queue_id,
			    uint8_t gl_idx, uint16_t gl_value);
void hns3_set_queue_intr_rl(struct hns3_hw *hw, uint16_t queue_id,
			    uint16_t rl_value);
void hns3_set_queue_intr_ql(struct hns3_hw *hw, uint16_t queue_id,
			    uint16_t ql_value);
int hns3_set_fake_rx_or_tx_queues(struct rte_eth_dev *dev, uint16_t nb_rx_q,
				  uint16_t nb_tx_q);
int hns3_config_gro(struct hns3_hw *hw, bool en);
int hns3_restore_gro_conf(struct hns3_hw *hw);
void hns3_update_all_queues_pvid_proc_en(struct hns3_hw *hw);
void hns3_rx_scattered_reset(struct rte_eth_dev *dev);
void hns3_rx_scattered_calc(struct rte_eth_dev *dev);
int hns3_rx_check_vec_support(struct rte_eth_dev *dev);
int hns3_tx_check_vec_support(struct rte_eth_dev *dev);
void hns3_rxq_vec_setup(struct hns3_rx_queue *rxq);
void hns3_rxq_info_get(struct rte_eth_dev *dev, uint16_t queue_id,
		       struct rte_eth_rxq_info *qinfo);
void hns3_txq_info_get(struct rte_eth_dev *dev, uint16_t queue_id,
		       struct rte_eth_txq_info *qinfo);
uint32_t hns3_get_tqp_reg_offset(uint16_t idx);
int hns3_start_all_txqs(struct rte_eth_dev *dev);
int hns3_start_all_rxqs(struct rte_eth_dev *dev);
void hns3_stop_all_txqs(struct rte_eth_dev *dev);
void hns3_restore_tqp_enable_state(struct hns3_hw *hw);
int hns3_tx_done_cleanup(void *txq, uint32_t free_cnt);
void hns3_enable_rxd_adv_layout(struct hns3_hw *hw);
int hns3_dev_rx_descriptor_status(void *rx_queue, uint16_t offset);
int hns3_dev_tx_descriptor_status(void *tx_queue, uint16_t offset);
void hns3_tx_push_init(struct rte_eth_dev *dev);
void hns3_stop_tx_datapath(struct rte_eth_dev *dev);
void hns3_start_tx_datapath(struct rte_eth_dev *dev);
766
767#endif
768