#ifndef _HNS3_RXTX_H_
#define _HNS3_RXTX_H_

#include <stdint.h>
#include <rte_mbuf_core.h>

#define HNS3_MIN_RING_DESC 64
#define HNS3_MAX_RING_DESC 32768
#define HNS3_DEFAULT_RING_DESC 1024
#define HNS3_ALIGN_RING_DESC 32
#define HNS3_RING_BASE_ALIGN 128
#define HNS3_BULK_ALLOC_MBUF_NUM 32

#define HNS3_DEFAULT_RX_FREE_THRESH 32
#define HNS3_DEFAULT_TX_FREE_THRESH 32
#define HNS3_DEFAULT_TX_RS_THRESH 32
#define HNS3_TX_FAST_FREE_AHEAD 64

#define HNS3_DEFAULT_RX_BURST 64
#if (HNS3_DEFAULT_RX_BURST > 64)
#error "PMD HNS3: HNS3_DEFAULT_RX_BURST must <= 64\n"
#endif
#define HNS3_DEFAULT_DESCS_PER_LOOP 4
#define HNS3_SVE_DEFAULT_DESCS_PER_LOOP 8
#if (HNS3_DEFAULT_DESCS_PER_LOOP > HNS3_SVE_DEFAULT_DESCS_PER_LOOP)
#define HNS3_VECTOR_RX_OFFSET_TABLE_LEN HNS3_DEFAULT_DESCS_PER_LOOP
#else
#define HNS3_VECTOR_RX_OFFSET_TABLE_LEN HNS3_SVE_DEFAULT_DESCS_PER_LOOP
#endif
#define HNS3_DEFAULT_RXQ_REARM_THRESH 64
#define HNS3_UINT8_BIT 8
#define HNS3_UINT16_BIT 16
#define HNS3_UINT32_BIT 32

#define HNS3_512_BD_BUF_SIZE 512
#define HNS3_1K_BD_BUF_SIZE 1024
#define HNS3_2K_BD_BUF_SIZE 2048
#define HNS3_4K_BD_BUF_SIZE 4096

#define HNS3_MIN_BD_BUF_SIZE HNS3_512_BD_BUF_SIZE
#define HNS3_MAX_BD_BUF_SIZE HNS3_4K_BD_BUF_SIZE

#define HNS3_BD_SIZE_512_TYPE 0
#define HNS3_BD_SIZE_1024_TYPE 1
#define HNS3_BD_SIZE_2048_TYPE 2
#define HNS3_BD_SIZE_4096_TYPE 3

#define HNS3_RX_FLAG_VLAN_PRESENT 0x1
#define HNS3_RX_FLAG_L3ID_IPV4 0x0
#define HNS3_RX_FLAG_L3ID_IPV6 0x1
#define HNS3_RX_FLAG_L4ID_UDP 0x0
#define HNS3_RX_FLAG_L4ID_TCP 0x1

#define HNS3_RXD_DMAC_S 0
#define HNS3_RXD_DMAC_M (0x3 << HNS3_RXD_DMAC_S)
#define HNS3_RXD_VLAN_S 2
#define HNS3_RXD_VLAN_M (0x3 << HNS3_RXD_VLAN_S)
#define HNS3_RXD_L3ID_S 4
#define HNS3_RXD_L3ID_M (0xf << HNS3_RXD_L3ID_S)
#define HNS3_RXD_L4ID_S 8
#define HNS3_RXD_L4ID_M (0xf << HNS3_RXD_L4ID_S)
#define HNS3_RXD_FRAG_B 12
#define HNS3_RXD_STRP_TAGP_S 13
#define HNS3_RXD_STRP_TAGP_M (0x3 << HNS3_RXD_STRP_TAGP_S)

#define HNS3_RXD_L2E_B 16
#define HNS3_RXD_L3E_B 17
#define HNS3_RXD_L4E_B 18
#define HNS3_RXD_TRUNCATE_B 19
#define HNS3_RXD_HOI_B 20
#define HNS3_RXD_DOI_B 21
#define HNS3_RXD_OL3E_B 22
#define HNS3_RXD_OL4E_B 23
#define HNS3_RXD_GRO_COUNT_S 24
#define HNS3_RXD_GRO_COUNT_M (0x3f << HNS3_RXD_GRO_COUNT_S)
#define HNS3_RXD_GRO_FIXID_B 30
#define HNS3_RXD_GRO_ECN_B 31

#define HNS3_RXD_ODMAC_S 0
#define HNS3_RXD_ODMAC_M (0x3 << HNS3_RXD_ODMAC_S)
#define HNS3_RXD_OVLAN_S 2
#define HNS3_RXD_OVLAN_M (0x3 << HNS3_RXD_OVLAN_S)
#define HNS3_RXD_OL3ID_S 4
#define HNS3_RXD_OL3ID_M (0xf << HNS3_RXD_OL3ID_S)
#define HNS3_RXD_OL4ID_S 8
#define HNS3_RXD_OL4ID_M (0xf << HNS3_RXD_OL4ID_S)
#define HNS3_RXD_PTYPE_S 4
#define HNS3_RXD_PTYPE_M (0xff << HNS3_RXD_PTYPE_S)
#define HNS3_RXD_FBHI_S 12
#define HNS3_RXD_FBHI_M (0x3 << HNS3_RXD_FBHI_S)
#define HNS3_RXD_FBLI_S 14
#define HNS3_RXD_FBLI_M (0x3 << HNS3_RXD_FBLI_S)

#define HNS3_RXD_BDTYPE_S 0
#define HNS3_RXD_BDTYPE_M (0xf << HNS3_RXD_BDTYPE_S)
#define HNS3_RXD_VLD_B 4
#define HNS3_RXD_UDP0_B 5
#define HNS3_RXD_EXTEND_B 7
#define HNS3_RXD_FE_B 8
#define HNS3_RXD_LUM_B 9
#define HNS3_RXD_CRCP_B 10
#define HNS3_RXD_L3L4P_B 11

#define HNS3_RXD_TS_VLD_B 14
#define HNS3_RXD_GRO_SIZE_S 16
#define HNS3_RXD_GRO_SIZE_M (0x3fff << HNS3_RXD_GRO_SIZE_S)

#define HNS3_TXD_L3T_S 0
#define HNS3_TXD_L3T_M (0x3 << HNS3_TXD_L3T_S)
#define HNS3_TXD_L4T_S 2
#define HNS3_TXD_L4T_M (0x3 << HNS3_TXD_L4T_S)
#define HNS3_TXD_L3CS_B 4
#define HNS3_TXD_L4CS_B 5
#define HNS3_TXD_VLAN_B 6
#define HNS3_TXD_TSO_B 7

#define HNS3_TXD_L2LEN_S 8
#define HNS3_TXD_L2LEN_M (0xff << HNS3_TXD_L2LEN_S)
#define HNS3_TXD_L3LEN_S 16
#define HNS3_TXD_L3LEN_M (0xff << HNS3_TXD_L3LEN_S)
#define HNS3_TXD_L4LEN_S 24
#define HNS3_TXD_L4LEN_M (0xffUL << HNS3_TXD_L4LEN_S)

#define HNS3_TXD_OL3T_S 0
#define HNS3_TXD_OL3T_M (0x3 << HNS3_TXD_OL3T_S)
#define HNS3_TXD_OVLAN_B 2
#define HNS3_TXD_MACSEC_B 3
#define HNS3_TXD_TUNTYPE_S 4
#define HNS3_TXD_TUNTYPE_M (0xf << HNS3_TXD_TUNTYPE_S)

#define HNS3_TXD_BDTYPE_S 0
#define HNS3_TXD_BDTYPE_M (0xf << HNS3_TXD_BDTYPE_S)
#define HNS3_TXD_FE_B 4
#define HNS3_TXD_SC_S 5
#define HNS3_TXD_SC_M (0x3 << HNS3_TXD_SC_S)
#define HNS3_TXD_EXTEND_B 7
#define HNS3_TXD_VLD_B 8
#define HNS3_TXD_RI_B 9
#define HNS3_TXD_RA_B 10
#define HNS3_TXD_TSYN_B 11
#define HNS3_TXD_DECTTL_S 12
#define HNS3_TXD_DECTTL_M (0xf << HNS3_TXD_DECTTL_S)

#define HNS3_TXD_MSS_S 0
#define HNS3_TXD_MSS_M (0x3fff << HNS3_TXD_MSS_S)

#define HNS3_TXD_OL4CS_B 22
#define HNS3_L2_LEN_UNIT 1UL
#define HNS3_L3_LEN_UNIT 2UL
#define HNS3_L4_LEN_UNIT 2UL

#define HNS3_TXD_DEFAULT_BDTYPE 0
#define HNS3_TXD_VLD_CMD (0x1 << HNS3_TXD_VLD_B)
#define HNS3_TXD_FE_CMD (0x1 << HNS3_TXD_FE_B)
#define HNS3_TXD_DEFAULT_VLD_FE_BDTYPE \
	(HNS3_TXD_VLD_CMD | HNS3_TXD_FE_CMD | HNS3_TXD_DEFAULT_BDTYPE)
#define HNS3_TXD_SEND_SIZE_SHIFT 16

enum hns3_pkt_l2t_type {
	HNS3_L2_TYPE_UNICAST,
	HNS3_L2_TYPE_MULTICAST,
	HNS3_L2_TYPE_BROADCAST,
	HNS3_L2_TYPE_INVALID,
};

enum hns3_pkt_l3t_type {
	HNS3_L3T_NONE,
	HNS3_L3T_IPV6,
	HNS3_L3T_IPV4,
	HNS3_L3T_RESERVED
};

enum hns3_pkt_l4t_type {
	HNS3_L4T_UNKNOWN,
	HNS3_L4T_TCP,
	HNS3_L4T_UDP,
	HNS3_L4T_SCTP
};

enum hns3_pkt_ol3t_type {
	HNS3_OL3T_NONE,
	HNS3_OL3T_IPV6,
	HNS3_OL3T_IPV4_NO_CSUM,
	HNS3_OL3T_IPV4_CSUM
};

enum hns3_pkt_tun_type {
	HNS3_TUN_NONE,
	HNS3_TUN_MAC_IN_UDP,
	HNS3_TUN_NVGRE,
	HNS3_TUN_OTHER
};

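/*
 * Hardware descriptor shared by the Rx and Tx rings. The first union holds
 * the buffer DMA address (also viewable as the addr0/addr1 words) or a
 * timestamp; the second union overlays the Tx and Rx views of the remaining
 * descriptor fields.
 */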
struct hns3_desc {
	union {
		uint64_t addr;
		uint64_t timestamp;

		struct {
			uint32_t addr0;
			uint32_t addr1;
		};
	};
	union {
		struct {
			uint16_t vlan_tag;
			uint16_t send_size;
			union {
				uint32_t type_cs_vlan_tso_len;
				struct {
					uint8_t type_cs_vlan_tso;
					uint8_t l2_len;
					uint8_t l3_len;
					uint8_t l4_len;
				};
			};
			uint16_t outer_vlan_tag;
			uint16_t tv;
			union {
				uint32_t ol_type_vlan_len_msec;
				struct {
					uint8_t ol_type_vlan_msec;
					uint8_t ol2_len;
					uint8_t ol3_len;
					uint8_t ol4_len;
				};
			};

			uint32_t paylen_fd_dop_ol4cs;
			uint16_t tp_fe_sc_vld_ra_ri;
			uint16_t mss;
		} tx;

		struct {
			uint32_t l234_info;
			uint16_t pkt_len;
			uint16_t size;
			uint32_t rss_hash;
			uint16_t fd_id;
			uint16_t vlan_tag;
			union {
				uint32_t ol_info;
				struct {
					uint16_t o_dm_vlan_id_fb;
					uint16_t ot_vlan_tag;
				};
			};
			union {
				uint32_t bd_base_info;
				struct {
					uint16_t bdtype_vld_udp0;
					uint16_t fe_lum_crcp_l3l4p;
				};
			};
		} rx;
	};
} __rte_packed;

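/* Software ring entry: the mbuf that backs one hardware descriptor. */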
struct hns3_entry {
	struct rte_mbuf *mbuf;
};

struct hns3_rx_basic_stats {
	uint64_t packets;
	uint64_t bytes;
	uint64_t errors;
};

struct hns3_rx_dfx_stats {
	uint64_t l3_csum_errors;
	uint64_t l4_csum_errors;
	uint64_t ol3_csum_errors;
	uint64_t ol4_csum_errors;
};

struct hns3_rx_bd_errors_stats {
	uint64_t l2_errors;
	uint64_t pkt_len_errors;
};

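/*
 * Per-queue Rx context. Fields used in the receive fast path are grouped at
 * the front of the structure; setup and control-path fields follow.
 */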
struct hns3_rx_queue {
	volatile void *io_head_reg;
	struct hns3_ptype_table *ptype_tbl;
	struct rte_mempool *mb_pool;
	struct hns3_desc *rx_ring;
	struct hns3_entry *sw_ring;

	uint16_t port_id;
	uint16_t nb_rx_desc;
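	/*
	 * rx_free_thresh/rx_free_hold: threshold and running count of
	 * processed BDs before the head register is written back (see the
	 * Rx burst implementations).
	 * next_to_use/rx_rearm_start/rx_rearm_nb track where new buffers
	 * are (re)armed in the ring.
	 */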
	uint16_t rx_free_thresh;
	uint16_t next_to_use;
	uint16_t rx_free_hold;
	uint16_t rx_rearm_start;
	uint16_t rx_rearm_nb;

	uint8_t crc_len;
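	/*
	 * pvid_sw_discard_en: discard packets whose VLAN tag equals the
	 * port-based VLAN id (PVID) when the PMD has to emulate PVID
	 * handling in software.
	 * ptype_en: the RXD advanced layout is enabled, so the packet type
	 * field in ol_info is valid and used directly (see
	 * hns3_rx_calc_ptype()).
	 */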
	uint8_t pvid_sw_discard_en:1;
	uint8_t ptype_en:1;

	uint64_t mbuf_initializer;

	uint8_t offset_table[HNS3_VECTOR_RX_OFFSET_TABLE_LEN + 1];

	uint16_t bulk_mbuf_num;

	struct hns3_rx_basic_stats basic_stats;

	struct rte_mbuf *pkt_first_seg;
	struct rte_mbuf *pkt_last_seg;

	struct rte_mbuf *bulk_mbuf[HNS3_BULK_ALLOC_MBUF_NUM];

	struct hns3_rx_dfx_stats dfx_stats;

	struct hns3_rx_bd_errors_stats err_stats;

	struct rte_mbuf fake_mbuf;

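	/*
	 * The remaining fields are only used on the configuration and
	 * control path, not in the Rx fast path.
	 */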
	void *io_base;
	struct hns3_adapter *hns;
	uint64_t rx_ring_phys_addr;
	const struct rte_memzone *mz;

	uint16_t queue_id;
	uint16_t rx_buf_len;

	bool configured;
	bool rx_deferred_start;
	bool enabled;
};

struct hns3_tx_basic_stats {
	uint64_t packets;
	uint64_t bytes;
};

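/*
 * Tx software (dfx/diagnostic) statistics: counters for packets dropped or
 * reworked on the transmit prepare path.
 */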
struct hns3_tx_dfx_stats {
	uint64_t over_length_pkt_cnt;
	uint64_t exceed_limit_bd_pkt_cnt;
	uint64_t exceed_limit_bd_reassem_fail;
	uint64_t unsupported_tunnel_pkt_cnt;
	uint64_t queue_full_cnt;
	uint64_t pkt_padding_fail_cnt;
};

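/*
 * Per-queue Tx context. As with the Rx queue, fast-path fields come first
 * and configuration/control-path fields are kept at the end.
 */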
struct hns3_tx_queue {
	volatile void *io_tail_reg;
	struct hns3_desc *tx_ring;
	struct hns3_entry *sw_ring;

	uint16_t nb_tx_desc;

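	/*
	 * next_to_clean: index of the next BD to reclaim once hardware has
	 * finished with it.
	 * next_to_use: index of the next BD the driver will fill.
	 * tx_bd_ready: number of BDs currently available for new packets.
	 */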
	uint16_t next_to_clean;
	uint16_t next_to_use;
	uint16_t tx_bd_ready;

	uint16_t tx_free_thresh;

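	/*
	 * min_tx_pkt_len: frames shorter than this are padded before being
	 * handed to hardware.
	 * max_non_tso_bd_num: maximum number of BDs a non-TSO packet may
	 * occupy; longer mbuf chains have to be reassembled (see the
	 * exceed_limit_bd_* counters).
	 */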
	uint8_t min_tx_pkt_len;

	uint8_t max_non_tso_bd_num;

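	/*
	 * tso_mode: selects how TSO is performed for this queue; the exact
	 * meaning of the value depends on the hardware generation (see the
	 * Tx path implementation).
	 */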
	uint16_t tso_mode:1;

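	/*
	 * udp_cksum_mode: selects how UDP checksum offload is handled,
	 * allowing a software fallback where the hardware offload has
	 * restrictions (see the Tx prepare path).
	 */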
	uint16_t udp_cksum_mode:1;

	uint16_t simple_bd_enable:1;
	uint16_t tx_push_enable:1;

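	/*
	 * pvid_sw_shift_en: the port-based VLAN (PVID) has to be inserted by
	 * software, so the VLAN tags in the Tx descriptor are shifted
	 * accordingly.
	 */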
	uint16_t pvid_sw_shift_en:1;

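	/*
	 * tx_rs_thresh: number of transmitted BDs after which the driver
	 * tries to reclaim finished mbufs.
	 * free: scratch array used to batch mbuf release when
	 * MBUF_FAST_FREE is in use.
	 */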
	uint16_t tx_rs_thresh;
	struct rte_mbuf **free;

	struct hns3_tx_basic_stats basic_stats;
	struct hns3_tx_dfx_stats dfx_stats;

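	/*
	 * The remaining fields are only used on the configuration and
	 * control path, not in the Tx fast path.
	 */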
	void *io_base;
	struct hns3_adapter *hns;
	uint64_t tx_ring_phys_addr;
	const struct rte_memzone *mz;

	uint16_t port_id;
	uint16_t queue_id;

	bool configured;
	bool tx_deferred_start;
	bool enabled;
};

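/* Number of BDs in the Tx ring that are still pending completion. */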
#define HNS3_GET_TX_QUEUE_PEND_BD_NUM(txq) \
	((txq)->nb_tx_desc - 1 - (txq)->tx_bd_ready)

struct hns3_queue_info {
	const char *type;
	const char *ring_name;
	uint16_t idx;
	uint16_t nb_desc;
	unsigned int socket_id;
};

#define HNS3_TX_CKSUM_OFFLOAD_MASK ( \
	PKT_TX_OUTER_UDP_CKSUM | \
	PKT_TX_OUTER_IP_CKSUM | \
	PKT_TX_IP_CKSUM | \
	PKT_TX_TCP_SEG | \
	PKT_TX_L4_MASK)

enum hns3_cksum_status {
	HNS3_CKSUM_NONE = 0,
	HNS3_L3_CKSUM_ERR = 1,
	HNS3_L4_CKSUM_ERR = 2,
	HNS3_OUTER_L3_CKSUM_ERR = 4,
	HNS3_OUTER_L4_CKSUM_ERR = 8
};

extern uint64_t hns3_timestamp_rx_dynflag;
extern int hns3_timestamp_dynfield_offset;

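/*
 * Translate the checksum error bits in l234_info into mbuf ol_flags and
 * update the per-queue checksum error counters.
 */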
static inline void
hns3_rx_set_cksum_flag(struct hns3_rx_queue *rxq,
		       struct rte_mbuf *rxm,
		       uint32_t l234_info)
{
#define HNS3_RXD_CKSUM_ERR_MASK	(BIT(HNS3_RXD_L3E_B) | \
				 BIT(HNS3_RXD_L4E_B) | \
				 BIT(HNS3_RXD_OL3E_B) | \
				 BIT(HNS3_RXD_OL4E_B))

	if (likely((l234_info & HNS3_RXD_CKSUM_ERR_MASK) == 0)) {
		rxm->ol_flags |= (PKT_RX_IP_CKSUM_GOOD | PKT_RX_L4_CKSUM_GOOD);
		return;
	}

	if (unlikely(l234_info & BIT(HNS3_RXD_L3E_B))) {
		rxm->ol_flags |= PKT_RX_IP_CKSUM_BAD;
		rxq->dfx_stats.l3_csum_errors++;
	} else {
		rxm->ol_flags |= PKT_RX_IP_CKSUM_GOOD;
	}

	if (unlikely(l234_info & BIT(HNS3_RXD_L4E_B))) {
		rxm->ol_flags |= PKT_RX_L4_CKSUM_BAD;
		rxq->dfx_stats.l4_csum_errors++;
	} else {
		rxm->ol_flags |= PKT_RX_L4_CKSUM_GOOD;
	}

	if (unlikely(l234_info & BIT(HNS3_RXD_OL3E_B)))
		rxq->dfx_stats.ol3_csum_errors++;

	if (unlikely(l234_info & BIT(HNS3_RXD_OL4E_B))) {
		rxm->ol_flags |= PKT_RX_OUTER_L4_CKSUM_BAD;
		rxq->dfx_stats.ol4_csum_errors++;
	}
}

static inline int
hns3_handle_bdinfo(struct hns3_rx_queue *rxq, struct rte_mbuf *rxm,
		   uint32_t bd_base_info, uint32_t l234_info)
{
#define L2E_TRUNC_ERR_FLAG	(BIT(HNS3_RXD_L2E_B) | \
				 BIT(HNS3_RXD_TRUNCATE_B))

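	/*
	 * A descriptor completes a packet only when the frame-end (FE) bit
	 * is set in bd_base_info; anything else is treated as an error.
	 */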
	if (unlikely((bd_base_info & BIT(HNS3_RXD_FE_B)) == 0))
		return -EINVAL;

	if (unlikely((l234_info & L2E_TRUNC_ERR_FLAG) || rxm->pkt_len == 0)) {
		if (l234_info & BIT(HNS3_RXD_L2E_B))
			rxq->err_stats.l2_errors++;
		else
			rxq->err_stats.pkt_len_errors++;
		return -EINVAL;
	}

	if (bd_base_info & BIT(HNS3_RXD_L3L4P_B))
		hns3_rx_set_cksum_flag(rxq, rxm, l234_info);

	return 0;
}

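/*
 * Resolve the mbuf packet type. If the RXD advanced layout is enabled
 * (ptype_en), the packet type field in ol_info indexes the ptype table
 * directly; otherwise the type is composed from the inner and outer L3/L4
 * id fields.
 */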
static inline uint32_t
hns3_rx_calc_ptype(struct hns3_rx_queue *rxq, const uint32_t l234_info,
		   const uint32_t ol_info)
{
	const struct hns3_ptype_table * const ptype_tbl = rxq->ptype_tbl;
	uint32_t ol3id, ol4id;
	uint32_t l3id, l4id;
	uint32_t ptype;

	if (rxq->ptype_en) {
		ptype = hns3_get_field(ol_info, HNS3_RXD_PTYPE_M,
				       HNS3_RXD_PTYPE_S);
		return ptype_tbl->ptype[ptype];
	}

	ol4id = hns3_get_field(ol_info, HNS3_RXD_OL4ID_M, HNS3_RXD_OL4ID_S);
	ol3id = hns3_get_field(ol_info, HNS3_RXD_OL3ID_M, HNS3_RXD_OL3ID_S);
	l3id = hns3_get_field(l234_info, HNS3_RXD_L3ID_M, HNS3_RXD_L3ID_S);
	l4id = hns3_get_field(l234_info, HNS3_RXD_L4ID_M, HNS3_RXD_L4ID_S);

	if (unlikely(ptype_tbl->ol4table[ol4id]))
		return ptype_tbl->inner_l3table[l3id] |
		       ptype_tbl->inner_l4table[l4id] |
		       ptype_tbl->ol3table[ol3id] |
		       ptype_tbl->ol4table[ol4id];
	else
		return ptype_tbl->l3table[l3id] | ptype_tbl->l4table[l4id];
}

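/*
 * Ring the Tx doorbell. When Tx push is enabled the doorbell is a 64-bit
 * write (Tx push window); otherwise the regular 32-bit tail register is
 * written. The rte_io_wmb() makes the descriptor updates visible to the
 * device before the doorbell write.
 */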
static inline void
hns3_write_txq_tail_reg(struct hns3_tx_queue *txq, uint32_t value)
{
	rte_io_wmb();
	if (txq->tx_push_enable)
		rte_write64_relaxed(rte_cpu_to_le_64(value), txq->io_tail_reg);
	else
		rte_write32_relaxed(rte_cpu_to_le_32(value), txq->io_tail_reg);
}

void hns3_dev_rx_queue_release(void *queue);
void hns3_dev_tx_queue_release(void *queue);
void hns3_free_all_queues(struct rte_eth_dev *dev);
int hns3_reset_all_tqps(struct hns3_adapter *hns);
void hns3_dev_all_rx_queue_intr_enable(struct hns3_hw *hw, bool en);
int hns3_dev_rx_queue_intr_enable(struct rte_eth_dev *dev, uint16_t queue_id);
int hns3_dev_rx_queue_intr_disable(struct rte_eth_dev *dev, uint16_t queue_id);
void hns3_enable_all_queues(struct hns3_hw *hw, bool en);
int hns3_init_queues(struct hns3_adapter *hns, bool reset_queue);
void hns3_start_tqps(struct hns3_hw *hw);
void hns3_stop_tqps(struct hns3_hw *hw);
int hns3_rxq_iterate(struct rte_eth_dev *dev,
		     int (*callback)(struct hns3_rx_queue *, void *), void *arg);
void hns3_dev_release_mbufs(struct hns3_adapter *hns);
int hns3_rx_queue_setup(struct rte_eth_dev *dev, uint16_t idx, uint16_t nb_desc,
			unsigned int socket, const struct rte_eth_rxconf *conf,
			struct rte_mempool *mp);
int hns3_tx_queue_setup(struct rte_eth_dev *dev, uint16_t idx, uint16_t nb_desc,
			unsigned int socket, const struct rte_eth_txconf *conf);
uint32_t hns3_rx_queue_count(struct rte_eth_dev *dev, uint16_t rx_queue_id);
int hns3_dev_rx_queue_start(struct rte_eth_dev *dev, uint16_t rx_queue_id);
int hns3_dev_rx_queue_stop(struct rte_eth_dev *dev, uint16_t rx_queue_id);
int hns3_dev_tx_queue_start(struct rte_eth_dev *dev, uint16_t tx_queue_id);
int hns3_dev_tx_queue_stop(struct rte_eth_dev *dev, uint16_t tx_queue_id);
uint16_t hns3_recv_pkts_simple(void *rx_queue, struct rte_mbuf **rx_pkts,
			       uint16_t nb_pkts);
uint16_t hns3_recv_scattered_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
				  uint16_t nb_pkts);
uint16_t hns3_recv_pkts_vec(void *rx_queue, struct rte_mbuf **rx_pkts,
			    uint16_t nb_pkts);
uint16_t hns3_recv_pkts_vec_sve(void *rx_queue, struct rte_mbuf **rx_pkts,
				uint16_t nb_pkts);
int hns3_rx_burst_mode_get(struct rte_eth_dev *dev,
			   __rte_unused uint16_t queue_id,
			   struct rte_eth_burst_mode *mode);
int hns3_rx_check_vec_support(struct rte_eth_dev *dev);
uint16_t hns3_prep_pkts(__rte_unused void *tx_queue, struct rte_mbuf **tx_pkts,
			uint16_t nb_pkts);
uint16_t hns3_xmit_pkts_simple(void *tx_queue, struct rte_mbuf **tx_pkts,
			       uint16_t nb_pkts);
uint16_t hns3_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
			uint16_t nb_pkts);
uint16_t hns3_xmit_pkts_vec(void *tx_queue, struct rte_mbuf **tx_pkts,
			    uint16_t nb_pkts);
uint16_t hns3_xmit_pkts_vec_sve(void *tx_queue, struct rte_mbuf **tx_pkts,
				uint16_t nb_pkts);
int hns3_tx_burst_mode_get(struct rte_eth_dev *dev,
			   __rte_unused uint16_t queue_id,
			   struct rte_eth_burst_mode *mode);
const uint32_t *hns3_dev_supported_ptypes_get(struct rte_eth_dev *dev);
void hns3_init_rx_ptype_tble(struct rte_eth_dev *dev);
void hns3_set_rxtx_function(struct rte_eth_dev *eth_dev);
uint32_t hns3_get_tqp_intr_reg_offset(uint16_t tqp_intr_id);
void hns3_set_queue_intr_gl(struct hns3_hw *hw, uint16_t queue_id,
			    uint8_t gl_idx, uint16_t gl_value);
void hns3_set_queue_intr_rl(struct hns3_hw *hw, uint16_t queue_id,
			    uint16_t rl_value);
void hns3_set_queue_intr_ql(struct hns3_hw *hw, uint16_t queue_id,
			    uint16_t ql_value);
int hns3_set_fake_rx_or_tx_queues(struct rte_eth_dev *dev, uint16_t nb_rx_q,
				  uint16_t nb_tx_q);
int hns3_config_gro(struct hns3_hw *hw, bool en);
int hns3_restore_gro_conf(struct hns3_hw *hw);
void hns3_update_all_queues_pvid_proc_en(struct hns3_hw *hw);
void hns3_rx_scattered_reset(struct rte_eth_dev *dev);
void hns3_rx_scattered_calc(struct rte_eth_dev *dev);
int hns3_rx_check_vec_support(struct rte_eth_dev *dev);
int hns3_tx_check_vec_support(struct rte_eth_dev *dev);
void hns3_rxq_vec_setup(struct hns3_rx_queue *rxq);
void hns3_rxq_info_get(struct rte_eth_dev *dev, uint16_t queue_id,
		       struct rte_eth_rxq_info *qinfo);
void hns3_txq_info_get(struct rte_eth_dev *dev, uint16_t queue_id,
		       struct rte_eth_txq_info *qinfo);
uint32_t hns3_get_tqp_reg_offset(uint16_t idx);
int hns3_start_all_txqs(struct rte_eth_dev *dev);
int hns3_start_all_rxqs(struct rte_eth_dev *dev);
void hns3_stop_all_txqs(struct rte_eth_dev *dev);
void hns3_restore_tqp_enable_state(struct hns3_hw *hw);
int hns3_tx_done_cleanup(void *txq, uint32_t free_cnt);
void hns3_enable_rxd_adv_layout(struct hns3_hw *hw);
int hns3_dev_rx_descriptor_status(void *rx_queue, uint16_t offset);
int hns3_dev_tx_descriptor_status(void *tx_queue, uint16_t offset);
void hns3_tx_push_init(struct rte_eth_dev *dev);

#endif