1
2
3
4
#ifndef _IAVF_RXTX_H_
#define _IAVF_RXTX_H_

/* Descriptor-ring geometry: the ring length must be a multiple of 32
 * and lie within [64, 4096]; ring DMA memory is 4KB aligned.
 */
#define IAVF_ALIGN_RING_DESC 32
#define IAVF_MIN_RING_DESC 64
#define IAVF_MAX_RING_DESC 4096
#define IAVF_DMA_MEM_ALIGN 4096
/* Base address of the HW descriptor ring should be 128B aligned. */
#define IAVF_RING_BASE_ALIGN 128

/* Used for Rx bulk-allocate staging (see rx_stage[] in iavf_rx_queue). */
#define IAVF_RX_MAX_BURST 32

/* Tunables for the vector (SIMD) Rx/Tx paths. */
#define IAVF_VPMD_RX_MAX_BURST 32
#define IAVF_VPMD_TX_MAX_BURST 32
#define IAVF_RXQ_REARM_THRESH 32
#define IAVF_VPMD_DESCS_PER_LOOP 4
#define IAVF_VPMD_TX_MAX_FREE_BUF 64

/* Tx offloads the vector path cannot handle; any of these forces the
 * scalar Tx burst function to be selected.
 */
#define IAVF_NO_VECTOR_FLAGS ( \
	DEV_TX_OFFLOAD_MULTI_SEGS | \
	DEV_TX_OFFLOAD_VLAN_INSERT | \
	DEV_TX_OFFLOAD_SCTP_CKSUM | \
	DEV_TX_OFFLOAD_UDP_CKSUM | \
	DEV_TX_OFFLOAD_TCP_TSO | \
	DEV_TX_OFFLOAD_TCP_CKSUM)

/* Default Tx report-status / free thresholds (descriptors). */
#define DEFAULT_TX_RS_THRESH 32
#define DEFAULT_TX_FREE_THRESH 32

/* TSO segment-size and segment-count limits. */
#define IAVF_MIN_TSO_MSS 256
#define IAVF_MAX_TSO_MSS 9668
#define IAVF_TSO_MAX_SEG UINT8_MAX
#define IAVF_TX_MAX_MTU_SEG 8

/* Checksum-related mbuf Tx offload flags handled by this driver. */
#define IAVF_TX_CKSUM_OFFLOAD_MASK ( \
	PKT_TX_IP_CKSUM | \
	PKT_TX_L4_MASK | \
	PKT_TX_TCP_SEG)

/* All mbuf Tx offload flags supported by this driver. */
#define IAVF_TX_OFFLOAD_MASK ( \
	PKT_TX_OUTER_IPV6 | \
	PKT_TX_OUTER_IPV4 | \
	PKT_TX_IPV6 | \
	PKT_TX_IPV4 | \
	PKT_TX_VLAN_PKT | \
	PKT_TX_IP_CKSUM | \
	PKT_TX_L4_MASK | \
	PKT_TX_TCP_SEG)

/* Offload flags defined by DPDK but NOT supported here (XOR of the
 * global mask with our supported mask); used by iavf_prep_pkts().
 */
#define IAVF_TX_OFFLOAD_NOTSUP_MASK \
	(PKT_TX_OFFLOAD_MASK ^ IAVF_TX_OFFLOAD_MASK)
63
/* 16-byte Rx Flex Descriptor: used instead of the legacy descriptor
 * format when a flexible RXDID profile is negotiated.
 */
union iavf_16b_rx_flex_desc {
	struct {
		__le64 pkt_addr; /* Packet buffer address */
		__le64 hdr_addr; /* Header buffer address */
				 /* bit 0 of hdr_addr is DD bit */
	} read;
	struct {
		/* Qword 0 */
		u8 rxdid; /* descriptor builder profile ID */
		u8 mir_id_umb_cast; /* mirror=[5:0], umb=[7:6] */
		__le16 ptype_flex_flags0; /* ptype=[9:0], ff0=[15:10] */
		__le16 pkt_len; /* [15:14] are reserved */
		__le16 hdr_len_sph_flex_flags1; /* header=[10:0] */
						/* sph=[11] */
						/* ff1/ext=[15:12] */

		/* Qword 1 */
		__le16 status_error0;
		__le16 l2tag1;
		__le16 flex_meta0;
		__le16 flex_meta1;
	} wb; /* writeback */
};
87
/* 32-byte Rx Flex Descriptor: same first 16 bytes as the 16B variant,
 * plus a second writeback half carrying extra metadata/timestamp.
 */
union iavf_32b_rx_flex_desc {
	struct {
		__le64 pkt_addr; /* Packet buffer address */
		__le64 hdr_addr; /* Header buffer address */
				 /* bit 0 of hdr_addr is DD bit */
		__le64 rsvd1;
		__le64 rsvd2;
	} read;
	struct {
		/* Qword 0 */
		u8 rxdid; /* descriptor builder profile ID */
		u8 mir_id_umb_cast; /* mirror=[5:0], umb=[7:6] */
		__le16 ptype_flex_flags0; /* ptype=[9:0], ff0=[15:10] */
		__le16 pkt_len; /* [15:14] are reserved */
		__le16 hdr_len_sph_flex_flags1; /* header=[10:0] */
						/* sph=[11] */
						/* ff1/ext=[15:12] */

		/* Qword 1 */
		__le16 status_error0;
		__le16 l2tag1;
		__le16 flex_meta0;
		__le16 flex_meta1;

		/* Qword 2 */
		__le16 status_error1;
		u8 flex_flags2;
		u8 time_stamp_low;
		__le16 l2tag2_1st;
		__le16 l2tag2_2nd;

		/* Qword 3 */
		__le16 flex_meta2;
		__le16 flex_meta3;
		union {
			struct {
				__le16 flex_meta4;
				__le16 flex_meta5;
			} flex;
			__le32 ts_high;
		} flex_ts;
	} wb; /* writeback */
};
131
132
/* HW desc structure: both 16-byte and 32-byte descriptor formats are
 * supported; the build-time option selects which one iavf_rx_desc /
 * iavf_rx_flex_desc alias to.
 */
#ifdef RTE_LIBRTE_IAVF_16BYTE_RX_DESC
#define iavf_rx_desc iavf_16byte_rx_desc
#define iavf_rx_flex_desc iavf_16b_rx_flex_desc
#else
#define iavf_rx_desc iavf_32byte_rx_desc
#define iavf_rx_flex_desc iavf_32b_rx_flex_desc
#endif

/* Per-RXDID hook used by the Rx path to copy flex-descriptor metadata
 * from @rxdp into mbuf @mb for queue @rxq.
 */
typedef void (*iavf_rxd_to_pkt_fields_t)(struct iavf_rx_queue *rxq,
				struct rte_mbuf *mb,
				volatile union iavf_rx_flex_desc *rxdp);

/* Rx queue ops: release_mbufs releases the buffers the queue holds. */
struct iavf_rxq_ops {
	void (*release_mbufs)(struct iavf_rx_queue *rxq);
};

/* Tx queue ops: release_mbufs releases the buffers the queue holds. */
struct iavf_txq_ops {
	void (*release_mbufs)(struct iavf_tx_queue *txq);
};
152
153
/* Structure associated with each Rx queue. */
struct iavf_rx_queue {
	struct rte_mempool *mp;       /* mbuf pool to populate Rx ring */
	const struct rte_memzone *mz; /* memzone for Rx ring */
	volatile union iavf_rx_desc *rx_ring; /* Rx ring virtual address */
	uint64_t rx_ring_phys_addr;   /* Rx ring DMA address */
	struct rte_mbuf **sw_ring;    /* address of SW ring */
	uint16_t nb_rx_desc;          /* ring length */
	uint16_t rx_tail;             /* current value of tail */
	volatile uint8_t *qrx_tail;   /* register address of tail */
	uint16_t rx_free_thresh;      /* max free RX desc to hold */
	uint16_t nb_rx_hold;          /* number of held free RX desc */
	struct rte_mbuf *pkt_first_seg; /* first segment of current packet */
	struct rte_mbuf *pkt_last_seg;  /* last segment of current packet */
	struct rte_mbuf fake_mbuf;      /* dummy mbuf */
	uint8_t rxdid;                  /* flex descriptor profile in use */

	/* used for VPMD (vector Rx) */
	uint16_t rxrearm_nb;       /* number of remaining to be re-armed */
	uint16_t rxrearm_start;    /* the idx we start the re-arming from */
	uint64_t mbuf_initializer; /* value to init mbufs */

	/* for rx bulk allocate */
	uint16_t rx_nb_avail;      /* number of staged packets ready */
	uint16_t rx_next_avail;    /* index of next staged packet */
	uint16_t rx_free_trigger;  /* triggers rx buffer allocation */
	struct rte_mbuf *rx_stage[IAVF_RX_MAX_BURST * 2]; /* staged mbufs */

	uint16_t port_id;       /* device port ID */
	uint8_t crc_len;        /* 0 if CRC stripped, 4 otherwise */
	uint8_t fdir_enabled;   /* 0 if FDIR disabled, 1 when enabled */
	uint16_t queue_id;      /* Rx queue index */
	uint16_t rx_buf_len;    /* packet buffer size */
	uint16_t rx_hdr_len;    /* header buffer size */
	uint16_t max_pkt_len;   /* maximum packet length */
	struct iavf_vsi *vsi;   /* the VSI this queue belongs to */

	bool q_set;             /* true if queue has been configured */
	bool rx_deferred_start; /* don't start this queue in dev start */
	const struct iavf_rxq_ops *ops;
	uint8_t proto_xtr;      /* protocol extraction type */
	uint64_t xtr_ol_flag;   /* flex metadata extraction offload flag */
	/* handles flexible descriptor metadata by RXDID */
	iavf_rxd_to_pkt_fields_t rxd_to_pkt_fields;
};
199
/* Software ring entry for the scalar Tx path: tracks the mbuf attached
 * to a descriptor slot plus linkage for multi-descriptor packets.
 */
struct iavf_tx_entry {
	struct rte_mbuf *mbuf;
	uint16_t next_id; /* index of the next descriptor in the ring */
	uint16_t last_id; /* index of the packet's last descriptor */
};
205
/* Slimmer software ring entry used by the vector Tx path (mbuf only). */
struct iavf_tx_vec_entry {
	struct rte_mbuf *mbuf;
};
209
210
/* Structure associated with each Tx queue. */
struct iavf_tx_queue {
	const struct rte_memzone *mz;  /* memzone for Tx ring */
	volatile struct iavf_tx_desc *tx_ring; /* Tx ring virtual address */
	uint64_t tx_ring_phys_addr;    /* Tx ring DMA address */
	struct iavf_tx_entry *sw_ring; /* address of SW ring */
	uint16_t nb_tx_desc;           /* ring length */
	uint16_t tx_tail;              /* current value of tail */
	volatile uint8_t *qtx_tail;    /* register address of tail */
	/* number of used desc since RS bit set */
	uint16_t nb_used;
	uint16_t nb_free;              /* number of free descriptors */
	uint16_t last_desc_cleaned;    /* last desc have been cleaned */
	uint16_t free_thresh;          /* free descriptors before cleaning */
	uint16_t rs_thresh;            /* desc between RS-bit requests */

	uint16_t port_id;              /* device port ID */
	uint16_t queue_id;             /* Tx queue index */
	uint64_t offloads;             /* enabled DEV_TX_OFFLOAD_* flags */
	uint16_t next_dd;              /* next to set DD mask */
	uint16_t next_rs;              /* next to check DD */

	bool q_set;                    /* true if queue has been configured */
	bool tx_deferred_start;        /* don't start this queue in dev start */
	const struct iavf_txq_ops *ops;
/* Where the inserted VLAN tag is placed (L2TAG1 vs L2TAG2 field). */
#define IAVF_TX_FLAGS_VLAN_TAG_LOC_L2TAG1 BIT(0)
#define IAVF_TX_FLAGS_VLAN_TAG_LOC_L2TAG2 BIT(1)
	uint8_t vlan_flag;
};
239
240
/* Offload features: packed view of the mbuf Tx offload header lengths,
 * accessible either as one 64-bit word or as individual bitfields.
 */
union iavf_tx_offload {
	uint64_t data;
	struct {
		uint64_t l2_len:7;     /* L2 (MAC) header length */
		uint64_t l3_len:9;     /* L3 (IP) header length */
		uint64_t l4_len:8;     /* L4 header length */
		uint64_t tso_segsz:16; /* TCP TSO segment size */
		/* uint64_t unused : 24; */
	};
};
251
252
253
254
255
256
257
258
259
260
/* Rx Flex Descriptor writeback layout for the "comms" package profiles:
 * RSS hash in qword 1, flow ID in qword 3, aux metadata/timestamp last.
 */
struct iavf_32b_rx_flex_desc_comms {
	/* Qword 0 */
	u8 rxdid;
	u8 mir_id_umb_cast;
	__le16 ptype_flexi_flags0;
	__le16 pkt_len;
	__le16 hdr_len_sph_flex_flags1;

	/* Qword 1 */
	__le16 status_error0;
	__le16 l2tag1;
	__le32 rss_hash;

	/* Qword 2 */
	__le16 status_error1;
	u8 flexi_flags2;
	u8 ts_low;
	__le16 l2tag2_1st;
	__le16 l2tag2_2nd;

	/* Qword 3 */
	__le32 flow_id;
	union {
		struct {
			__le16 aux0;
			__le16 aux1;
		} flex;
		__le32 ts_high;
	} flex_ts;
};
291
292
293
294
295
296
297
298
299
300
/* Rx Flex Descriptor writeback layout for the OVS "comms" profiles:
 * identical to iavf_32b_rx_flex_desc_comms except that flow_id and
 * rss_hash swap places (flow ID in qword 1, RSS hash in qword 3).
 */
struct iavf_32b_rx_flex_desc_comms_ovs {
	/* Qword 0 */
	u8 rxdid;
	u8 mir_id_umb_cast;
	__le16 ptype_flexi_flags0;
	__le16 pkt_len;
	__le16 hdr_len_sph_flex_flags1;

	/* Qword 1 */
	__le16 status_error0;
	__le16 l2tag1;
	__le32 flow_id;

	/* Qword 2 */
	__le16 status_error1;
	u8 flexi_flags2;
	u8 ts_low;
	__le16 l2tag2_1st;
	__le16 l2tag2_2nd;

	/* Qword 3 */
	__le32 rss_hash;
	union {
		struct {
			__le16 aux0;
			__le16 aux1;
		} flex;
		__le32 ts_high;
	} flex_ts;
};
331
332
333
334
335
336
/* Receive Flex Descriptor profile IDs (RXDID), range [0, 63]. The
 * legacy IDs select the non-flex descriptor formats; IDs >= 16 are
 * DDP "comms" package profiles.
 */
enum iavf_rxdid {
	IAVF_RXDID_LEGACY_0 = 0,
	IAVF_RXDID_LEGACY_1 = 1,
	IAVF_RXDID_FLEX_NIC = 2,
	IAVF_RXDID_FLEX_NIC_2 = 6,
	IAVF_RXDID_HW = 7,
	IAVF_RXDID_COMMS_GENERIC = 16,
	IAVF_RXDID_COMMS_AUX_VLAN = 17,
	IAVF_RXDID_COMMS_AUX_IPV4 = 18,
	IAVF_RXDID_COMMS_AUX_IPV6 = 19,
	IAVF_RXDID_COMMS_AUX_IPV6_FLOW = 20,
	IAVF_RXDID_COMMS_AUX_TCP = 21,
	IAVF_RXDID_COMMS_OVS_1 = 22,
	IAVF_RXDID_COMMS_OVS_2 = 23,
	IAVF_RXDID_COMMS_AUX_IP_OFFSET = 25,
	IAVF_RXDID_LAST = 63,
};
354
/* Bit offsets within the status_error0 member of the 32B flex desc. */
enum iavf_rx_flex_desc_status_error_0_bits {
	/* Note: These are predefined bit offsets */
	IAVF_RX_FLEX_DESC_STATUS0_DD_S = 0,       /* descriptor done */
	IAVF_RX_FLEX_DESC_STATUS0_EOF_S,          /* end of frame */
	IAVF_RX_FLEX_DESC_STATUS0_HBO_S,
	IAVF_RX_FLEX_DESC_STATUS0_L3L4P_S,        /* L3/L4 parsed */
	IAVF_RX_FLEX_DESC_STATUS0_XSUM_IPE_S,     /* IP checksum error */
	IAVF_RX_FLEX_DESC_STATUS0_XSUM_L4E_S,     /* L4 checksum error */
	IAVF_RX_FLEX_DESC_STATUS0_XSUM_EIPE_S,    /* outer IP cksum error */
	IAVF_RX_FLEX_DESC_STATUS0_XSUM_EUDPE_S,   /* outer UDP cksum error */
	IAVF_RX_FLEX_DESC_STATUS0_LPBK_S,
	IAVF_RX_FLEX_DESC_STATUS0_IPV6EXADD_S,
	IAVF_RX_FLEX_DESC_STATUS0_RXE_S,          /* receive error */
	IAVF_RX_FLEX_DESC_STATUS0_CRCP_S,
	IAVF_RX_FLEX_DESC_STATUS0_RSS_VALID_S,    /* rss_hash field valid */
	IAVF_RX_FLEX_DESC_STATUS0_L2TAG1P_S,      /* l2tag1 field valid */
	IAVF_RX_FLEX_DESC_STATUS0_XTRMD0_VALID_S, /* flex_meta0 valid */
	IAVF_RX_FLEX_DESC_STATUS0_XTRMD1_VALID_S, /* flex_meta1 valid */
	IAVF_RX_FLEX_DESC_STATUS0_LAST /* this entry must be last!!! */
};
375
/* Bit offsets within the status_error1 member of the 32B flex desc. */
enum iavf_rx_flex_desc_status_error_1_bits {
	/* Note: These are predefined bit offsets */
	IAVF_RX_FLEX_DESC_STATUS1_CPM_S = 0, /* 4 bits */
	IAVF_RX_FLEX_DESC_STATUS1_NAT_S = 4,
	IAVF_RX_FLEX_DESC_STATUS1_CRYPTO_S = 5,
	/* [10:6] reserved */
	IAVF_RX_FLEX_DESC_STATUS1_L2TAG2P_S = 11,      /* l2tag2 valid */
	IAVF_RX_FLEX_DESC_STATUS1_XTRMD2_VALID_S = 12, /* flex_meta2 valid */
	IAVF_RX_FLEX_DESC_STATUS1_XTRMD3_VALID_S = 13, /* flex_meta3 valid */
	IAVF_RX_FLEX_DESC_STATUS1_XTRMD4_VALID_S = 14, /* flex_meta4 valid */
	IAVF_RX_FLEX_DESC_STATUS1_XTRMD5_VALID_S = 15, /* flex_meta5 valid */
	IAVF_RX_FLEX_DESC_STATUS1_LAST /* this entry must be last!!! */
};
389
390
/* for iavf_32b_rx_flex_desc.ptype_flex_flags0 member */
#define IAVF_RX_FLEX_DESC_PTYPE_M (0x3FF) /* 10 bits */

/* for iavf_32b_rx_flex_desc.pkt_len member */
#define IAVF_RX_FLX_DESC_PKT_LEN_M (0x3FFF) /* 14 bits */
395
/* Rx queue setup and control (ethdev callbacks). */
int iavf_dev_rx_queue_setup(struct rte_eth_dev *dev,
			    uint16_t queue_idx,
			    uint16_t nb_desc,
			    unsigned int socket_id,
			    const struct rte_eth_rxconf *rx_conf,
			    struct rte_mempool *mp);

int iavf_dev_rx_queue_start(struct rte_eth_dev *dev, uint16_t rx_queue_id);
int iavf_dev_rx_queue_stop(struct rte_eth_dev *dev, uint16_t rx_queue_id);
void iavf_dev_rx_queue_release(void *rxq);

/* Tx queue setup and control (ethdev callbacks). */
int iavf_dev_tx_queue_setup(struct rte_eth_dev *dev,
			    uint16_t queue_idx,
			    uint16_t nb_desc,
			    unsigned int socket_id,
			    const struct rte_eth_txconf *tx_conf);
int iavf_dev_tx_queue_start(struct rte_eth_dev *dev, uint16_t tx_queue_id);
int iavf_dev_tx_queue_stop(struct rte_eth_dev *dev, uint16_t tx_queue_id);
int iavf_dev_tx_done_cleanup(void *txq, uint32_t free_cnt);
void iavf_dev_tx_queue_release(void *txq);
void iavf_stop_queues(struct rte_eth_dev *dev);

/* Scalar Rx/Tx burst functions (legacy and flex-descriptor variants). */
uint16_t iavf_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
		       uint16_t nb_pkts);
uint16_t iavf_recv_pkts_flex_rxd(void *rx_queue,
				 struct rte_mbuf **rx_pkts,
				 uint16_t nb_pkts);
uint16_t iavf_recv_scattered_pkts(void *rx_queue,
				 struct rte_mbuf **rx_pkts,
				 uint16_t nb_pkts);
uint16_t iavf_recv_scattered_pkts_flex_rxd(void *rx_queue,
					   struct rte_mbuf **rx_pkts,
					   uint16_t nb_pkts);
uint16_t iavf_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
			uint16_t nb_pkts);
uint16_t iavf_prep_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
			uint16_t nb_pkts);

/* Burst-function selection and queue introspection. */
void iavf_set_rx_function(struct rte_eth_dev *dev);
void iavf_set_tx_function(struct rte_eth_dev *dev);
void iavf_dev_rxq_info_get(struct rte_eth_dev *dev, uint16_t queue_id,
			   struct rte_eth_rxq_info *qinfo);
void iavf_dev_txq_info_get(struct rte_eth_dev *dev, uint16_t queue_id,
			   struct rte_eth_txq_info *qinfo);
uint32_t iavf_dev_rxq_count(struct rte_eth_dev *dev, uint16_t queue_id);
int iavf_dev_rx_desc_status(void *rx_queue, uint16_t offset);
int iavf_dev_tx_desc_status(void *tx_queue, uint16_t offset);

/* SSE vector Rx/Tx paths. */
uint16_t iavf_recv_pkts_vec(void *rx_queue, struct rte_mbuf **rx_pkts,
			    uint16_t nb_pkts);
uint16_t iavf_recv_pkts_vec_flex_rxd(void *rx_queue, struct rte_mbuf **rx_pkts,
				     uint16_t nb_pkts);
uint16_t iavf_recv_scattered_pkts_vec(void *rx_queue,
				      struct rte_mbuf **rx_pkts,
				      uint16_t nb_pkts);
uint16_t iavf_recv_scattered_pkts_vec_flex_rxd(void *rx_queue,
					       struct rte_mbuf **rx_pkts,
					       uint16_t nb_pkts);
uint16_t iavf_xmit_fixed_burst_vec(void *tx_queue, struct rte_mbuf **tx_pkts,
				   uint16_t nb_pkts);

/* AVX2 vector Rx/Tx paths. */
uint16_t iavf_recv_pkts_vec_avx2(void *rx_queue, struct rte_mbuf **rx_pkts,
				 uint16_t nb_pkts);
uint16_t iavf_recv_pkts_vec_avx2_flex_rxd(void *rx_queue,
					  struct rte_mbuf **rx_pkts,
					  uint16_t nb_pkts);
uint16_t iavf_recv_scattered_pkts_vec_avx2(void *rx_queue,
					   struct rte_mbuf **rx_pkts,
					   uint16_t nb_pkts);
uint16_t iavf_recv_scattered_pkts_vec_avx2_flex_rxd(void *rx_queue,
						    struct rte_mbuf **rx_pkts,
						    uint16_t nb_pkts);
uint16_t iavf_xmit_pkts_vec(void *tx_queue, struct rte_mbuf **tx_pkts,
			    uint16_t nb_pkts);
uint16_t iavf_xmit_pkts_vec_avx2(void *tx_queue, struct rte_mbuf **tx_pkts,
				 uint16_t nb_pkts);

/* Vector-path capability checks and per-queue setup helpers. */
int iavf_rx_vec_dev_check(struct rte_eth_dev *dev);
int iavf_tx_vec_dev_check(struct rte_eth_dev *dev);
int iavf_rxq_vec_setup(struct iavf_rx_queue *rxq);
int iavf_txq_vec_setup(struct iavf_tx_queue *txq);

/* AVX512 vector Rx/Tx paths. */
uint16_t iavf_recv_pkts_vec_avx512(void *rx_queue, struct rte_mbuf **rx_pkts,
				   uint16_t nb_pkts);
uint16_t iavf_recv_pkts_vec_avx512_flex_rxd(void *rx_queue,
					    struct rte_mbuf **rx_pkts,
					    uint16_t nb_pkts);
uint16_t iavf_recv_scattered_pkts_vec_avx512(void *rx_queue,
					     struct rte_mbuf **rx_pkts,
					     uint16_t nb_pkts);
uint16_t iavf_recv_scattered_pkts_vec_avx512_flex_rxd(void *rx_queue,
						      struct rte_mbuf **rx_pkts,
						      uint16_t nb_pkts);
uint16_t iavf_xmit_pkts_vec_avx512(void *tx_queue, struct rte_mbuf **tx_pkts,
				   uint16_t nb_pkts);
int iavf_txq_vec_setup_avx512(struct iavf_tx_queue *txq);

/* Map a protocol-extraction type to its RXDID profile. */
uint8_t iavf_proto_xtr_type_to_rxdid(uint8_t xtr_type);

const uint32_t *iavf_get_default_ptype_table(void);
491
492static inline
493void iavf_dump_rx_descriptor(struct iavf_rx_queue *rxq,
494 const volatile void *desc,
495 uint16_t rx_id)
496{
497#ifdef RTE_LIBRTE_IAVF_16BYTE_RX_DESC
498 const volatile union iavf_16byte_rx_desc *rx_desc = desc;
499
500 printf("Queue %d Rx_desc %d: QW0: 0x%016"PRIx64" QW1: 0x%016"PRIx64"\n",
501 rxq->queue_id, rx_id, rx_desc->read.pkt_addr,
502 rx_desc->read.hdr_addr);
503#else
504 const volatile union iavf_32byte_rx_desc *rx_desc = desc;
505
506 printf("Queue %d Rx_desc %d: QW0: 0x%016"PRIx64" QW1: 0x%016"PRIx64
507 " QW2: 0x%016"PRIx64" QW3: 0x%016"PRIx64"\n", rxq->queue_id,
508 rx_id, rx_desc->read.pkt_addr, rx_desc->read.hdr_addr,
509 rx_desc->read.rsvd1, rx_desc->read.rsvd2);
510#endif
511}
512
513
514
515
516static inline
517void iavf_dump_tx_descriptor(const struct iavf_tx_queue *txq,
518 const volatile void *desc, uint16_t tx_id)
519{
520 const char *name;
521 const volatile struct iavf_tx_desc *tx_desc = desc;
522 enum iavf_tx_desc_dtype_value type;
523
524 type = (enum iavf_tx_desc_dtype_value)rte_le_to_cpu_64(
525 tx_desc->cmd_type_offset_bsz &
526 rte_cpu_to_le_64(IAVF_TXD_QW1_DTYPE_MASK));
527 switch (type) {
528 case IAVF_TX_DESC_DTYPE_DATA:
529 name = "Tx_data_desc";
530 break;
531 case IAVF_TX_DESC_DTYPE_CONTEXT:
532 name = "Tx_context_desc";
533 break;
534 default:
535 name = "unknown_desc";
536 break;
537 }
538
539 printf("Queue %d %s %d: QW0: 0x%016"PRIx64" QW1: 0x%016"PRIx64"\n",
540 txq->queue_id, name, tx_id, tx_desc->buffer_addr,
541 tx_desc->cmd_type_offset_bsz);
542}
543
/* Set the fdir_enabled flag of every configured Rx queue of adapter
 * @ad to @on (NULL queues are skipped). Written as a do/while(0)
 * statement macro so it nests safely inside if/else.
 */
#define FDIR_PROC_ENABLE_PER_QUEUE(ad, on) do { \
	int i; \
	for (i = 0; i < (ad)->eth_dev->data->nb_rx_queues; i++) { \
		struct iavf_rx_queue *rxq = (ad)->eth_dev->data->rx_queues[i]; \
		if (!rxq) \
			continue; \
		rxq->fdir_enabled = on; \
	} \
	PMD_DRV_LOG(DEBUG, "FDIR processing on RX set to %d", on); \
} while (0)
554
555
556static inline
557void iavf_fdir_rx_proc_enable(struct iavf_adapter *ad, bool on)
558{
559 if (on) {
560
561 FDIR_PROC_ENABLE_PER_QUEUE(ad, on);
562 ad->fdir_ref_cnt++;
563 } else {
564 if (ad->fdir_ref_cnt >= 1) {
565 ad->fdir_ref_cnt--;
566
567 if (ad->fdir_ref_cnt == 0)
568 FDIR_PROC_ENABLE_PER_QUEUE(ad, on);
569 }
570 }
571}
572
/* Descriptor-dump macros: forward to the inline dump helpers when
 * descriptor-dump debugging is compiled in, otherwise expand to a
 * no-op statement.
 */
#ifdef RTE_LIBRTE_IAVF_DEBUG_DUMP_DESC
#define IAVF_DUMP_RX_DESC(rxq, desc, rx_id) \
	iavf_dump_rx_descriptor(rxq, desc, rx_id)
#define IAVF_DUMP_TX_DESC(txq, desc, tx_id) \
	iavf_dump_tx_descriptor(txq, desc, tx_id)
#else
#define IAVF_DUMP_RX_DESC(rxq, desc, rx_id) do { } while (0)
#define IAVF_DUMP_TX_DESC(txq, desc, tx_id) do { } while (0)
#endif

#endif /* _IAVF_RXTX_H_ */
584