#include <inttypes.h>

#include <ethdev_driver.h>
#include <rte_common.h>
#include <rte_net.h>
#include "fm10k.h"
#include "base/fm10k_type.h"

#ifdef RTE_PMD_PACKET_PREFETCH
#define rte_packet_prefetch(p)	rte_prefetch1(p)
#else
#define rte_packet_prefetch(p)	do {} while (0)
#endif

#ifdef RTE_ETHDEV_DEBUG_RX
static inline void dump_rxd(union fm10k_rx_desc *rxd)
{
	PMD_RX_LOG(DEBUG, "+----------------|----------------+");
	PMD_RX_LOG(DEBUG, "|     GLORT      | PKT HDR & TYPE |");
	PMD_RX_LOG(DEBUG, "|   0x%08x   |   0x%08x   |", rxd->d.glort,
			rxd->d.data);
	PMD_RX_LOG(DEBUG, "+----------------|----------------+");
	PMD_RX_LOG(DEBUG, "|   VLAN & LEN   |     STATUS     |");
	PMD_RX_LOG(DEBUG, "|   0x%08x   |   0x%08x   |", rxd->d.vlan_len,
			rxd->d.staterr);
	PMD_RX_LOG(DEBUG, "+----------------|----------------+");
	PMD_RX_LOG(DEBUG, "|    RESERVED    |    RSS_HASH    |");
	PMD_RX_LOG(DEBUG, "|   0x%08x   |   0x%08x   |", 0, rxd->d.rss);
	PMD_RX_LOG(DEBUG, "+----------------|----------------+");
	PMD_RX_LOG(DEBUG, "|            TIME TAG             |");
	PMD_RX_LOG(DEBUG, "|       0x%016"PRIx64"        |", rxd->q.timestamp);
	PMD_RX_LOG(DEBUG, "+----------------|----------------+");
}
#endif

#define FM10K_TX_OFFLOAD_MASK (RTE_MBUF_F_TX_VLAN |	\
		RTE_MBUF_F_TX_IPV6 |			\
		RTE_MBUF_F_TX_IPV4 |			\
		RTE_MBUF_F_TX_IP_CKSUM |		\
		RTE_MBUF_F_TX_L4_MASK |			\
		RTE_MBUF_F_TX_TCP_SEG)

#define FM10K_TX_OFFLOAD_NOTSUP_MASK \
		(RTE_MBUF_F_TX_OFFLOAD_MASK ^ FM10K_TX_OFFLOAD_MASK)
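
/* Any TX offload flag outside FM10K_TX_OFFLOAD_MASK is not supported by
 * this PMD; fm10k_prep_pkts() rejects such packets with rte_errno = ENOTSUP.
 */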
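
/* Translate an RX descriptor into mbuf metadata: resolve packet_type from
 * the descriptor's packet-type bits, and derive the RSS-hash and IP/L4
 * checksum ol_flags from the status/error bits.
 */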
static inline void
rx_desc_to_ol_flags(struct rte_mbuf *m, const union fm10k_rx_desc *d)
{
	static const uint32_t
	ptype_table[FM10K_RXD_PKTTYPE_MASK >> FM10K_RXD_PKTTYPE_SHIFT]
		__rte_cache_aligned = {
		[FM10K_PKTTYPE_OTHER] = RTE_PTYPE_L2_ETHER,
		[FM10K_PKTTYPE_IPV4] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4,
		[FM10K_PKTTYPE_IPV4_EX] = RTE_PTYPE_L2_ETHER |
			RTE_PTYPE_L3_IPV4_EXT,
		[FM10K_PKTTYPE_IPV6] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6,
		[FM10K_PKTTYPE_IPV6_EX] = RTE_PTYPE_L2_ETHER |
			RTE_PTYPE_L3_IPV6_EXT,
		[FM10K_PKTTYPE_IPV4 | FM10K_PKTTYPE_TCP] = RTE_PTYPE_L2_ETHER |
			RTE_PTYPE_L3_IPV4 | RTE_PTYPE_L4_TCP,
		[FM10K_PKTTYPE_IPV6 | FM10K_PKTTYPE_TCP] = RTE_PTYPE_L2_ETHER |
			RTE_PTYPE_L3_IPV6 | RTE_PTYPE_L4_TCP,
		[FM10K_PKTTYPE_IPV4 | FM10K_PKTTYPE_UDP] = RTE_PTYPE_L2_ETHER |
			RTE_PTYPE_L3_IPV4 | RTE_PTYPE_L4_UDP,
		[FM10K_PKTTYPE_IPV6 | FM10K_PKTTYPE_UDP] = RTE_PTYPE_L2_ETHER |
			RTE_PTYPE_L3_IPV6 | RTE_PTYPE_L4_UDP,
	};

	m->packet_type = ptype_table[(d->w.pkt_info & FM10K_RXD_PKTTYPE_MASK)
						>> FM10K_RXD_PKTTYPE_SHIFT];

	if (d->w.pkt_info & FM10K_RXD_RSSTYPE_MASK)
		m->ol_flags |= RTE_MBUF_F_RX_RSS_HASH;

	/* The checksum is bad only when it was actually computed (IPCS/L4CS
	 * set) and an error was reported (IPE/L4E set).
	 */
	if (unlikely((d->d.staterr &
		(FM10K_RXD_STATUS_IPCS | FM10K_RXD_STATUS_IPE)) ==
		(FM10K_RXD_STATUS_IPCS | FM10K_RXD_STATUS_IPE)))
		m->ol_flags |= RTE_MBUF_F_RX_IP_CKSUM_BAD;
	else
		m->ol_flags |= RTE_MBUF_F_RX_IP_CKSUM_GOOD;

	if (unlikely((d->d.staterr &
		(FM10K_RXD_STATUS_L4CS | FM10K_RXD_STATUS_L4E)) ==
		(FM10K_RXD_STATUS_L4CS | FM10K_RXD_STATUS_L4E)))
		m->ol_flags |= RTE_MBUF_F_RX_L4_CKSUM_BAD;
	else
		m->ol_flags |= RTE_MBUF_F_RX_L4_CKSUM_GOOD;
}

uint16_t
fm10k_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
	uint16_t nb_pkts)
{
	struct rte_mbuf *mbuf;
	union fm10k_rx_desc desc;
	struct fm10k_rx_queue *q = rx_queue;
	uint16_t count = 0;
	int alloc = 0;
	uint16_t next_dd;
	int ret;

	next_dd = q->next_dd;

	/* Receive at most alloc_thresh packets per call, so a single bulk
	 * allocation is enough to replenish the ring afterwards.
	 */
	nb_pkts = RTE_MIN(nb_pkts, q->alloc_thresh);
	for (count = 0; count < nb_pkts; ++count) {
		if (!(q->hw_ring[next_dd].d.staterr & FM10K_RXD_STATUS_DD))
			break;
		mbuf = q->sw_ring[next_dd];
		desc = q->hw_ring[next_dd];
#ifdef RTE_ETHDEV_DEBUG_RX
		dump_rxd(&desc);
#endif
		rte_pktmbuf_pkt_len(mbuf) = desc.w.length;
		rte_pktmbuf_data_len(mbuf) = desc.w.length;

		mbuf->ol_flags = 0;
#ifdef RTE_LIBRTE_FM10K_RX_OLFLAGS_ENABLE
		rx_desc_to_ol_flags(mbuf, &desc);
#endif

		mbuf->hash.rss = desc.d.rss;
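
		/* The driver assumes every packet received on an fm10k
		 * device carries at least one VLAN tag (the port default
		 * VLAN applies when none is present), so the VLAN flags and
		 * vlan_tci are valid for every mbuf.
		 */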
		mbuf->ol_flags |= RTE_MBUF_F_RX_VLAN | RTE_MBUF_F_RX_VLAN_STRIPPED;
		mbuf->vlan_tci = desc.w.vlan;
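
		/* With FTAG RX enabled, expose the source GLORT from the
		 * descriptor in vlan_tci_outer.
		 */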
		if (q->rx_ftag_en)
			mbuf->vlan_tci_outer = rte_le_to_cpu_16(desc.w.sglort);

		rx_pkts[count] = mbuf;
		if (++next_dd == q->nb_desc) {
			next_dd = 0;
			alloc = 1;
		}

		rte_prefetch0(q->sw_ring[next_dd]);

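		/* When the next descriptor begins a new cache line, prefetch
		 * the next group of descriptors and their sw_ring entries.
		 */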
		if ((next_dd & 0x3) == 0) {
			rte_prefetch0(&q->hw_ring[next_dd]);
			rte_prefetch0(&q->sw_ring[next_dd]);
		}
	}

	q->next_dd = next_dd;

	if ((q->next_dd > q->next_trigger) || (alloc == 1)) {
		ret = rte_mempool_get_bulk(q->mp,
					(void **)&q->sw_ring[q->next_alloc],
					q->alloc_thresh);

		if (unlikely(ret != 0)) {
			uint16_t port = q->port_id;
			PMD_RX_LOG(ERR, "Failed to alloc mbuf");
			/* Roll back next_dd so the descriptors consumed in
			 * this call are scanned again: their mbufs cannot be
			 * handed to the application without replacements.
			 */
			q->next_dd = (q->next_dd + q->nb_desc - count) %
						q->nb_desc;
			rte_eth_devices[port].data->rx_mbuf_alloc_failed++;
			return 0;
		}

		for (; q->next_alloc <= q->next_trigger; ++q->next_alloc) {
			mbuf = q->sw_ring[q->next_alloc];

			/* Set up the static mbuf fields */
			fm10k_pktmbuf_reset(mbuf, q->port_id);

			/* Write the DMA address into the descriptor */
			desc.q.pkt_addr = MBUF_DMA_ADDR_DEFAULT(mbuf);
			desc.q.hdr_addr = MBUF_DMA_ADDR_DEFAULT(mbuf);
			q->hw_ring[q->next_alloc] = desc;
		}
		FM10K_PCI_REG_WRITE(q->tail_ptr, q->next_trigger);
		q->next_trigger += q->alloc_thresh;
		if (q->next_trigger >= q->nb_desc) {
			q->next_trigger = q->alloc_thresh - 1;
			q->next_alloc = 0;
		}
	}

	return count;
}

uint16_t
fm10k_recv_scattered_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
	uint16_t nb_pkts)
{
	struct rte_mbuf *mbuf;
	union fm10k_rx_desc desc;
	struct fm10k_rx_queue *q = rx_queue;
	uint16_t count = 0;
	uint16_t nb_rcv, nb_seg;
	int alloc = 0;
	uint16_t next_dd;
	struct rte_mbuf *first_seg = q->pkt_first_seg;
	struct rte_mbuf *last_seg = q->pkt_last_seg;
	int ret;

	next_dd = q->next_dd;
	nb_rcv = 0;

	/* Consume at most alloc_thresh descriptors per call, so a single
	 * bulk allocation is enough to replenish the ring afterwards.
	 */
	nb_seg = RTE_MIN(nb_pkts, q->alloc_thresh);
	for (count = 0; count < nb_seg; count++) {
		if (!(q->hw_ring[next_dd].d.staterr & FM10K_RXD_STATUS_DD))
			break;
		mbuf = q->sw_ring[next_dd];
		desc = q->hw_ring[next_dd];
#ifdef RTE_ETHDEV_DEBUG_RX
		dump_rxd(&desc);
#endif

		if (++next_dd == q->nb_desc) {
			next_dd = 0;
			alloc = 1;
		}

		rte_prefetch0(q->sw_ring[next_dd]);

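		/* As in fm10k_recv_pkts(), prefetch the next group of
		 * descriptors and sw_ring entries when the next descriptor
		 * begins a new cache line.
		 */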
		if ((next_dd & 0x3) == 0) {
			rte_prefetch0(&q->hw_ring[next_dd]);
			rte_prefetch0(&q->sw_ring[next_dd]);
		}

		rte_pktmbuf_data_len(mbuf) = desc.w.length;

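		/* First buffer of a packet: start a new chain. Otherwise,
		 * append the buffer, update the total packet length and the
		 * segment count.
		 */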
		if (!first_seg) {
			first_seg = mbuf;
			first_seg->pkt_len = desc.w.length;
		} else {
			first_seg->pkt_len =
				(uint16_t)(first_seg->pkt_len +
					rte_pktmbuf_data_len(mbuf));
			first_seg->nb_segs++;
			last_seg->next = mbuf;
		}
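
		/* If this is not the last buffer of the packet, remember it
		 * as the current tail and keep parsing the ring.
		 */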
		if (!(desc.d.staterr & FM10K_RXD_STATUS_EOP)) {
			last_seg = mbuf;
			continue;
		}

		first_seg->ol_flags = 0;
#ifdef RTE_LIBRTE_FM10K_RX_OLFLAGS_ENABLE
		rx_desc_to_ol_flags(first_seg, &desc);
#endif
		first_seg->hash.rss = desc.d.rss;
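
		/* As above: fm10k packets are assumed to always carry at
		 * least one VLAN tag (the port default VLAN applies
		 * otherwise), so the VLAN flags and vlan_tci are valid for
		 * every packet.
		 */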
		first_seg->ol_flags |= RTE_MBUF_F_RX_VLAN | RTE_MBUF_F_RX_VLAN_STRIPPED;
		first_seg->vlan_tci = desc.w.vlan;

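		/* With FTAG RX enabled, expose the source GLORT in
		 * vlan_tci_outer, as in fm10k_recv_pkts().
		 */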
		if (q->rx_ftag_en)
			first_seg->vlan_tci_outer =
				rte_le_to_cpu_16(desc.w.sglort);

		/* Prefetch the start of the packet data */
		rte_packet_prefetch((char *)first_seg->buf_addr +
				first_seg->data_off);

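		/* Hand the completed packet to the caller */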
		rx_pkts[nb_rcv++] = first_seg;

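		/* Reset the chain state for the next packet */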
		first_seg = NULL;
	}

	q->next_dd = next_dd;

	if ((q->next_dd > q->next_trigger) || (alloc == 1)) {
		ret = rte_mempool_get_bulk(q->mp,
					(void **)&q->sw_ring[q->next_alloc],
					q->alloc_thresh);

		if (unlikely(ret != 0)) {
			uint16_t port = q->port_id;
			PMD_RX_LOG(ERR, "Failed to alloc mbuf");
			/* Roll back next_dd so the descriptors consumed in
			 * this call are scanned again on the next invocation.
			 */
			q->next_dd = (q->next_dd + q->nb_desc - count) %
						q->nb_desc;
			rte_eth_devices[port].data->rx_mbuf_alloc_failed++;
			return 0;
		}

		for (; q->next_alloc <= q->next_trigger; ++q->next_alloc) {
			mbuf = q->sw_ring[q->next_alloc];

			/* Set up the static mbuf fields */
			fm10k_pktmbuf_reset(mbuf, q->port_id);

			/* Write the DMA address into the descriptor */
			desc.q.pkt_addr = MBUF_DMA_ADDR_DEFAULT(mbuf);
			desc.q.hdr_addr = MBUF_DMA_ADDR_DEFAULT(mbuf);
			q->hw_ring[q->next_alloc] = desc;
		}
		FM10K_PCI_REG_WRITE(q->tail_ptr, q->next_trigger);
		q->next_trigger += q->alloc_thresh;
		if (q->next_trigger >= q->nb_desc) {
			q->next_trigger = q->alloc_thresh - 1;
			q->next_alloc = 0;
		}
	}

	q->pkt_first_seg = first_seg;
	q->pkt_last_seg = last_seg;

	return nb_rcv;
}

uint32_t
fm10k_dev_rx_queue_count(void *rx_queue)
{
#define FM10K_RXQ_SCAN_INTERVAL 4
	volatile union fm10k_rx_desc *rxdp;
	struct fm10k_rx_queue *rxq;
	uint16_t desc = 0;

	rxq = rx_queue;
	rxdp = &rxq->hw_ring[rxq->next_dd];
	while ((desc < rxq->nb_desc) &&
		rxdp->w.status & rte_cpu_to_le_16(FM10K_RXD_STATUS_DD)) {
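		/* Check the DD bit only once per group of 4 descriptors to
		 * keep the scan cheap; the returned count is therefore a
		 * multiple of FM10K_RXQ_SCAN_INTERVAL.
		 */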
		desc += FM10K_RXQ_SCAN_INTERVAL;
		rxdp += FM10K_RXQ_SCAN_INTERVAL;
		if (rxq->next_dd + desc >= rxq->nb_desc)
			rxdp = &rxq->hw_ring[rxq->next_dd + desc -
				rxq->nb_desc];
	}

	return desc;
}

int
fm10k_dev_rx_descriptor_status(void *rx_queue, uint16_t offset)
{
	volatile union fm10k_rx_desc *rxdp;
	struct fm10k_rx_queue *rxq = rx_queue;
	uint16_t nb_hold, trigger_last;
	uint16_t desc;

	if (unlikely(offset >= rxq->nb_desc)) {
		PMD_DRV_LOG(ERR, "Invalid RX descriptor offset %u", offset);
		return -EINVAL;
	}

	/* Find the last descriptor the hardware was told about: one
	 * alloc_thresh behind next_trigger, modulo the ring size...
	 */
	if (rxq->next_trigger < rxq->alloc_thresh)
		trigger_last = rxq->next_trigger +
					rxq->nb_desc - rxq->alloc_thresh;
	else
		trigger_last = rxq->next_trigger - rxq->alloc_thresh;

	/* ...and from it, how many descriptors are still held back and not
	 * yet visible to the hardware.
	 */
	if (rxq->next_dd < trigger_last)
		nb_hold = rxq->next_dd + rxq->nb_desc - trigger_last;
	else
		nb_hold = rxq->next_dd - trigger_last;

	if (offset >= rxq->nb_desc - nb_hold)
		return RTE_ETH_RX_DESC_UNAVAIL;

	desc = rxq->next_dd + offset;
	if (desc >= rxq->nb_desc)
		desc -= rxq->nb_desc;

	rxdp = &rxq->hw_ring[desc];

	if (rxdp->w.status & rte_cpu_to_le_16(FM10K_RXD_STATUS_DD))
		return RTE_ETH_RX_DESC_DONE;

	return RTE_ETH_RX_DESC_AVAIL;
}

int
fm10k_dev_tx_descriptor_status(void *tx_queue, uint16_t offset)
{
	volatile struct fm10k_tx_desc *txdp;
	struct fm10k_tx_queue *txq = tx_queue;
	uint16_t desc;
	uint16_t next_rs = txq->nb_desc;
	/* Iterate over a copy of the RS tracker so the queue's own head
	 * pointer is not disturbed.
	 */
	struct fifo rs_tracker = txq->rs_tracker;
	struct fifo *r = &rs_tracker;

	if (unlikely(offset >= txq->nb_desc))
		return -EINVAL;

	desc = txq->next_free + offset;
	/* DONE is only reported on descriptors carrying the RS bit, so
	 * advance to the next RS boundary.
	 */
	desc = (desc / txq->rs_thresh + 1) *
		txq->rs_thresh - 1;

	if (desc >= txq->nb_desc) {
		desc -= txq->nb_desc;
		if (desc >= txq->nb_desc)
			desc -= txq->nb_desc;
	}

	/* Find the closest tracked RS descriptor at or after desc */
	r->head = r->list;
	for ( ; r->head != r->endp; ++r->head) {
		if (*r->head >= desc && *r->head < next_rs)
			next_rs = *r->head;
	}

	txdp = &txq->hw_ring[next_rs];
	if (txdp->flags & FM10K_TXD_FLAG_DONE)
		return RTE_ETH_TX_DESC_DONE;

	return RTE_ETH_TX_DESC_FULL;
}
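
/* Free a group of TX mbufs, batching rte_mempool_put_bulk() calls for runs
 * of mbufs that come from the same mempool.
 *
 * @txep: first sw_ring entry to free
 * @num: number of entries to free
 */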
static inline void tx_free_bulk_mbuf(struct rte_mbuf **txep, int num)
{
	struct rte_mbuf *m, *free[RTE_FM10K_TX_MAX_FREE_BUF_SZ];
	int i;
	int nb_free = 0;

	if (unlikely(num == 0))
		return;

	m = rte_pktmbuf_prefree_seg(txep[0]);
	if (likely(m != NULL)) {
		free[0] = m;
		nb_free = 1;
		for (i = 1; i < num; i++) {
			m = rte_pktmbuf_prefree_seg(txep[i]);
			if (likely(m != NULL)) {
				if (likely(m->pool == free[0]->pool))
					free[nb_free++] = m;
				else {
					/* Flush the batch when the pool
					 * changes, then start a new one.
					 */
					rte_mempool_put_bulk(free[0]->pool,
							(void **)free, nb_free);
					free[0] = m;
					nb_free = 1;
				}
			}
			txep[i] = NULL;
		}
		rte_mempool_put_bulk(free[0]->pool, (void **)free, nb_free);
	} else {
		/* The first segment is still referenced elsewhere; fall back
		 * to returning the remaining mbufs one at a time.
		 */
		for (i = 1; i < num; i++) {
			m = rte_pktmbuf_prefree_seg(txep[i]);
			if (m != NULL)
				rte_mempool_put(m->pool, m);
			txep[i] = NULL;
		}
	}
}

/* Reclaim TX descriptors whose DONE bit has been raised by hardware */
static inline void tx_free_descriptors(struct fm10k_tx_queue *q)
{
	uint16_t next_rs, count = 0;

	next_rs = fifo_peek(&q->rs_tracker);
	if (!(q->hw_ring[next_rs].flags & FM10K_TXD_FLAG_DONE))
		return;

	/* The DONE flag is set on this descriptor, so remove its ID from
	 * the RS bit tracker and free the associated buffers.
	 */
	fifo_remove(&q->rs_tracker);

	/* Wrap-around case: first free the buffers from last_free up to
	 * the end of the ring.
	 */
	if (q->last_free > next_rs) {
		count = q->nb_desc - q->last_free;
		tx_free_bulk_mbuf(&q->sw_ring[q->last_free], count);
		q->last_free = 0;
	}

	/* Adjust the free-descriptor count before the second pass */
	q->nb_free += count + (next_rs + 1 - q->last_free);

	/* Free buffers from last_free, up to and including next_rs */
	if (q->last_free <= next_rs) {
		count = next_rs - q->last_free + 1;
		tx_free_bulk_mbuf(&q->sw_ring[q->last_free], count);
		q->last_free += count;
	}

	if (q->last_free == q->nb_desc)
		q->last_free = 0;
}

static inline void tx_xmit_pkt(struct fm10k_tx_queue *q, struct rte_mbuf *mb)
{
	uint16_t last_id;
	uint8_t flags, hdrlen;

	/* Always set the LAST flag on the last descriptor used to transmit
	 * the packet.
	 */
	flags = FM10K_TXD_FLAG_LAST;
	last_id = q->next_free + mb->nb_segs - 1;
	if (last_id >= q->nb_desc)
		last_id = last_id - q->nb_desc;

	/* Set the RS flag only when rs_thresh descriptors have been used
	 * since it was last set, and remember the descriptor ID so the
	 * free path can find it.
	 */
	if ((q->nb_used + mb->nb_segs) >= q->rs_thresh) {
		flags |= FM10K_TXD_FLAG_RS;
		fifo_insert(&q->rs_tracker, last_id);
		q->nb_used = 0;
	} else {
		q->nb_used = q->nb_used + mb->nb_segs;
	}

	q->nb_free -= mb->nb_segs;

	q->hw_ring[q->next_free].flags = 0;
	if (q->tx_ftag_en)
		q->hw_ring[q->next_free].flags |= FM10K_TXD_FLAG_FTAG;

	/* Set the checksum flag on the first descriptor when any checksum
	 * or TSO offload is requested.
	 */
	if (mb->ol_flags & (RTE_MBUF_F_TX_IP_CKSUM | RTE_MBUF_F_TX_L4_MASK |
			RTE_MBUF_F_TX_TCP_SEG))
		q->hw_ring[q->next_free].flags |= FM10K_TXD_FLAG_CSUM;

	/* Set VLAN if requested */
	if (mb->ol_flags & RTE_MBUF_F_TX_VLAN)
		q->hw_ring[q->next_free].vlan = mb->vlan_tci;
	else
		q->hw_ring[q->next_free].vlan = 0;

	q->sw_ring[q->next_free] = mb;
	q->hw_ring[q->next_free].buffer_addr =
			rte_cpu_to_le_64(MBUF_DMA_ADDR(mb));
	q->hw_ring[q->next_free].buflen =
			rte_cpu_to_le_16(rte_pktmbuf_data_len(mb));

	if (mb->ol_flags & RTE_MBUF_F_TX_TCP_SEG) {
		hdrlen = mb->l2_len + mb->l3_len + mb->l4_len;
		hdrlen += (mb->ol_flags & RTE_MBUF_F_TX_TUNNEL_MASK) ?
			mb->outer_l2_len + mb->outer_l3_len : 0;
		if (q->hw_ring[q->next_free].flags & FM10K_TXD_FLAG_FTAG)
			hdrlen += sizeof(struct fm10k_ftag);

		if (likely((hdrlen >= FM10K_TSO_MIN_HEADERLEN) &&
				(hdrlen <= FM10K_TSO_MAX_HEADERLEN) &&
				(mb->tso_segsz >= FM10K_TSO_MINMSS))) {
			q->hw_ring[q->next_free].mss = mb->tso_segsz;
			q->hw_ring[q->next_free].hdrlen = hdrlen;
		}
	}

	if (++q->next_free == q->nb_desc)
		q->next_free = 0;

	/* Fill the remaining descriptors with the rest of the segment
	 * chain; only the first descriptor carries the offload flags.
	 */
	for (mb = mb->next; mb != NULL; mb = mb->next) {
		q->sw_ring[q->next_free] = mb;
		q->hw_ring[q->next_free].buffer_addr =
				rte_cpu_to_le_64(MBUF_DMA_ADDR(mb));
		q->hw_ring[q->next_free].buflen =
				rte_cpu_to_le_16(rte_pktmbuf_data_len(mb));
		q->hw_ring[q->next_free].flags = 0;
		if (++q->next_free == q->nb_desc)
			q->next_free = 0;
	}

	q->hw_ring[last_id].flags |= flags;
}

uint16_t
fm10k_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
	uint16_t nb_pkts)
{
	struct fm10k_tx_queue *q = tx_queue;
	struct rte_mbuf *mb;
	uint16_t count;

	for (count = 0; count < nb_pkts; ++count) {
		mb = tx_pkts[count];

		/* Running low on descriptors? Try to free some. */
		if (q->nb_free < q->free_thresh)
			tx_free_descriptors(q);

		/* Make sure there are enough free descriptors to transmit
		 * the entire packet.
		 */
		if (q->nb_free < mb->nb_segs)
			break;

		/* Sanity check: nb_segs must be consistent with the chain */
		if ((mb->nb_segs == 0) ||
		    ((mb->nb_segs > 1) && (mb->next == NULL)))
			break;

		/* Process the packet */
		tx_xmit_pkt(q, mb);
	}

	/* Update the hardware tail pointer if any packets were queued */
	if (likely(count > 0))
		FM10K_PCI_REG_WRITE(q->tail_ptr, q->next_free);

	return count;
}

uint16_t
fm10k_prep_pkts(__rte_unused void *tx_queue, struct rte_mbuf **tx_pkts,
		uint16_t nb_pkts)
{
	int i, ret;
	struct rte_mbuf *m;

	for (i = 0; i < nb_pkts; i++) {
		m = tx_pkts[i];

		/* Reject TSO packets whose MSS is below the hardware
		 * minimum.
		 */
		if ((m->ol_flags & RTE_MBUF_F_TX_TCP_SEG) &&
				(m->tso_segsz < FM10K_TSO_MINMSS)) {
			rte_errno = EINVAL;
			return i;
		}

		if (m->ol_flags & FM10K_TX_OFFLOAD_NOTSUP_MASK) {
			rte_errno = ENOTSUP;
			return i;
		}

#ifdef RTE_ETHDEV_DEBUG_TX
		ret = rte_validate_tx_offload(m);
		if (ret != 0) {
			rte_errno = -ret;
			return i;
		}
#endif
		ret = rte_net_intel_cksum_prepare(m);
		if (ret != 0) {
			rte_errno = -ret;
			return i;
		}
	}

	return i;
}