#include <rte_net.h>
#include "qede_rxtx.h"

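/* Allocate one mbuf from the Rx mempool and post it to the Rx BD ring at the
 * current software producer index.
 */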
static inline int qede_alloc_rx_buffer(struct qede_rx_queue *rxq)
{
	struct rte_mbuf *new_mb = NULL;
	struct eth_rx_bd *rx_bd;
	dma_addr_t mapping;
	uint16_t idx = rxq->sw_rx_prod & NUM_RX_BDS(rxq);

	new_mb = rte_mbuf_raw_alloc(rxq->mb_pool);
	if (unlikely(!new_mb)) {
		PMD_RX_LOG(ERR, rxq,
			   "Failed to allocate rx buffer "
			   "sw_rx_prod %u sw_rx_cons %u mp entries %u free %u",
			   idx, rxq->sw_rx_cons & NUM_RX_BDS(rxq),
			   rte_mempool_avail_count(rxq->mb_pool),
			   rte_mempool_in_use_count(rxq->mb_pool));
		return -ENOMEM;
	}
	rxq->sw_rx_ring[idx] = new_mb;
	mapping = rte_mbuf_data_iova_default(new_mb);
	/* Advance PROD and get BD pointer */
	rx_bd = (struct eth_rx_bd *)ecore_chain_produce(&rxq->rx_bd_ring);
	rx_bd->addr.hi = rte_cpu_to_le_32(U64_HI(mapping));
	rx_bd->addr.lo = rte_cpu_to_le_32(U64_LO(mapping));
	rxq->sw_rx_prod++;
	return 0;
}

#define QEDE_MAX_BULK_ALLOC_COUNT 512

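/* Bulk-allocate Rx mbufs and post them as Rx BDs. The request is capped at
 * QEDE_MAX_BULK_ALLOC_COUNT and trimmed so that a single
 * rte_mempool_get_bulk() never wraps past the end of sw_rx_ring.
 */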
static inline int qede_alloc_rx_bulk_mbufs(struct qede_rx_queue *rxq, int count)
{
	struct rte_mbuf *mbuf = NULL;
	struct eth_rx_bd *rx_bd;
	dma_addr_t mapping;
	int i, ret = 0;
	uint16_t idx;
	uint16_t mask = NUM_RX_BDS(rxq);

	if (count > QEDE_MAX_BULK_ALLOC_COUNT)
		count = QEDE_MAX_BULK_ALLOC_COUNT;

	idx = rxq->sw_rx_prod & mask;

	if (count > mask - idx + 1)
		count = mask - idx + 1;

	ret = rte_mempool_get_bulk(rxq->mb_pool, (void **)&rxq->sw_rx_ring[idx],
				   count);

	if (unlikely(ret)) {
		PMD_RX_LOG(ERR, rxq,
			   "Failed to allocate %d rx buffers "
			   "sw_rx_prod %u sw_rx_cons %u mp entries %u free %u",
			   count,
			   rxq->sw_rx_prod & mask,
			   rxq->sw_rx_cons & mask,
			   rte_mempool_avail_count(rxq->mb_pool),
			   rte_mempool_in_use_count(rxq->mb_pool));
		return -ENOMEM;
	}

	for (i = 0; i < count; i++) {
		rte_prefetch0(rxq->sw_rx_ring[(idx + 1) & mask]);
		mbuf = rxq->sw_rx_ring[idx & mask];

		mapping = rte_mbuf_data_iova_default(mbuf);
		rx_bd = (struct eth_rx_bd *)
			ecore_chain_produce(&rxq->rx_bd_ring);
		rx_bd->addr.hi = rte_cpu_to_le_32(U64_HI(mapping));
		rx_bd->addr.lo = rte_cpu_to_le_32(U64_LO(mapping));
		idx++;
	}
	rxq->sw_rx_prod = idx;

	return 0;
}
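
/* Criteria for calculating the Rx buffer size:
 * 1) rx_buf_size should not exceed the size of the mbuf data room.
 * 2) In scattered_rx mode the buffer needs at least
 *    (max_frame_size + overhead) / ETH_RX_MAX_BUFF_PER_PKT bytes, so that a
 *    maximum-sized frame fits within the per-packet BD limit.
 * 3) In regular mode it must hold max_frame_size plus overhead in a single
 *    buffer.
 */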
int
qede_calc_rx_buf_size(struct rte_eth_dev *dev, uint16_t mbufsz,
		      uint16_t max_frame_size)
{
	struct qede_dev *qdev = QEDE_INIT_QDEV(dev);
	struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
	int rx_buf_size;

	if (dev->data->scattered_rx) {
		/* The HW can spread one packet across at most
		 * ETH_RX_MAX_BUFF_PER_PKT buffers, so the mbuf size must be
		 * large enough to hold a maximum-sized frame in that many
		 * fragments.
		 */
		if ((mbufsz * ETH_RX_MAX_BUFF_PER_PKT) <
		    (max_frame_size + QEDE_ETH_OVERHEAD)) {
			DP_ERR(edev, "mbuf %d size is not enough to hold max fragments (%d) for max rx packet length (%d)\n",
			       mbufsz, ETH_RX_MAX_BUFF_PER_PKT, max_frame_size);
			return -EINVAL;
		}

		rx_buf_size = RTE_MAX(mbufsz,
				      (max_frame_size + QEDE_ETH_OVERHEAD) /
				      ETH_RX_MAX_BUFF_PER_PKT);
	} else {
		rx_buf_size = max_frame_size + QEDE_ETH_OVERHEAD;
	}

	/* Align buffer size down to cache line size */
	return QEDE_FLOOR_TO_CACHE_LINE_SIZE(rx_buf_size);
}

static struct qede_rx_queue *
qede_alloc_rx_queue_mem(struct rte_eth_dev *dev,
			uint16_t queue_idx,
			uint16_t nb_desc,
			unsigned int socket_id,
			struct rte_mempool *mp,
			uint16_t bufsz)
{
	struct qede_dev *qdev = QEDE_INIT_QDEV(dev);
	struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
	struct qede_rx_queue *rxq;
	size_t size;
	int rc;

	/* First allocate the rx queue data structure */
	rxq = rte_zmalloc_socket("qede_rx_queue", sizeof(struct qede_rx_queue),
				 RTE_CACHE_LINE_SIZE, socket_id);

	if (!rxq) {
		DP_ERR(edev, "Unable to allocate memory for rxq on socket %u",
		       socket_id);
		return NULL;
	}

	rxq->qdev = qdev;
	rxq->mb_pool = mp;
	rxq->nb_rx_desc = nb_desc;
	rxq->queue_id = queue_idx;
	rxq->port_id = dev->data->port_id;

	rxq->rx_buf_size = bufsz;

	DP_INFO(edev, "mtu %u mbufsz %u bd_max_bytes %u scatter_mode %d\n",
		qdev->mtu, bufsz, rxq->rx_buf_size, dev->data->scattered_rx);

	/* Allocate the parallel driver ring for Rx buffers */
	size = sizeof(*rxq->sw_rx_ring) * rxq->nb_rx_desc;
	rxq->sw_rx_ring = rte_zmalloc_socket("sw_rx_ring", size,
					     RTE_CACHE_LINE_SIZE, socket_id);
	if (!rxq->sw_rx_ring) {
		DP_ERR(edev, "Memory allocation fails for sw_rx_ring on"
		       " socket %u\n", socket_id);
		rte_free(rxq);
		return NULL;
	}

	/* Allocate FW ring for Rx BDs */
	rc = qdev->ops->common->chain_alloc(edev,
					    ECORE_CHAIN_USE_TO_CONSUME_PRODUCE,
					    ECORE_CHAIN_MODE_NEXT_PTR,
					    ECORE_CHAIN_CNT_TYPE_U16,
					    rxq->nb_rx_desc,
					    sizeof(struct eth_rx_bd),
					    &rxq->rx_bd_ring,
					    NULL);

	if (rc != ECORE_SUCCESS) {
		DP_ERR(edev, "Memory allocation fails for RX BD ring"
		       " on socket %u\n", socket_id);
		rte_free(rxq->sw_rx_ring);
		rte_free(rxq);
		return NULL;
	}

	/* Allocate FW completion ring */
	rc = qdev->ops->common->chain_alloc(edev,
					    ECORE_CHAIN_USE_TO_CONSUME,
					    ECORE_CHAIN_MODE_PBL,
					    ECORE_CHAIN_CNT_TYPE_U16,
					    rxq->nb_rx_desc,
					    sizeof(union eth_rx_cqe),
					    &rxq->rx_comp_ring,
					    NULL);

	if (rc != ECORE_SUCCESS) {
		DP_ERR(edev, "Memory allocation fails for RX CQE ring"
		       " on socket %u\n", socket_id);
		qdev->ops->common->chain_free(edev, &rxq->rx_bd_ring);
		rte_free(rxq->sw_rx_ring);
		rte_free(rxq);
		return NULL;
	}

	return rxq;
}

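/* Set up an Rx queue: validate the ring size, compute the Rx buffer size,
 * force scatter-gather when a max-sized frame cannot fit in one buffer, and
 * allocate one queue per engine on CMT (two-engine) devices.
 */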
int
qede_rx_queue_setup(struct rte_eth_dev *dev, uint16_t qid,
		    uint16_t nb_desc, unsigned int socket_id,
		    __rte_unused const struct rte_eth_rxconf *rx_conf,
		    struct rte_mempool *mp)
{
	struct qede_dev *qdev = QEDE_INIT_QDEV(dev);
	struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
	struct rte_eth_rxmode *rxmode = &dev->data->dev_conf.rxmode;
	struct qede_rx_queue *rxq;
	uint16_t max_rx_pkt_len;
	uint16_t bufsz;
	int rc;

	PMD_INIT_FUNC_TRACE(edev);

	/* Sanity check: the ring size must be a power of 2 */
	if (!rte_is_power_of_2(nb_desc)) {
		DP_ERR(edev, "Ring size %u is not power of 2\n",
		       nb_desc);
		return -EINVAL;
	}

	/* Free memory prior to re-allocation if needed... */
	if (dev->data->rx_queues[qid] != NULL) {
		qede_rx_queue_release(dev->data->rx_queues[qid]);
		dev->data->rx_queues[qid] = NULL;
	}

	max_rx_pkt_len = (uint16_t)rxmode->max_rx_pkt_len;

	/* Fix up RX buffer size */
	bufsz = (uint16_t)rte_pktmbuf_data_room_size(mp) - RTE_PKTMBUF_HEADROOM;
	/* cache align the mbuf size to simplify rx_buf_size calculation */
	bufsz = QEDE_FLOOR_TO_CACHE_LINE_SIZE(bufsz);
	if ((rxmode->offloads & DEV_RX_OFFLOAD_SCATTER) ||
	    (max_rx_pkt_len + QEDE_ETH_OVERHEAD) > bufsz) {
		if (!dev->data->scattered_rx) {
			DP_INFO(edev, "Forcing scatter-gather mode\n");
			dev->data->scattered_rx = 1;
		}
	}

	rc = qede_calc_rx_buf_size(dev, bufsz, max_rx_pkt_len);
	if (rc < 0)
		return rc;

	bufsz = rc;

	if (ECORE_IS_CMT(edev)) {
		rxq = qede_alloc_rx_queue_mem(dev, qid * 2, nb_desc,
					      socket_id, mp, bufsz);
		if (!rxq)
			return -ENOMEM;

		qdev->fp_array[qid * 2].rxq = rxq;
		rxq = qede_alloc_rx_queue_mem(dev, qid * 2 + 1, nb_desc,
					      socket_id, mp, bufsz);
		if (!rxq)
			return -ENOMEM;

		qdev->fp_array[qid * 2 + 1].rxq = rxq;
		/* Use the per-engine fastpath pair as the queue handle */
		dev->data->rx_queues[qid] = &qdev->fp_array_cmt[qid];
	} else {
		rxq = qede_alloc_rx_queue_mem(dev, qid, nb_desc,
					      socket_id, mp, bufsz);
		if (!rxq)
			return -ENOMEM;

		dev->data->rx_queues[qid] = rxq;
		qdev->fp_array[qid].rxq = rxq;
	}

	DP_INFO(edev, "rxq %d num_desc %u rx_buf_size=%u socket %u\n",
		qid, nb_desc, rxq->rx_buf_size, socket_id);

	return 0;
}

static void
qede_rx_queue_reset(__rte_unused struct qede_dev *qdev,
		    struct qede_rx_queue *rxq)
{
	DP_INFO(&qdev->edev, "Reset RX queue %u\n", rxq->queue_id);
	ecore_chain_reset(&rxq->rx_bd_ring);
	ecore_chain_reset(&rxq->rx_comp_ring);
	rxq->sw_rx_prod = 0;
	rxq->sw_rx_cons = 0;
	*rxq->hw_cons_ptr = 0;
}

static void qede_rx_queue_release_mbufs(struct qede_rx_queue *rxq)
{
	uint16_t i;

	if (rxq->sw_rx_ring) {
		for (i = 0; i < rxq->nb_rx_desc; i++) {
			if (rxq->sw_rx_ring[i]) {
				rte_pktmbuf_free(rxq->sw_rx_ring[i]);
				rxq->sw_rx_ring[i] = NULL;
			}
		}
	}
}

static void _qede_rx_queue_release(struct qede_dev *qdev,
				   struct ecore_dev *edev,
				   struct qede_rx_queue *rxq)
{
	qede_rx_queue_release_mbufs(rxq);
	qdev->ops->common->chain_free(edev, &rxq->rx_bd_ring);
	qdev->ops->common->chain_free(edev, &rxq->rx_comp_ring);
	rte_free(rxq->sw_rx_ring);
	rte_free(rxq);
}

void qede_rx_queue_release(void *rx_queue)
{
	struct qede_rx_queue *rxq = rx_queue;
	struct qede_fastpath_cmt *fp_cmt;
	struct qede_dev *qdev;
	struct ecore_dev *edev;

	if (rxq) {
		qdev = rxq->qdev;
		edev = QEDE_INIT_EDEV(qdev);
		PMD_INIT_FUNC_TRACE(edev);
		if (ECORE_IS_CMT(edev)) {
			fp_cmt = rx_queue;
			_qede_rx_queue_release(qdev, edev, fp_cmt->fp0->rxq);
			_qede_rx_queue_release(qdev, edev, fp_cmt->fp1->rxq);
		} else {
			_qede_rx_queue_release(qdev, edev, rxq);
		}
	}
}

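/* Stops a given RX queue in the HW */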
static int qede_rx_queue_stop(struct rte_eth_dev *eth_dev, uint16_t rx_queue_id)
{
	struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
	struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
	struct ecore_hwfn *p_hwfn;
	struct qede_rx_queue *rxq;
	int hwfn_index;
	int rc;

	if (rx_queue_id < qdev->num_rx_queues) {
		rxq = qdev->fp_array[rx_queue_id].rxq;
		hwfn_index = rx_queue_id % edev->num_hwfns;
		p_hwfn = &edev->hwfns[hwfn_index];
		rc = ecore_eth_rx_queue_stop(p_hwfn, rxq->handle,
					     true, false);
		if (rc != ECORE_SUCCESS) {
			DP_ERR(edev, "RX queue %u stop fails\n", rx_queue_id);
			return -1;
		}
		qede_rx_queue_release_mbufs(rxq);
		qede_rx_queue_reset(qdev, rxq);
		eth_dev->data->rx_queue_state[rx_queue_id] =
			RTE_ETH_QUEUE_STATE_STOPPED;
		DP_INFO(edev, "RX queue %u stopped\n", rx_queue_id);
	} else {
		DP_ERR(edev, "RX queue %u is not in range\n", rx_queue_id);
		rc = -EINVAL;
	}

	return rc;
}

static struct qede_tx_queue *
qede_alloc_tx_queue_mem(struct rte_eth_dev *dev,
			uint16_t queue_idx,
			uint16_t nb_desc,
			unsigned int socket_id,
			const struct rte_eth_txconf *tx_conf)
{
	struct qede_dev *qdev = dev->data->dev_private;
	struct ecore_dev *edev = &qdev->edev;
	struct qede_tx_queue *txq;
	int rc;
	size_t sw_tx_ring_size;

	txq = rte_zmalloc_socket("qede_tx_queue", sizeof(struct qede_tx_queue),
				 RTE_CACHE_LINE_SIZE, socket_id);

	if (txq == NULL) {
		DP_ERR(edev,
		       "Unable to allocate memory for txq on socket %u",
		       socket_id);
		return NULL;
	}

	txq->nb_tx_desc = nb_desc;
	txq->qdev = qdev;
	txq->port_id = dev->data->port_id;

	rc = qdev->ops->common->chain_alloc(edev,
					    ECORE_CHAIN_USE_TO_CONSUME_PRODUCE,
					    ECORE_CHAIN_MODE_PBL,
					    ECORE_CHAIN_CNT_TYPE_U16,
					    txq->nb_tx_desc,
					    sizeof(union eth_tx_bd_types),
					    &txq->tx_pbl,
					    NULL);
	if (rc != ECORE_SUCCESS) {
		DP_ERR(edev,
		       "Unable to allocate memory for txbd ring on socket %u",
		       socket_id);
		qede_tx_queue_release(txq);
		return NULL;
	}

	/* Allocate software ring; size by the element, not the pointer type */
	sw_tx_ring_size = sizeof(*txq->sw_tx_ring) * txq->nb_tx_desc;
	txq->sw_tx_ring = rte_zmalloc_socket("txq->sw_tx_ring",
					     sw_tx_ring_size,
					     RTE_CACHE_LINE_SIZE, socket_id);

	if (!txq->sw_tx_ring) {
		DP_ERR(edev,
		       "Unable to allocate memory for tx sw ring on socket %u",
		       socket_id);
		qdev->ops->common->chain_free(edev, &txq->tx_pbl);
		qede_tx_queue_release(txq);
		return NULL;
	}

	txq->queue_id = queue_idx;

	txq->nb_tx_avail = txq->nb_tx_desc;

	txq->tx_free_thresh =
	    tx_conf->tx_free_thresh ? tx_conf->tx_free_thresh :
	    (txq->nb_tx_desc - QEDE_DEFAULT_TX_FREE_THRESH);

	DP_INFO(edev,
		"txq %u num_desc %u tx_free_thresh %u socket %u\n",
		queue_idx, nb_desc, txq->tx_free_thresh, socket_id);
	return txq;
}

int
qede_tx_queue_setup(struct rte_eth_dev *dev,
		    uint16_t queue_idx,
		    uint16_t nb_desc,
		    unsigned int socket_id,
		    const struct rte_eth_txconf *tx_conf)
{
	struct qede_dev *qdev = dev->data->dev_private;
	struct ecore_dev *edev = &qdev->edev;
	struct qede_tx_queue *txq;

	PMD_INIT_FUNC_TRACE(edev);

	if (!rte_is_power_of_2(nb_desc)) {
		DP_ERR(edev, "Ring size %u is not power of 2\n",
		       nb_desc);
		return -EINVAL;
	}

	/* Free memory prior to re-allocation if needed... */
	if (dev->data->tx_queues[queue_idx] != NULL) {
		qede_tx_queue_release(dev->data->tx_queues[queue_idx]);
		dev->data->tx_queues[queue_idx] = NULL;
	}

	if (ECORE_IS_CMT(edev)) {
		txq = qede_alloc_tx_queue_mem(dev, queue_idx * 2, nb_desc,
					      socket_id, tx_conf);
		if (!txq)
			return -ENOMEM;

		qdev->fp_array[queue_idx * 2].txq = txq;
		txq = qede_alloc_tx_queue_mem(dev, (queue_idx * 2) + 1, nb_desc,
					      socket_id, tx_conf);
		if (!txq)
			return -ENOMEM;

		qdev->fp_array[(queue_idx * 2) + 1].txq = txq;
		dev->data->tx_queues[queue_idx] =
			&qdev->fp_array_cmt[queue_idx];
	} else {
		txq = qede_alloc_tx_queue_mem(dev, queue_idx, nb_desc,
					      socket_id, tx_conf);
		if (!txq)
			return -ENOMEM;

		dev->data->tx_queues[queue_idx] = txq;
		qdev->fp_array[queue_idx].txq = txq;
	}

	return 0;
}

static void
qede_tx_queue_reset(__rte_unused struct qede_dev *qdev,
		    struct qede_tx_queue *txq)
{
	DP_INFO(&qdev->edev, "Reset TX queue %u\n", txq->queue_id);
	ecore_chain_reset(&txq->tx_pbl);
	txq->sw_tx_cons = 0;
	txq->sw_tx_prod = 0;
	*txq->hw_cons_ptr = 0;
}

static void qede_tx_queue_release_mbufs(struct qede_tx_queue *txq)
{
	uint16_t i;

	if (txq->sw_tx_ring) {
		for (i = 0; i < txq->nb_tx_desc; i++) {
			if (txq->sw_tx_ring[i]) {
				rte_pktmbuf_free(txq->sw_tx_ring[i]);
				txq->sw_tx_ring[i] = NULL;
			}
		}
	}
}

static void _qede_tx_queue_release(struct qede_dev *qdev,
				   struct ecore_dev *edev,
				   struct qede_tx_queue *txq)
{
	qede_tx_queue_release_mbufs(txq);
	qdev->ops->common->chain_free(edev, &txq->tx_pbl);
	rte_free(txq->sw_tx_ring);
	rte_free(txq);
}

void qede_tx_queue_release(void *tx_queue)
{
	struct qede_tx_queue *txq = tx_queue;
	struct qede_fastpath_cmt *fp_cmt;
	struct qede_dev *qdev;
	struct ecore_dev *edev;

	if (txq) {
		qdev = txq->qdev;
		edev = QEDE_INIT_EDEV(qdev);
		PMD_INIT_FUNC_TRACE(edev);

		if (ECORE_IS_CMT(edev)) {
			fp_cmt = tx_queue;
			_qede_tx_queue_release(qdev, edev, fp_cmt->fp0->txq);
			_qede_tx_queue_release(qdev, edev, fp_cmt->fp1->txq);
		} else {
			_qede_tx_queue_release(qdev, edev, txq);
		}
	}
}

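/* This function allocates fast-path status block memory */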
static int
qede_alloc_mem_sb(struct qede_dev *qdev, struct ecore_sb_info *sb_info,
		  uint16_t sb_id)
{
	struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
	struct status_block *sb_virt;
	dma_addr_t sb_phys;
	int rc;

	sb_virt = OSAL_DMA_ALLOC_COHERENT(edev, &sb_phys,
					  sizeof(struct status_block));
	if (!sb_virt) {
		DP_ERR(edev, "Status block allocation failed\n");
		return -ENOMEM;
	}
	rc = qdev->ops->common->sb_init(edev, sb_info, sb_virt,
					sb_phys, sb_id);
	if (rc) {
		DP_ERR(edev, "Status block initialization failed\n");
		OSAL_DMA_FREE_COHERENT(edev, sb_virt, sb_phys,
				       sizeof(struct status_block));
		return rc;
	}

	return 0;
}

int qede_alloc_fp_resc(struct qede_dev *qdev)
{
	struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
	struct qede_fastpath *fp;
	uint32_t num_sbs;
	uint16_t sb_idx;
	int i;

	PMD_INIT_FUNC_TRACE(edev);

	if (IS_VF(edev))
		ecore_vf_get_num_sbs(ECORE_LEADING_HWFN(edev), &num_sbs);
	else
		num_sbs = ecore_cxt_get_proto_cid_count
			  (ECORE_LEADING_HWFN(edev), PROTOCOLID_ETH, NULL);

	if (num_sbs == 0) {
		DP_ERR(edev, "No status blocks available\n");
		return -EINVAL;
	}

	qdev->fp_array = rte_calloc("fp", QEDE_RXTX_MAX(qdev),
				    sizeof(*qdev->fp_array), RTE_CACHE_LINE_SIZE);

	if (!qdev->fp_array) {
		DP_ERR(edev, "fp array allocation failed\n");
		return -ENOMEM;
	}

	memset((void *)qdev->fp_array, 0, QEDE_RXTX_MAX(qdev) *
	       sizeof(*qdev->fp_array));

	if (ECORE_IS_CMT(edev)) {
		qdev->fp_array_cmt = rte_calloc("fp_cmt",
						QEDE_RXTX_MAX(qdev) / 2,
						sizeof(*qdev->fp_array_cmt),
						RTE_CACHE_LINE_SIZE);

		if (!qdev->fp_array_cmt) {
			DP_ERR(edev, "fp array for CMT allocation failed\n");
			return -ENOMEM;
		}

		memset((void *)qdev->fp_array_cmt, 0,
		       (QEDE_RXTX_MAX(qdev) / 2) * sizeof(*qdev->fp_array_cmt));

		/* Establish the mapping between fp_array and fp_array_cmt */
		for (i = 0; i < QEDE_RXTX_MAX(qdev) / 2; i++) {
			qdev->fp_array_cmt[i].qdev = qdev;
			qdev->fp_array_cmt[i].fp0 = &qdev->fp_array[i * 2];
			qdev->fp_array_cmt[i].fp1 = &qdev->fp_array[i * 2 + 1];
		}
	}

	for (sb_idx = 0; sb_idx < QEDE_RXTX_MAX(qdev); sb_idx++) {
		fp = &qdev->fp_array[sb_idx];
		fp->sb_info = rte_calloc("sb", 1, sizeof(struct ecore_sb_info),
					 RTE_CACHE_LINE_SIZE);
		if (!fp->sb_info) {
			DP_ERR(edev, "FP sb_info allocation fails\n");
			return -1;
		}
		if (qede_alloc_mem_sb(qdev, fp->sb_info, sb_idx)) {
			DP_ERR(edev, "FP status block allocation fails\n");
			return -1;
		}
		DP_INFO(edev, "sb_info idx 0x%x initialized\n",
			fp->sb_info->igu_sb_id);
	}

	return 0;
}

void qede_dealloc_fp_resc(struct rte_eth_dev *eth_dev)
{
	struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
	struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
	struct qede_fastpath *fp;
	uint16_t sb_idx;
	uint8_t i;

	PMD_INIT_FUNC_TRACE(edev);

	for (sb_idx = 0; sb_idx < QEDE_RXTX_MAX(qdev); sb_idx++) {
		fp = &qdev->fp_array[sb_idx];
		if (fp->sb_info) {
			DP_INFO(edev, "Free sb_info index 0x%x\n",
				fp->sb_info->igu_sb_id);
			OSAL_DMA_FREE_COHERENT(edev, fp->sb_info->sb_virt,
					       fp->sb_info->sb_phys,
					       sizeof(struct status_block));
			rte_free(fp->sb_info);
			fp->sb_info = NULL;
		}
	}

	/* Free packet buffers and rings */
	for (i = 0; i < eth_dev->data->nb_rx_queues; i++) {
		if (eth_dev->data->rx_queues[i]) {
			qede_rx_queue_release(eth_dev->data->rx_queues[i]);
			eth_dev->data->rx_queues[i] = NULL;
		}
	}

	for (i = 0; i < eth_dev->data->nb_tx_queues; i++) {
		if (eth_dev->data->tx_queues[i]) {
			qede_tx_queue_release(eth_dev->data->tx_queues[i]);
			eth_dev->data->tx_queues[i] = NULL;
		}
	}

	if (qdev->fp_array)
		rte_free(qdev->fp_array);
	qdev->fp_array = NULL;

	if (qdev->fp_array_cmt)
		rte_free(qdev->fp_array_cmt);
	qdev->fp_array_cmt = NULL;
}

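/* Write the BD and CQE producer values to the device's internal RAM so the
 * firmware can see newly posted receive buffers.
 */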
static inline void
qede_update_rx_prod(__rte_unused struct qede_dev *edev,
		    struct qede_rx_queue *rxq)
{
	uint16_t bd_prod = ecore_chain_get_prod_idx(&rxq->rx_bd_ring);
	uint16_t cqe_prod = ecore_chain_get_prod_idx(&rxq->rx_comp_ring);
	struct eth_rx_prod_data rx_prods = { 0 };

	/* Update producers */
	rx_prods.bd_prod = rte_cpu_to_le_16(bd_prod);
	rx_prods.cqe_prod = rte_cpu_to_le_16(cqe_prod);

	/* Make sure that the BD and SGE data is updated before updating the
	 * producers since FW might read the BD/SGE right after the producer
	 * is updated.
	 */
	rte_wmb();

	internal_ram_wr(rxq->hw_rxq_prod_addr, sizeof(rx_prods),
			(uint32_t *)&rx_prods);

	/* The second write barrier orders the producer update against any
	 * subsequent doorbell write, so a later doorbell cannot bypass this
	 * producer update.
	 */
	rte_wmb();

	PMD_RX_LOG(DEBUG, rxq, "bd_prod %u cqe_prod %u", bd_prod, cqe_prod);
}

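/* Starts a given RX queue in the HW */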
static int
qede_rx_queue_start(struct rte_eth_dev *eth_dev, uint16_t rx_queue_id)
{
	struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
	struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
	struct ecore_queue_start_common_params params;
	struct ecore_rxq_start_ret_params ret_params;
	struct qede_rx_queue *rxq;
	struct qede_fastpath *fp;
	struct ecore_hwfn *p_hwfn;
	dma_addr_t p_phys_table;
	uint16_t page_cnt;
	uint16_t j;
	int hwfn_index;
	int rc;

	if (rx_queue_id < qdev->num_rx_queues) {
		fp = &qdev->fp_array[rx_queue_id];
		rxq = fp->rxq;
		/* Allocate buffers for the Rx ring */
		for (j = 0; j < rxq->nb_rx_desc; j++) {
			rc = qede_alloc_rx_buffer(rxq);
			if (rc) {
				DP_ERR(edev, "RX buffer allocation failed"
				       " for rxq = %u\n", rx_queue_id);
				return -ENOMEM;
			}
		}
		/* disable interrupts */
		ecore_sb_ack(fp->sb_info, IGU_INT_DISABLE, 0);
		/* Prepare ramrod */
		memset(&params, 0, sizeof(params));
		params.queue_id = rx_queue_id / edev->num_hwfns;
		params.vport_id = 0;
		params.stats_id = params.vport_id;
		params.p_sb = fp->sb_info;
		DP_INFO(edev, "rxq %u igu_sb_id 0x%x\n",
			fp->rxq->queue_id, fp->sb_info->igu_sb_id);
		params.sb_idx = RX_PI;
		hwfn_index = rx_queue_id % edev->num_hwfns;
		p_hwfn = &edev->hwfns[hwfn_index];
		p_phys_table = ecore_chain_get_pbl_phys(&fp->rxq->rx_comp_ring);
		page_cnt = ecore_chain_get_page_cnt(&fp->rxq->rx_comp_ring);
		memset(&ret_params, 0, sizeof(ret_params));
		rc = ecore_eth_rx_queue_start(p_hwfn,
					      p_hwfn->hw_info.opaque_fid,
					      &params, fp->rxq->rx_buf_size,
					      fp->rxq->rx_bd_ring.p_phys_addr,
					      p_phys_table, page_cnt,
					      &ret_params);
		if (rc) {
			DP_ERR(edev, "RX queue %u could not be started, rc = %d\n",
			       rx_queue_id, rc);
			return -1;
		}
		/* Update with the returned parameters */
		fp->rxq->hw_rxq_prod_addr = ret_params.p_prod;
		fp->rxq->handle = ret_params.p_handle;

		fp->rxq->hw_cons_ptr = &fp->sb_info->sb_pi_array[RX_PI];
		qede_update_rx_prod(qdev, fp->rxq);
		eth_dev->data->rx_queue_state[rx_queue_id] =
			RTE_ETH_QUEUE_STATE_STARTED;
		DP_INFO(edev, "RX queue %u started\n", rx_queue_id);
	} else {
		DP_ERR(edev, "RX queue %u is not in range\n", rx_queue_id);
		rc = -EINVAL;
	}

	return rc;
}

static int
qede_tx_queue_start(struct rte_eth_dev *eth_dev, uint16_t tx_queue_id)
{
	struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
	struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
	struct ecore_queue_start_common_params params;
	struct ecore_txq_start_ret_params ret_params;
	struct ecore_hwfn *p_hwfn;
	dma_addr_t p_phys_table;
	struct qede_tx_queue *txq;
	struct qede_fastpath *fp;
	uint16_t page_cnt;
	int hwfn_index;
	int rc;

	if (tx_queue_id < qdev->num_tx_queues) {
		fp = &qdev->fp_array[tx_queue_id];
		txq = fp->txq;
		memset(&params, 0, sizeof(params));
		params.queue_id = tx_queue_id / edev->num_hwfns;
		params.vport_id = 0;
		params.stats_id = params.vport_id;
		params.p_sb = fp->sb_info;
		DP_INFO(edev, "txq %u igu_sb_id 0x%x\n",
			fp->txq->queue_id, fp->sb_info->igu_sb_id);
		params.sb_idx = TX_PI(0); /* tc = 0 */
		p_phys_table = ecore_chain_get_pbl_phys(&txq->tx_pbl);
		page_cnt = ecore_chain_get_page_cnt(&txq->tx_pbl);
		hwfn_index = tx_queue_id % edev->num_hwfns;
		p_hwfn = &edev->hwfns[hwfn_index];
		if (qdev->dev_info.is_legacy)
			fp->txq->is_legacy = true;
		rc = ecore_eth_tx_queue_start(p_hwfn,
					      p_hwfn->hw_info.opaque_fid,
					      &params, 0 /* tc */,
					      p_phys_table, page_cnt,
					      &ret_params);
		if (rc != ECORE_SUCCESS) {
			DP_ERR(edev, "TX queue %u couldn't be started, rc=%d\n",
			       tx_queue_id, rc);
			return -1;
		}
		txq->doorbell_addr = ret_params.p_doorbell;
		txq->handle = ret_params.p_handle;

		txq->hw_cons_ptr = &fp->sb_info->sb_pi_array[TX_PI(0)];
		SET_FIELD(txq->tx_db.data.params, ETH_DB_DATA_DEST,
			  DB_DEST_XCM);
		SET_FIELD(txq->tx_db.data.params, ETH_DB_DATA_AGG_CMD,
			  DB_AGG_CMD_SET);
		SET_FIELD(txq->tx_db.data.params,
			  ETH_DB_DATA_AGG_VAL_SEL,
			  DQ_XCM_ETH_TX_BD_PROD_CMD);
		txq->tx_db.data.agg_flags = DQ_XCM_ETH_DQ_CF_CMD;
		eth_dev->data->tx_queue_state[tx_queue_id] =
			RTE_ETH_QUEUE_STATE_STARTED;
		DP_INFO(edev, "TX queue %u started\n", tx_queue_id);
	} else {
		DP_ERR(edev, "TX queue %u is not in range\n", tx_queue_id);
		rc = -EINVAL;
	}

	return rc;
}

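/* Reclaim Tx descriptors that the firmware has completed: walk the completion
 * index forward, consume one BD per segment, and bulk-free the completed
 * mbufs (in two chunks when the completed range wraps around the ring).
 */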
static inline void
qede_process_tx_compl(__rte_unused struct ecore_dev *edev,
		      struct qede_tx_queue *txq)
{
	uint16_t hw_bd_cons;
	uint16_t sw_tx_cons;
	uint16_t remaining;
	uint16_t mask;
	struct rte_mbuf *mbuf;
	uint16_t nb_segs;
	uint16_t idx;
	uint16_t first_idx;

	rte_compiler_barrier();
	rte_prefetch0(txq->hw_cons_ptr);
	sw_tx_cons = ecore_chain_get_cons_idx(&txq->tx_pbl);
	hw_bd_cons = rte_le_to_cpu_16(*txq->hw_cons_ptr);
#ifdef RTE_LIBRTE_QEDE_DEBUG_TX
	PMD_TX_LOG(DEBUG, txq, "Tx Completions = %u\n",
		   abs(hw_bd_cons - sw_tx_cons));
#endif

	mask = NUM_TX_BDS(txq);
	idx = txq->sw_tx_cons & mask;

	remaining = hw_bd_cons - sw_tx_cons;
	txq->nb_tx_avail += remaining;
	first_idx = idx;

	while (remaining) {
		mbuf = txq->sw_tx_ring[idx];
		RTE_ASSERT(mbuf);
		nb_segs = mbuf->nb_segs;
		remaining -= nb_segs;

		/* Prefetch the next mbuf. Note that at least the last 4 mbufs
		 * that are prefetched will not be used in the current call.
		 */
		rte_mbuf_prefetch_part1(txq->sw_tx_ring[(idx + 4) & mask]);
		rte_mbuf_prefetch_part2(txq->sw_tx_ring[(idx + 4) & mask]);

		PMD_TX_LOG(DEBUG, txq, "nb_segs to free %u\n", nb_segs);

		while (nb_segs) {
			ecore_chain_consume(&txq->tx_pbl);
			nb_segs--;
		}

		idx = (idx + 1) & mask;
		PMD_TX_LOG(DEBUG, txq, "Freed tx packet\n");
	}
	txq->sw_tx_cons = idx;

	if (first_idx > idx) {
		rte_pktmbuf_free_bulk(&txq->sw_tx_ring[first_idx],
				      mask - first_idx + 1);
		rte_pktmbuf_free_bulk(&txq->sw_tx_ring[0], idx);
	} else {
		rte_pktmbuf_free_bulk(&txq->sw_tx_ring[first_idx],
				      idx - first_idx);
	}
}

static int qede_drain_txq(struct qede_dev *qdev,
			  struct qede_tx_queue *txq, bool allow_drain)
{
	struct ecore_dev *edev = &qdev->edev;
	int rc, cnt = 1000;

	while (txq->sw_tx_cons != txq->sw_tx_prod) {
		qede_process_tx_compl(edev, txq);
		if (!cnt) {
			if (allow_drain) {
				DP_ERR(edev, "Tx queue[%u] is stuck, "
				       "requesting MCP to drain\n",
				       txq->queue_id);
				rc = qdev->ops->common->drain(edev);
				if (rc)
					return rc;
				return qede_drain_txq(qdev, txq, false);
			}
			DP_ERR(edev, "Timeout waiting for tx queue[%d]: "
			       "PROD=%d, CONS=%d\n",
			       txq->queue_id, txq->sw_tx_prod,
			       txq->sw_tx_cons);
			return -1;
		}
		cnt--;
		DELAY(1000);
		rte_compiler_barrier();
	}

	/* FW finished processing, wait for HW to transmit all tx packets */
	DELAY(2000);

	return 0;
}

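/* Stops a given TX queue in the HW */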
static int qede_tx_queue_stop(struct rte_eth_dev *eth_dev, uint16_t tx_queue_id)
{
	struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
	struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
	struct ecore_hwfn *p_hwfn;
	struct qede_tx_queue *txq;
	int hwfn_index;
	int rc;

	if (tx_queue_id < qdev->num_tx_queues) {
		txq = qdev->fp_array[tx_queue_id].txq;
		/* Drain txq */
		if (qede_drain_txq(qdev, txq, true))
			return -1; /* For the lack of retcodes */

		hwfn_index = tx_queue_id % edev->num_hwfns;
		p_hwfn = &edev->hwfns[hwfn_index];
		rc = ecore_eth_tx_queue_stop(p_hwfn, txq->handle);
		if (rc != ECORE_SUCCESS) {
			DP_ERR(edev, "TX queue %u stop fails\n", tx_queue_id);
			return -1;
		}
		qede_tx_queue_release_mbufs(txq);
		qede_tx_queue_reset(qdev, txq);
		eth_dev->data->tx_queue_state[tx_queue_id] =
			RTE_ETH_QUEUE_STATE_STOPPED;
		DP_INFO(edev, "TX queue %u stopped\n", tx_queue_id);
	} else {
		DP_ERR(edev, "TX queue %u is not in range\n", tx_queue_id);
		rc = -EINVAL;
	}

	return rc;
}

int qede_start_queues(struct rte_eth_dev *eth_dev)
{
	struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
	uint8_t id;
	int rc = -1;

	for (id = 0; id < qdev->num_rx_queues; id++) {
		rc = qede_rx_queue_start(eth_dev, id);
		if (rc != ECORE_SUCCESS)
			return -1;
	}

	for (id = 0; id < qdev->num_tx_queues; id++) {
		rc = qede_tx_queue_start(eth_dev, id);
		if (rc != ECORE_SUCCESS)
			return -1;
	}

	return rc;
}

void qede_stop_queues(struct rte_eth_dev *eth_dev)
{
	struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
	uint8_t id;

	/* Stopping RX/TX queues */
	for (id = 0; id < qdev->num_tx_queues; id++)
		qede_tx_queue_stop(eth_dev, id);

	for (id = 0; id < qdev->num_rx_queues; id++)
		qede_rx_queue_stop(eth_dev, id);
}

static inline bool qede_tunn_exist(uint16_t flag)
{
	return !!((PARSING_AND_ERR_FLAGS_TUNNELEXIST_MASK <<
		   PARSING_AND_ERR_FLAGS_TUNNELEXIST_SHIFT) & flag);
}

static inline uint8_t qede_check_tunn_csum_l3(uint16_t flag)
{
	return !!((PARSING_AND_ERR_FLAGS_TUNNELIPHDRERROR_MASK <<
		   PARSING_AND_ERR_FLAGS_TUNNELIPHDRERROR_SHIFT) & flag);
}

/*
 * qede_check_tunn_csum_l4:
 * Returns:
 * 1 : If L4 csum is enabled AND if the validation has failed.
 * 0 : Otherwise
 */
static inline uint8_t qede_check_tunn_csum_l4(uint16_t flag)
{
	if ((PARSING_AND_ERR_FLAGS_TUNNELL4CHKSMWASCALCULATED_MASK <<
	     PARSING_AND_ERR_FLAGS_TUNNELL4CHKSMWASCALCULATED_SHIFT) & flag)
		return !!((PARSING_AND_ERR_FLAGS_TUNNELL4CHKSMERROR_MASK <<
			   PARSING_AND_ERR_FLAGS_TUNNELL4CHKSMERROR_SHIFT) & flag);

	return 0;
}

static inline uint8_t qede_check_notunn_csum_l4(uint16_t flag)
{
	if ((PARSING_AND_ERR_FLAGS_L4CHKSMWASCALCULATED_MASK <<
	     PARSING_AND_ERR_FLAGS_L4CHKSMWASCALCULATED_SHIFT) & flag)
		return !!((PARSING_AND_ERR_FLAGS_L4CHKSMERROR_MASK <<
			   PARSING_AND_ERR_FLAGS_L4CHKSMERROR_SHIFT) & flag);

	return 0;
}

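/* The CQE does not report outer L3/L4 types for tunneled packets, so parse
 * the outer headers from the mbuf data to derive the outer packet type.
 */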
static inline uint32_t qede_rx_cqe_to_pkt_type_outer(struct rte_mbuf *m)
{
	uint32_t packet_type = RTE_PTYPE_UNKNOWN;
	struct rte_ether_hdr *eth_hdr;
	struct rte_ipv4_hdr *ipv4_hdr;
	struct rte_ipv6_hdr *ipv6_hdr;
	struct rte_vlan_hdr *vlan_hdr;
	uint16_t ethertype;
	bool vlan_tagged = 0;
	uint16_t len;

	eth_hdr = rte_pktmbuf_mtod(m, struct rte_ether_hdr *);
	len = sizeof(struct rte_ether_hdr);
	ethertype = rte_cpu_to_be_16(eth_hdr->ether_type);

	/* Note: Valid only if VLAN stripping is disabled */
	if (ethertype == RTE_ETHER_TYPE_VLAN) {
		vlan_tagged = 1;
		vlan_hdr = (struct rte_vlan_hdr *)(eth_hdr + 1);
		len += sizeof(struct rte_vlan_hdr);
		ethertype = rte_cpu_to_be_16(vlan_hdr->eth_proto);
	}

	if (ethertype == RTE_ETHER_TYPE_IPV4) {
		packet_type |= RTE_PTYPE_L3_IPV4;
		ipv4_hdr = rte_pktmbuf_mtod_offset(m,
						   struct rte_ipv4_hdr *, len);
		if (ipv4_hdr->next_proto_id == IPPROTO_TCP)
			packet_type |= RTE_PTYPE_L4_TCP;
		else if (ipv4_hdr->next_proto_id == IPPROTO_UDP)
			packet_type |= RTE_PTYPE_L4_UDP;
	} else if (ethertype == RTE_ETHER_TYPE_IPV6) {
		packet_type |= RTE_PTYPE_L3_IPV6;
		ipv6_hdr = rte_pktmbuf_mtod_offset(m,
						   struct rte_ipv6_hdr *, len);
		if (ipv6_hdr->proto == IPPROTO_TCP)
			packet_type |= RTE_PTYPE_L4_TCP;
		else if (ipv6_hdr->proto == IPPROTO_UDP)
			packet_type |= RTE_PTYPE_L4_UDP;
	}

	if (vlan_tagged)
		packet_type |= RTE_PTYPE_L2_ETHER_VLAN;
	else
		packet_type |= RTE_PTYPE_L2_ETHER;

	return packet_type;
}

static inline uint32_t qede_rx_cqe_to_pkt_type_inner(uint16_t flags)
{
	uint16_t val;

	/* Lookup table */
	static const uint32_t
	ptype_lkup_tbl[QEDE_PKT_TYPE_MAX] __rte_cache_aligned = {
		[QEDE_PKT_TYPE_IPV4] = RTE_PTYPE_INNER_L3_IPV4 |
				       RTE_PTYPE_INNER_L2_ETHER,
		[QEDE_PKT_TYPE_IPV6] = RTE_PTYPE_INNER_L3_IPV6 |
				       RTE_PTYPE_INNER_L2_ETHER,
		[QEDE_PKT_TYPE_IPV4_TCP] = RTE_PTYPE_INNER_L3_IPV4 |
					   RTE_PTYPE_INNER_L4_TCP |
					   RTE_PTYPE_INNER_L2_ETHER,
		[QEDE_PKT_TYPE_IPV6_TCP] = RTE_PTYPE_INNER_L3_IPV6 |
					   RTE_PTYPE_INNER_L4_TCP |
					   RTE_PTYPE_INNER_L2_ETHER,
		[QEDE_PKT_TYPE_IPV4_UDP] = RTE_PTYPE_INNER_L3_IPV4 |
					   RTE_PTYPE_INNER_L4_UDP |
					   RTE_PTYPE_INNER_L2_ETHER,
		[QEDE_PKT_TYPE_IPV6_UDP] = RTE_PTYPE_INNER_L3_IPV6 |
					   RTE_PTYPE_INNER_L4_UDP |
					   RTE_PTYPE_INNER_L2_ETHER,
		/* Frags with no VLAN */
		[QEDE_PKT_TYPE_IPV4_FRAG] = RTE_PTYPE_INNER_L3_IPV4 |
					    RTE_PTYPE_INNER_L4_FRAG |
					    RTE_PTYPE_INNER_L2_ETHER,
		[QEDE_PKT_TYPE_IPV6_FRAG] = RTE_PTYPE_INNER_L3_IPV6 |
					    RTE_PTYPE_INNER_L4_FRAG |
					    RTE_PTYPE_INNER_L2_ETHER,
		/* VLANs */
		[QEDE_PKT_TYPE_IPV4_VLAN] = RTE_PTYPE_INNER_L3_IPV4 |
					    RTE_PTYPE_INNER_L2_ETHER_VLAN,
		[QEDE_PKT_TYPE_IPV6_VLAN] = RTE_PTYPE_INNER_L3_IPV6 |
					    RTE_PTYPE_INNER_L2_ETHER_VLAN,
		[QEDE_PKT_TYPE_IPV4_TCP_VLAN] = RTE_PTYPE_INNER_L3_IPV4 |
						RTE_PTYPE_INNER_L4_TCP |
						RTE_PTYPE_INNER_L2_ETHER_VLAN,
		[QEDE_PKT_TYPE_IPV6_TCP_VLAN] = RTE_PTYPE_INNER_L3_IPV6 |
						RTE_PTYPE_INNER_L4_TCP |
						RTE_PTYPE_INNER_L2_ETHER_VLAN,
		[QEDE_PKT_TYPE_IPV4_UDP_VLAN] = RTE_PTYPE_INNER_L3_IPV4 |
						RTE_PTYPE_INNER_L4_UDP |
						RTE_PTYPE_INNER_L2_ETHER_VLAN,
		[QEDE_PKT_TYPE_IPV6_UDP_VLAN] = RTE_PTYPE_INNER_L3_IPV6 |
						RTE_PTYPE_INNER_L4_UDP |
						RTE_PTYPE_INNER_L2_ETHER_VLAN,
		/* Frags with VLAN */
		[QEDE_PKT_TYPE_IPV4_VLAN_FRAG] = RTE_PTYPE_INNER_L3_IPV4 |
						 RTE_PTYPE_INNER_L4_FRAG |
						 RTE_PTYPE_INNER_L2_ETHER_VLAN,
		[QEDE_PKT_TYPE_IPV6_VLAN_FRAG] = RTE_PTYPE_INNER_L3_IPV6 |
						 RTE_PTYPE_INNER_L4_FRAG |
						 RTE_PTYPE_INNER_L2_ETHER_VLAN,
	};

	/* Bits (0..3) provides L3/L4 protocol type */
	/* Bits (4,5) provides frag and VLAN info */
	val = ((PARSING_AND_ERR_FLAGS_L3TYPE_MASK <<
		PARSING_AND_ERR_FLAGS_L3TYPE_SHIFT) |
	       (PARSING_AND_ERR_FLAGS_L4PROTOCOL_MASK <<
		PARSING_AND_ERR_FLAGS_L4PROTOCOL_SHIFT) |
	       (PARSING_AND_ERR_FLAGS_IPV4FRAG_MASK <<
		PARSING_AND_ERR_FLAGS_IPV4FRAG_SHIFT) |
	       (PARSING_AND_ERR_FLAGS_TAG8021QEXIST_MASK <<
		PARSING_AND_ERR_FLAGS_TAG8021QEXIST_SHIFT)) & flags;

	if (val < QEDE_PKT_TYPE_MAX)
		return ptype_lkup_tbl[val];

	return RTE_PTYPE_UNKNOWN;
}

static inline uint32_t qede_rx_cqe_to_pkt_type(uint16_t flags)
{
	uint16_t val;

	/* Lookup table */
	static const uint32_t
	ptype_lkup_tbl[QEDE_PKT_TYPE_MAX] __rte_cache_aligned = {
		[QEDE_PKT_TYPE_IPV4] = RTE_PTYPE_L3_IPV4 | RTE_PTYPE_L2_ETHER,
		[QEDE_PKT_TYPE_IPV6] = RTE_PTYPE_L3_IPV6 | RTE_PTYPE_L2_ETHER,
		[QEDE_PKT_TYPE_IPV4_TCP] = RTE_PTYPE_L3_IPV4 |
					   RTE_PTYPE_L4_TCP |
					   RTE_PTYPE_L2_ETHER,
		[QEDE_PKT_TYPE_IPV6_TCP] = RTE_PTYPE_L3_IPV6 |
					   RTE_PTYPE_L4_TCP |
					   RTE_PTYPE_L2_ETHER,
		[QEDE_PKT_TYPE_IPV4_UDP] = RTE_PTYPE_L3_IPV4 |
					   RTE_PTYPE_L4_UDP |
					   RTE_PTYPE_L2_ETHER,
		[QEDE_PKT_TYPE_IPV6_UDP] = RTE_PTYPE_L3_IPV6 |
					   RTE_PTYPE_L4_UDP |
					   RTE_PTYPE_L2_ETHER,
		/* Frags with no VLAN */
		[QEDE_PKT_TYPE_IPV4_FRAG] = RTE_PTYPE_L3_IPV4 |
					    RTE_PTYPE_L4_FRAG |
					    RTE_PTYPE_L2_ETHER,
		[QEDE_PKT_TYPE_IPV6_FRAG] = RTE_PTYPE_L3_IPV6 |
					    RTE_PTYPE_L4_FRAG |
					    RTE_PTYPE_L2_ETHER,
		/* VLANs */
		[QEDE_PKT_TYPE_IPV4_VLAN] = RTE_PTYPE_L3_IPV4 |
					    RTE_PTYPE_L2_ETHER_VLAN,
		[QEDE_PKT_TYPE_IPV6_VLAN] = RTE_PTYPE_L3_IPV6 |
					    RTE_PTYPE_L2_ETHER_VLAN,
		[QEDE_PKT_TYPE_IPV4_TCP_VLAN] = RTE_PTYPE_L3_IPV4 |
						RTE_PTYPE_L4_TCP |
						RTE_PTYPE_L2_ETHER_VLAN,
		[QEDE_PKT_TYPE_IPV6_TCP_VLAN] = RTE_PTYPE_L3_IPV6 |
						RTE_PTYPE_L4_TCP |
						RTE_PTYPE_L2_ETHER_VLAN,
		[QEDE_PKT_TYPE_IPV4_UDP_VLAN] = RTE_PTYPE_L3_IPV4 |
						RTE_PTYPE_L4_UDP |
						RTE_PTYPE_L2_ETHER_VLAN,
		[QEDE_PKT_TYPE_IPV6_UDP_VLAN] = RTE_PTYPE_L3_IPV6 |
						RTE_PTYPE_L4_UDP |
						RTE_PTYPE_L2_ETHER_VLAN,
		/* Frags with VLAN */
		[QEDE_PKT_TYPE_IPV4_VLAN_FRAG] = RTE_PTYPE_L3_IPV4 |
						 RTE_PTYPE_L4_FRAG |
						 RTE_PTYPE_L2_ETHER_VLAN,
		[QEDE_PKT_TYPE_IPV6_VLAN_FRAG] = RTE_PTYPE_L3_IPV6 |
						 RTE_PTYPE_L4_FRAG |
						 RTE_PTYPE_L2_ETHER_VLAN,
	};

	/* Bits (0..3) provides L3/L4 protocol type */
	/* Bits (4,5) provides frag and VLAN info */
	val = ((PARSING_AND_ERR_FLAGS_L3TYPE_MASK <<
		PARSING_AND_ERR_FLAGS_L3TYPE_SHIFT) |
	       (PARSING_AND_ERR_FLAGS_L4PROTOCOL_MASK <<
		PARSING_AND_ERR_FLAGS_L4PROTOCOL_SHIFT) |
	       (PARSING_AND_ERR_FLAGS_IPV4FRAG_MASK <<
		PARSING_AND_ERR_FLAGS_IPV4FRAG_SHIFT) |
	       (PARSING_AND_ERR_FLAGS_TAG8021QEXIST_MASK <<
		PARSING_AND_ERR_FLAGS_TAG8021QEXIST_SHIFT)) & flags;

	if (val < QEDE_PKT_TYPE_MAX)
		return ptype_lkup_tbl[val];

	return RTE_PTYPE_UNKNOWN;
}

static inline uint8_t
qede_check_notunn_csum_l3(struct rte_mbuf *m, uint16_t flag)
{
	struct rte_ipv4_hdr *ip;
	uint16_t pkt_csum;
	uint16_t calc_csum;
	uint16_t val;

	val = ((PARSING_AND_ERR_FLAGS_IPHDRERROR_MASK <<
		PARSING_AND_ERR_FLAGS_IPHDRERROR_SHIFT) & flag);

	if (unlikely(val)) {
		m->packet_type = qede_rx_cqe_to_pkt_type(flag);
		if (RTE_ETH_IS_IPV4_HDR(m->packet_type)) {
			ip = rte_pktmbuf_mtod_offset(m, struct rte_ipv4_hdr *,
						     sizeof(struct rte_ether_hdr));
			pkt_csum = ip->hdr_checksum;
			ip->hdr_checksum = 0;
			calc_csum = rte_ipv4_cksum(ip);
			ip->hdr_checksum = pkt_csum;
			return (calc_csum != pkt_csum);
		} else if (RTE_ETH_IS_IPV6_HDR(m->packet_type)) {
			return 1;
		}
	}
	return 0;
}

static inline void qede_rx_bd_ring_consume(struct qede_rx_queue *rxq)
{
	ecore_chain_consume(&rxq->rx_bd_ring);
	rxq->sw_rx_cons++;
}

static inline void
qede_reuse_page(__rte_unused struct qede_dev *qdev,
		struct qede_rx_queue *rxq, struct rte_mbuf *curr_cons)
{
	struct eth_rx_bd *rx_bd_prod = ecore_chain_produce(&rxq->rx_bd_ring);
	uint16_t idx = rxq->sw_rx_prod & NUM_RX_BDS(rxq);
	dma_addr_t new_mapping;

	rxq->sw_rx_ring[idx] = curr_cons;

	new_mapping = rte_mbuf_data_iova_default(curr_cons);

	rx_bd_prod->addr.hi = rte_cpu_to_le_32(U64_HI(new_mapping));
	rx_bd_prod->addr.lo = rte_cpu_to_le_32(U64_LO(new_mapping));

	rxq->sw_rx_prod++;
}

static inline void
qede_recycle_rx_bd_ring(struct qede_rx_queue *rxq,
			struct qede_dev *qdev, uint8_t count)
{
	struct rte_mbuf *curr_cons;

	for (; count > 0; count--) {
		curr_cons = rxq->sw_rx_ring[rxq->sw_rx_cons & NUM_RX_BDS(rxq)];
		qede_reuse_page(qdev, rxq, curr_cons);
		qede_rx_bd_ring_consume(rxq);
	}
}

static inline void
qede_rx_process_tpa_cmn_cont_end_cqe(__rte_unused struct qede_dev *qdev,
				     struct qede_rx_queue *rxq,
				     uint8_t agg_index, uint16_t len)
{
	struct qede_agg_info *tpa_info;
	struct rte_mbuf *curr_frag;
	uint16_t cons_idx;

	/* Under certain conditions it is possible that FW may not consume
	 * additional or new BD. So decision to consume the BD must be made
	 * based on len_list[0].
	 */
	if (rte_le_to_cpu_16(len)) {
		tpa_info = &rxq->tpa_info[agg_index];
		cons_idx = rxq->sw_rx_cons & NUM_RX_BDS(rxq);
		curr_frag = rxq->sw_rx_ring[cons_idx];
		assert(curr_frag);
		curr_frag->nb_segs = 1;
		curr_frag->pkt_len = rte_le_to_cpu_16(len);
		curr_frag->data_len = curr_frag->pkt_len;
		tpa_info->tpa_tail->next = curr_frag;
		tpa_info->tpa_tail = curr_frag;
		qede_rx_bd_ring_consume(rxq);
		if (unlikely(qede_alloc_rx_buffer(rxq) != 0)) {
			PMD_RX_LOG(ERR, rxq, "mbuf allocation fails\n");
			rte_eth_devices[rxq->port_id].data->rx_mbuf_alloc_failed++;
			rxq->rx_alloc_errors++;
		}
	}
}

static inline void
qede_rx_process_tpa_cont_cqe(struct qede_dev *qdev,
			     struct qede_rx_queue *rxq,
			     struct eth_fast_path_rx_tpa_cont_cqe *cqe)
{
	PMD_RX_LOG(INFO, rxq, "TPA cont[%d] - len [%d]\n",
		   cqe->tpa_agg_index, rte_le_to_cpu_16(cqe->len_list[0]));
	/* only len_list[0] will have value */
	qede_rx_process_tpa_cmn_cont_end_cqe(qdev, rxq, cqe->tpa_agg_index,
					     cqe->len_list[0]);
}

static inline void
qede_rx_process_tpa_end_cqe(struct qede_dev *qdev,
			    struct qede_rx_queue *rxq,
			    struct eth_fast_path_rx_tpa_end_cqe *cqe)
{
	struct rte_mbuf *rx_mb;

	qede_rx_process_tpa_cmn_cont_end_cqe(qdev, rxq, cqe->tpa_agg_index,
					     cqe->len_list[0]);
	/* Update total length and frags based on end TPA */
	rx_mb = rxq->tpa_info[cqe->tpa_agg_index].tpa_head;

	rx_mb->nb_segs = cqe->num_of_bds;
	rx_mb->pkt_len = cqe->total_packet_len;

	PMD_RX_LOG(INFO, rxq, "TPA End[%d] reason %d cqe_len %d nb_segs %d"
		   " pkt_len %d\n", cqe->tpa_agg_index, cqe->end_reason,
		   rte_le_to_cpu_16(cqe->len_list[0]), rx_mb->nb_segs,
		   rx_mb->pkt_len);
}

static inline uint32_t qede_rx_cqe_to_tunn_pkt_type(uint16_t flags)
{
	uint32_t val;

	/* Lookup table */
	static const uint32_t
	ptype_tunn_lkup_tbl[QEDE_PKT_TYPE_TUNN_MAX_TYPE] __rte_cache_aligned = {
		[QEDE_PKT_TYPE_UNKNOWN] = RTE_PTYPE_UNKNOWN,
		[QEDE_PKT_TYPE_TUNN_GENEVE] = RTE_PTYPE_TUNNEL_GENEVE,
		[QEDE_PKT_TYPE_TUNN_GRE] = RTE_PTYPE_TUNNEL_GRE,
		[QEDE_PKT_TYPE_TUNN_VXLAN] = RTE_PTYPE_TUNNEL_VXLAN,
		[QEDE_PKT_TYPE_TUNN_L2_TENID_NOEXIST_GENEVE] =
			RTE_PTYPE_TUNNEL_GENEVE,
		[QEDE_PKT_TYPE_TUNN_L2_TENID_NOEXIST_GRE] =
			RTE_PTYPE_TUNNEL_GRE,
		[QEDE_PKT_TYPE_TUNN_L2_TENID_NOEXIST_VXLAN] =
			RTE_PTYPE_TUNNEL_VXLAN,
		[QEDE_PKT_TYPE_TUNN_L2_TENID_EXIST_GENEVE] =
			RTE_PTYPE_TUNNEL_GENEVE,
		[QEDE_PKT_TYPE_TUNN_L2_TENID_EXIST_GRE] =
			RTE_PTYPE_TUNNEL_GRE,
		[QEDE_PKT_TYPE_TUNN_L2_TENID_EXIST_VXLAN] =
			RTE_PTYPE_TUNNEL_VXLAN,
		[QEDE_PKT_TYPE_TUNN_IPV4_TENID_NOEXIST_GENEVE] =
			RTE_PTYPE_TUNNEL_GENEVE | RTE_PTYPE_L3_IPV4,
		[QEDE_PKT_TYPE_TUNN_IPV4_TENID_NOEXIST_GRE] =
			RTE_PTYPE_TUNNEL_GRE | RTE_PTYPE_L3_IPV4,
		[QEDE_PKT_TYPE_TUNN_IPV4_TENID_NOEXIST_VXLAN] =
			RTE_PTYPE_TUNNEL_VXLAN | RTE_PTYPE_L3_IPV4,
		[QEDE_PKT_TYPE_TUNN_IPV4_TENID_EXIST_GENEVE] =
			RTE_PTYPE_TUNNEL_GENEVE | RTE_PTYPE_L3_IPV4,
		[QEDE_PKT_TYPE_TUNN_IPV4_TENID_EXIST_GRE] =
			RTE_PTYPE_TUNNEL_GRE | RTE_PTYPE_L3_IPV4,
		[QEDE_PKT_TYPE_TUNN_IPV4_TENID_EXIST_VXLAN] =
			RTE_PTYPE_TUNNEL_VXLAN | RTE_PTYPE_L3_IPV4,
		[QEDE_PKT_TYPE_TUNN_IPV6_TENID_NOEXIST_GENEVE] =
			RTE_PTYPE_TUNNEL_GENEVE | RTE_PTYPE_L3_IPV6,
		[QEDE_PKT_TYPE_TUNN_IPV6_TENID_NOEXIST_GRE] =
			RTE_PTYPE_TUNNEL_GRE | RTE_PTYPE_L3_IPV6,
		[QEDE_PKT_TYPE_TUNN_IPV6_TENID_NOEXIST_VXLAN] =
			RTE_PTYPE_TUNNEL_VXLAN | RTE_PTYPE_L3_IPV6,
		[QEDE_PKT_TYPE_TUNN_IPV6_TENID_EXIST_GENEVE] =
			RTE_PTYPE_TUNNEL_GENEVE | RTE_PTYPE_L3_IPV6,
		[QEDE_PKT_TYPE_TUNN_IPV6_TENID_EXIST_GRE] =
			RTE_PTYPE_TUNNEL_GRE | RTE_PTYPE_L3_IPV6,
		[QEDE_PKT_TYPE_TUNN_IPV6_TENID_EXIST_VXLAN] =
			RTE_PTYPE_TUNNEL_VXLAN | RTE_PTYPE_L3_IPV6,
	};

	/* Cover bits[4-0] to include tunn_type and next protocol */
	val = ((ETH_TUNNEL_PARSING_FLAGS_TYPE_MASK <<
		ETH_TUNNEL_PARSING_FLAGS_TYPE_SHIFT) |
	       (ETH_TUNNEL_PARSING_FLAGS_NEXT_PROTOCOL_MASK <<
		ETH_TUNNEL_PARSING_FLAGS_NEXT_PROTOCOL_SHIFT)) & flags;

	if (val < QEDE_PKT_TYPE_TUNN_MAX_TYPE)
		return ptype_tunn_lkup_tbl[val];
	else
		return RTE_PTYPE_UNKNOWN;
}

static inline int
qede_process_sg_pkts(void *p_rxq, struct rte_mbuf *rx_mb,
		     uint8_t num_segs, uint16_t pkt_len)
{
	struct qede_rx_queue *rxq = p_rxq;
	struct qede_dev *qdev = rxq->qdev;
	register struct rte_mbuf *seg1 = NULL;
	register struct rte_mbuf *seg2 = NULL;
	uint16_t sw_rx_index;
	uint16_t cur_size;

	seg1 = rx_mb;
	while (num_segs) {
		cur_size = pkt_len > rxq->rx_buf_size ? rxq->rx_buf_size :
							pkt_len;
		if (unlikely(!cur_size)) {
			PMD_RX_LOG(ERR, rxq, "Length is 0 while %u BDs"
				   " left for mapping jumbo\n", num_segs);
			qede_recycle_rx_bd_ring(rxq, qdev, num_segs);
			return -EINVAL;
		}
		sw_rx_index = rxq->sw_rx_cons & NUM_RX_BDS(rxq);
		seg2 = rxq->sw_rx_ring[sw_rx_index];
		qede_rx_bd_ring_consume(rxq);
		pkt_len -= cur_size;
		seg2->data_len = cur_size;
		seg1->next = seg2;
		seg1 = seg1->next;
		num_segs--;
		rxq->rx_segs++;
	}

	return 0;
}

#ifdef RTE_LIBRTE_QEDE_DEBUG_RX
static inline void
print_rx_bd_info(struct rte_mbuf *m, struct qede_rx_queue *rxq,
		 uint8_t bitfield)
{
	PMD_RX_LOG(INFO, rxq,
		   "len 0x%04x bf 0x%04x hash_val 0x%x"
		   " ol_flags 0x%04lx l2=%s l3=%s l4=%s tunn=%s"
		   " inner_l2=%s inner_l3=%s inner_l4=%s\n",
		   m->data_len, bitfield, m->hash.rss,
		   (unsigned long)m->ol_flags,
		   rte_get_ptype_l2_name(m->packet_type),
		   rte_get_ptype_l3_name(m->packet_type),
		   rte_get_ptype_l4_name(m->packet_type),
		   rte_get_ptype_tunnel_name(m->packet_type),
		   rte_get_ptype_inner_l2_name(m->packet_type),
		   rte_get_ptype_inner_l3_name(m->packet_type),
		   rte_get_ptype_inner_l4_name(m->packet_type));
}
#endif

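/* Receive burst for the regular (non-LRO/TPA) path: replenishes the buffers
 * consumed by the previous call, then walks the completion ring and builds
 * one mbuf per regular CQE.
 */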
uint16_t
qede_recv_pkts_regular(void *p_rxq, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
{
	struct eth_fast_path_rx_reg_cqe *fp_cqe = NULL;
	register struct rte_mbuf *rx_mb = NULL;
	struct qede_rx_queue *rxq = p_rxq;
	struct qede_dev *qdev = rxq->qdev;
	struct ecore_dev *edev = &qdev->edev;
	union eth_rx_cqe *cqe;
	uint64_t ol_flags;
	enum eth_rx_cqe_type cqe_type;
	int rss_enable = qdev->rss_enable;
	int rx_alloc_count = 0;
	uint32_t packet_type;
	uint32_t rss_hash;
	uint16_t vlan_tci, port_id;
	uint16_t hw_comp_cons, sw_comp_cons, sw_rx_index, num_rx_bds;
	uint16_t rx_pkt = 0;
	uint16_t pkt_len = 0;
	uint16_t len;
	uint16_t preload_idx;
	uint16_t parse_flag;
#ifdef RTE_LIBRTE_QEDE_DEBUG_RX
	uint8_t bitfield_val;
#endif
	uint8_t offset, flags, bd_num;

	/* Allocate buffers that we used in previous loop */
	if (rxq->rx_alloc_count) {
		if (unlikely(qede_alloc_rx_bulk_mbufs(rxq,
			     rxq->rx_alloc_count))) {
			struct rte_eth_dev *dev;

			PMD_RX_LOG(ERR, rxq,
				   "New buffer allocation failed, "
				   "dropping incoming packets\n");
			dev = &rte_eth_devices[rxq->port_id];
			dev->data->rx_mbuf_alloc_failed +=
				rxq->rx_alloc_count;
			rxq->rx_alloc_errors += rxq->rx_alloc_count;
			return 0;
		}
		qede_update_rx_prod(qdev, rxq);
		rxq->rx_alloc_count = 0;
	}

	hw_comp_cons = rte_le_to_cpu_16(*rxq->hw_cons_ptr);
	sw_comp_cons = ecore_chain_get_cons_idx(&rxq->rx_comp_ring);

	rte_rmb();

	if (hw_comp_cons == sw_comp_cons)
		return 0;

	num_rx_bds = NUM_RX_BDS(rxq);
	port_id = rxq->port_id;

	while (sw_comp_cons != hw_comp_cons) {
		ol_flags = 0;
		packet_type = RTE_PTYPE_UNKNOWN;
		vlan_tci = 0;
		rss_hash = 0;

		/* Get the CQE from the completion ring */
		cqe =
		    (union eth_rx_cqe *)ecore_chain_consume(&rxq->rx_comp_ring);
		cqe_type = cqe->fast_path_regular.type;
		PMD_RX_LOG(INFO, rxq, "Rx CQE type %d\n", cqe_type);

		if (likely(cqe_type == ETH_RX_CQE_TYPE_REGULAR)) {
			fp_cqe = &cqe->fast_path_regular;
		} else {
			if (cqe_type == ETH_RX_CQE_TYPE_SLOW_PATH) {
				PMD_RX_LOG(INFO, rxq, "Got unexpected slowpath CQE\n");
				ecore_eth_cqe_completion
					(&edev->hwfns[rxq->queue_id %
						      edev->num_hwfns],
					 (struct eth_slow_path_rx_cqe *)cqe);
			}
			goto next_cqe;
		}

		/* Get the data from the SW ring */
		sw_rx_index = rxq->sw_rx_cons & num_rx_bds;
		rx_mb = rxq->sw_rx_ring[sw_rx_index];
		assert(rx_mb != NULL);

		parse_flag = rte_le_to_cpu_16(fp_cqe->pars_flags.flags);
		offset = fp_cqe->placement_offset;
		len = rte_le_to_cpu_16(fp_cqe->len_on_first_bd);
		pkt_len = rte_le_to_cpu_16(fp_cqe->pkt_len);
		vlan_tci = rte_le_to_cpu_16(fp_cqe->vlan_tag);
		rss_hash = rte_le_to_cpu_32(fp_cqe->rss_hash);
		bd_num = fp_cqe->bd_num;
#ifdef RTE_LIBRTE_QEDE_DEBUG_RX
		bitfield_val = fp_cqe->bitfields;
#endif

		if (unlikely(qede_tunn_exist(parse_flag))) {
			PMD_RX_LOG(INFO, rxq, "Rx tunneled packet\n");
			if (unlikely(qede_check_tunn_csum_l4(parse_flag))) {
				PMD_RX_LOG(ERR, rxq,
					   "L4 csum failed, flags = 0x%x\n",
					   parse_flag);
				rxq->rx_hw_errors++;
				ol_flags |= PKT_RX_L4_CKSUM_BAD;
			} else {
				ol_flags |= PKT_RX_L4_CKSUM_GOOD;
			}

			if (unlikely(qede_check_tunn_csum_l3(parse_flag))) {
				PMD_RX_LOG(ERR, rxq,
					   "Outer L3 csum failed, flags = 0x%x\n",
					   parse_flag);
				rxq->rx_hw_errors++;
				ol_flags |= PKT_RX_OUTER_IP_CKSUM_BAD;
			} else {
				ol_flags |= PKT_RX_IP_CKSUM_GOOD;
			}

			flags = fp_cqe->tunnel_pars_flags.flags;

			/* Tunnel_type */
			packet_type =
				qede_rx_cqe_to_tunn_pkt_type(flags);

			/* Inner header */
			packet_type |=
				qede_rx_cqe_to_pkt_type_inner(parse_flag);

			/* Outer L3/L4 types is not available in CQE */
			packet_type |= qede_rx_cqe_to_pkt_type_outer(rx_mb);

			/* Outer L3/L4 types is not available in CQE.
			 * Need to add offset to mbuf for BD info
			 */
			rx_mb->data_off = offset + RTE_PKTMBUF_HEADROOM;
			packet_type |= qede_rx_cqe_to_pkt_type_outer(rx_mb);
		} else {
			packet_type |= qede_rx_cqe_to_pkt_type(parse_flag);
		}

		/* Common handling for non-tunnel packets and for inner
		 * headers in the case of tunnel.
		 */
		if (unlikely(qede_check_notunn_csum_l4(parse_flag))) {
			PMD_RX_LOG(ERR, rxq,
				   "L4 csum failed, flags = 0x%x\n",
				   parse_flag);
			rxq->rx_hw_errors++;
			ol_flags |= PKT_RX_L4_CKSUM_BAD;
		} else {
			ol_flags |= PKT_RX_L4_CKSUM_GOOD;
		}
		if (unlikely(qede_check_notunn_csum_l3(rx_mb, parse_flag))) {
			PMD_RX_LOG(ERR, rxq, "IP csum failed, flags = 0x%x\n",
				   parse_flag);
			rxq->rx_hw_errors++;
			ol_flags |= PKT_RX_IP_CKSUM_BAD;
		} else {
			ol_flags |= PKT_RX_IP_CKSUM_GOOD;
		}

		if (unlikely(CQE_HAS_VLAN(parse_flag) ||
			     CQE_HAS_OUTER_VLAN(parse_flag))) {
			/* Note: FW does not indicate Q-in-Q packet */
			ol_flags |= PKT_RX_VLAN;
			if (qdev->vlan_strip_flg) {
				ol_flags |= PKT_RX_VLAN_STRIPPED;
				rx_mb->vlan_tci = vlan_tci;
			}
		}

		if (rss_enable) {
			ol_flags |= PKT_RX_RSS_HASH;
			rx_mb->hash.rss = rss_hash;
		}

		rx_alloc_count++;
		qede_rx_bd_ring_consume(rxq);

		/* Prefetch next mbuf while processing current one. */
		preload_idx = rxq->sw_rx_cons & num_rx_bds;
		rte_prefetch0(rxq->sw_rx_ring[preload_idx]);

		/* Update rest of the MBUF fields */
		rx_mb->data_off = offset + RTE_PKTMBUF_HEADROOM;
		rx_mb->port = port_id;
		rx_mb->ol_flags = ol_flags;
		rx_mb->data_len = len;
		rx_mb->packet_type = packet_type;
#ifdef RTE_LIBRTE_QEDE_DEBUG_RX
		print_rx_bd_info(rx_mb, rxq, bitfield_val);
#endif
		rx_mb->nb_segs = bd_num;
		rx_mb->pkt_len = pkt_len;

		rx_pkts[rx_pkt] = rx_mb;
		rx_pkt++;

next_cqe:
		ecore_chain_recycle_consumed(&rxq->rx_comp_ring);
		sw_comp_cons = ecore_chain_get_cons_idx(&rxq->rx_comp_ring);
		if (rx_pkt == nb_pkts) {
			PMD_RX_LOG(DEBUG, rxq,
				   "Budget reached nb_pkts=%u received=%u",
				   rx_pkt, nb_pkts);
			break;
		}
	}

	/* Request number of buffers to be allocated in next loop */
	rxq->rx_alloc_count = rx_alloc_count;

	rxq->rcv_pkts += rx_pkt;
	rxq->rx_segs += rx_pkt;
	PMD_RX_LOG(DEBUG, rxq, "rx_pkts=%u core=%d", rx_pkt, rte_lcore_id());

	return rx_pkt;
}

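/* Receive burst for the LRO/TPA-capable path: handles regular CQEs as well as
 * TPA start/continue/end CQEs, chaining aggregated segments onto the head
 * mbuf and returning the packet only once the aggregation ends.
 */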
1757uint16_t
1758qede_recv_pkts(void *p_rxq, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
1759{
1760 struct qede_rx_queue *rxq = p_rxq;
1761 struct qede_dev *qdev = rxq->qdev;
1762 struct ecore_dev *edev = &qdev->edev;
1763 uint16_t hw_comp_cons, sw_comp_cons, sw_rx_index;
1764 uint16_t rx_pkt = 0;
1765 union eth_rx_cqe *cqe;
1766 struct eth_fast_path_rx_reg_cqe *fp_cqe = NULL;
1767 register struct rte_mbuf *rx_mb = NULL;
1768 register struct rte_mbuf *seg1 = NULL;
1769 enum eth_rx_cqe_type cqe_type;
1770 uint16_t pkt_len = 0;
1771 uint16_t len;
1772 uint8_t num_segs = 1;
1773 uint16_t preload_idx;
1774 uint16_t parse_flag;
1775#ifdef RTE_LIBRTE_QEDE_DEBUG_RX
1776 uint8_t bitfield_val;
1777#endif
1778 uint8_t tunn_parse_flag;
1779 struct eth_fast_path_rx_tpa_start_cqe *cqe_start_tpa;
1780 uint64_t ol_flags;
1781 uint32_t packet_type;
1782 uint16_t vlan_tci;
1783 bool tpa_start_flg;
1784 uint8_t offset, tpa_agg_idx, flags;
1785 struct qede_agg_info *tpa_info = NULL;
1786 uint32_t rss_hash;
1787 int rx_alloc_count = 0;
1788
1789
1790
1791 if (rxq->rx_alloc_count) {
1792 if (unlikely(qede_alloc_rx_bulk_mbufs(rxq,
1793 rxq->rx_alloc_count))) {
1794 struct rte_eth_dev *dev;
1795
1796 PMD_RX_LOG(ERR, rxq,
1797 "New buffer allocation failed,"
1798 "dropping incoming packetn");
1799 dev = &rte_eth_devices[rxq->port_id];
1800 dev->data->rx_mbuf_alloc_failed +=
1801 rxq->rx_alloc_count;
1802 rxq->rx_alloc_errors += rxq->rx_alloc_count;
1803 return 0;
1804 }
1805 qede_update_rx_prod(qdev, rxq);
1806 rxq->rx_alloc_count = 0;
1807 }
1808
1809 hw_comp_cons = rte_le_to_cpu_16(*rxq->hw_cons_ptr);
1810 sw_comp_cons = ecore_chain_get_cons_idx(&rxq->rx_comp_ring);
1811
1812 rte_rmb();
1813
1814 if (hw_comp_cons == sw_comp_cons)
1815 return 0;
1816
1817 while (sw_comp_cons != hw_comp_cons) {
1818 ol_flags = 0;
1819 packet_type = RTE_PTYPE_UNKNOWN;
1820 vlan_tci = 0;
1821 tpa_start_flg = false;
1822 rss_hash = 0;
1823
1824
1825 cqe =
1826 (union eth_rx_cqe *)ecore_chain_consume(&rxq->rx_comp_ring);
1827 cqe_type = cqe->fast_path_regular.type;
1828 PMD_RX_LOG(INFO, rxq, "Rx CQE type %d\n", cqe_type);
1829
1830 switch (cqe_type) {
1831 case ETH_RX_CQE_TYPE_REGULAR:
1832 fp_cqe = &cqe->fast_path_regular;
1833 break;
1834 case ETH_RX_CQE_TYPE_TPA_START:
1835 cqe_start_tpa = &cqe->fast_path_tpa_start;
1836 tpa_info = &rxq->tpa_info[cqe_start_tpa->tpa_agg_index];
1837 tpa_start_flg = true;
1838
1839 ol_flags |= PKT_RX_LRO;
1840
1841
1842
1843
1844 PMD_RX_LOG(INFO, rxq,
1845 "TPA start[%d] - len_on_first_bd %d header %d"
1846 " [bd_list[0] %d], [seg_len %d]\n",
1847 cqe_start_tpa->tpa_agg_index,
1848 rte_le_to_cpu_16(cqe_start_tpa->len_on_first_bd),
1849 cqe_start_tpa->header_len,
1850 rte_le_to_cpu_16(cqe_start_tpa->bw_ext_bd_len_list[0]),
1851 rte_le_to_cpu_16(cqe_start_tpa->seg_len));
1852
1853 break;
1854 case ETH_RX_CQE_TYPE_TPA_CONT:
1855 qede_rx_process_tpa_cont_cqe(qdev, rxq,
1856 &cqe->fast_path_tpa_cont);
1857 goto next_cqe;
1858 case ETH_RX_CQE_TYPE_TPA_END:
1859 qede_rx_process_tpa_end_cqe(qdev, rxq,
1860 &cqe->fast_path_tpa_end);
1861 tpa_agg_idx = cqe->fast_path_tpa_end.tpa_agg_index;
1862 tpa_info = &rxq->tpa_info[tpa_agg_idx];
1863 rx_mb = rxq->tpa_info[tpa_agg_idx].tpa_head;
1864 goto tpa_end;
1865 case ETH_RX_CQE_TYPE_SLOW_PATH:
1866 PMD_RX_LOG(INFO, rxq, "Got unexpected slowpath CQE\n");
1867 ecore_eth_cqe_completion(
1868 &edev->hwfns[rxq->queue_id % edev->num_hwfns],
1869 (struct eth_slow_path_rx_cqe *)cqe);
1870
1871 default:
1872 goto next_cqe;
1873 }
1874
1875
1876 sw_rx_index = rxq->sw_rx_cons & NUM_RX_BDS(rxq);
1877 rx_mb = rxq->sw_rx_ring[sw_rx_index];
1878 assert(rx_mb != NULL);
1879
1880
1881 if (!tpa_start_flg) {
1882 parse_flag = rte_le_to_cpu_16(fp_cqe->pars_flags.flags);
1883 offset = fp_cqe->placement_offset;
1884 len = rte_le_to_cpu_16(fp_cqe->len_on_first_bd);
1885 pkt_len = rte_le_to_cpu_16(fp_cqe->pkt_len);
1886 vlan_tci = rte_le_to_cpu_16(fp_cqe->vlan_tag);
1887 rss_hash = rte_le_to_cpu_32(fp_cqe->rss_hash);
1888#ifdef RTE_LIBRTE_QEDE_DEBUG_RX
1889 bitfield_val = fp_cqe->bitfields;
1890#endif
1891 } else {
1892 parse_flag =
1893 rte_le_to_cpu_16(cqe_start_tpa->pars_flags.flags);
1894 offset = cqe_start_tpa->placement_offset;
1895
1896 len = rte_le_to_cpu_16(cqe_start_tpa->len_on_first_bd);
1897 vlan_tci = rte_le_to_cpu_16(cqe_start_tpa->vlan_tag);
1898#ifdef RTE_LIBRTE_QEDE_DEBUG_RX
1899 bitfield_val = cqe_start_tpa->bitfields;
1900#endif
1901 rss_hash = rte_le_to_cpu_32(cqe_start_tpa->rss_hash);
1902 }
1903 if (qede_tunn_exist(parse_flag)) {
1904 PMD_RX_LOG(INFO, rxq, "Rx tunneled packet\n");
1905 if (unlikely(qede_check_tunn_csum_l4(parse_flag))) {
1906 PMD_RX_LOG(ERR, rxq,
1907 "L4 csum failed, flags = 0x%x\n",
1908 parse_flag);
1909 rxq->rx_hw_errors++;
1910 ol_flags |= PKT_RX_L4_CKSUM_BAD;
1911 } else {
1912 ol_flags |= PKT_RX_L4_CKSUM_GOOD;
1913 }
1914
1915 if (unlikely(qede_check_tunn_csum_l3(parse_flag))) {
1916 PMD_RX_LOG(ERR, rxq,
1917 "Outer L3 csum failed, flags = 0x%x\n",
1918 parse_flag);
1919 rxq->rx_hw_errors++;
1920 ol_flags |= PKT_RX_OUTER_IP_CKSUM_BAD;
1921 } else {
1922 ol_flags |= PKT_RX_IP_CKSUM_GOOD;
1923 }
1924
1925 if (tpa_start_flg)
1926 flags = cqe_start_tpa->tunnel_pars_flags.flags;
1927 else
1928 flags = fp_cqe->tunnel_pars_flags.flags;
1929 tunn_parse_flag = flags;
1930
1931
1932 packet_type =
1933 qede_rx_cqe_to_tunn_pkt_type(tunn_parse_flag);
1934
1935
1936 packet_type |=
1937 qede_rx_cqe_to_pkt_type_inner(parse_flag);
1938
1939
1940 packet_type |= qede_rx_cqe_to_pkt_type_outer(rx_mb);
1941
1942
1943
1944
1945 rx_mb->data_off = offset + RTE_PKTMBUF_HEADROOM;
1946 packet_type |= qede_rx_cqe_to_pkt_type_outer(rx_mb);
1947 } else {
1948 packet_type |= qede_rx_cqe_to_pkt_type(parse_flag);
1949 }
1950
1951
1952
1953
1954 if (unlikely(qede_check_notunn_csum_l4(parse_flag))) {
1955 PMD_RX_LOG(ERR, rxq,
1956 "L4 csum failed, flags = 0x%x\n",
1957 parse_flag);
1958 rxq->rx_hw_errors++;
1959 ol_flags |= PKT_RX_L4_CKSUM_BAD;
1960 } else {
1961 ol_flags |= PKT_RX_L4_CKSUM_GOOD;
1962 }
1963 if (unlikely(qede_check_notunn_csum_l3(rx_mb, parse_flag))) {
1964 PMD_RX_LOG(ERR, rxq, "IP csum failed, flags = 0x%x\n",
1965 parse_flag);
1966 rxq->rx_hw_errors++;
1967 ol_flags |= PKT_RX_IP_CKSUM_BAD;
1968 } else {
1969 ol_flags |= PKT_RX_IP_CKSUM_GOOD;
1970 }
1971
1972 if (CQE_HAS_VLAN(parse_flag) ||
1973 CQE_HAS_OUTER_VLAN(parse_flag)) {
1974
1975 ol_flags |= PKT_RX_VLAN;
1976 if (qdev->vlan_strip_flg) {
1977 ol_flags |= PKT_RX_VLAN_STRIPPED;
1978 rx_mb->vlan_tci = vlan_tci;
1979 }
1980 }
1981
1982
1983 if (qdev->rss_enable) {
1984 ol_flags |= PKT_RX_RSS_HASH;
1985 rx_mb->hash.rss = rss_hash;
1986 }

		/* Request one more buffer to be allocated in the next
		 * replenish cycle for each BD consumed here.
		 */
		rx_alloc_count++;
		qede_rx_bd_ring_consume(rxq);

		/* Collect the remaining BDs of a scattered packet */
		if (!tpa_start_flg && fp_cqe->bd_num > 1) {
			PMD_RX_LOG(DEBUG, rxq, "Jumbo-over-BD packet: %02x BDs"
				   " len on first: %04x Total Len: %04x",
				   fp_cqe->bd_num, len, pkt_len);
			num_segs = fp_cqe->bd_num - 1;
			seg1 = rx_mb;
			if (qede_process_sg_pkts(p_rxq, seg1, num_segs,
						 pkt_len - len))
				goto next_cqe;

			rx_alloc_count += num_segs;
			rxq->rx_segs += num_segs;
		}
		rxq->rx_segs++; /* for the first segment */

		/* Prefetch next mbuf while processing current one */
		preload_idx = rxq->sw_rx_cons & NUM_RX_BDS(rxq);
		rte_prefetch0(rxq->sw_rx_ring[preload_idx]);

		/* Update rest of the mbuf fields */
		rx_mb->data_off = offset + RTE_PKTMBUF_HEADROOM;
		rx_mb->port = rxq->port_id;
		rx_mb->ol_flags = ol_flags;
		rx_mb->data_len = len;
		rx_mb->packet_type = packet_type;
#ifdef RTE_LIBRTE_QEDE_DEBUG_RX
		print_rx_bd_info(rx_mb, rxq, bitfield_val);
#endif
		if (!tpa_start_flg) {
			rx_mb->nb_segs = fp_cqe->bd_num;
			rx_mb->pkt_len = pkt_len;
		} else {
			/* Store a reference to the updated mbuf */
			tpa_info->tpa_head = rx_mb;
			tpa_info->tpa_tail = tpa_info->tpa_head;
		}
		rte_prefetch1(rte_pktmbuf_mtod(rx_mb, void *));
tpa_end:
		if (!tpa_start_flg) {
			rx_pkts[rx_pkt] = rx_mb;
			rx_pkt++;
		}
next_cqe:
		ecore_chain_recycle_consumed(&rxq->rx_comp_ring);
		sw_comp_cons = ecore_chain_get_cons_idx(&rxq->rx_comp_ring);
		if (rx_pkt == nb_pkts) {
			PMD_RX_LOG(DEBUG, rxq,
				   "Budget reached nb_pkts=%u received=%u",
				   rx_pkt, nb_pkts);
			break;
		}
	}

	/* Request number of buffers to be allocated in the next loop */
	rxq->rx_alloc_count = rx_alloc_count;

	rxq->rcv_pkts += rx_pkt;

	PMD_RX_LOG(DEBUG, rxq, "rx_pkts=%u core=%d", rx_pkt, rte_lcore_id());

	return rx_pkt;
}

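/* CMT (100G) devices expose two hardware engines behind a single port.
 * The combined receive handler below splits the requested burst across
 * both engines' Rx queues and merges the results into one array.
 */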
uint16_t
qede_recv_pkts_cmt(void *p_fp_cmt, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
{
	struct qede_fastpath_cmt *fp_cmt = p_fp_cmt;
	uint16_t eng0_pkts, eng1_pkts;

	eng0_pkts = nb_pkts / 2;

	/* Mapping with engine 0 */
	eng0_pkts = qede_recv_pkts(fp_cmt->fp0->rxq, rx_pkts, eng0_pkts);

	eng1_pkts = nb_pkts - eng0_pkts;

	/* Mapping with engine 1 */
	eng1_pkts = qede_recv_pkts(fp_cmt->fp1->rxq, rx_pkts + eng0_pkts,
				   eng1_pkts);

	return eng0_pkts + eng1_pkts;
}
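/* Populate scatter gather buffer descriptor fields. The first two
 * fragments after the head land in the (possibly pre-allocated) 2nd
 * and 3rd BDs; every further fragment consumes a regular Tx BD.
 */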
static inline uint16_t
qede_encode_sg_bd(struct qede_tx_queue *p_txq, struct rte_mbuf *m_seg,
		  struct eth_tx_2nd_bd **bd2, struct eth_tx_3rd_bd **bd3,
		  uint16_t start_seg)
{
	struct qede_tx_queue *txq = p_txq;
	struct eth_tx_bd *tx_bd = NULL;
	dma_addr_t mapping;
	uint16_t nb_segs = 0;

	/* Check for scattered buffers */
	while (m_seg) {
		if (start_seg == 0) {
			if (!*bd2) {
				*bd2 = (struct eth_tx_2nd_bd *)
					ecore_chain_produce(&txq->tx_pbl);
				memset(*bd2, 0, sizeof(struct eth_tx_2nd_bd));
				nb_segs++;
			}
			mapping = rte_mbuf_data_iova(m_seg);
			QEDE_BD_SET_ADDR_LEN(*bd2, mapping, m_seg->data_len);
			PMD_TX_LOG(DEBUG, txq, "BD2 len %04x", m_seg->data_len);
		} else if (start_seg == 1) {
			if (!*bd3) {
				*bd3 = (struct eth_tx_3rd_bd *)
					ecore_chain_produce(&txq->tx_pbl);
				memset(*bd3, 0, sizeof(struct eth_tx_3rd_bd));
				nb_segs++;
			}
			mapping = rte_mbuf_data_iova(m_seg);
			QEDE_BD_SET_ADDR_LEN(*bd3, mapping, m_seg->data_len);
			PMD_TX_LOG(DEBUG, txq, "BD3 len %04x", m_seg->data_len);
		} else {
			tx_bd = (struct eth_tx_bd *)
				ecore_chain_produce(&txq->tx_pbl);
			memset(tx_bd, 0, sizeof(*tx_bd));
			nb_segs++;
			mapping = rte_mbuf_data_iova(m_seg);
			QEDE_BD_SET_ADDR_LEN(tx_bd, mapping, m_seg->data_len);
			PMD_TX_LOG(DEBUG, txq, "BD len %04x", m_seg->data_len);
		}
		start_seg++;
		m_seg = m_seg->next;
	}

	/* Return total scattered buffers */
	return nb_segs;
}

#ifdef RTE_LIBRTE_QEDE_DEBUG_TX
static inline void
print_tx_bd_info(struct qede_tx_queue *txq,
		 struct eth_tx_1st_bd *bd1,
		 struct eth_tx_2nd_bd *bd2,
		 struct eth_tx_3rd_bd *bd3,
		 uint64_t tx_ol_flags)
{
	char ol_buf[256] = { 0 }; /* for verbose prints */

	if (bd1)
		PMD_TX_LOG(INFO, txq,
			   "BD1: nbytes=0x%04x nbds=0x%04x bd_flags=0x%04x bf=0x%04x",
			   rte_cpu_to_le_16(bd1->nbytes), bd1->data.nbds,
			   bd1->data.bd_flags.bitfields,
			   rte_cpu_to_le_16(bd1->data.bitfields));
	if (bd2)
		PMD_TX_LOG(INFO, txq,
			   "BD2: nbytes=0x%04x bf1=0x%04x bf2=0x%04x tunn_ip=0x%04x\n",
			   rte_cpu_to_le_16(bd2->nbytes), bd2->data.bitfields1,
			   bd2->data.bitfields2, bd2->data.tunn_ip_size);
	if (bd3)
		PMD_TX_LOG(INFO, txq,
			   "BD3: nbytes=0x%04x bf=0x%04x MSS=0x%04x "
			   "tunn_l4_hdr_start_offset_w=0x%04x tunn_hdr_size=0x%04x\n",
			   rte_cpu_to_le_16(bd3->nbytes),
			   rte_cpu_to_le_16(bd3->data.bitfields),
			   rte_cpu_to_le_16(bd3->data.lso_mss),
			   bd3->data.tunn_l4_hdr_start_offset_w,
			   bd3->data.tunn_hdr_size_w);

	rte_get_tx_ol_flag_list(tx_ol_flags, ol_buf, sizeof(ol_buf));
	PMD_TX_LOG(INFO, txq, "TX offloads = %s\n", ol_buf);
}
#endif
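/* TX prepare: verify that packets meet the device's transmit conditions
 * (segment count, TSO segment size, supported offloads). Returns the
 * number of packets that passed; rte_errno reports the first failure.
 */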
uint16_t
#ifdef RTE_LIBRTE_QEDE_DEBUG_TX
qede_xmit_prep_pkts(void *p_txq, struct rte_mbuf **tx_pkts,
		    uint16_t nb_pkts)
{
	struct qede_tx_queue *txq = p_txq;
#else
qede_xmit_prep_pkts(__rte_unused void *p_txq, struct rte_mbuf **tx_pkts,
		    uint16_t nb_pkts)
{
#endif
	uint64_t ol_flags;
	struct rte_mbuf *m;
	uint16_t i;
#ifdef RTE_LIBRTE_ETHDEV_DEBUG
	int ret;
#endif

	for (i = 0; i < nb_pkts; i++) {
		m = tx_pkts[i];
		ol_flags = m->ol_flags;
		if (ol_flags & PKT_TX_TCP_SEG) {
			if (m->nb_segs >= ETH_TX_MAX_BDS_PER_LSO_PACKET) {
				rte_errno = EINVAL;
				break;
			}
			/* TSO segment size may not exceed the maximum
			 * non-LSO frame length supported by the device.
			 */
			if (m->tso_segsz > ETH_TX_MAX_NON_LSO_PKT_LEN) {
				rte_errno = EINVAL;
				break;
			}
		} else {
			if (m->nb_segs >= ETH_TX_MAX_BDS_PER_NON_LSO_PACKET) {
				rte_errno = EINVAL;
				break;
			}
		}
		if (ol_flags & QEDE_TX_OFFLOAD_NOTSUP_MASK) {
			/* We support only limited tunnel protocols */
			if (ol_flags & PKT_TX_TUNNEL_MASK) {
				uint64_t temp;

				temp = ol_flags & PKT_TX_TUNNEL_MASK;
				if (temp == PKT_TX_TUNNEL_VXLAN ||
				    temp == PKT_TX_TUNNEL_GENEVE ||
				    temp == PKT_TX_TUNNEL_MPLSINUDP ||
				    temp == PKT_TX_TUNNEL_GRE)
					continue;
			}

			rte_errno = ENOTSUP;
			break;
		}

#ifdef RTE_LIBRTE_ETHDEV_DEBUG
		ret = rte_validate_tx_offload(m);
		if (ret != 0) {
			rte_errno = -ret;
			break;
		}
#endif
	}

#ifdef RTE_LIBRTE_QEDE_DEBUG_TX
	if (unlikely(i != nb_pkts))
		PMD_TX_LOG(ERR, txq, "TX prepare failed for %u\n",
			   nb_pkts - i);
#endif
	return i;
}
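/* MPLS-over-UDP encapsulation adds an 8-byte UDP header plus a
 * 4-byte MPLS label stack entry, hence 12 bytes.
 */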
#define MPLSINUDP_HDR_SIZE (12)

#ifdef RTE_LIBRTE_QEDE_DEBUG_TX
static inline void
qede_mpls_tunn_tx_sanity_check(struct rte_mbuf *mbuf,
			       struct qede_tx_queue *txq)
{
	if (((mbuf->outer_l2_len + mbuf->outer_l3_len) / 2) > 0xff)
		PMD_TX_LOG(ERR, txq, "tunn_l4_hdr_start_offset overflow\n");
	if (((mbuf->outer_l2_len + mbuf->outer_l3_len +
	      MPLSINUDP_HDR_SIZE) / 2) > 0xff)
		PMD_TX_LOG(ERR, txq, "tunn_hdr_size overflow\n");
	if (((mbuf->l2_len - MPLSINUDP_HDR_SIZE) / 2) >
	    ETH_TX_DATA_2ND_BD_TUNN_INNER_L2_HDR_SIZE_W_MASK)
		PMD_TX_LOG(ERR, txq, "inner_l2_hdr_size overflow\n");
	if (((mbuf->l2_len - MPLSINUDP_HDR_SIZE + mbuf->l3_len) / 2) >
	    ETH_TX_DATA_2ND_BD_L4_HDR_START_OFFSET_W_MASK)
		PMD_TX_LOG(ERR, txq, "inner_l4_hdr_offset overflow\n");
}
#endif

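/* Simplified TX handler for the common case: no LSO and no tunnel
 * offloads. Each packet consumes one first BD plus one BD per extra
 * mbuf segment.
 */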
uint16_t
qede_xmit_pkts_regular(void *p_txq, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
{
	struct qede_tx_queue *txq = p_txq;
	struct qede_dev *qdev = txq->qdev;
	struct ecore_dev *edev = &qdev->edev;
	struct eth_tx_1st_bd *bd1;
	struct eth_tx_2nd_bd *bd2;
	struct eth_tx_3rd_bd *bd3;
	struct rte_mbuf *m_seg = NULL;
	struct rte_mbuf *mbuf;
	struct rte_mbuf **sw_tx_ring;
	uint16_t nb_tx_pkts;
	uint16_t bd_prod;
	uint16_t idx;
	uint16_t nb_frags = 0;
	uint16_t nb_pkt_sent = 0;
	uint8_t nbds;
	uint64_t tx_ol_flags;
	/* BD1 */
	uint16_t bd1_bf;
	uint8_t bd1_bd_flags_bf;

	if (unlikely(txq->nb_tx_avail < txq->tx_free_thresh)) {
		PMD_TX_LOG(DEBUG, txq, "send=%u avail=%u free_thresh=%u",
			   nb_pkts, txq->nb_tx_avail, txq->tx_free_thresh);
		qede_process_tx_compl(edev, txq);
	}

	nb_tx_pkts = nb_pkts;
	bd_prod = rte_cpu_to_le_16(ecore_chain_get_prod_idx(&txq->tx_pbl));
	sw_tx_ring = txq->sw_tx_ring;

	while (nb_tx_pkts--) {
		/* Init flags/values */
		nbds = 0;
		bd1 = NULL;
		bd2 = NULL;
		bd3 = NULL;
		bd1_bf = 0;
		bd1_bd_flags_bf = 0;
		nb_frags = 0;

		mbuf = *tx_pkts++;
		assert(mbuf);

		/* Check minimum TX BDS availability against available BDs */
		if (unlikely(txq->nb_tx_avail < mbuf->nb_segs))
			break;

		tx_ol_flags = mbuf->ol_flags;
		bd1_bd_flags_bf |= 1 << ETH_TX_1ST_BD_FLAGS_START_BD_SHIFT;

		if (unlikely(txq->nb_tx_avail <
			     ETH_TX_MIN_BDS_PER_NON_LSO_PKT))
			break;
		bd1_bf |=
		       (mbuf->pkt_len & ETH_TX_DATA_1ST_BD_PKT_LEN_MASK)
			<< ETH_TX_DATA_1ST_BD_PKT_LEN_SHIFT;

		/* Offload the IP checksum in the hardware */
		if (tx_ol_flags & PKT_TX_IP_CKSUM)
			bd1_bd_flags_bf |=
				1 << ETH_TX_1ST_BD_FLAGS_IP_CSUM_SHIFT;

		/* L4 checksum offload (tcp or udp) */
		if ((tx_ol_flags & (PKT_TX_IPV4 | PKT_TX_IPV6)) &&
		    (tx_ol_flags & (PKT_TX_UDP_CKSUM | PKT_TX_TCP_CKSUM)))
			bd1_bd_flags_bf |=
				1 << ETH_TX_1ST_BD_FLAGS_L4_CSUM_SHIFT;

		/* Fill the entry in the SW ring and the BDs in the FW ring */
		idx = TX_PROD(txq);
		sw_tx_ring[idx] = mbuf;

		/* BD1 */
		bd1 = (struct eth_tx_1st_bd *)ecore_chain_produce(&txq->tx_pbl);
		memset(bd1, 0, sizeof(struct eth_tx_1st_bd));
		nbds++;

		/* Map MBUF linear data for DMA and set in the BD1 */
		QEDE_BD_SET_ADDR_LEN(bd1, rte_mbuf_data_iova(mbuf),
				     mbuf->data_len);
		bd1->data.bitfields = rte_cpu_to_le_16(bd1_bf);
		bd1->data.bd_flags.bitfields = bd1_bd_flags_bf;

		/* Handle fragmented MBUF */
		if (unlikely(mbuf->nb_segs > 1)) {
			m_seg = mbuf->next;

			/* Encode scatter gather buffer descriptors */
			nb_frags = qede_encode_sg_bd(txq, m_seg, &bd2, &bd3,
						     nbds - 1);
		}

		bd1->data.nbds = nbds + nb_frags;

		txq->nb_tx_avail -= bd1->data.nbds;
		txq->sw_tx_prod++;
		bd_prod =
		    rte_cpu_to_le_16(ecore_chain_get_prod_idx(&txq->tx_pbl));
#ifdef RTE_LIBRTE_QEDE_DEBUG_TX
		print_tx_bd_info(txq, bd1, bd2, bd3, tx_ol_flags);
#endif
		nb_pkt_sent++;
		txq->xmit_pkts++;
	}

	/* Write value of prod idx into bd_prod and ring the doorbell */
	txq->tx_db.data.bd_prod = bd_prod;
	rte_wmb();
	rte_compiler_barrier();
	DIRECT_REG_WR_RELAXED(edev, txq->doorbell_addr, txq->tx_db.raw);
	rte_wmb();

	/* Check again for Tx completions */
	qede_process_tx_compl(edev, txq);

	PMD_TX_LOG(DEBUG, txq, "to_send=%u sent=%u bd_prod=%u core=%d",
		   nb_pkts, nb_pkt_sent, TX_PROD(txq), rte_lcore_id());

	return nb_pkt_sent;
}

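/* Full-featured TX handler: in addition to the regular path it encodes
 * LSO and tunnel (VXLAN/GENEVE/GRE/MPLSoUDP) offloads, which may require
 * the 2nd and 3rd BDs to carry header sizes and offsets.
 */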
uint16_t
qede_xmit_pkts(void *p_txq, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
{
	struct qede_tx_queue *txq = p_txq;
	struct qede_dev *qdev = txq->qdev;
	struct ecore_dev *edev = &qdev->edev;
	struct rte_mbuf *mbuf;
	struct rte_mbuf *m_seg = NULL;
	uint16_t nb_tx_pkts;
	uint16_t bd_prod;
	uint16_t idx;
	uint16_t nb_frags;
	uint16_t nb_pkt_sent = 0;
	uint8_t nbds;
	bool lso_flg;
	bool mplsoudp_flg;
	__rte_unused bool tunn_flg;
	bool tunn_ipv6_ext_flg;
	struct eth_tx_1st_bd *bd1;
	struct eth_tx_2nd_bd *bd2;
	struct eth_tx_3rd_bd *bd3;
	uint64_t tx_ol_flags;
	uint16_t hdr_size;
	/* BD1 */
	uint16_t bd1_bf;
	uint8_t bd1_bd_flags_bf;
	uint16_t vlan;
	/* BD2 */
	uint16_t bd2_bf1;
	uint16_t bd2_bf2;
	/* BD3 */
	uint16_t mss;
	uint16_t bd3_bf;

	uint8_t tunn_l4_hdr_start_offset;
	uint8_t tunn_hdr_size;
	uint8_t inner_l2_hdr_size;
	uint16_t inner_l4_hdr_offset;

	if (unlikely(txq->nb_tx_avail < txq->tx_free_thresh)) {
		PMD_TX_LOG(DEBUG, txq, "send=%u avail=%u free_thresh=%u",
			   nb_pkts, txq->nb_tx_avail, txq->tx_free_thresh);
		qede_process_tx_compl(edev, txq);
	}

	nb_tx_pkts = nb_pkts;
	bd_prod = rte_cpu_to_le_16(ecore_chain_get_prod_idx(&txq->tx_pbl));
	while (nb_tx_pkts--) {
		/* Init flags/values */
		tunn_flg = false;
		lso_flg = false;
		nbds = 0;
		vlan = 0;
		bd1 = NULL;
		bd2 = NULL;
		bd3 = NULL;
		hdr_size = 0;
		bd1_bf = 0;
		bd1_bd_flags_bf = 0;
		bd2_bf1 = 0;
		bd2_bf2 = 0;
		mss = 0;
		bd3_bf = 0;
		mplsoudp_flg = false;
		tunn_ipv6_ext_flg = false;
		tunn_hdr_size = 0;
		tunn_l4_hdr_start_offset = 0;

		mbuf = *tx_pkts++;
		assert(mbuf);

		/* Check minimum TX BDS availability against available BDs */
		if (unlikely(txq->nb_tx_avail < mbuf->nb_segs))
			break;

		tx_ol_flags = mbuf->ol_flags;
		bd1_bd_flags_bf |= 1 << ETH_TX_1ST_BD_FLAGS_START_BD_SHIFT;

		/* TX prepare would have already checked supported tunnel Tx
		 * offloads. Don't rely on pkt_type marked by Rx, instead use
		 * tx_ol_flags to decide.
		 */
		tunn_flg = !!(tx_ol_flags & PKT_TX_TUNNEL_MASK);

		if (tunn_flg) {
			/* Check against max which is Tunnel IPv6 + ext */
			if (unlikely(txq->nb_tx_avail <
				     ETH_TX_MIN_BDS_PER_TUNN_IPV6_WITH_EXT_PKT))
				break;

			/* First indicate it is a tunnel pkt */
			bd1_bf |= ETH_TX_DATA_1ST_BD_TUNN_FLAG_MASK <<
				  ETH_TX_DATA_1ST_BD_TUNN_FLAG_SHIFT;

			/* Legacy FW had flipped behavior in regard to this bit
			 * i.e. it needed to set to prevent FW from touching
			 * encapsulated packets when it didn't need to.
			 */
			if (unlikely(txq->is_legacy)) {
				bd1_bf ^= 1 <<
					ETH_TX_DATA_1ST_BD_TUNN_FLAG_SHIFT;
			}

			/* Outer IP checksum offload */
			if (tx_ol_flags & (PKT_TX_OUTER_IP_CKSUM |
					   PKT_TX_OUTER_IPV4)) {
				bd1_bd_flags_bf |=
					ETH_TX_1ST_BD_FLAGS_TUNN_IP_CSUM_MASK <<
					ETH_TX_1ST_BD_FLAGS_TUNN_IP_CSUM_SHIFT;
			}

			/* Currently, only inner checksum offload in MPLS-in-UDP
			 * tunnel with one MPLS label is supported. Both outer
			 * and inner layers' lengths need to be provided in
			 * the mbuf.
			 */
			if ((tx_ol_flags & PKT_TX_TUNNEL_MASK) ==
						PKT_TX_TUNNEL_MPLSINUDP) {
				mplsoudp_flg = true;
#ifdef RTE_LIBRTE_QEDE_DEBUG_TX
				qede_mpls_tunn_tx_sanity_check(mbuf, txq);
#endif
				/* Outer L4 offset in two byte words */
				tunn_l4_hdr_start_offset =
				  (mbuf->outer_l2_len + mbuf->outer_l3_len) / 2;

				/* Tunnel header size in two byte words */
				tunn_hdr_size = (mbuf->outer_l2_len +
						 mbuf->outer_l3_len +
						 MPLSINUDP_HDR_SIZE) / 2;

				/* Inner L2 header size in two byte words */
				inner_l2_hdr_size = (mbuf->l2_len -
						     MPLSINUDP_HDR_SIZE) / 2;

				/* Inner L4 header offset from the beginning
				 * of the inner packet in two byte words
				 */
				inner_l4_hdr_offset = (mbuf->l2_len -
					MPLSINUDP_HDR_SIZE + mbuf->l3_len) / 2;

				/* Inner L2 size and address type */
				bd2_bf1 |= (inner_l2_hdr_size &
					ETH_TX_DATA_2ND_BD_TUNN_INNER_L2_HDR_SIZE_W_MASK) <<
					ETH_TX_DATA_2ND_BD_TUNN_INNER_L2_HDR_SIZE_W_SHIFT;
				bd2_bf1 |= (UNICAST_ADDRESS &
					ETH_TX_DATA_2ND_BD_TUNN_INNER_ETH_TYPE_MASK) <<
					ETH_TX_DATA_2ND_BD_TUNN_INNER_ETH_TYPE_SHIFT;
				/* Treated as IPv6+Ext */
				bd2_bf1 |=
				    1 << ETH_TX_DATA_2ND_BD_TUNN_IPV6_EXT_SHIFT;

				/* Mark inner IPv6 if present */
				if (tx_ol_flags & PKT_TX_IPV6)
					bd2_bf1 |=
					    1 << ETH_TX_DATA_2ND_BD_TUNN_INNER_IPV6_SHIFT;

				/* Inner L4 offsets */
				if ((tx_ol_flags & (PKT_TX_IPV4 | PKT_TX_IPV6)) &&
				    (tx_ol_flags & (PKT_TX_UDP_CKSUM |
						    PKT_TX_TCP_CKSUM))) {
					/* Determines if BD3 is needed */
					tunn_ipv6_ext_flg = true;
					if ((tx_ol_flags & PKT_TX_L4_MASK) ==
							PKT_TX_UDP_CKSUM) {
						bd2_bf1 |=
							1 << ETH_TX_DATA_2ND_BD_L4_UDP_SHIFT;
					}

					/* TODO: other pseudo checksum modes
					 * are not supported
					 */
					bd2_bf1 |=
					ETH_L4_PSEUDO_CSUM_CORRECT_LENGTH <<
					ETH_TX_DATA_2ND_BD_L4_PSEUDO_CSUM_MODE_SHIFT;
					bd2_bf2 |= (inner_l4_hdr_offset &
						ETH_TX_DATA_2ND_BD_L4_HDR_START_OFFSET_W_MASK) <<
						ETH_TX_DATA_2ND_BD_L4_HDR_START_OFFSET_W_SHIFT;
				}
			} /* End MPLSoUDP */
		} /* End Tunnel handling */

		if (tx_ol_flags & PKT_TX_TCP_SEG) {
			lso_flg = true;
			if (unlikely(txq->nb_tx_avail <
				     ETH_TX_MIN_BDS_PER_LSO_PKT))
				break;

			/* For LSO, packet header and payload must reside on
			 * buffers pointed by different BDs. Using BD1 for HDR
			 * and BD2 onwards for data.
			 */
			hdr_size = mbuf->l2_len + mbuf->l3_len + mbuf->l4_len;
			if (tunn_flg)
				hdr_size += mbuf->outer_l2_len +
					    mbuf->outer_l3_len;

			bd1_bd_flags_bf |= 1 << ETH_TX_1ST_BD_FLAGS_LSO_SHIFT;
			bd1_bd_flags_bf |=
					1 << ETH_TX_1ST_BD_FLAGS_IP_CSUM_SHIFT;
			/* PKT_TX_TCP_SEG implies PKT_TX_TCP_CKSUM */
			bd1_bd_flags_bf |=
					1 << ETH_TX_1ST_BD_FLAGS_L4_CSUM_SHIFT;
			mss = rte_cpu_to_le_16(mbuf->tso_segsz);
			/* Using one header BD */
			bd3_bf |= rte_cpu_to_le_16(1 <<
					ETH_TX_DATA_3RD_BD_HDR_NBD_SHIFT);
		} else {
			if (unlikely(txq->nb_tx_avail <
				     ETH_TX_MIN_BDS_PER_NON_LSO_PKT))
				break;
			bd1_bf |=
			       (mbuf->pkt_len & ETH_TX_DATA_1ST_BD_PKT_LEN_MASK)
				<< ETH_TX_DATA_1ST_BD_PKT_LEN_SHIFT;
		}

		/* Descriptor based VLAN insertion */
		if (tx_ol_flags & PKT_TX_VLAN_PKT) {
			vlan = rte_cpu_to_le_16(mbuf->vlan_tci);
			bd1_bd_flags_bf |=
			    1 << ETH_TX_1ST_BD_FLAGS_VLAN_INSERTION_SHIFT;
		}

		/* Offload the IP checksum in the hardware */
		if (tx_ol_flags & PKT_TX_IP_CKSUM) {
			bd1_bd_flags_bf |=
				1 << ETH_TX_1ST_BD_FLAGS_IP_CSUM_SHIFT;
			/* There's no DPDK flag to request outer-L4 csum
			 * offload. But in the case of tunnel if inner L3 or L4
			 * csum offload is requested then we need to force
			 * recalculation of L4 tunnel header csum also.
			 */
			if (tunn_flg && ((tx_ol_flags & PKT_TX_TUNNEL_MASK) !=
							PKT_TX_TUNNEL_GRE)) {
				bd1_bd_flags_bf |=
					ETH_TX_1ST_BD_FLAGS_TUNN_L4_CSUM_MASK <<
					ETH_TX_1ST_BD_FLAGS_TUNN_L4_CSUM_SHIFT;
			}
		}

		/* L4 checksum offload (tcp or udp) */
		if ((tx_ol_flags & (PKT_TX_IPV4 | PKT_TX_IPV6)) &&
		    (tx_ol_flags & (PKT_TX_UDP_CKSUM | PKT_TX_TCP_CKSUM))) {
			bd1_bd_flags_bf |=
				1 << ETH_TX_1ST_BD_FLAGS_L4_CSUM_SHIFT;
			/* As above, force tunnel L4 csum recalculation for
			 * tunneled packets when inner L4 csum is requested.
			 */
			if (tunn_flg) {
				bd1_bd_flags_bf |=
					ETH_TX_1ST_BD_FLAGS_TUNN_L4_CSUM_MASK <<
					ETH_TX_1ST_BD_FLAGS_TUNN_L4_CSUM_SHIFT;
			}
		}

		/* Fill the entry in the SW ring and the BDs in the FW ring */
		idx = TX_PROD(txq);
		txq->sw_tx_ring[idx] = mbuf;

		/* BD1 */
		bd1 = (struct eth_tx_1st_bd *)ecore_chain_produce(&txq->tx_pbl);
		memset(bd1, 0, sizeof(struct eth_tx_1st_bd));
		nbds++;

		/* Map MBUF linear data for DMA and set in the BD1 */
		QEDE_BD_SET_ADDR_LEN(bd1, rte_mbuf_data_iova(mbuf),
				     mbuf->data_len);
		bd1->data.bitfields = rte_cpu_to_le_16(bd1_bf);
		bd1->data.bd_flags.bitfields = bd1_bd_flags_bf;
		bd1->data.vlan = vlan;

		if (lso_flg || mplsoudp_flg) {
			bd2 = (struct eth_tx_2nd_bd *)ecore_chain_produce
							(&txq->tx_pbl);
			memset(bd2, 0, sizeof(struct eth_tx_2nd_bd));
			nbds++;

			/* BD1 carries the packet headers only */
			QEDE_BD_SET_ADDR_LEN(bd1, rte_mbuf_data_iova(mbuf),
					     hdr_size);
			/* BD2 points to the remaining linear data */
			QEDE_BD_SET_ADDR_LEN(bd2, (hdr_size +
					     rte_mbuf_data_iova(mbuf)),
					     mbuf->data_len - hdr_size);
			bd2->data.bitfields1 = rte_cpu_to_le_16(bd2_bf1);
			if (mplsoudp_flg) {
				bd2->data.bitfields2 =
					rte_cpu_to_le_16(bd2_bf2);
				/* Outer L3 size */
				bd2->data.tunn_ip_size =
					rte_cpu_to_le_16(mbuf->outer_l3_len);
			}
			/* BD3 */
			if (lso_flg || (mplsoudp_flg && tunn_ipv6_ext_flg)) {
				bd3 = (struct eth_tx_3rd_bd *)
					ecore_chain_produce(&txq->tx_pbl);
				memset(bd3, 0, sizeof(struct eth_tx_3rd_bd));
				nbds++;
				bd3->data.bitfields = rte_cpu_to_le_16(bd3_bf);
				if (lso_flg)
					bd3->data.lso_mss = mss;
				if (mplsoudp_flg) {
					bd3->data.tunn_l4_hdr_start_offset_w =
						tunn_l4_hdr_start_offset;
					bd3->data.tunn_hdr_size_w =
						tunn_hdr_size;
				}
			}
		}

		/* Handle fragmented MBUF */
		m_seg = mbuf->next;

		/* Encode scatter gather buffer descriptors if required */
		nb_frags = qede_encode_sg_bd(txq, m_seg, &bd2, &bd3, nbds - 1);
		bd1->data.nbds = nbds + nb_frags;

		txq->nb_tx_avail -= bd1->data.nbds;
		txq->sw_tx_prod++;
		bd_prod =
		    rte_cpu_to_le_16(ecore_chain_get_prod_idx(&txq->tx_pbl));
#ifdef RTE_LIBRTE_QEDE_DEBUG_TX
		print_tx_bd_info(txq, bd1, bd2, bd3, tx_ol_flags);
#endif
		nb_pkt_sent++;
		txq->xmit_pkts++;
	}

	/* Write value of prod idx into bd_prod and ring the doorbell */
	txq->tx_db.data.bd_prod = bd_prod;
	rte_wmb();
	rte_compiler_barrier();
	DIRECT_REG_WR_RELAXED(edev, txq->doorbell_addr, txq->tx_db.raw);
	rte_wmb();

	/* Check again for Tx completions */
	qede_process_tx_compl(edev, txq);

	PMD_TX_LOG(DEBUG, txq, "to_send=%u sent=%u bd_prod=%u core=%d",
		   nb_pkts, nb_pkt_sent, TX_PROD(txq), rte_lcore_id());

	return nb_pkt_sent;
}

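/* CMT (100G) transmit: split the burst across both engines' Tx queues */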
uint16_t
qede_xmit_pkts_cmt(void *p_fp_cmt, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
{
	struct qede_fastpath_cmt *fp_cmt = p_fp_cmt;
	uint16_t eng0_pkts, eng1_pkts;

	eng0_pkts = nb_pkts / 2;

	/* Mapping with engine 0 */
	eng0_pkts = qede_xmit_pkts(fp_cmt->fp0->txq, tx_pkts, eng0_pkts);

	eng1_pkts = nb_pkts - eng0_pkts;

	/* Mapping with engine 1 */
	eng1_pkts = qede_xmit_pkts(fp_cmt->fp1->txq, tx_pkts + eng0_pkts,
				   eng1_pkts);

	return eng0_pkts + eng1_pkts;
}

uint16_t
qede_rxtx_pkts_dummy(__rte_unused void *p_rxq,
		     __rte_unused struct rte_mbuf **pkts,
		     __rte_unused uint16_t nb_pkts)
{
	return 0;
}

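/* This function does a fake walk through the completion queue to
 * calculate the number of BDs already used by the hardware; at the end
 * it restores the completion queue to its original state.
 */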
static uint16_t
qede_parse_fp_cqe(struct qede_rx_queue *rxq)
{
	uint16_t hw_comp_cons, sw_comp_cons, bd_count = 0;
	union eth_rx_cqe *cqe, *orig_cqe = NULL;

	hw_comp_cons = rte_le_to_cpu_16(*rxq->hw_cons_ptr);
	sw_comp_cons = ecore_chain_get_cons_idx(&rxq->rx_comp_ring);

	if (hw_comp_cons == sw_comp_cons)
		return 0;

	/* Get the CQE from the completion ring */
	cqe = (union eth_rx_cqe *)ecore_chain_consume(&rxq->rx_comp_ring);
	orig_cqe = cqe;

	while (sw_comp_cons != hw_comp_cons) {
		switch (cqe->fast_path_regular.type) {
		case ETH_RX_CQE_TYPE_REGULAR:
			bd_count += cqe->fast_path_regular.bd_num;
			break;
		case ETH_RX_CQE_TYPE_TPA_END:
			bd_count += cqe->fast_path_tpa_end.num_of_bds;
			break;
		default:
			break;
		}

		cqe =
		(union eth_rx_cqe *)ecore_chain_consume(&rxq->rx_comp_ring);
		sw_comp_cons = ecore_chain_get_cons_idx(&rxq->rx_comp_ring);
	}

	/* Revert comp_ring to original state */
	ecore_chain_set_cons(&rxq->rx_comp_ring, sw_comp_cons, orig_cqe);

	return bd_count;
}

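/* Report the state of the Rx descriptor at 'offset': DONE if the HW has
 * already placed a packet there, AVAIL if the driver has a buffer posted
 * for it, UNAVAIL otherwise.
 */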
int
qede_rx_descriptor_status(void *p_rxq, uint16_t offset)
{
	uint16_t hw_bd_cons, sw_bd_cons, sw_bd_prod;
	uint16_t produced, consumed;
	struct qede_rx_queue *rxq = p_rxq;

	if (offset > rxq->nb_rx_desc)
		return -EINVAL;

	sw_bd_cons = ecore_chain_get_cons_idx(&rxq->rx_bd_ring);
	sw_bd_prod = ecore_chain_get_prod_idx(&rxq->rx_bd_ring);

	/* Find BDs used by HW from completion queue elements */
	hw_bd_cons = sw_bd_cons + qede_parse_fp_cqe(rxq);

	if (hw_bd_cons < sw_bd_cons)
		/* wraparound case */
		consumed = (0xffff - sw_bd_cons) + hw_bd_cons;
	else
		consumed = hw_bd_cons - sw_bd_cons;

	if (offset <= consumed)
		return RTE_ETH_RX_DESC_DONE;

	if (sw_bd_prod < sw_bd_cons)
		/* wraparound case */
		produced = (0xffff - sw_bd_cons) + sw_bd_prod;
	else
		produced = sw_bd_prod - sw_bd_cons;

	if (offset <= produced)
		return RTE_ETH_RX_DESC_AVAIL;

	return RTE_ETH_RX_DESC_UNAVAIL;
}