1
2
3
4
5#include <rte_bus_pci.h>
6#include <rte_common.h>
7#include <rte_cycles.h>
8#include <rte_geneve.h>
9#include <rte_vxlan.h>
10#include <ethdev_driver.h>
11#include <rte_io.h>
12#include <rte_net.h>
13#include <rte_malloc.h>
14#if defined(RTE_ARCH_ARM64)
15#include <rte_cpuflags.h>
16#include <rte_vect.h>
17#endif
18
19#include "hns3_ethdev.h"
20#include "hns3_rxtx.h"
21#include "hns3_regs.h"
22#include "hns3_logs.h"
23
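/*
 * The BD number registers take the ring size encoded as (num / 8 - 1);
 * see hns3_init_rx_queue_hw() and hns3_init_tx_queue_hw().
 */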
24#define HNS3_CFG_DESC_NUM(num) ((num) / 8 - 1)
25#define HNS3_RX_RING_PREFETCTH_MASK 3
26
27static void
28hns3_rx_queue_release_mbufs(struct hns3_rx_queue *rxq)
29{
30 uint16_t i;
31
 /* Note: fake Rx queues have no sw_ring and return immediately here. */
33 if (rxq->sw_ring == NULL)
34 return;
35
36 if (rxq->rx_rearm_nb == 0) {
37 for (i = 0; i < rxq->nb_rx_desc; i++) {
38 if (rxq->sw_ring[i].mbuf != NULL) {
39 rte_pktmbuf_free_seg(rxq->sw_ring[i].mbuf);
40 rxq->sw_ring[i].mbuf = NULL;
41 }
42 }
43 } else {
44 for (i = rxq->next_to_use;
45 i != rxq->rx_rearm_start;
46 i = (i + 1) % rxq->nb_rx_desc) {
47 if (rxq->sw_ring[i].mbuf != NULL) {
48 rte_pktmbuf_free_seg(rxq->sw_ring[i].mbuf);
49 rxq->sw_ring[i].mbuf = NULL;
50 }
51 }
52 }
53
54 for (i = 0; i < rxq->bulk_mbuf_num; i++)
55 rte_pktmbuf_free_seg(rxq->bulk_mbuf[i]);
56 rxq->bulk_mbuf_num = 0;
57
58 if (rxq->pkt_first_seg) {
59 rte_pktmbuf_free(rxq->pkt_first_seg);
60 rxq->pkt_first_seg = NULL;
61 }
62}
63
64static void
65hns3_tx_queue_release_mbufs(struct hns3_tx_queue *txq)
66{
67 uint16_t i;
68
 /* Note: fake Tx queues have no sw_ring and are skipped here. */
70 if (txq->sw_ring) {
71 for (i = 0; i < txq->nb_tx_desc; i++) {
72 if (txq->sw_ring[i].mbuf) {
73 rte_pktmbuf_free_seg(txq->sw_ring[i].mbuf);
74 txq->sw_ring[i].mbuf = NULL;
75 }
76 }
77 }
78}
79
80static void
81hns3_rx_queue_release(void *queue)
82{
83 struct hns3_rx_queue *rxq = queue;
84 if (rxq) {
85 hns3_rx_queue_release_mbufs(rxq);
86 if (rxq->mz)
87 rte_memzone_free(rxq->mz);
88 if (rxq->sw_ring)
89 rte_free(rxq->sw_ring);
90 rte_free(rxq);
91 }
92}
93
94static void
95hns3_tx_queue_release(void *queue)
96{
97 struct hns3_tx_queue *txq = queue;
98 if (txq) {
99 hns3_tx_queue_release_mbufs(txq);
100 if (txq->mz)
101 rte_memzone_free(txq->mz);
102 if (txq->sw_ring)
103 rte_free(txq->sw_ring);
104 if (txq->free)
105 rte_free(txq->free);
106 rte_free(txq);
107 }
108}
109
110void
111hns3_dev_rx_queue_release(void *queue)
112{
113 struct hns3_rx_queue *rxq = queue;
114 struct hns3_adapter *hns;
115
116 if (rxq == NULL)
117 return;
118
119 hns = rxq->hns;
120 rte_spinlock_lock(&hns->hw.lock);
121 hns3_rx_queue_release(queue);
122 rte_spinlock_unlock(&hns->hw.lock);
123}
124
125void
126hns3_dev_tx_queue_release(void *queue)
127{
128 struct hns3_tx_queue *txq = queue;
129 struct hns3_adapter *hns;
130
131 if (txq == NULL)
132 return;
133
134 hns = txq->hns;
135 rte_spinlock_lock(&hns->hw.lock);
136 hns3_tx_queue_release(queue);
137 rte_spinlock_unlock(&hns->hw.lock);
138}
139
140static void
141hns3_fake_rx_queue_release(struct hns3_rx_queue *queue)
142{
143 struct hns3_rx_queue *rxq = queue;
144 struct hns3_adapter *hns;
145 struct hns3_hw *hw;
146 uint16_t idx;
147
148 if (rxq == NULL)
149 return;
150
151 hns = rxq->hns;
152 hw = &hns->hw;
153 idx = rxq->queue_id;
154 if (hw->fkq_data.rx_queues[idx]) {
155 hns3_rx_queue_release(hw->fkq_data.rx_queues[idx]);
156 hw->fkq_data.rx_queues[idx] = NULL;
157 }
158
 /* free the fake Rx queue pointer array when the last fake queue goes away */
160 if (idx == (hw->fkq_data.nb_fake_rx_queues - 1)) {
161 hw->fkq_data.nb_fake_rx_queues = 0;
162 rte_free(hw->fkq_data.rx_queues);
163 hw->fkq_data.rx_queues = NULL;
164 }
165}
166
167static void
168hns3_fake_tx_queue_release(struct hns3_tx_queue *queue)
169{
170 struct hns3_tx_queue *txq = queue;
171 struct hns3_adapter *hns;
172 struct hns3_hw *hw;
173 uint16_t idx;
174
175 if (txq == NULL)
176 return;
177
178 hns = txq->hns;
179 hw = &hns->hw;
180 idx = txq->queue_id;
181 if (hw->fkq_data.tx_queues[idx]) {
182 hns3_tx_queue_release(hw->fkq_data.tx_queues[idx]);
183 hw->fkq_data.tx_queues[idx] = NULL;
184 }
185
 /* free the fake Tx queue pointer array when the last fake queue goes away */
187 if (idx == (hw->fkq_data.nb_fake_tx_queues - 1)) {
188 hw->fkq_data.nb_fake_tx_queues = 0;
189 rte_free(hw->fkq_data.tx_queues);
190 hw->fkq_data.tx_queues = NULL;
191 }
192}
193
194static void
195hns3_free_rx_queues(struct rte_eth_dev *dev)
196{
197 struct hns3_adapter *hns = dev->data->dev_private;
198 struct hns3_fake_queue_data *fkq_data;
199 struct hns3_hw *hw = &hns->hw;
200 uint16_t nb_rx_q;
201 uint16_t i;
202
203 nb_rx_q = hw->data->nb_rx_queues;
204 for (i = 0; i < nb_rx_q; i++) {
205 if (dev->data->rx_queues[i]) {
206 hns3_rx_queue_release(dev->data->rx_queues[i]);
207 dev->data->rx_queues[i] = NULL;
208 }
209 }
210
 /* also release all fake Rx queues */
212 fkq_data = &hw->fkq_data;
213 for (i = 0; i < fkq_data->nb_fake_rx_queues; i++) {
214 if (fkq_data->rx_queues[i])
215 hns3_fake_rx_queue_release(fkq_data->rx_queues[i]);
216 }
217}
218
219static void
220hns3_free_tx_queues(struct rte_eth_dev *dev)
221{
222 struct hns3_adapter *hns = dev->data->dev_private;
223 struct hns3_fake_queue_data *fkq_data;
224 struct hns3_hw *hw = &hns->hw;
225 uint16_t nb_tx_q;
226 uint16_t i;
227
228 nb_tx_q = hw->data->nb_tx_queues;
229 for (i = 0; i < nb_tx_q; i++) {
230 if (dev->data->tx_queues[i]) {
231 hns3_tx_queue_release(dev->data->tx_queues[i]);
232 dev->data->tx_queues[i] = NULL;
233 }
234 }
235
 /* also release all fake Tx queues */
237 fkq_data = &hw->fkq_data;
238 for (i = 0; i < fkq_data->nb_fake_tx_queues; i++) {
239 if (fkq_data->tx_queues[i])
240 hns3_fake_tx_queue_release(fkq_data->tx_queues[i]);
241 }
242}
243
244void
245hns3_free_all_queues(struct rte_eth_dev *dev)
246{
247 hns3_free_rx_queues(dev);
248 hns3_free_tx_queues(dev);
249}
250
251static int
252hns3_alloc_rx_queue_mbufs(struct hns3_hw *hw, struct hns3_rx_queue *rxq)
253{
254 struct rte_mbuf *mbuf;
255 uint64_t dma_addr;
256 uint16_t i;
257
258 for (i = 0; i < rxq->nb_rx_desc; i++) {
259 mbuf = rte_mbuf_raw_alloc(rxq->mb_pool);
260 if (unlikely(mbuf == NULL)) {
261 hns3_err(hw, "Failed to allocate RXD[%u] for rx queue!",
262 i);
263 hns3_rx_queue_release_mbufs(rxq);
264 return -ENOMEM;
265 }
266
267 rte_mbuf_refcnt_set(mbuf, 1);
268 mbuf->next = NULL;
269 mbuf->data_off = RTE_PKTMBUF_HEADROOM;
270 mbuf->nb_segs = 1;
271 mbuf->port = rxq->port_id;
272
273 rxq->sw_ring[i].mbuf = mbuf;
274 dma_addr = rte_cpu_to_le_64(rte_mbuf_data_iova_default(mbuf));
275 rxq->rx_ring[i].addr = dma_addr;
276 rxq->rx_ring[i].rx.bd_base_info = 0;
277 }
278
279 return 0;
280}
281
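/*
 * Map an Rx buffer size to the BD size type written to
 * HNS3_RING_RX_BD_LEN_REG; unsupported sizes fall back to the 2048-byte type.
 */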
282static int
283hns3_buf_size2type(uint32_t buf_size)
284{
285 int bd_size_type;
286
287 switch (buf_size) {
288 case 512:
289 bd_size_type = HNS3_BD_SIZE_512_TYPE;
290 break;
291 case 1024:
292 bd_size_type = HNS3_BD_SIZE_1024_TYPE;
293 break;
294 case 4096:
295 bd_size_type = HNS3_BD_SIZE_4096_TYPE;
296 break;
297 default:
298 bd_size_type = HNS3_BD_SIZE_2048_TYPE;
299 }
300
301 return bd_size_type;
302}
303
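/*
 * Program the Rx ring hardware registers: ring base address (low/high
 * 32 bits), BD buffer size type and BD number.
 */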
304static void
305hns3_init_rx_queue_hw(struct hns3_rx_queue *rxq)
306{
307 uint32_t rx_buf_len = rxq->rx_buf_len;
308 uint64_t dma_addr = rxq->rx_ring_phys_addr;
309
310 hns3_write_dev(rxq, HNS3_RING_RX_BASEADDR_L_REG, (uint32_t)dma_addr);
311 hns3_write_dev(rxq, HNS3_RING_RX_BASEADDR_H_REG,
312 (uint32_t)((dma_addr >> 31) >> 1));
313
314 hns3_write_dev(rxq, HNS3_RING_RX_BD_LEN_REG,
315 hns3_buf_size2type(rx_buf_len));
316 hns3_write_dev(rxq, HNS3_RING_RX_BD_NUM_REG,
317 HNS3_CFG_DESC_NUM(rxq->nb_rx_desc));
318}
319
320static void
321hns3_init_tx_queue_hw(struct hns3_tx_queue *txq)
322{
323 uint64_t dma_addr = txq->tx_ring_phys_addr;
324
325 hns3_write_dev(txq, HNS3_RING_TX_BASEADDR_L_REG, (uint32_t)dma_addr);
326 hns3_write_dev(txq, HNS3_RING_TX_BASEADDR_H_REG,
327 (uint32_t)((dma_addr >> 31) >> 1));
328
329 hns3_write_dev(txq, HNS3_RING_TX_BD_NUM_REG,
330 HNS3_CFG_DESC_NUM(txq->nb_tx_desc));
331}
332
333void
334hns3_update_all_queues_pvid_proc_en(struct hns3_hw *hw)
335{
336 uint16_t nb_rx_q = hw->data->nb_rx_queues;
337 uint16_t nb_tx_q = hw->data->nb_tx_queues;
338 struct hns3_rx_queue *rxq;
339 struct hns3_tx_queue *txq;
340 bool pvid_en;
341 int i;
342
343 pvid_en = hw->port_base_vlan_cfg.state == HNS3_PORT_BASE_VLAN_ENABLE;
344 for (i = 0; i < hw->cfg_max_queues; i++) {
345 if (i < nb_rx_q) {
346 rxq = hw->data->rx_queues[i];
347 if (rxq != NULL)
348 rxq->pvid_sw_discard_en = pvid_en;
349 }
350 if (i < nb_tx_q) {
351 txq = hw->data->tx_queues[i];
352 if (txq != NULL)
353 txq->pvid_sw_shift_en = pvid_en;
354 }
355 }
356}
357
358static void
359hns3_stop_unused_queue(void *tqp_base, enum hns3_ring_type queue_type)
360{
361 uint32_t reg_offset;
362 uint32_t reg;
363
364 reg_offset = queue_type == HNS3_RING_TYPE_TX ?
365 HNS3_RING_TX_EN_REG : HNS3_RING_RX_EN_REG;
366 reg = hns3_read_reg(tqp_base, reg_offset);
367 reg &= ~BIT(HNS3_RING_EN_B);
368 hns3_write_reg(tqp_base, reg_offset, reg);
369}
370
371void
372hns3_enable_all_queues(struct hns3_hw *hw, bool en)
373{
374 uint16_t nb_rx_q = hw->data->nb_rx_queues;
375 uint16_t nb_tx_q = hw->data->nb_tx_queues;
376 struct hns3_rx_queue *rxq;
377 struct hns3_tx_queue *txq;
378 uint32_t rcb_reg;
379 void *tqp_base;
380 int i;
381
382 for (i = 0; i < hw->cfg_max_queues; i++) {
383 if (hns3_dev_indep_txrx_supported(hw)) {
384 rxq = i < nb_rx_q ? hw->data->rx_queues[i] : NULL;
385 txq = i < nb_tx_q ? hw->data->tx_queues[i] : NULL;
386
387 tqp_base = (void *)((char *)hw->io_base +
388 hns3_get_tqp_reg_offset(i));
389
 /*
  * A queue that was never set up has no ring memory behind it.
  * Stop such unused queues here so that enabling the RCB below
  * cannot start hardware on an uninitialized ring.
  */
396 if (rxq == NULL)
397 hns3_stop_unused_queue(tqp_base,
398 HNS3_RING_TYPE_RX);
399 if (txq == NULL)
400 hns3_stop_unused_queue(tqp_base,
401 HNS3_RING_TYPE_TX);
402 } else {
403 rxq = i < nb_rx_q ? hw->data->rx_queues[i] :
404 hw->fkq_data.rx_queues[i - nb_rx_q];
405
406 tqp_base = rxq->io_base;
407 }
408
 /*
  * HNS3_RING_EN_B in HNS3_RING_EN_REG is the per-queue master
  * switch (RCB enable); set or clear it for every queue.
  */
413 rcb_reg = hns3_read_reg(tqp_base, HNS3_RING_EN_REG);
414 if (en)
415 rcb_reg |= BIT(HNS3_RING_EN_B);
416 else
417 rcb_reg &= ~BIT(HNS3_RING_EN_B);
418 hns3_write_reg(tqp_base, HNS3_RING_EN_REG, rcb_reg);
419 }
420}
421
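/*
 * Enable or disable a single Tx queue. The per-queue enable register is only
 * present when independent Tx/Rx queues are supported; the software state is
 * always recorded in txq->enabled.
 */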
422static void
423hns3_enable_txq(struct hns3_tx_queue *txq, bool en)
424{
425 struct hns3_hw *hw = &txq->hns->hw;
426 uint32_t reg;
427
428 if (hns3_dev_indep_txrx_supported(hw)) {
429 reg = hns3_read_dev(txq, HNS3_RING_TX_EN_REG);
430 if (en)
431 reg |= BIT(HNS3_RING_EN_B);
432 else
433 reg &= ~BIT(HNS3_RING_EN_B);
434 hns3_write_dev(txq, HNS3_RING_TX_EN_REG, reg);
435 }
436 txq->enabled = en;
437}
438
439static void
440hns3_enable_rxq(struct hns3_rx_queue *rxq, bool en)
441{
442 struct hns3_hw *hw = &rxq->hns->hw;
443 uint32_t reg;
444
445 if (hns3_dev_indep_txrx_supported(hw)) {
446 reg = hns3_read_dev(rxq, HNS3_RING_RX_EN_REG);
447 if (en)
448 reg |= BIT(HNS3_RING_EN_B);
449 else
450 reg &= ~BIT(HNS3_RING_EN_B);
451 hns3_write_dev(rxq, HNS3_RING_RX_EN_REG, reg);
452 }
453 rxq->enabled = en;
454}
455
456int
457hns3_start_all_txqs(struct rte_eth_dev *dev)
458{
459 struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private);
460 struct hns3_tx_queue *txq;
461 uint16_t i, j;
462
463 for (i = 0; i < dev->data->nb_tx_queues; i++) {
464 txq = hw->data->tx_queues[i];
465 if (!txq) {
466 hns3_err(hw, "Tx queue %u not available or setup.", i);
467 goto start_txqs_fail;
468 }
469
 /*
  * Queues marked for deferred start stay disabled here; they are
  * enabled later by rte_eth_dev_tx_queue_start().
  */
476 if (txq->tx_deferred_start)
477 hns3_enable_txq(txq, false);
478 else
479 hns3_enable_txq(txq, true);
480 }
481 return 0;
482
483start_txqs_fail:
484 for (j = 0; j < i; j++) {
485 txq = hw->data->tx_queues[j];
486 hns3_enable_txq(txq, false);
487 }
488 return -EINVAL;
489}
490
491int
492hns3_start_all_rxqs(struct rte_eth_dev *dev)
493{
494 struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private);
495 struct hns3_rx_queue *rxq;
496 uint16_t i, j;
497
498 for (i = 0; i < dev->data->nb_rx_queues; i++) {
499 rxq = hw->data->rx_queues[i];
500 if (!rxq) {
501 hns3_err(hw, "Rx queue %u not available or setup.", i);
502 goto start_rxqs_fail;
503 }
504
 /*
  * Queues marked for deferred start stay disabled here; they are
  * enabled later by rte_eth_dev_rx_queue_start().
  */
511 if (rxq->rx_deferred_start)
512 hns3_enable_rxq(rxq, false);
513 else
514 hns3_enable_rxq(rxq, true);
515 }
516 return 0;
517
518start_rxqs_fail:
519 for (j = 0; j < i; j++) {
520 rxq = hw->data->rx_queues[j];
521 hns3_enable_rxq(rxq, false);
522 }
523 return -EINVAL;
524}
525
526void
527hns3_restore_tqp_enable_state(struct hns3_hw *hw)
528{
529 struct hns3_rx_queue *rxq;
530 struct hns3_tx_queue *txq;
531 uint16_t i;
532
533 for (i = 0; i < hw->data->nb_rx_queues; i++) {
534 rxq = hw->data->rx_queues[i];
535 if (rxq != NULL)
536 hns3_enable_rxq(rxq, rxq->enabled);
537 }
538
539 for (i = 0; i < hw->data->nb_tx_queues; i++) {
540 txq = hw->data->tx_queues[i];
541 if (txq != NULL)
542 hns3_enable_txq(txq, txq->enabled);
543 }
544}
545
546void
547hns3_stop_all_txqs(struct rte_eth_dev *dev)
548{
549 struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private);
550 struct hns3_tx_queue *txq;
551 uint16_t i;
552
553 for (i = 0; i < dev->data->nb_tx_queues; i++) {
554 txq = hw->data->tx_queues[i];
555 if (!txq)
556 continue;
557 hns3_enable_txq(txq, false);
558 }
559}
560
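/* Enable or disable a TQP via the HNS3_OPC_CFG_COM_TQP_QUEUE firmware command. */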
561static int
562hns3_tqp_enable(struct hns3_hw *hw, uint16_t queue_id, bool enable)
563{
564 struct hns3_cfg_com_tqp_queue_cmd *req;
565 struct hns3_cmd_desc desc;
566 int ret;
567
568 req = (struct hns3_cfg_com_tqp_queue_cmd *)desc.data;
569
570 hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_CFG_COM_TQP_QUEUE, false);
571 req->tqp_id = rte_cpu_to_le_16(queue_id);
572 req->stream_id = 0;
573 hns3_set_bit(req->enable, HNS3_TQP_ENABLE_B, enable ? 1 : 0);
574
575 ret = hns3_cmd_send(hw, &desc, 1);
576 if (ret)
577 hns3_err(hw, "TQP enable fail, ret = %d", ret);
578
579 return ret;
580}
581
582static int
583hns3_send_reset_tqp_cmd(struct hns3_hw *hw, uint16_t queue_id, bool enable)
584{
585 struct hns3_reset_tqp_queue_cmd *req;
586 struct hns3_cmd_desc desc;
587 int ret;
588
589 hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_RESET_TQP_QUEUE, false);
590
591 req = (struct hns3_reset_tqp_queue_cmd *)desc.data;
592 req->tqp_id = rte_cpu_to_le_16(queue_id);
593 hns3_set_bit(req->reset_req, HNS3_TQP_RESET_B, enable ? 1 : 0);
594 ret = hns3_cmd_send(hw, &desc, 1);
595 if (ret)
596 hns3_err(hw, "send tqp reset cmd error, queue_id = %u, "
597 "ret = %d", queue_id, ret);
598
599 return ret;
600}
601
602static int
603hns3_get_tqp_reset_status(struct hns3_hw *hw, uint16_t queue_id,
604 uint8_t *reset_status)
605{
606 struct hns3_reset_tqp_queue_cmd *req;
607 struct hns3_cmd_desc desc;
608 int ret;
609
610 hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_RESET_TQP_QUEUE, true);
611
612 req = (struct hns3_reset_tqp_queue_cmd *)desc.data;
613 req->tqp_id = rte_cpu_to_le_16(queue_id);
614
615 ret = hns3_cmd_send(hw, &desc, 1);
616 if (ret) {
617 hns3_err(hw, "get tqp reset status error, queue_id = %u, "
618 "ret = %d.", queue_id, ret);
619 return ret;
620 }
621 *reset_status = hns3_get_bit(req->ready_to_reset, HNS3_TQP_RESET_B);
622 return ret;
623}
624
625static int
626hns3pf_reset_tqp(struct hns3_hw *hw, uint16_t queue_id)
627{
628#define HNS3_TQP_RESET_TRY_MS 200
629 uint16_t wait_time = 0;
630 uint8_t reset_status;
631 int ret;
632
 /*
  * Assert the TQP reset, then poll the reset status until the
  * hardware reports completion, and finally deassert it below.
  */
638 ret = hns3_send_reset_tqp_cmd(hw, queue_id, true);
639 if (ret) {
640 hns3_err(hw, "Send reset tqp cmd fail, ret = %d", ret);
641 return ret;
642 }
643
644 do {
 /* wait for hardware to finish the TQP reset */
646 rte_delay_ms(HNS3_POLL_RESPONE_MS);
647 wait_time += HNS3_POLL_RESPONE_MS;
648 ret = hns3_get_tqp_reset_status(hw, queue_id, &reset_status);
649 if (ret)
650 goto tqp_reset_fail;
651
652 if (reset_status)
653 break;
654 } while (wait_time < HNS3_TQP_RESET_TRY_MS);
655
656 if (!reset_status) {
657 ret = -ETIMEDOUT;
658 hns3_err(hw, "reset tqp timeout, queue_id = %u, ret = %d",
659 queue_id, ret);
660 goto tqp_reset_fail;
661 }
662
663 ret = hns3_send_reset_tqp_cmd(hw, queue_id, false);
664 if (ret)
665 hns3_err(hw, "Deassert the soft reset fail, ret = %d", ret);
666
667 return ret;
668
669tqp_reset_fail:
670 hns3_send_reset_tqp_cmd(hw, queue_id, false);
671 return ret;
672}
673
674static int
675hns3vf_reset_tqp(struct hns3_hw *hw, uint16_t queue_id)
676{
677 uint8_t msg_data[2];
678 int ret;
679
680 memcpy(msg_data, &queue_id, sizeof(uint16_t));
681
682 ret = hns3_send_mbx_msg(hw, HNS3_MBX_QUEUE_RESET, 0, msg_data,
683 sizeof(msg_data), true, NULL, 0);
684 if (ret)
685 hns3_err(hw, "fail to reset tqp, queue_id = %u, ret = %d.",
686 queue_id, ret);
687 return ret;
688}
689
690static int
691hns3_reset_rcb_cmd(struct hns3_hw *hw, uint8_t *reset_status)
692{
693 struct hns3_reset_cmd *req;
694 struct hns3_cmd_desc desc;
695 int ret;
696
697 hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_CFG_RST_TRIGGER, false);
698 req = (struct hns3_reset_cmd *)desc.data;
699 hns3_set_bit(req->mac_func_reset, HNS3_CFG_RESET_RCB_B, 1);
700
 /*
  * Request an RCB reset for all queues of this function, starting
  * from queue 0 and covering cfg_max_queues queues. The returned
  * status tells whether the firmware supports this batch reset.
  */
707 req->fun_reset_rcb_vqid_start = rte_cpu_to_le_16(0);
708 req->fun_reset_rcb_vqid_num = rte_cpu_to_le_16(hw->cfg_max_queues);
709
710 ret = hns3_cmd_send(hw, &desc, 1);
711 if (ret) {
712 hns3_err(hw, "fail to send rcb reset cmd, ret = %d.", ret);
713 return ret;
714 }
715
716 *reset_status = req->fun_reset_rcb_return_status;
717 return 0;
718}
719
720static int
721hns3pf_reset_all_tqps(struct hns3_hw *hw)
722{
723#define HNS3_RESET_RCB_NOT_SUPPORT 0U
724#define HNS3_RESET_ALL_TQP_SUCCESS 1U
725 uint8_t reset_status;
726 int ret;
727 int i;
728
729 ret = hns3_reset_rcb_cmd(hw, &reset_status);
730 if (ret)
731 return ret;
732
 /*
  * Firmware that does not support the batch RCB reset returns
  * HNS3_RESET_RCB_NOT_SUPPORT; fall back to resetting the TQPs
  * one by one in that case.
  */
738 if (reset_status == HNS3_RESET_RCB_NOT_SUPPORT) {
739 for (i = 0; i < hw->cfg_max_queues; i++) {
740 ret = hns3pf_reset_tqp(hw, i);
741 if (ret) {
742 hns3_err(hw,
743 "fail to reset tqp, queue_id = %d, ret = %d.",
744 i, ret);
745 return ret;
746 }
747 }
748 } else if (reset_status != HNS3_RESET_ALL_TQP_SUCCESS) {
749 hns3_err(hw, "fail to reset all tqps, reset_status = %u.",
750 reset_status);
751 return -EIO;
752 }
753
754 return 0;
755}
756
757static int
758hns3vf_reset_all_tqps(struct hns3_hw *hw)
759{
760#define HNS3VF_RESET_ALL_TQP_DONE 1U
761 uint8_t reset_status;
762 uint8_t msg_data[2];
763 int ret;
764 int i;
765
766 memset(msg_data, 0, sizeof(uint16_t));
767 ret = hns3_send_mbx_msg(hw, HNS3_MBX_QUEUE_RESET, 0, msg_data,
768 sizeof(msg_data), true, &reset_status,
769 sizeof(reset_status));
770 if (ret) {
771 hns3_err(hw, "fail to send rcb reset mbx, ret = %d.", ret);
772 return ret;
773 }
774
775 if (reset_status == HNS3VF_RESET_ALL_TQP_DONE)
776 return 0;
777
 /*
  * On old firmware or kernel PF versions the mailbox request above
  * only resets queue 0, so reset the remaining queues one by one,
  * starting from queue 1.
  */
783 for (i = 1; i < hw->cfg_max_queues; i++) {
784 ret = hns3vf_reset_tqp(hw, i);
785 if (ret)
786 return ret;
787 }
788
789 return 0;
790}
791
792int
793hns3_reset_all_tqps(struct hns3_adapter *hns)
794{
795 struct hns3_hw *hw = &hns->hw;
796 int ret, i;
797
 /* disable all queues before resetting them */
799 for (i = 0; i < hw->cfg_max_queues; i++) {
800 ret = hns3_tqp_enable(hw, i, false);
801 if (ret) {
802 hns3_err(hw,
803 "fail to disable tqps before tqps reset, ret = %d.",
804 ret);
805 return ret;
806 }
807 }
808
809 if (hns->is_vf)
810 return hns3vf_reset_all_tqps(hw);
811 else
812 return hns3pf_reset_all_tqps(hw);
813}
814
815static int
816hns3_send_reset_queue_cmd(struct hns3_hw *hw, uint16_t queue_id,
817 enum hns3_ring_type queue_type, bool enable)
818{
819 struct hns3_reset_tqp_queue_cmd *req;
820 struct hns3_cmd_desc desc;
821 int queue_direction;
822 int ret;
823
824 hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_RESET_TQP_QUEUE_INDEP, false);
825
826 req = (struct hns3_reset_tqp_queue_cmd *)desc.data;
827 req->tqp_id = rte_cpu_to_le_16(queue_id);
828 queue_direction = queue_type == HNS3_RING_TYPE_TX ? 0 : 1;
829 req->queue_direction = rte_cpu_to_le_16(queue_direction);
830 hns3_set_bit(req->reset_req, HNS3_TQP_RESET_B, enable ? 1 : 0);
831
832 ret = hns3_cmd_send(hw, &desc, 1);
833 if (ret)
834 hns3_err(hw, "send queue reset cmd error, queue_id = %u, "
835 "queue_type = %s, ret = %d.", queue_id,
836 queue_type == HNS3_RING_TYPE_TX ? "Tx" : "Rx", ret);
837 return ret;
838}
839
840static int
841hns3_get_queue_reset_status(struct hns3_hw *hw, uint16_t queue_id,
842 enum hns3_ring_type queue_type,
843 uint8_t *reset_status)
844{
845 struct hns3_reset_tqp_queue_cmd *req;
846 struct hns3_cmd_desc desc;
847 int queue_direction;
848 int ret;
849
850 hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_RESET_TQP_QUEUE_INDEP, true);
851
852 req = (struct hns3_reset_tqp_queue_cmd *)desc.data;
853 req->tqp_id = rte_cpu_to_le_16(queue_id);
854 queue_direction = queue_type == HNS3_RING_TYPE_TX ? 0 : 1;
855 req->queue_direction = rte_cpu_to_le_16(queue_direction);
856
857 ret = hns3_cmd_send(hw, &desc, 1);
858 if (ret) {
859 hns3_err(hw, "get queue reset status error, queue_id = %u "
860 "queue_type = %s, ret = %d.", queue_id,
861 queue_type == HNS3_RING_TYPE_TX ? "Tx" : "Rx", ret);
862 return ret;
863 }
864
865 *reset_status = hns3_get_bit(req->ready_to_reset, HNS3_TQP_RESET_B);
866 return ret;
867}
868
869static int
870hns3_reset_queue(struct hns3_hw *hw, uint16_t queue_id,
871 enum hns3_ring_type queue_type)
872{
873#define HNS3_QUEUE_RESET_TRY_MS 200
874 struct hns3_tx_queue *txq;
875 struct hns3_rx_queue *rxq;
876 uint32_t reset_wait_times;
877 uint32_t max_wait_times;
878 uint8_t reset_status;
879 int ret;
880
881 if (queue_type == HNS3_RING_TYPE_TX) {
882 txq = hw->data->tx_queues[queue_id];
883 hns3_enable_txq(txq, false);
884 } else {
885 rxq = hw->data->rx_queues[queue_id];
886 hns3_enable_rxq(rxq, false);
887 }
888
889 ret = hns3_send_reset_queue_cmd(hw, queue_id, queue_type, true);
890 if (ret) {
891 hns3_err(hw, "send reset queue cmd fail, ret = %d.", ret);
892 return ret;
893 }
894
895 reset_wait_times = 0;
896 max_wait_times = HNS3_QUEUE_RESET_TRY_MS / HNS3_POLL_RESPONE_MS;
897 while (reset_wait_times < max_wait_times) {
 /* wait for hardware to finish the queue reset */
899 rte_delay_ms(HNS3_POLL_RESPONE_MS);
900 ret = hns3_get_queue_reset_status(hw, queue_id,
901 queue_type, &reset_status);
902 if (ret)
903 goto queue_reset_fail;
904
905 if (reset_status)
906 break;
907 reset_wait_times++;
908 }
909
910 if (!reset_status) {
911 hns3_err(hw, "reset queue timeout, queue_id = %u, "
912 "queue_type = %s", queue_id,
913 queue_type == HNS3_RING_TYPE_TX ? "Tx" : "Rx");
914 ret = -ETIMEDOUT;
915 goto queue_reset_fail;
916 }
917
918 ret = hns3_send_reset_queue_cmd(hw, queue_id, queue_type, false);
919 if (ret)
920 hns3_err(hw, "deassert queue reset fail, ret = %d.", ret);
921
922 return ret;
923
924queue_reset_fail:
925 hns3_send_reset_queue_cmd(hw, queue_id, queue_type, false);
926 return ret;
927}
928
929uint32_t
930hns3_get_tqp_intr_reg_offset(uint16_t tqp_intr_id)
931{
932 uint32_t reg_offset;
933
 /* interrupt vectors above HNS3_MIN_EXT_TQP_INTR_ID live in an extended register region */
935 if (tqp_intr_id < HNS3_MIN_EXT_TQP_INTR_ID)
936 reg_offset = HNS3_TQP_INTR_REG_BASE +
937 tqp_intr_id * HNS3_TQP_INTR_LOW_ORDER_OFFSET;
938 else
939 reg_offset = HNS3_TQP_INTR_EXT_REG_BASE +
940 tqp_intr_id / HNS3_MIN_EXT_TQP_INTR_ID *
941 HNS3_TQP_INTR_HIGH_ORDER_OFFSET +
942 tqp_intr_id % HNS3_MIN_EXT_TQP_INTR_ID *
943 HNS3_TQP_INTR_LOW_ORDER_OFFSET;
944
945 return reg_offset;
946}
947
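/*
 * Write one of the per-vector GL (interrupt coalescing) registers for the
 * given queue, converting the microsecond value into register units.
 */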
948void
949hns3_set_queue_intr_gl(struct hns3_hw *hw, uint16_t queue_id,
950 uint8_t gl_idx, uint16_t gl_value)
951{
952 uint32_t offset[] = {HNS3_TQP_INTR_GL0_REG,
953 HNS3_TQP_INTR_GL1_REG,
954 HNS3_TQP_INTR_GL2_REG};
955 uint32_t addr, value;
956
957 if (gl_idx >= RTE_DIM(offset) || gl_value > HNS3_TQP_INTR_GL_MAX)
958 return;
959
960 addr = offset[gl_idx] + hns3_get_tqp_intr_reg_offset(queue_id);
961 if (hw->intr.gl_unit == HNS3_INTR_COALESCE_GL_UINT_1US)
962 value = gl_value | HNS3_TQP_INTR_GL_UNIT_1US;
963 else
964 value = HNS3_GL_USEC_TO_REG(gl_value);
965
966 hns3_write_dev(hw, addr, value);
967}
968
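/*
 * Write the per-vector RL (rate limiting) interrupt coalescing register;
 * a non-zero converted value also sets the RL enable bit.
 */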
969void
970hns3_set_queue_intr_rl(struct hns3_hw *hw, uint16_t queue_id, uint16_t rl_value)
971{
972 uint32_t addr, value;
973
974 if (rl_value > HNS3_TQP_INTR_RL_MAX)
975 return;
976
977 addr = HNS3_TQP_INTR_RL_REG + hns3_get_tqp_intr_reg_offset(queue_id);
978 value = HNS3_RL_USEC_TO_REG(rl_value);
979 if (value > 0)
980 value |= HNS3_TQP_INTR_RL_ENABLE_MASK;
981
982 hns3_write_dev(hw, addr, value);
983}
984
985void
986hns3_set_queue_intr_ql(struct hns3_hw *hw, uint16_t queue_id, uint16_t ql_value)
987{
988 uint32_t addr;
989
 /*
  * The QL registers only exist when the hardware reports QL support
  * (int_ql_max != 0); skip the write otherwise.
  */
995 if (hw->intr.int_ql_max == HNS3_INTR_QL_NONE)
996 return;
997
998 addr = HNS3_TQP_INTR_TX_QL_REG + hns3_get_tqp_intr_reg_offset(queue_id);
999 hns3_write_dev(hw, addr, ql_value);
1000
1001 addr = HNS3_TQP_INTR_RX_QL_REG + hns3_get_tqp_intr_reg_offset(queue_id);
1002 hns3_write_dev(hw, addr, ql_value);
1003}
1004
1005static void
1006hns3_queue_intr_enable(struct hns3_hw *hw, uint16_t queue_id, bool en)
1007{
1008 uint32_t addr, value;
1009
1010 addr = HNS3_TQP_INTR_CTRL_REG + hns3_get_tqp_intr_reg_offset(queue_id);
1011 value = en ? 1 : 0;
1012
1013 hns3_write_dev(hw, addr, value);
1014}
1015
/*
 * Enable or disable the Rx interrupts of all Rx queues. This only takes
 * effect when the device was configured with Rx interrupts
 * (intr_conf.rxq != 0).
 */
1022void
1023hns3_dev_all_rx_queue_intr_enable(struct hns3_hw *hw, bool en)
1024{
1025 struct rte_eth_dev *dev = &rte_eth_devices[hw->data->port_id];
1026 uint16_t nb_rx_q = hw->data->nb_rx_queues;
1027 int i;
1028
1029 if (dev->data->dev_conf.intr_conf.rxq == 0)
1030 return;
1031
1032 for (i = 0; i < nb_rx_q; i++)
1033 hns3_queue_intr_enable(hw, i, en);
1034}
1035
1036int
1037hns3_dev_rx_queue_intr_enable(struct rte_eth_dev *dev, uint16_t queue_id)
1038{
1039 struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
1040 struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
1041 struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1042
1043 if (dev->data->dev_conf.intr_conf.rxq == 0)
1044 return -ENOTSUP;
1045
1046 hns3_queue_intr_enable(hw, queue_id, true);
1047
1048 return rte_intr_ack(intr_handle);
1049}
1050
1051int
1052hns3_dev_rx_queue_intr_disable(struct rte_eth_dev *dev, uint16_t queue_id)
1053{
1054 struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1055
1056 if (dev->data->dev_conf.intr_conf.rxq == 0)
1057 return -ENOTSUP;
1058
1059 hns3_queue_intr_enable(hw, queue_id, false);
1060
1061 return 0;
1062}
1063
1064static int
1065hns3_init_rxq(struct hns3_adapter *hns, uint16_t idx)
1066{
1067 struct hns3_hw *hw = &hns->hw;
1068 struct hns3_rx_queue *rxq;
1069 int ret;
1070
1071 PMD_INIT_FUNC_TRACE();
1072
1073 rxq = (struct hns3_rx_queue *)hw->data->rx_queues[idx];
1074 ret = hns3_alloc_rx_queue_mbufs(hw, rxq);
1075 if (ret) {
1076 hns3_err(hw, "fail to alloc mbuf for Rx queue %u, ret = %d.",
1077 idx, ret);
1078 return ret;
1079 }
1080
1081 rxq->next_to_use = 0;
1082 rxq->rx_rearm_start = 0;
1083 rxq->rx_free_hold = 0;
1084 rxq->rx_rearm_nb = 0;
1085 rxq->pkt_first_seg = NULL;
1086 rxq->pkt_last_seg = NULL;
1087 hns3_init_rx_queue_hw(rxq);
1088 hns3_rxq_vec_setup(rxq);
1089
1090 return 0;
1091}
1092
1093static void
1094hns3_init_fake_rxq(struct hns3_adapter *hns, uint16_t idx)
1095{
1096 struct hns3_hw *hw = &hns->hw;
1097 struct hns3_rx_queue *rxq;
1098
1099 rxq = (struct hns3_rx_queue *)hw->fkq_data.rx_queues[idx];
1100 rxq->next_to_use = 0;
1101 rxq->rx_free_hold = 0;
1102 rxq->rx_rearm_start = 0;
1103 rxq->rx_rearm_nb = 0;
1104 hns3_init_rx_queue_hw(rxq);
1105}
1106
1107static void
1108hns3_init_txq(struct hns3_tx_queue *txq)
1109{
1110 struct hns3_desc *desc;
1111 int i;
1112
 /* clear the per-BD control field (including the VLD bit) of every Tx BD */
1114 desc = txq->tx_ring;
1115 for (i = 0; i < txq->nb_tx_desc; i++) {
1116 desc->tx.tp_fe_sc_vld_ra_ri = 0;
1117 desc++;
1118 }
1119
1120 txq->next_to_use = 0;
1121 txq->next_to_clean = 0;
1122 txq->tx_bd_ready = txq->nb_tx_desc - 1;
1123 hns3_init_tx_queue_hw(txq);
1124}
1125
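/* Bind each Tx queue to its TC by writing the TC index to HNS3_RING_TX_TC_REG. */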
1126static void
1127hns3_init_tx_ring_tc(struct hns3_adapter *hns)
1128{
1129 struct hns3_hw *hw = &hns->hw;
1130 struct hns3_tx_queue *txq;
1131 int i, num;
1132
1133 for (i = 0; i < HNS3_MAX_TC_NUM; i++) {
1134 struct hns3_tc_queue_info *tc_queue = &hw->tc_queue[i];
1135 int j;
1136
1137 if (!tc_queue->enable)
1138 continue;
1139
1140 for (j = 0; j < tc_queue->tqp_count; j++) {
1141 num = tc_queue->tqp_offset + j;
1142 txq = (struct hns3_tx_queue *)hw->data->tx_queues[num];
1143 if (txq == NULL)
1144 continue;
1145
1146 hns3_write_dev(txq, HNS3_RING_TX_TC_REG, tc_queue->tc);
1147 }
1148 }
1149}
1150
1151static int
1152hns3_init_rx_queues(struct hns3_adapter *hns)
1153{
1154 struct hns3_hw *hw = &hns->hw;
1155 struct hns3_rx_queue *rxq;
1156 uint16_t i, j;
1157 int ret;
1158
 /* configure RSS before initializing the Rx rings */
1160 ret = hns3_config_rss(hns);
1161 if (ret) {
1162 hns3_err(hw, "failed to configure rss, ret = %d.", ret);
1163 return ret;
1164 }
1165
1166 for (i = 0; i < hw->data->nb_rx_queues; i++) {
1167 rxq = (struct hns3_rx_queue *)hw->data->rx_queues[i];
1168 if (!rxq) {
1169 hns3_err(hw, "Rx queue %u not available or setup.", i);
1170 goto out;
1171 }
1172
1173 if (rxq->rx_deferred_start)
1174 continue;
1175
1176 ret = hns3_init_rxq(hns, i);
1177 if (ret) {
1178 hns3_err(hw, "failed to init Rx queue %u, ret = %d.", i,
1179 ret);
1180 goto out;
1181 }
1182 }
1183
1184 for (i = 0; i < hw->fkq_data.nb_fake_rx_queues; i++)
1185 hns3_init_fake_rxq(hns, i);
1186
1187 return 0;
1188
1189out:
1190 for (j = 0; j < i; j++) {
1191 rxq = (struct hns3_rx_queue *)hw->data->rx_queues[j];
1192 hns3_rx_queue_release_mbufs(rxq);
1193 }
1194
1195 return ret;
1196}
1197
1198static int
1199hns3_init_tx_queues(struct hns3_adapter *hns)
1200{
1201 struct hns3_hw *hw = &hns->hw;
1202 struct hns3_tx_queue *txq;
1203 uint16_t i;
1204
1205 for (i = 0; i < hw->data->nb_tx_queues; i++) {
1206 txq = (struct hns3_tx_queue *)hw->data->tx_queues[i];
1207 if (!txq) {
1208 hns3_err(hw, "Tx queue %u not available or setup.", i);
1209 return -EINVAL;
1210 }
1211
1212 if (txq->tx_deferred_start)
1213 continue;
1214 hns3_init_txq(txq);
1215 }
1216
1217 for (i = 0; i < hw->fkq_data.nb_fake_tx_queues; i++) {
1218 txq = (struct hns3_tx_queue *)hw->fkq_data.tx_queues[i];
1219 hns3_init_txq(txq);
1220 }
1221 hns3_init_tx_ring_tc(hns);
1222
1223 return 0;
1224}
1225
/*
 * Init and set up the Rx/Tx rings (optionally resetting all TQPs first);
 * the queues themselves are enabled separately by hns3_start_tqps().
 */
1230int
1231hns3_init_queues(struct hns3_adapter *hns, bool reset_queue)
1232{
1233 struct hns3_hw *hw = &hns->hw;
1234 int ret;
1235
1236 if (reset_queue) {
1237 ret = hns3_reset_all_tqps(hns);
1238 if (ret) {
1239 hns3_err(hw, "failed to reset all queues, ret = %d.",
1240 ret);
1241 return ret;
1242 }
1243 }
1244
1245 ret = hns3_init_rx_queues(hns);
1246 if (ret) {
1247 hns3_err(hw, "failed to init rx queues, ret = %d.", ret);
1248 return ret;
1249 }
1250
1251 ret = hns3_init_tx_queues(hns);
1252 if (ret) {
1253 hns3_dev_release_mbufs(hns);
1254 hns3_err(hw, "failed to init tx queues, ret = %d.", ret);
1255 }
1256
1257 return ret;
1258}
1259
1260void
1261hns3_start_tqps(struct hns3_hw *hw)
1262{
1263 struct hns3_tx_queue *txq;
1264 struct hns3_rx_queue *rxq;
1265 uint16_t i;
1266
1267 hns3_enable_all_queues(hw, true);
1268
1269 for (i = 0; i < hw->data->nb_tx_queues; i++) {
1270 txq = hw->data->tx_queues[i];
1271 if (txq->enabled)
1272 hw->data->tx_queue_state[i] =
1273 RTE_ETH_QUEUE_STATE_STARTED;
1274 }
1275
1276 for (i = 0; i < hw->data->nb_rx_queues; i++) {
1277 rxq = hw->data->rx_queues[i];
1278 if (rxq->enabled)
1279 hw->data->rx_queue_state[i] =
1280 RTE_ETH_QUEUE_STATE_STARTED;
1281 }
1282}
1283
1284void
1285hns3_stop_tqps(struct hns3_hw *hw)
1286{
1287 uint16_t i;
1288
1289 hns3_enable_all_queues(hw, false);
1290
1291 for (i = 0; i < hw->data->nb_tx_queues; i++)
1292 hw->data->tx_queue_state[i] = RTE_ETH_QUEUE_STATE_STOPPED;
1293
1294 for (i = 0; i < hw->data->nb_rx_queues; i++)
1295 hw->data->rx_queue_state[i] = RTE_ETH_QUEUE_STATE_STOPPED;
1296}
1297
/*
 * Iterate over all Rx queues of the port and invoke the callback on each
 * one. Iteration stops early and the callback's value is returned as soon
 * as a callback returns non-zero; otherwise 0 is returned.
 */
1313int
1314hns3_rxq_iterate(struct rte_eth_dev *dev,
1315 int (*callback)(struct hns3_rx_queue *, void *), void *arg)
1316{
1317 uint32_t i;
1318 int ret;
1319
1320 if (dev->data->rx_queues == NULL)
1321 return -EINVAL;
1322
1323 for (i = 0; i < dev->data->nb_rx_queues; i++) {
1324 ret = callback(dev->data->rx_queues[i], arg);
1325 if (ret != 0)
1326 return ret;
1327 }
1328
1329 return 0;
1330}
1331
1332static void*
1333hns3_alloc_rxq_and_dma_zone(struct rte_eth_dev *dev,
1334 struct hns3_queue_info *q_info)
1335{
1336 struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1337 const struct rte_memzone *rx_mz;
1338 struct hns3_rx_queue *rxq;
1339 unsigned int rx_desc;
1340
1341 rxq = rte_zmalloc_socket(q_info->type, sizeof(struct hns3_rx_queue),
1342 RTE_CACHE_LINE_SIZE, q_info->socket_id);
1343 if (rxq == NULL) {
1344 hns3_err(hw, "Failed to allocate memory for No.%u rx ring!",
1345 q_info->idx);
1346 return NULL;
1347 }
1348
1349
1350 rxq->queue_id = q_info->idx;
1351 rxq->nb_rx_desc = q_info->nb_desc;
1352
 /*
  * Reserve room for a little more than nb_rx_desc descriptors so the
  * vector Rx burst may read ahead without extra boundary checks.
  */
1357 rx_desc = (rxq->nb_rx_desc + HNS3_DEFAULT_RX_BURST) *
1358 sizeof(struct hns3_desc);
1359 rx_mz = rte_eth_dma_zone_reserve(dev, q_info->ring_name, q_info->idx,
1360 rx_desc, HNS3_RING_BASE_ALIGN,
1361 q_info->socket_id);
1362 if (rx_mz == NULL) {
1363 hns3_err(hw, "Failed to reserve DMA memory for No.%u rx ring!",
1364 q_info->idx);
1365 hns3_rx_queue_release(rxq);
1366 return NULL;
1367 }
1368 rxq->mz = rx_mz;
1369 rxq->rx_ring = (struct hns3_desc *)rx_mz->addr;
1370 rxq->rx_ring_phys_addr = rx_mz->iova;
1371
1372 hns3_dbg(hw, "No.%u rx descriptors iova 0x%" PRIx64, q_info->idx,
1373 rxq->rx_ring_phys_addr);
1374
1375 return rxq;
1376}
1377
1378static int
1379hns3_fake_rx_queue_setup(struct rte_eth_dev *dev, uint16_t idx,
1380 uint16_t nb_desc, unsigned int socket_id)
1381{
1382 struct hns3_adapter *hns = dev->data->dev_private;
1383 struct hns3_hw *hw = &hns->hw;
1384 struct hns3_queue_info q_info;
1385 struct hns3_rx_queue *rxq;
1386 uint16_t nb_rx_q;
1387
1388 if (hw->fkq_data.rx_queues[idx]) {
1389 hns3_rx_queue_release(hw->fkq_data.rx_queues[idx]);
1390 hw->fkq_data.rx_queues[idx] = NULL;
1391 }
1392
1393 q_info.idx = idx;
1394 q_info.socket_id = socket_id;
1395 q_info.nb_desc = nb_desc;
1396 q_info.type = "hns3 fake RX queue";
1397 q_info.ring_name = "rx_fake_ring";
1398 rxq = hns3_alloc_rxq_and_dma_zone(dev, &q_info);
1399 if (rxq == NULL) {
1400 hns3_err(hw, "Failed to setup No.%u fake rx ring.", idx);
1401 return -ENOMEM;
1402 }
1403
 /* fake Rx queues never carry traffic, so no sw_ring is needed */
1405 rxq->sw_ring = NULL;
1406
1407 rxq->hns = hns;
1408 rxq->rx_deferred_start = false;
1409 rxq->port_id = dev->data->port_id;
1410 rxq->configured = true;
1411 nb_rx_q = dev->data->nb_rx_queues;
1412 rxq->io_base = (void *)((char *)hw->io_base + HNS3_TQP_REG_OFFSET +
1413 (nb_rx_q + idx) * HNS3_TQP_REG_SIZE);
1414 rxq->rx_buf_len = HNS3_MIN_BD_BUF_SIZE;
1415
1416 rte_spinlock_lock(&hw->lock);
1417 hw->fkq_data.rx_queues[idx] = rxq;
1418 rte_spinlock_unlock(&hw->lock);
1419
1420 return 0;
1421}
1422
1423static void*
1424hns3_alloc_txq_and_dma_zone(struct rte_eth_dev *dev,
1425 struct hns3_queue_info *q_info)
1426{
1427 struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1428 const struct rte_memzone *tx_mz;
1429 struct hns3_tx_queue *txq;
1430 struct hns3_desc *desc;
1431 unsigned int tx_desc;
1432 int i;
1433
1434 txq = rte_zmalloc_socket(q_info->type, sizeof(struct hns3_tx_queue),
1435 RTE_CACHE_LINE_SIZE, q_info->socket_id);
1436 if (txq == NULL) {
1437 hns3_err(hw, "Failed to allocate memory for No.%u tx ring!",
1438 q_info->idx);
1439 return NULL;
1440 }
1441
1442
1443 txq->queue_id = q_info->idx;
1444 txq->nb_tx_desc = q_info->nb_desc;
1445 tx_desc = txq->nb_tx_desc * sizeof(struct hns3_desc);
1446 tx_mz = rte_eth_dma_zone_reserve(dev, q_info->ring_name, q_info->idx,
1447 tx_desc, HNS3_RING_BASE_ALIGN,
1448 q_info->socket_id);
1449 if (tx_mz == NULL) {
1450 hns3_err(hw, "Failed to reserve DMA memory for No.%u tx ring!",
1451 q_info->idx);
1452 hns3_tx_queue_release(txq);
1453 return NULL;
1454 }
1455 txq->mz = tx_mz;
1456 txq->tx_ring = (struct hns3_desc *)tx_mz->addr;
1457 txq->tx_ring_phys_addr = tx_mz->iova;
1458
1459 hns3_dbg(hw, "No.%u tx descriptors iova 0x%" PRIx64, q_info->idx,
1460 txq->tx_ring_phys_addr);
1461
 /* clear the per-BD control field (including the VLD bit) of every Tx BD */
1463 desc = txq->tx_ring;
1464 for (i = 0; i < txq->nb_tx_desc; i++) {
1465 desc->tx.tp_fe_sc_vld_ra_ri = 0;
1466 desc++;
1467 }
1468
1469 return txq;
1470}
1471
1472static int
1473hns3_fake_tx_queue_setup(struct rte_eth_dev *dev, uint16_t idx,
1474 uint16_t nb_desc, unsigned int socket_id)
1475{
1476 struct hns3_adapter *hns = dev->data->dev_private;
1477 struct hns3_hw *hw = &hns->hw;
1478 struct hns3_queue_info q_info;
1479 struct hns3_tx_queue *txq;
1480 uint16_t nb_tx_q;
1481
1482 if (hw->fkq_data.tx_queues[idx] != NULL) {
1483 hns3_tx_queue_release(hw->fkq_data.tx_queues[idx]);
1484 hw->fkq_data.tx_queues[idx] = NULL;
1485 }
1486
1487 q_info.idx = idx;
1488 q_info.socket_id = socket_id;
1489 q_info.nb_desc = nb_desc;
1490 q_info.type = "hns3 fake TX queue";
1491 q_info.ring_name = "tx_fake_ring";
1492 txq = hns3_alloc_txq_and_dma_zone(dev, &q_info);
1493 if (txq == NULL) {
1494 hns3_err(hw, "Failed to setup No.%u fake tx ring.", idx);
1495 return -ENOMEM;
1496 }
1497
 /* fake Tx queues never carry traffic, so no sw_ring or free list is needed */
1499 txq->sw_ring = NULL;
1500 txq->free = NULL;
1501
1502 txq->hns = hns;
1503 txq->tx_deferred_start = false;
1504 txq->port_id = dev->data->port_id;
1505 txq->configured = true;
1506 nb_tx_q = dev->data->nb_tx_queues;
1507 txq->io_base = (void *)((char *)hw->io_base + HNS3_TQP_REG_OFFSET +
1508 (nb_tx_q + idx) * HNS3_TQP_REG_SIZE);
1509
1510 rte_spinlock_lock(&hw->lock);
1511 hw->fkq_data.tx_queues[idx] = txq;
1512 rte_spinlock_unlock(&hw->lock);
1513
1514 return 0;
1515}
1516
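/*
 * Grow or shrink the fake Rx queue pointer array to hold nb_queues entries,
 * releasing any queues that fall outside the new range.
 */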
1517static int
1518hns3_fake_rx_queue_config(struct hns3_hw *hw, uint16_t nb_queues)
1519{
1520 uint16_t old_nb_queues = hw->fkq_data.nb_fake_rx_queues;
1521 void **rxq;
1522 uint16_t i;
1523
1524 if (hw->fkq_data.rx_queues == NULL && nb_queues != 0) {
 /* first time: allocate the fake Rx queue pointer array */
1526 uint32_t size;
1527 size = sizeof(hw->fkq_data.rx_queues[0]) * nb_queues;
1528 hw->fkq_data.rx_queues = rte_zmalloc("fake_rx_queues", size,
1529 RTE_CACHE_LINE_SIZE);
1530 if (hw->fkq_data.rx_queues == NULL) {
1531 hw->fkq_data.nb_fake_rx_queues = 0;
1532 return -ENOMEM;
1533 }
1534 } else if (hw->fkq_data.rx_queues != NULL && nb_queues != 0) {
 /* re-configure: release removed queues and resize the array */
1536 rxq = hw->fkq_data.rx_queues;
1537 for (i = nb_queues; i < old_nb_queues; i++)
1538 hns3_dev_rx_queue_release(rxq[i]);
1539
1540 rxq = rte_realloc(rxq, sizeof(rxq[0]) * nb_queues,
1541 RTE_CACHE_LINE_SIZE);
1542 if (rxq == NULL)
1543 return -ENOMEM;
1544 if (nb_queues > old_nb_queues) {
1545 uint16_t new_qs = nb_queues - old_nb_queues;
1546 memset(rxq + old_nb_queues, 0, sizeof(rxq[0]) * new_qs);
1547 }
1548
1549 hw->fkq_data.rx_queues = rxq;
1550 } else if (hw->fkq_data.rx_queues != NULL && nb_queues == 0) {
1551 rxq = hw->fkq_data.rx_queues;
1552 for (i = nb_queues; i < old_nb_queues; i++)
1553 hns3_dev_rx_queue_release(rxq[i]);
1554
1555 rte_free(hw->fkq_data.rx_queues);
1556 hw->fkq_data.rx_queues = NULL;
1557 }
1558
1559 hw->fkq_data.nb_fake_rx_queues = nb_queues;
1560
1561 return 0;
1562}
1563
1564static int
1565hns3_fake_tx_queue_config(struct hns3_hw *hw, uint16_t nb_queues)
1566{
1567 uint16_t old_nb_queues = hw->fkq_data.nb_fake_tx_queues;
1568 void **txq;
1569 uint16_t i;
1570
1571 if (hw->fkq_data.tx_queues == NULL && nb_queues != 0) {
 /* first time: allocate the fake Tx queue pointer array */
1573 uint32_t size;
1574 size = sizeof(hw->fkq_data.tx_queues[0]) * nb_queues;
1575 hw->fkq_data.tx_queues = rte_zmalloc("fake_tx_queues", size,
1576 RTE_CACHE_LINE_SIZE);
1577 if (hw->fkq_data.tx_queues == NULL) {
1578 hw->fkq_data.nb_fake_tx_queues = 0;
1579 return -ENOMEM;
1580 }
1581 } else if (hw->fkq_data.tx_queues != NULL && nb_queues != 0) {
 /* re-configure: release removed queues and resize the array */
1583 txq = hw->fkq_data.tx_queues;
1584 for (i = nb_queues; i < old_nb_queues; i++)
1585 hns3_dev_tx_queue_release(txq[i]);
1586 txq = rte_realloc(txq, sizeof(txq[0]) * nb_queues,
1587 RTE_CACHE_LINE_SIZE);
1588 if (txq == NULL)
1589 return -ENOMEM;
1590 if (nb_queues > old_nb_queues) {
1591 uint16_t new_qs = nb_queues - old_nb_queues;
1592 memset(txq + old_nb_queues, 0, sizeof(txq[0]) * new_qs);
1593 }
1594
1595 hw->fkq_data.tx_queues = txq;
1596 } else if (hw->fkq_data.tx_queues != NULL && nb_queues == 0) {
1597 txq = hw->fkq_data.tx_queues;
1598 for (i = nb_queues; i < old_nb_queues; i++)
1599 hns3_dev_tx_queue_release(txq[i]);
1600
1601 rte_free(hw->fkq_data.tx_queues);
1602 hw->fkq_data.tx_queues = NULL;
1603 }
1604 hw->fkq_data.nb_fake_tx_queues = nb_queues;
1605
1606 return 0;
1607}
1608
1609int
1610hns3_set_fake_rx_or_tx_queues(struct rte_eth_dev *dev, uint16_t nb_rx_q,
1611 uint16_t nb_tx_q)
1612{
1613 struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1614 uint16_t rx_need_add_nb_q;
1615 uint16_t tx_need_add_nb_q;
1616 uint16_t port_id;
1617 uint16_t q;
1618 int ret;
1619
1620 if (hns3_dev_indep_txrx_supported(hw))
1621 return 0;
1622
 /* every TQP up to cfg_max_queues needs a ring, so back the unused ones with fake queues */
1624 rx_need_add_nb_q = hw->cfg_max_queues - nb_rx_q;
1625 tx_need_add_nb_q = hw->cfg_max_queues - nb_tx_q;
1626 ret = hns3_fake_rx_queue_config(hw, rx_need_add_nb_q);
1627 if (ret) {
1628 hns3_err(hw, "Fail to configure fake rx queues: %d", ret);
1629 return ret;
1630 }
1631
1632 ret = hns3_fake_tx_queue_config(hw, tx_need_add_nb_q);
1633 if (ret) {
 hns3_err(hw, "Fail to configure fake tx queues: %d", ret);
1635 goto cfg_fake_tx_q_fail;
1636 }
1637
 /* set up and register each fake Rx queue */
1639 port_id = hw->data->port_id;
1640 for (q = 0; q < rx_need_add_nb_q; q++) {
1641 ret = hns3_fake_rx_queue_setup(dev, q, HNS3_MIN_RING_DESC,
1642 rte_eth_dev_socket_id(port_id));
1643 if (ret)
1644 goto setup_fake_rx_q_fail;
1645 }
1646
 /* set up and register each fake Tx queue */
1648 for (q = 0; q < tx_need_add_nb_q; q++) {
1649 ret = hns3_fake_tx_queue_setup(dev, q, HNS3_MIN_RING_DESC,
1650 rte_eth_dev_socket_id(port_id));
1651 if (ret)
1652 goto setup_fake_tx_q_fail;
1653 }
1654
1655 return 0;
1656
1657setup_fake_tx_q_fail:
1658setup_fake_rx_q_fail:
1659 (void)hns3_fake_tx_queue_config(hw, 0);
1660cfg_fake_tx_q_fail:
1661 (void)hns3_fake_rx_queue_config(hw, 0);
1662
1663 return ret;
1664}
1665
1666void
1667hns3_dev_release_mbufs(struct hns3_adapter *hns)
1668{
1669 struct rte_eth_dev_data *dev_data = hns->hw.data;
1670 struct hns3_rx_queue *rxq;
1671 struct hns3_tx_queue *txq;
1672 int i;
1673
1674 if (dev_data->rx_queues)
1675 for (i = 0; i < dev_data->nb_rx_queues; i++) {
1676 rxq = dev_data->rx_queues[i];
1677 if (rxq == NULL)
1678 continue;
1679 hns3_rx_queue_release_mbufs(rxq);
1680 }
1681
1682 if (dev_data->tx_queues)
1683 for (i = 0; i < dev_data->nb_tx_queues; i++) {
1684 txq = dev_data->tx_queues[i];
1685 if (txq == NULL)
1686 continue;
1687 hns3_tx_queue_release_mbufs(txq);
1688 }
1689}
1690
1691static int
1692hns3_rx_buf_len_calc(struct rte_mempool *mp, uint16_t *rx_buf_len)
1693{
1694 uint16_t vld_buf_size;
1695 uint16_t num_hw_specs;
1696 uint16_t i;
1697
 /*
  * The hardware only supports a fixed set of Rx buffer sizes
  * (4K/2K/1K/512 bytes). Pick the largest one that fits into the
  * data room of the given mempool's mbufs; mbufs smaller than the
  * minimum supported size are rejected.
  */
1710 static const uint16_t hw_rx_buf_size[] = {
1711 HNS3_4K_BD_BUF_SIZE,
1712 HNS3_2K_BD_BUF_SIZE,
1713 HNS3_1K_BD_BUF_SIZE,
1714 HNS3_512_BD_BUF_SIZE
1715 };
1716
1717 vld_buf_size = (uint16_t)(rte_pktmbuf_data_room_size(mp) -
1718 RTE_PKTMBUF_HEADROOM);
1719 if (vld_buf_size < HNS3_MIN_BD_BUF_SIZE)
1720 return -EINVAL;
1721
1722 num_hw_specs = RTE_DIM(hw_rx_buf_size);
1723 for (i = 0; i < num_hw_specs; i++) {
1724 if (vld_buf_size >= hw_rx_buf_size[i]) {
1725 *rx_buf_len = hw_rx_buf_size[i];
1726 break;
1727 }
1728 }
1729 return 0;
1730}
1731
1732static int
1733hns3_rxq_conf_runtime_check(struct hns3_hw *hw, uint16_t buf_size,
1734 uint16_t nb_desc)
1735{
1736 struct rte_eth_dev *dev = &rte_eth_devices[hw->data->port_id];
1737 struct rte_eth_rxmode *rxmode = &hw->data->dev_conf.rxmode;
1738 eth_rx_burst_t pkt_burst = dev->rx_pkt_burst;
1739 uint16_t min_vec_bds;
1740
 /*
  * When scattered Rx is off, a whole packet must fit into a single
  * Rx buffer, so max_rx_pkt_len must not exceed rx_buf_len.
  */
1747 if (!hw->data->scattered_rx && rxmode->max_rx_pkt_len > buf_size) {
1748 hns3_err(hw, "max_rx_pkt_len is not allowed to be set greater "
1749 "than rx_buf_len if scattered is off.");
1750 return -EINVAL;
1751 }
1752
1753 if (pkt_burst == hns3_recv_pkts_vec) {
1754 min_vec_bds = HNS3_DEFAULT_RXQ_REARM_THRESH +
1755 HNS3_DEFAULT_RX_BURST;
1756 if (nb_desc < min_vec_bds ||
1757 nb_desc % HNS3_DEFAULT_RXQ_REARM_THRESH) {
1758 hns3_err(hw, "if Rx burst mode is vector, "
1759 "number of descriptor is required to be "
1760 "bigger than min vector bds:%u, and could be "
1761 "divided by rxq rearm thresh:%u.",
1762 min_vec_bds, HNS3_DEFAULT_RXQ_REARM_THRESH);
1763 return -EINVAL;
1764 }
1765 }
1766 return 0;
1767}
1768
1769static int
1770hns3_rx_queue_conf_check(struct hns3_hw *hw, const struct rte_eth_rxconf *conf,
1771 struct rte_mempool *mp, uint16_t nb_desc,
1772 uint16_t *buf_size)
1773{
1774 int ret;
1775
1776 if (nb_desc > HNS3_MAX_RING_DESC || nb_desc < HNS3_MIN_RING_DESC ||
1777 nb_desc % HNS3_ALIGN_RING_DESC) {
1778 hns3_err(hw, "Number (%u) of rx descriptors is invalid",
1779 nb_desc);
1780 return -EINVAL;
1781 }
1782
1783 if (conf->rx_drop_en == 0)
1784 hns3_warn(hw, "if no descriptors available, packets are always "
1785 "dropped and rx_drop_en (1) is fixed on");
1786
1787 if (hns3_rx_buf_len_calc(mp, buf_size)) {
1788 hns3_err(hw, "rxq mbufs' data room size (%u) is not enough! "
1789 "minimal data room size (%u).",
1790 rte_pktmbuf_data_room_size(mp),
1791 HNS3_MIN_BD_BUF_SIZE + RTE_PKTMBUF_HEADROOM);
1792 return -EINVAL;
1793 }
1794
1795 if (hw->data->dev_started) {
1796 ret = hns3_rxq_conf_runtime_check(hw, *buf_size, nb_desc);
1797 if (ret) {
1798 hns3_err(hw, "Rx queue runtime setup fail.");
1799 return ret;
1800 }
1801 }
1802
1803 return 0;
1804}
1805
1806uint32_t
1807hns3_get_tqp_reg_offset(uint16_t queue_id)
1808{
1809 uint32_t reg_offset;
1810
 /* queues above HNS3_MIN_EXTEND_QUEUE_ID use the extended register region */
1812 if (queue_id < HNS3_MIN_EXTEND_QUEUE_ID)
1813 reg_offset = HNS3_TQP_REG_OFFSET + queue_id * HNS3_TQP_REG_SIZE;
1814 else
1815 reg_offset = HNS3_TQP_REG_OFFSET + HNS3_TQP_EXT_REG_OFFSET +
1816 (queue_id - HNS3_MIN_EXTEND_QUEUE_ID) *
1817 HNS3_TQP_REG_SIZE;
1818
1819 return reg_offset;
1820}
1821
1822int
1823hns3_rx_queue_setup(struct rte_eth_dev *dev, uint16_t idx, uint16_t nb_desc,
1824 unsigned int socket_id, const struct rte_eth_rxconf *conf,
1825 struct rte_mempool *mp)
1826{
1827 struct hns3_adapter *hns = dev->data->dev_private;
1828 struct hns3_hw *hw = &hns->hw;
1829 struct hns3_queue_info q_info;
1830 struct hns3_rx_queue *rxq;
1831 uint16_t rx_buf_size;
1832 int rx_entry_len;
1833 int ret;
1834
1835 ret = hns3_rx_queue_conf_check(hw, conf, mp, nb_desc, &rx_buf_size);
1836 if (ret)
1837 return ret;
1838
1839 if (dev->data->rx_queues[idx]) {
1840 hns3_rx_queue_release(dev->data->rx_queues[idx]);
1841 dev->data->rx_queues[idx] = NULL;
1842 }
1843
1844 q_info.idx = idx;
1845 q_info.socket_id = socket_id;
1846 q_info.nb_desc = nb_desc;
1847 q_info.type = "hns3 RX queue";
1848 q_info.ring_name = "rx_ring";
1849
1850 rxq = hns3_alloc_rxq_and_dma_zone(dev, &q_info);
1851 if (rxq == NULL) {
1852 hns3_err(hw,
1853 "Failed to alloc mem and reserve DMA mem for rx ring!");
1854 return -ENOMEM;
1855 }
1856
1857 rxq->hns = hns;
1858 rxq->ptype_tbl = &hns->ptype_tbl;
1859 rxq->mb_pool = mp;
1860 rxq->rx_free_thresh = (conf->rx_free_thresh > 0) ?
1861 conf->rx_free_thresh : HNS3_DEFAULT_RX_FREE_THRESH;
1862
1863 rxq->rx_deferred_start = conf->rx_deferred_start;
1864 if (rxq->rx_deferred_start && !hns3_dev_indep_txrx_supported(hw)) {
1865 hns3_warn(hw, "deferred start is not supported.");
1866 rxq->rx_deferred_start = false;
1867 }
1868
1869 rx_entry_len = (rxq->nb_rx_desc + HNS3_DEFAULT_RX_BURST) *
1870 sizeof(struct hns3_entry);
1871 rxq->sw_ring = rte_zmalloc_socket("hns3 RX sw ring", rx_entry_len,
1872 RTE_CACHE_LINE_SIZE, socket_id);
1873 if (rxq->sw_ring == NULL) {
1874 hns3_err(hw, "Failed to allocate memory for rx sw ring!");
1875 hns3_rx_queue_release(rxq);
1876 return -ENOMEM;
1877 }
1878
1879 rxq->next_to_use = 0;
1880 rxq->rx_free_hold = 0;
1881 rxq->rx_rearm_start = 0;
1882 rxq->rx_rearm_nb = 0;
1883 rxq->pkt_first_seg = NULL;
1884 rxq->pkt_last_seg = NULL;
1885 rxq->port_id = dev->data->port_id;
1886
 /*
  * Software PVID discarding on Rx is only needed for VF devices and
  * for PF devices working in SW_SHIFT_AND_DISCARD VLAN mode;
  * otherwise the hardware handles PVID by itself.
  */
1895 if (hns->is_vf || hw->vlan_mode == HNS3_SW_SHIFT_AND_DISCARD_MODE)
1896 rxq->pvid_sw_discard_en = hw->port_base_vlan_cfg.state ==
1897 HNS3_PORT_BASE_VLAN_ENABLE;
1898 else
1899 rxq->pvid_sw_discard_en = false;
1900 rxq->ptype_en = hns3_dev_rxd_adv_layout_supported(hw) ? true : false;
1901 rxq->configured = true;
 rxq->io_base = (void *)((char *)hw->io_base +
 hns3_get_tqp_reg_offset(idx));
1906 rxq->io_head_reg = (volatile void *)((char *)rxq->io_base +
1907 HNS3_RING_RX_HEAD_REG);
1908 rxq->rx_buf_len = rx_buf_size;
1909 memset(&rxq->basic_stats, 0, sizeof(struct hns3_rx_basic_stats));
1910 memset(&rxq->err_stats, 0, sizeof(struct hns3_rx_bd_errors_stats));
1911 memset(&rxq->dfx_stats, 0, sizeof(struct hns3_rx_dfx_stats));
1912
 /* record the CRC length so the Rx path can adjust packet length when KEEP_CRC is on */
1914 if (dev->data->dev_conf.rxmode.offloads & DEV_RX_OFFLOAD_KEEP_CRC)
1915 rxq->crc_len = RTE_ETHER_CRC_LEN;
1916 else
1917 rxq->crc_len = 0;
1918
1919 rxq->bulk_mbuf_num = 0;
1920
1921 rte_spinlock_lock(&hw->lock);
1922 dev->data->rx_queues[idx] = rxq;
1923 rte_spinlock_unlock(&hw->lock);
1924
1925 return 0;
1926}
1927
1928void
1929hns3_rx_scattered_reset(struct rte_eth_dev *dev)
1930{
1931 struct hns3_adapter *hns = dev->data->dev_private;
1932 struct hns3_hw *hw = &hns->hw;
1933
1934 hw->rx_buf_len = 0;
1935 dev->data->scattered_rx = false;
1936}
1937
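/*
 * Track the smallest rx_buf_len across all Rx queues and turn on scattered
 * Rx when DEV_RX_OFFLOAD_SCATTER is requested or max_rx_pkt_len exceeds
 * that buffer length.
 */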
1938void
1939hns3_rx_scattered_calc(struct rte_eth_dev *dev)
1940{
1941 struct rte_eth_conf *dev_conf = &dev->data->dev_conf;
1942 struct hns3_adapter *hns = dev->data->dev_private;
1943 struct hns3_hw *hw = &hns->hw;
1944 struct hns3_rx_queue *rxq;
1945 uint32_t queue_id;
1946
1947 if (dev->data->rx_queues == NULL)
1948 return;
1949
1950 for (queue_id = 0; queue_id < dev->data->nb_rx_queues; queue_id++) {
1951 rxq = dev->data->rx_queues[queue_id];
1952 if (hw->rx_buf_len == 0)
1953 hw->rx_buf_len = rxq->rx_buf_len;
1954 else
1955 hw->rx_buf_len = RTE_MIN(hw->rx_buf_len,
1956 rxq->rx_buf_len);
1957 }
1958
1959 if (dev_conf->rxmode.offloads & DEV_RX_OFFLOAD_SCATTER ||
1960 dev_conf->rxmode.max_rx_pkt_len > hw->rx_buf_len)
1961 dev->data->scattered_rx = true;
1962}
1963
1964const uint32_t *
1965hns3_dev_supported_ptypes_get(struct rte_eth_dev *dev)
1966{
1967 static const uint32_t ptypes[] = {
1968 RTE_PTYPE_L2_ETHER,
1969 RTE_PTYPE_L2_ETHER_LLDP,
1970 RTE_PTYPE_L2_ETHER_ARP,
1971 RTE_PTYPE_L3_IPV4,
1972 RTE_PTYPE_L3_IPV4_EXT,
1973 RTE_PTYPE_L3_IPV6,
1974 RTE_PTYPE_L3_IPV6_EXT,
1975 RTE_PTYPE_L4_IGMP,
1976 RTE_PTYPE_L4_ICMP,
1977 RTE_PTYPE_L4_SCTP,
1978 RTE_PTYPE_L4_TCP,
1979 RTE_PTYPE_L4_UDP,
1980 RTE_PTYPE_TUNNEL_GRE,
1981 RTE_PTYPE_INNER_L2_ETHER,
1982 RTE_PTYPE_INNER_L3_IPV4,
1983 RTE_PTYPE_INNER_L3_IPV6,
1984 RTE_PTYPE_INNER_L3_IPV4_EXT,
1985 RTE_PTYPE_INNER_L3_IPV6_EXT,
1986 RTE_PTYPE_INNER_L4_UDP,
1987 RTE_PTYPE_INNER_L4_TCP,
1988 RTE_PTYPE_INNER_L4_SCTP,
1989 RTE_PTYPE_INNER_L4_ICMP,
1990 RTE_PTYPE_TUNNEL_VXLAN,
1991 RTE_PTYPE_TUNNEL_NVGRE,
1992 RTE_PTYPE_UNKNOWN
1993 };
1994 static const uint32_t adv_layout_ptypes[] = {
1995 RTE_PTYPE_L2_ETHER,
1996 RTE_PTYPE_L2_ETHER_TIMESYNC,
1997 RTE_PTYPE_L2_ETHER_LLDP,
1998 RTE_PTYPE_L2_ETHER_ARP,
1999 RTE_PTYPE_L3_IPV4_EXT_UNKNOWN,
2000 RTE_PTYPE_L3_IPV6_EXT_UNKNOWN,
2001 RTE_PTYPE_L4_FRAG,
2002 RTE_PTYPE_L4_NONFRAG,
2003 RTE_PTYPE_L4_UDP,
2004 RTE_PTYPE_L4_TCP,
2005 RTE_PTYPE_L4_SCTP,
2006 RTE_PTYPE_L4_IGMP,
2007 RTE_PTYPE_L4_ICMP,
2008 RTE_PTYPE_TUNNEL_GRE,
2009 RTE_PTYPE_TUNNEL_GRENAT,
2010 RTE_PTYPE_INNER_L2_ETHER,
2011 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN,
2012 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN,
2013 RTE_PTYPE_INNER_L4_FRAG,
2014 RTE_PTYPE_INNER_L4_ICMP,
2015 RTE_PTYPE_INNER_L4_NONFRAG,
2016 RTE_PTYPE_INNER_L4_UDP,
2017 RTE_PTYPE_INNER_L4_TCP,
2018 RTE_PTYPE_INNER_L4_SCTP,
2019 RTE_PTYPE_INNER_L4_ICMP,
2020 RTE_PTYPE_UNKNOWN
2021 };
2022 struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2023
2024 if (dev->rx_pkt_burst == hns3_recv_pkts_simple ||
2025 dev->rx_pkt_burst == hns3_recv_scattered_pkts ||
2026 dev->rx_pkt_burst == hns3_recv_pkts_vec ||
2027 dev->rx_pkt_burst == hns3_recv_pkts_vec_sve) {
2028 if (hns3_dev_rxd_adv_layout_supported(hw))
2029 return adv_layout_ptypes;
2030 else
2031 return ptypes;
2032 }
2033
2034 return NULL;
2035}
2036
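/*
 * Populate the lookup tables the Rx path uses to translate the L3/L4 type
 * fields of non-tunnel packets in the Rx BD into rte_mbuf packet types.
 */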
2037static void
2038hns3_init_non_tunnel_ptype_tbl(struct hns3_ptype_table *tbl)
2039{
2040 tbl->l3table[0] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4;
2041 tbl->l3table[1] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6;
2042 tbl->l3table[2] = RTE_PTYPE_L2_ETHER_ARP;
2043 tbl->l3table[4] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT;
2044 tbl->l3table[5] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT;
2045 tbl->l3table[6] = RTE_PTYPE_L2_ETHER_LLDP;
2046
2047 tbl->l4table[0] = RTE_PTYPE_L4_UDP;
2048 tbl->l4table[1] = RTE_PTYPE_L4_TCP;
2049 tbl->l4table[2] = RTE_PTYPE_TUNNEL_GRE;
2050 tbl->l4table[3] = RTE_PTYPE_L4_SCTP;
2051 tbl->l4table[4] = RTE_PTYPE_L4_IGMP;
2052 tbl->l4table[5] = RTE_PTYPE_L4_ICMP;
2053}
2054
2055static void
2056hns3_init_tunnel_ptype_tbl(struct hns3_ptype_table *tbl)
2057{
2058 tbl->inner_l3table[0] = RTE_PTYPE_INNER_L2_ETHER |
2059 RTE_PTYPE_INNER_L3_IPV4;
2060 tbl->inner_l3table[1] = RTE_PTYPE_INNER_L2_ETHER |
2061 RTE_PTYPE_INNER_L3_IPV6;
2062
2063 tbl->inner_l3table[2] = RTE_PTYPE_UNKNOWN;
2064 tbl->inner_l3table[3] = RTE_PTYPE_UNKNOWN;
2065 tbl->inner_l3table[4] = RTE_PTYPE_INNER_L2_ETHER |
2066 RTE_PTYPE_INNER_L3_IPV4_EXT;
2067 tbl->inner_l3table[5] = RTE_PTYPE_INNER_L2_ETHER |
2068 RTE_PTYPE_INNER_L3_IPV6_EXT;
2069
2070 tbl->inner_l4table[0] = RTE_PTYPE_INNER_L4_UDP;
2071 tbl->inner_l4table[1] = RTE_PTYPE_INNER_L4_TCP;
2072
2073 tbl->inner_l4table[2] = RTE_PTYPE_UNKNOWN;
2074 tbl->inner_l4table[3] = RTE_PTYPE_INNER_L4_SCTP;
2075
2076 tbl->inner_l4table[4] = RTE_PTYPE_UNKNOWN;
2077 tbl->inner_l4table[5] = RTE_PTYPE_INNER_L4_ICMP;
2078
2079 tbl->ol3table[0] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4;
2080 tbl->ol3table[1] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6;
2081 tbl->ol3table[2] = RTE_PTYPE_UNKNOWN;
2082 tbl->ol3table[3] = RTE_PTYPE_UNKNOWN;
2083 tbl->ol3table[4] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT;
2084 tbl->ol3table[5] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT;
2085
2086 tbl->ol4table[0] = RTE_PTYPE_UNKNOWN;
2087 tbl->ol4table[1] = RTE_PTYPE_L4_UDP | RTE_PTYPE_TUNNEL_VXLAN;
2088 tbl->ol4table[2] = RTE_PTYPE_TUNNEL_NVGRE;
2089}
2090
2091static void
2092hns3_init_adv_layout_ptype(struct hns3_ptype_table *tbl)
2093{
2094 uint32_t *ptype = tbl->ptype;
2095
 /* L2 packet types */
2097 ptype[1] = RTE_PTYPE_L2_ETHER_ARP;
2098 ptype[3] = RTE_PTYPE_L2_ETHER_LLDP;
2099 ptype[8] = RTE_PTYPE_L2_ETHER_TIMESYNC;
2100
 /* IPv4 packet types */
2102 ptype[17] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
2103 RTE_PTYPE_L4_FRAG;
2104 ptype[18] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
2105 RTE_PTYPE_L4_NONFRAG;
2106 ptype[19] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
2107 RTE_PTYPE_L4_UDP;
2108 ptype[20] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
2109 RTE_PTYPE_L4_TCP;
2110 ptype[21] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
2111 RTE_PTYPE_TUNNEL_GRE;
2112 ptype[22] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
2113 RTE_PTYPE_L4_SCTP;
2114 ptype[23] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
2115 RTE_PTYPE_L4_IGMP;
2116 ptype[24] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
2117 RTE_PTYPE_L4_ICMP;
2118
2119 ptype[25] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
2120 RTE_PTYPE_L4_UDP;
2121
 /* IPv4 --> GRENAT tunnel */
2123 ptype[29] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
2124 RTE_PTYPE_TUNNEL_GRENAT;
2125
2126 ptype[30] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
2127 RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER;
2128
 /* IPv4 --> GRENAT --> inner IPv4 */
2130 ptype[31] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
2131 RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
2132 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
2133 RTE_PTYPE_INNER_L4_FRAG;
2134 ptype[32] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
2135 RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
2136 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
2137 RTE_PTYPE_INNER_L4_NONFRAG;
2138 ptype[33] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
2139 RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
2140 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
2141 RTE_PTYPE_INNER_L4_UDP;
2142 ptype[34] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
2143 RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
2144 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
2145 RTE_PTYPE_INNER_L4_TCP;
2146 ptype[35] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
2147 RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
2148 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
2149 RTE_PTYPE_INNER_L4_SCTP;
2150
2151 ptype[36] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
2152 RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
2153 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN;
2154 ptype[37] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
2155 RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
2156 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
2157 RTE_PTYPE_INNER_L4_ICMP;
2158
 /* IPv4 --> GRENAT --> inner IPv6 */
2160 ptype[39] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
2161 RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
2162 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
2163 RTE_PTYPE_INNER_L4_FRAG;
2164 ptype[40] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
2165 RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
2166 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
2167 RTE_PTYPE_INNER_L4_NONFRAG;
2168 ptype[41] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
2169 RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
2170 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
2171 RTE_PTYPE_INNER_L4_UDP;
2172 ptype[42] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
2173 RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
2174 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
2175 RTE_PTYPE_INNER_L4_TCP;
2176 ptype[43] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
2177 RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
2178 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
2179 RTE_PTYPE_INNER_L4_SCTP;
2180
2181 ptype[44] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
2182 RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
2183 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN;
2184 ptype[45] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
2185 RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
2186 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
2187 RTE_PTYPE_INNER_L4_ICMP;

	/* Non-tunnelled IPv6 packet types. */
2190 ptype[111] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
2191 RTE_PTYPE_L4_FRAG;
2192 ptype[112] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
2193 RTE_PTYPE_L4_NONFRAG;
2194 ptype[113] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
2195 RTE_PTYPE_L4_UDP;
2196 ptype[114] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
2197 RTE_PTYPE_L4_TCP;
2198 ptype[115] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
2199 RTE_PTYPE_TUNNEL_GRE;
2200 ptype[116] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
2201 RTE_PTYPE_L4_SCTP;
2202 ptype[117] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
2203 RTE_PTYPE_L4_IGMP;
2204 ptype[118] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
2205 RTE_PTYPE_L4_ICMP;
2206
2207 ptype[119] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
2208 RTE_PTYPE_L4_UDP;

	/* Tunnels over IPv6 (reported as GRENAT), with and without inner headers. */
2211 ptype[123] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
2212 RTE_PTYPE_TUNNEL_GRENAT;
2213
2214 ptype[124] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
2215 RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER;
2216
2217
2218 ptype[125] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
2219 RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
2220 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
2221 RTE_PTYPE_INNER_L4_FRAG;
2222 ptype[126] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
2223 RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
2224 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
2225 RTE_PTYPE_INNER_L4_NONFRAG;
2226 ptype[127] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
2227 RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
2228 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
2229 RTE_PTYPE_INNER_L4_UDP;
2230 ptype[128] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
2231 RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
2232 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
2233 RTE_PTYPE_INNER_L4_TCP;
2234 ptype[129] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
2235 RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
2236 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
2237 RTE_PTYPE_INNER_L4_SCTP;
2238
2239 ptype[130] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
2240 RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
2241 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN;
2242 ptype[131] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
2243 RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
2244 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
2245 RTE_PTYPE_INNER_L4_ICMP;
2246
2247
2248 ptype[133] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
2249 RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
2250 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
2251 RTE_PTYPE_INNER_L4_FRAG;
2252 ptype[134] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
2253 RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
2254 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
2255 RTE_PTYPE_INNER_L4_NONFRAG;
2256 ptype[135] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
2257 RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
2258 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
2259 RTE_PTYPE_INNER_L4_UDP;
2260 ptype[136] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
2261 RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
2262 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
2263 RTE_PTYPE_INNER_L4_TCP;
2264 ptype[137] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
2265 RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
2266 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
2267 RTE_PTYPE_INNER_L4_SCTP;
2268
2269 ptype[138] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
2270 RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
2271 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN;
2272 ptype[139] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
2273 RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
2274 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
2275 RTE_PTYPE_INNER_L4_ICMP;
2276}
2277
2278void
2279hns3_init_rx_ptype_tble(struct rte_eth_dev *dev)
2280{
2281 struct hns3_adapter *hns = dev->data->dev_private;
2282 struct hns3_ptype_table *tbl = &hns->ptype_tbl;
2283
2284 memset(tbl, 0, sizeof(*tbl));
2285
2286 hns3_init_non_tunnel_ptype_tbl(tbl);
2287 hns3_init_tunnel_ptype_tbl(tbl);
2288 hns3_init_adv_layout_ptype(tbl);
2289}
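
/*
 * Example: a minimal, hedged sketch of how an application can discover which
 * of the packet types initialised above are reported by the port; everything
 * below is application side code and 'port_id' is an assumption.
 *
 *	uint32_t ptypes[64];
 *	int i, num;
 *
 *	num = rte_eth_dev_get_supported_ptypes(port_id, RTE_PTYPE_ALL_MASK,
 *						ptypes, RTE_DIM(ptypes));
 *	for (i = 0; i < num; i++)
 *		if (ptypes[i] == RTE_PTYPE_TUNNEL_GRENAT)
 *			printf("tunnelled packet types are reported\n");
 */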
2290
2291static inline void
2292hns3_rxd_to_vlan_tci(struct hns3_rx_queue *rxq, struct rte_mbuf *mb,
2293 uint32_t l234_info, const struct hns3_desc *rxd)
2294{
2295#define HNS3_STRP_STATUS_NUM 0x4
2296
2297#define HNS3_NO_STRP_VLAN_VLD 0x0
2298#define HNS3_INNER_STRP_VLAN_VLD 0x1
2299#define HNS3_OUTER_STRP_VLAN_VLD 0x2
2300 uint32_t strip_status;
2301 uint32_t report_mode;
2302

	/*
	 * The first index of the table selects the PVID based VLAN mode of
	 * the queue (pvid_sw_discard_en), the second index is the strip
	 * status reported in the Rx descriptor. The table tells which
	 * stripped tag, if any, is reported to the mbuf.
	 */
2309 static const uint32_t report_type[][HNS3_STRP_STATUS_NUM] = {
2310 {
2311 HNS3_NO_STRP_VLAN_VLD,
2312 HNS3_OUTER_STRP_VLAN_VLD,
2313 HNS3_INNER_STRP_VLAN_VLD,
2314 HNS3_OUTER_STRP_VLAN_VLD
2315 },
2316 {
2317 HNS3_NO_STRP_VLAN_VLD,
2318 HNS3_NO_STRP_VLAN_VLD,
2319 HNS3_NO_STRP_VLAN_VLD,
2320 HNS3_INNER_STRP_VLAN_VLD
2321 }
2322 };
2323 strip_status = hns3_get_field(l234_info, HNS3_RXD_STRP_TAGP_M,
2324 HNS3_RXD_STRP_TAGP_S);
2325 report_mode = report_type[rxq->pvid_sw_discard_en][strip_status];
2326 switch (report_mode) {
2327 case HNS3_NO_STRP_VLAN_VLD:
2328 mb->vlan_tci = 0;
2329 return;
2330 case HNS3_INNER_STRP_VLAN_VLD:
2331 mb->ol_flags |= PKT_RX_VLAN | PKT_RX_VLAN_STRIPPED;
2332 mb->vlan_tci = rte_le_to_cpu_16(rxd->rx.vlan_tag);
2333 return;
2334 case HNS3_OUTER_STRP_VLAN_VLD:
2335 mb->ol_flags |= PKT_RX_VLAN | PKT_RX_VLAN_STRIPPED;
2336 mb->vlan_tci = rte_le_to_cpu_16(rxd->rx.ot_vlan_tag);
2337 return;
2338 default:
2339 mb->vlan_tci = 0;
2340 return;
2341 }
2342}
2343
2344static inline void
2345recalculate_data_len(struct rte_mbuf *first_seg, struct rte_mbuf *last_seg,
2346 struct rte_mbuf *rxm, struct hns3_rx_queue *rxq,
2347 uint16_t data_len)
2348{
2349 uint8_t crc_len = rxq->crc_len;
2350
2351 if (data_len <= crc_len) {
2352 rte_pktmbuf_free_seg(rxm);
2353 first_seg->nb_segs--;
2354 last_seg->data_len = (uint16_t)(last_seg->data_len -
2355 (crc_len - data_len));
2356 last_seg->next = NULL;
2357 } else
2358 rxm->data_len = (uint16_t)(data_len - crc_len);
2359}
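
/*
 * Example: with the standard 4 byte Ethernet CRC kept by hardware
 * (rxq->crc_len == 4), a last buffer holding 10 bytes simply shrinks to
 * 6 bytes of payload. If the last buffer holds only 2 bytes, the CRC cannot
 * fit entirely in it: the buffer is freed, nb_segs is decremented and the
 * remaining 2 CRC bytes are trimmed from the previous segment instead.
 */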
2360
2361static inline struct rte_mbuf *
2362hns3_rx_alloc_buffer(struct hns3_rx_queue *rxq)
2363{
2364 int ret;
2365
2366 if (likely(rxq->bulk_mbuf_num > 0))
2367 return rxq->bulk_mbuf[--rxq->bulk_mbuf_num];
2368
2369 ret = rte_mempool_get_bulk(rxq->mb_pool, (void **)rxq->bulk_mbuf,
2370 HNS3_BULK_ALLOC_MBUF_NUM);
2371 if (likely(ret == 0)) {
2372 rxq->bulk_mbuf_num = HNS3_BULK_ALLOC_MBUF_NUM;
2373 return rxq->bulk_mbuf[--rxq->bulk_mbuf_num];
2374 } else
2375 return rte_mbuf_raw_alloc(rxq->mb_pool);
2376}
2377
2378static inline void
2379hns3_rx_ptp_timestamp_handle(struct hns3_rx_queue *rxq, struct rte_mbuf *mbuf,
2380 volatile struct hns3_desc *rxd)
2381{
2382 struct hns3_pf *pf = HNS3_DEV_PRIVATE_TO_PF(rxq->hns);
2383 uint64_t timestamp = rte_le_to_cpu_64(rxd->timestamp);
2384
2385 mbuf->ol_flags |= PKT_RX_IEEE1588_PTP | PKT_RX_IEEE1588_TMST;
2386 if (hns3_timestamp_rx_dynflag > 0) {
2387 *RTE_MBUF_DYNFIELD(mbuf, hns3_timestamp_dynfield_offset,
2388 rte_mbuf_timestamp_t *) = timestamp;
2389 mbuf->ol_flags |= hns3_timestamp_rx_dynflag;
2390 }
2391
2392 pf->rx_timestamp = timestamp;
2393}
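
/*
 * Example: a minimal sketch of how an application consumes the timestamp
 * stored above through the generic mbuf dynamic field API (registration and
 * the mbuf pointer 'm' are application side assumptions):
 *
 *	static int hwts_off;
 *	static uint64_t hwts_flag;
 *
 *	(void)rte_mbuf_dyn_rx_timestamp_register(&hwts_off, &hwts_flag);
 *	...
 *	if (m->ol_flags & hwts_flag) {
 *		rte_mbuf_timestamp_t ts = *RTE_MBUF_DYNFIELD(m, hwts_off,
 *						rte_mbuf_timestamp_t *);
 *		// raw hardware timestamp, resolution is device specific
 *	}
 */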
2394
2395uint16_t
2396hns3_recv_pkts_simple(void *rx_queue,
2397 struct rte_mbuf **rx_pkts,
2398 uint16_t nb_pkts)
2399{
2400 volatile struct hns3_desc *rx_ring;
2401 volatile struct hns3_desc *rxdp;
2402 struct hns3_rx_queue *rxq;
2403 struct hns3_entry *sw_ring;
2404 struct hns3_entry *rxe;
2405 struct hns3_desc rxd;
2406 struct rte_mbuf *nmb;
2407 struct rte_mbuf *rxm;
2408 uint32_t bd_base_info;
2409 uint32_t l234_info;
2410 uint32_t ol_info;
2411 uint64_t dma_addr;
2412 uint16_t nb_rx_bd;
2413 uint16_t nb_rx;
2414 uint16_t rx_id;
2415 int ret;
2416
2417 nb_rx = 0;
2418 nb_rx_bd = 0;
2419 rxq = rx_queue;
2420 rx_ring = rxq->rx_ring;
2421 sw_ring = rxq->sw_ring;
2422 rx_id = rxq->next_to_use;
2423
2424 while (nb_rx < nb_pkts) {
2425 rxdp = &rx_ring[rx_id];
2426 bd_base_info = rte_le_to_cpu_32(rxdp->rx.bd_base_info);
2427 if (unlikely(!(bd_base_info & BIT(HNS3_RXD_VLD_B))))
2428 break;
2429
2430 rxd = rxdp[(bd_base_info & (1u << HNS3_RXD_VLD_B)) -
2431 (1u << HNS3_RXD_VLD_B)];
2432
2433 nmb = hns3_rx_alloc_buffer(rxq);
2434 if (unlikely(nmb == NULL)) {
2435 uint16_t port_id;
2436
2437 port_id = rxq->port_id;
2438 rte_eth_devices[port_id].data->rx_mbuf_alloc_failed++;
2439 break;
2440 }
2441
2442 nb_rx_bd++;
2443 rxe = &sw_ring[rx_id];
2444 rx_id++;
2445 if (unlikely(rx_id == rxq->nb_rx_desc))
2446 rx_id = 0;
2447
2448 rte_prefetch0(sw_ring[rx_id].mbuf);
2449 if ((rx_id & HNS3_RX_RING_PREFETCTH_MASK) == 0) {
2450 rte_prefetch0(&rx_ring[rx_id]);
2451 rte_prefetch0(&sw_ring[rx_id]);
2452 }
2453
2454 rxm = rxe->mbuf;
2455 rxm->ol_flags = 0;
2456 rxe->mbuf = nmb;
2457
2458 if (unlikely(bd_base_info & BIT(HNS3_RXD_TS_VLD_B)))
2459 hns3_rx_ptp_timestamp_handle(rxq, rxm, rxdp);
2460
2461 dma_addr = rte_mbuf_data_iova_default(nmb);
2462 rxdp->addr = rte_cpu_to_le_64(dma_addr);
2463 rxdp->rx.bd_base_info = 0;
2464
2465 rxm->data_off = RTE_PKTMBUF_HEADROOM;
2466 rxm->pkt_len = (uint16_t)(rte_le_to_cpu_16(rxd.rx.pkt_len)) -
2467 rxq->crc_len;
2468 rxm->data_len = rxm->pkt_len;
2469 rxm->port = rxq->port_id;
2470 rxm->hash.rss = rte_le_to_cpu_32(rxd.rx.rss_hash);
2471 rxm->ol_flags |= PKT_RX_RSS_HASH;
2472 if (unlikely(bd_base_info & BIT(HNS3_RXD_LUM_B))) {
2473 rxm->hash.fdir.hi =
2474 rte_le_to_cpu_16(rxd.rx.fd_id);
2475 rxm->ol_flags |= PKT_RX_FDIR | PKT_RX_FDIR_ID;
2476 }
2477 rxm->nb_segs = 1;
2478 rxm->next = NULL;
2479
2480
2481 l234_info = rte_le_to_cpu_32(rxd.rx.l234_info);
2482 ol_info = rte_le_to_cpu_32(rxd.rx.ol_info);
2483 ret = hns3_handle_bdinfo(rxq, rxm, bd_base_info, l234_info);
2484 if (unlikely(ret))
2485 goto pkt_err;
2486
2487 rxm->packet_type = hns3_rx_calc_ptype(rxq, l234_info, ol_info);
2488
2489 if (rxm->packet_type == RTE_PTYPE_L2_ETHER_TIMESYNC)
2490 rxm->ol_flags |= PKT_RX_IEEE1588_PTP;
2491
2492 hns3_rxd_to_vlan_tci(rxq, rxm, l234_info, &rxd);
2493
2494
2495 rxq->basic_stats.bytes += rxm->pkt_len;
2496
2497 rx_pkts[nb_rx++] = rxm;
2498 continue;
2499pkt_err:
2500 rte_pktmbuf_free(rxm);
2501 }
2502
2503 rxq->next_to_use = rx_id;
2504 rxq->rx_free_hold += nb_rx_bd;
2505 if (rxq->rx_free_hold > rxq->rx_free_thresh) {
2506 hns3_write_reg_opt(rxq->io_head_reg, rxq->rx_free_hold);
2507 rxq->rx_free_hold = 0;
2508 }
2509
2510 return nb_rx;
2511}
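
/*
 * Example: the receive functions in this file are installed as
 * dev->rx_pkt_burst and are reached through the generic ethdev API. A
 * minimal application side polling loop (port_id/queue_id and the burst
 * size of 32 are assumptions):
 *
 *	struct rte_mbuf *pkts[32];
 *	uint16_t i, nb;
 *
 *	nb = rte_eth_rx_burst(port_id, queue_id, pkts, 32);
 *	for (i = 0; i < nb; i++) {
 *		// ... process pkts[i], e.g. inspect ol_flags/packet_type ...
 *		rte_pktmbuf_free(pkts[i]);
 *	}
 */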
2512
2513uint16_t
2514hns3_recv_scattered_pkts(void *rx_queue,
2515 struct rte_mbuf **rx_pkts,
2516 uint16_t nb_pkts)
2517{
2518 volatile struct hns3_desc *rx_ring;
2519 volatile struct hns3_desc *rxdp;
2520 struct hns3_rx_queue *rxq;
2521 struct hns3_entry *sw_ring;
2522 struct hns3_entry *rxe;
2523 struct rte_mbuf *first_seg;
2524 struct rte_mbuf *last_seg;
2525 struct hns3_desc rxd;
2526 struct rte_mbuf *nmb;
2527 struct rte_mbuf *rxm;
2528 struct rte_eth_dev *dev;
2529 uint32_t bd_base_info;
2530 uint32_t l234_info;
2531 uint32_t gro_size;
2532 uint32_t ol_info;
2533 uint64_t dma_addr;
2534 uint16_t nb_rx_bd;
2535 uint16_t nb_rx;
2536 uint16_t rx_id;
2537 int ret;
2538
2539 nb_rx = 0;
2540 nb_rx_bd = 0;
2541 rxq = rx_queue;
2542
2543 rx_id = rxq->next_to_use;
2544 rx_ring = rxq->rx_ring;
2545 sw_ring = rxq->sw_ring;
2546 first_seg = rxq->pkt_first_seg;
2547 last_seg = rxq->pkt_last_seg;
2548
2549 while (nb_rx < nb_pkts) {
2550 rxdp = &rx_ring[rx_id];
2551 bd_base_info = rte_le_to_cpu_32(rxdp->rx.bd_base_info);
2552 if (unlikely(!(bd_base_info & BIT(HNS3_RXD_VLD_B))))
2553 break;

		/*
		 * The interaction between hardware and driver for a new Rx
		 * packet is: hardware first writes the packet data to the
		 * buffer referenced by the BD, then fills the other BD
		 * fields and finally sets the valid (VLD) bit. The driver
		 * therefore checks the VLD bit first and only then reads the
		 * remaining fields and the packet data.
		 *
		 * On weakly ordered CPUs the loads of the other BD fields
		 * could be speculated before the VLD check. To prevent that
		 * without a costly barrier, the descriptor copy below folds
		 * the VLD bit into the array index: the expression always
		 * evaluates to rxdp[0], but it creates a data dependency on
		 * bd_base_info, so the descriptor contents cannot be loaded
		 * before the VLD bit has been observed as set.
		 */
2614 rxd = rxdp[(bd_base_info & (1u << HNS3_RXD_VLD_B)) -
2615 (1u << HNS3_RXD_VLD_B)];
2616
2617 nmb = hns3_rx_alloc_buffer(rxq);
2618 if (unlikely(nmb == NULL)) {
2619 dev = &rte_eth_devices[rxq->port_id];
2620 dev->data->rx_mbuf_alloc_failed++;
2621 break;
2622 }
2623
2624 nb_rx_bd++;
2625 rxe = &sw_ring[rx_id];
2626 rx_id++;
2627 if (unlikely(rx_id == rxq->nb_rx_desc))
2628 rx_id = 0;
2629
2630 rte_prefetch0(sw_ring[rx_id].mbuf);
2631 if ((rx_id & HNS3_RX_RING_PREFETCTH_MASK) == 0) {
2632 rte_prefetch0(&rx_ring[rx_id]);
2633 rte_prefetch0(&sw_ring[rx_id]);
2634 }
2635
2636 rxm = rxe->mbuf;
2637 rxe->mbuf = nmb;
2638
2639 dma_addr = rte_cpu_to_le_64(rte_mbuf_data_iova_default(nmb));
2640 rxdp->rx.bd_base_info = 0;
2641 rxdp->addr = dma_addr;
2642
2643 if (first_seg == NULL) {
2644 first_seg = rxm;
2645 first_seg->nb_segs = 1;
2646 } else {
2647 first_seg->nb_segs++;
2648 last_seg->next = rxm;
2649 }
2650
2651 rxm->data_off = RTE_PKTMBUF_HEADROOM;
2652 rxm->data_len = rte_le_to_cpu_16(rxd.rx.size);
2653
2654 if (!(bd_base_info & BIT(HNS3_RXD_FE_B))) {
2655 last_seg = rxm;
2656 rxm->next = NULL;
2657 continue;
2658 }
2659
2660 if (unlikely(bd_base_info & BIT(HNS3_RXD_TS_VLD_B)))
2661 hns3_rx_ptp_timestamp_handle(rxq, first_seg, rxdp);
2662
		/*
		 * This BD has the FE (frame end) bit set, so 'rxm' is the
		 * last buffer of the packet and rxd.rx.pkt_len holds the
		 * total length of the frame as received by hardware.
		 */
		first_seg->pkt_len = rte_le_to_cpu_16(rxd.rx.pkt_len);

		rxm->next = NULL;
		/*
		 * When the hardware does not strip the Ethernet CRC
		 * (rxq->crc_len > 0), pkt_len still includes the CRC bytes
		 * and the CRC may be split across the last two buffers.
		 * Remove it from pkt_len here and let recalculate_data_len()
		 * trim it from the tail, freeing the last segment if it
		 * carried nothing but CRC bytes.
		 */
2681 if (unlikely(rxq->crc_len > 0)) {
2682 first_seg->pkt_len -= rxq->crc_len;
2683 recalculate_data_len(first_seg, last_seg, rxm, rxq,
2684 rxm->data_len);
2685 }
2686
2687 first_seg->port = rxq->port_id;
2688 first_seg->hash.rss = rte_le_to_cpu_32(rxd.rx.rss_hash);
2689 first_seg->ol_flags = PKT_RX_RSS_HASH;
2690 if (unlikely(bd_base_info & BIT(HNS3_RXD_LUM_B))) {
2691 first_seg->hash.fdir.hi =
2692 rte_le_to_cpu_16(rxd.rx.fd_id);
2693 first_seg->ol_flags |= PKT_RX_FDIR | PKT_RX_FDIR_ID;
2694 }
2695
2696 gro_size = hns3_get_field(bd_base_info, HNS3_RXD_GRO_SIZE_M,
2697 HNS3_RXD_GRO_SIZE_S);
2698 if (gro_size != 0) {
2699 first_seg->ol_flags |= PKT_RX_LRO;
2700 first_seg->tso_segsz = gro_size;
2701 }
2702
2703 l234_info = rte_le_to_cpu_32(rxd.rx.l234_info);
2704 ol_info = rte_le_to_cpu_32(rxd.rx.ol_info);
2705 ret = hns3_handle_bdinfo(rxq, first_seg, bd_base_info,
2706 l234_info);
2707 if (unlikely(ret))
2708 goto pkt_err;
2709
2710 first_seg->packet_type = hns3_rx_calc_ptype(rxq,
2711 l234_info, ol_info);
2712
2713 if (first_seg->packet_type == RTE_PTYPE_L2_ETHER_TIMESYNC)
2714 rxm->ol_flags |= PKT_RX_IEEE1588_PTP;
2715
2716 hns3_rxd_to_vlan_tci(rxq, first_seg, l234_info, &rxd);
2717
2718
2719 rxq->basic_stats.bytes += first_seg->pkt_len;
2720
2721 rx_pkts[nb_rx++] = first_seg;
2722 first_seg = NULL;
2723 continue;
2724pkt_err:
2725 rte_pktmbuf_free(first_seg);
2726 first_seg = NULL;
2727 }
2728
2729 rxq->next_to_use = rx_id;
2730 rxq->pkt_first_seg = first_seg;
2731 rxq->pkt_last_seg = last_seg;
2732
2733 rxq->rx_free_hold += nb_rx_bd;
2734 if (rxq->rx_free_hold > rxq->rx_free_thresh) {
2735 hns3_write_reg_opt(rxq->io_head_reg, rxq->rx_free_hold);
2736 rxq->rx_free_hold = 0;
2737 }
2738
2739 return nb_rx;
2740}
2741
2742void __rte_weak
2743hns3_rxq_vec_setup(__rte_unused struct hns3_rx_queue *rxq)
2744{
2745}
2746
2747int __rte_weak
2748hns3_rx_check_vec_support(__rte_unused struct rte_eth_dev *dev)
2749{
2750 return -ENOTSUP;
2751}
2752
2753uint16_t __rte_weak
hns3_recv_pkts_vec(__rte_unused void *rx_queue,
2755 __rte_unused struct rte_mbuf **rx_pkts,
2756 __rte_unused uint16_t nb_pkts)
2757{
2758 return 0;
2759}
2760
2761uint16_t __rte_weak
hns3_recv_pkts_vec_sve(__rte_unused void *rx_queue,
2763 __rte_unused struct rte_mbuf **rx_pkts,
2764 __rte_unused uint16_t nb_pkts)
2765{
2766 return 0;
2767}
2768
2769int
2770hns3_rx_burst_mode_get(struct rte_eth_dev *dev, __rte_unused uint16_t queue_id,
2771 struct rte_eth_burst_mode *mode)
2772{
2773 static const struct {
2774 eth_rx_burst_t pkt_burst;
2775 const char *info;
2776 } burst_infos[] = {
2777 { hns3_recv_pkts_simple, "Scalar Simple" },
2778 { hns3_recv_scattered_pkts, "Scalar Scattered" },
2779 { hns3_recv_pkts_vec, "Vector Neon" },
2780 { hns3_recv_pkts_vec_sve, "Vector Sve" },
2781 };
2782
2783 eth_rx_burst_t pkt_burst = dev->rx_pkt_burst;
2784 int ret = -EINVAL;
2785 unsigned int i;
2786
2787 for (i = 0; i < RTE_DIM(burst_infos); i++) {
2788 if (pkt_burst == burst_infos[i].pkt_burst) {
2789 snprintf(mode->info, sizeof(mode->info), "%s",
2790 burst_infos[i].info);
2791 ret = 0;
2792 break;
2793 }
2794 }
2795
2796 return ret;
2797}
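
/*
 * Example: the burst mode string filled in above can be queried from an
 * application as follows ('port_id' is an assumption):
 *
 *	struct rte_eth_burst_mode mode;
 *
 *	if (rte_eth_rx_burst_mode_get(port_id, 0, &mode) == 0)
 *		printf("Rx burst mode: %s\n", mode.info);
 */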
2798
2799static bool
2800hns3_get_default_vec_support(void)
2801{
2802#if defined(RTE_ARCH_ARM64)
2803 if (rte_vect_get_max_simd_bitwidth() < RTE_VECT_SIMD_128)
2804 return false;
2805 if (rte_cpu_get_flag_enabled(RTE_CPUFLAG_NEON))
2806 return true;
2807#endif
2808 return false;
2809}
2810
2811static bool
2812hns3_get_sve_support(void)
2813{
2814#if defined(RTE_HAS_SVE_ACLE)
2815 if (rte_vect_get_max_simd_bitwidth() < RTE_VECT_SIMD_256)
2816 return false;
2817 if (rte_cpu_get_flag_enabled(RTE_CPUFLAG_SVE))
2818 return true;
2819#endif
2820 return false;
2821}
2822
2823static eth_rx_burst_t
2824hns3_get_rx_function(struct rte_eth_dev *dev)
2825{
2826 struct hns3_adapter *hns = dev->data->dev_private;
2827 uint64_t offloads = dev->data->dev_conf.rxmode.offloads;
2828 bool vec_allowed, sve_allowed, simple_allowed;
2829 bool vec_support;
2830
2831 vec_support = hns3_rx_check_vec_support(dev) == 0;
2832 vec_allowed = vec_support && hns3_get_default_vec_support();
2833 sve_allowed = vec_support && hns3_get_sve_support();
2834 simple_allowed = !dev->data->scattered_rx &&
2835 (offloads & DEV_RX_OFFLOAD_TCP_LRO) == 0;
2836
2837 if (hns->rx_func_hint == HNS3_IO_FUNC_HINT_VEC && vec_allowed)
2838 return hns3_recv_pkts_vec;
2839 if (hns->rx_func_hint == HNS3_IO_FUNC_HINT_SVE && sve_allowed)
2840 return hns3_recv_pkts_vec_sve;
2841 if (hns->rx_func_hint == HNS3_IO_FUNC_HINT_SIMPLE && simple_allowed)
2842 return hns3_recv_pkts_simple;
2843 if (hns->rx_func_hint == HNS3_IO_FUNC_HINT_COMMON)
2844 return hns3_recv_scattered_pkts;
2845
2846 if (vec_allowed)
2847 return hns3_recv_pkts_vec;
2848 if (simple_allowed)
2849 return hns3_recv_pkts_simple;
2850
2851 return hns3_recv_scattered_pkts;
2852}
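
/*
 * Example: the selection above can be steered with the driver's
 * rx_func_hint/tx_func_hint devargs, which are parsed elsewhere in this
 * driver into the HNS3_IO_FUNC_HINT_* values. The exact spelling of the
 * values below is an assumption based on those hint names, and the PCI
 * address is a placeholder:
 *
 *	dpdk-testpmd -a 0000:7d:00.0,rx_func_hint=simple,tx_func_hint=common -- -i
 */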
2853
2854static int
2855hns3_tx_queue_conf_check(struct hns3_hw *hw, const struct rte_eth_txconf *conf,
2856 uint16_t nb_desc, uint16_t *tx_rs_thresh,
2857 uint16_t *tx_free_thresh, uint16_t idx)
2858{
2859#define HNS3_TX_RS_FREE_THRESH_GAP 8
2860 uint16_t rs_thresh, free_thresh, fast_free_thresh;
2861
2862 if (nb_desc > HNS3_MAX_RING_DESC || nb_desc < HNS3_MIN_RING_DESC ||
2863 nb_desc % HNS3_ALIGN_RING_DESC) {
2864 hns3_err(hw, "number (%u) of tx descriptors is invalid",
2865 nb_desc);
2866 return -EINVAL;
2867 }
2868
2869 rs_thresh = (conf->tx_rs_thresh > 0) ?
2870 conf->tx_rs_thresh : HNS3_DEFAULT_TX_RS_THRESH;
2871 free_thresh = (conf->tx_free_thresh > 0) ?
2872 conf->tx_free_thresh : HNS3_DEFAULT_TX_FREE_THRESH;
2873 if (rs_thresh + free_thresh > nb_desc || nb_desc % rs_thresh ||
2874 rs_thresh >= nb_desc - HNS3_TX_RS_FREE_THRESH_GAP ||
2875 free_thresh >= nb_desc - HNS3_TX_RS_FREE_THRESH_GAP) {
2876 hns3_err(hw, "tx_rs_thresh (%u) tx_free_thresh (%u) nb_desc "
2877 "(%u) of tx descriptors for port=%u queue=%u check "
2878 "fail!",
2879 rs_thresh, free_thresh, nb_desc, hw->data->port_id,
2880 idx);
2881 return -EINVAL;
2882 }
2883
	if (conf->tx_free_thresh == 0) {
		/*
		 * tx_free_thresh was not specified by the user: derive it
		 * from the ring size so that the fast-free Tx path keeps
		 * HNS3_TX_FAST_FREE_AHEAD descriptors of headroom below
		 * nb_desc - tx_rs_thresh.
		 */
		fast_free_thresh = nb_desc - rs_thresh;
2887 if (fast_free_thresh >=
2888 HNS3_TX_FAST_FREE_AHEAD + HNS3_DEFAULT_TX_FREE_THRESH)
2889 free_thresh = fast_free_thresh -
2890 HNS3_TX_FAST_FREE_AHEAD;
2891 }
2892
2893 *tx_rs_thresh = rs_thresh;
2894 *tx_free_thresh = free_thresh;
2895 return 0;
2896}
2897
2898static void *
2899hns3_tx_push_get_queue_tail_reg(struct rte_eth_dev *dev, uint16_t queue_id)
2900{
2901#define HNS3_TX_PUSH_TQP_REGION_SIZE 0x10000
2902#define HNS3_TX_PUSH_QUICK_DOORBELL_OFFSET 64
2903#define HNS3_TX_PUSH_PCI_BAR_INDEX 4
2904
2905 struct rte_pci_device *pci_dev = RTE_DEV_TO_PCI(dev->device);
2906 uint8_t bar_id = HNS3_TX_PUSH_PCI_BAR_INDEX;
2907
	/*
	 * The TX push (quick doorbell) registers are mapped in the second
	 * half of PCI BAR 4: each TQP owns a HNS3_TX_PUSH_TQP_REGION_SIZE
	 * sized window there, and the quick doorbell register sits at
	 * HNS3_TX_PUSH_QUICK_DOORBELL_OFFSET within that window.
	 */
2917 return (char *)pci_dev->mem_resource[bar_id].addr +
2918 (pci_dev->mem_resource[bar_id].len >> 1) +
2919 HNS3_TX_PUSH_TQP_REGION_SIZE * queue_id +
2920 HNS3_TX_PUSH_QUICK_DOORBELL_OFFSET;
2921}
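
/*
 * Example: a worked instance of the address computation above. Assuming
 * BAR4 is 4 MB long, the quick doorbell of queue 3 is located at
 * bar4_base + 2 MB (upper half of the BAR) + 3 * 0x10000 (per-TQP region)
 * + 64 (HNS3_TX_PUSH_QUICK_DOORBELL_OFFSET).
 */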
2922
2923void
2924hns3_tx_push_init(struct rte_eth_dev *dev)
2925{
2926 struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2927 volatile uint32_t *reg;
2928 uint32_t val;
2929
2930 if (!hns3_dev_tx_push_supported(hw))
2931 return;
2932
2933 reg = (volatile uint32_t *)hns3_tx_push_get_queue_tail_reg(dev, 0);

	/*
	 * Read the queue 0 quick doorbell register once at initialization so
	 * that the TX push region of the BAR is touched and verified to be
	 * accessible here rather than for the first time in the hot transmit
	 * path. The value read back is not used.
	 */
	val = *reg;
	RTE_SET_USED(val);
2943}
2944
2945static void
2946hns3_tx_push_queue_init(struct rte_eth_dev *dev,
2947 uint16_t queue_id,
2948 struct hns3_tx_queue *txq)
2949{
2950 struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2951 if (!hns3_dev_tx_push_supported(hw)) {
2952 txq->tx_push_enable = false;
2953 return;
2954 }
2955
2956 txq->io_tail_reg = (volatile void *)hns3_tx_push_get_queue_tail_reg(dev,
2957 queue_id);
2958 txq->tx_push_enable = true;
2959}
2960
2961int
2962hns3_tx_queue_setup(struct rte_eth_dev *dev, uint16_t idx, uint16_t nb_desc,
2963 unsigned int socket_id, const struct rte_eth_txconf *conf)
2964{
2965 struct hns3_adapter *hns = dev->data->dev_private;
2966 uint16_t tx_rs_thresh, tx_free_thresh;
2967 struct hns3_hw *hw = &hns->hw;
2968 struct hns3_queue_info q_info;
2969 struct hns3_tx_queue *txq;
2970 int tx_entry_len;
2971 int ret;
2972
2973 ret = hns3_tx_queue_conf_check(hw, conf, nb_desc,
2974 &tx_rs_thresh, &tx_free_thresh, idx);
2975 if (ret)
2976 return ret;
2977
2978 if (dev->data->tx_queues[idx] != NULL) {
2979 hns3_tx_queue_release(dev->data->tx_queues[idx]);
2980 dev->data->tx_queues[idx] = NULL;
2981 }
2982
2983 q_info.idx = idx;
2984 q_info.socket_id = socket_id;
2985 q_info.nb_desc = nb_desc;
2986 q_info.type = "hns3 TX queue";
2987 q_info.ring_name = "tx_ring";
2988 txq = hns3_alloc_txq_and_dma_zone(dev, &q_info);
2989 if (txq == NULL) {
2990 hns3_err(hw,
2991 "Failed to alloc mem and reserve DMA mem for tx ring!");
2992 return -ENOMEM;
2993 }
2994
2995 txq->tx_deferred_start = conf->tx_deferred_start;
2996 if (txq->tx_deferred_start && !hns3_dev_indep_txrx_supported(hw)) {
2997 hns3_warn(hw, "deferred start is not supported.");
2998 txq->tx_deferred_start = false;
2999 }
3000
3001 tx_entry_len = sizeof(struct hns3_entry) * txq->nb_tx_desc;
3002 txq->sw_ring = rte_zmalloc_socket("hns3 TX sw ring", tx_entry_len,
3003 RTE_CACHE_LINE_SIZE, socket_id);
3004 if (txq->sw_ring == NULL) {
3005 hns3_err(hw, "Failed to allocate memory for tx sw ring!");
3006 hns3_tx_queue_release(txq);
3007 return -ENOMEM;
3008 }
3009
3010 txq->hns = hns;
3011 txq->next_to_use = 0;
3012 txq->next_to_clean = 0;
3013 txq->tx_bd_ready = txq->nb_tx_desc - 1;
3014 txq->tx_free_thresh = tx_free_thresh;
3015 txq->tx_rs_thresh = tx_rs_thresh;
3016 txq->free = rte_zmalloc_socket("hns3 TX mbuf free array",
3017 sizeof(struct rte_mbuf *) * txq->tx_rs_thresh,
3018 RTE_CACHE_LINE_SIZE, socket_id);
3019 if (!txq->free) {
3020 hns3_err(hw, "failed to allocate tx mbuf free array!");
3021 hns3_tx_queue_release(txq);
3022 return -ENOMEM;
3023 }
3024
3025 txq->port_id = dev->data->port_id;

	/*
	 * Whether the driver has to shift VLAN tags in software to make room
	 * for the PVID depends on the device: for VF devices it follows the
	 * port based VLAN state delivered by the PF (port_base_vlan_cfg),
	 * while for PF devices it is only needed in
	 * HNS3_SW_SHIFT_AND_DISCARD_MODE.
	 */
3035 if (hns->is_vf || hw->vlan_mode == HNS3_SW_SHIFT_AND_DISCARD_MODE)
3036 txq->pvid_sw_shift_en = hw->port_base_vlan_cfg.state ==
3037 HNS3_PORT_BASE_VLAN_ENABLE;
3038 else
3039 txq->pvid_sw_shift_en = false;
3040 txq->max_non_tso_bd_num = hw->max_non_tso_bd_num;
3041 txq->configured = true;
3042 txq->io_base = (void *)((char *)hw->io_base +
3043 hns3_get_tqp_reg_offset(idx));
3044 txq->io_tail_reg = (volatile void *)((char *)txq->io_base +
3045 HNS3_RING_TX_TAIL_REG);
3046 txq->min_tx_pkt_len = hw->min_tx_pkt_len;
3047 txq->tso_mode = hw->tso_mode;
3048 txq->udp_cksum_mode = hw->udp_cksum_mode;
3049 memset(&txq->basic_stats, 0, sizeof(struct hns3_tx_basic_stats));
3050 memset(&txq->dfx_stats, 0, sizeof(struct hns3_tx_dfx_stats));
3051
	/* Set up the TX push quick doorbell for this queue when supported. */
3056 hns3_tx_push_queue_init(dev, idx, txq);
3057
3058 rte_spinlock_lock(&hw->lock);
3059 dev->data->tx_queues[idx] = txq;
3060 rte_spinlock_unlock(&hw->lock);
3061
3062 return 0;
3063}
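
/*
 * Example: this function implements the .tx_queue_setup ethdev op. A
 * minimal application side call, assuming 1024 descriptors (which satisfies
 * the min/max/alignment checks above) and the driver's default Tx
 * configuration:
 *
 *	struct rte_eth_dev_info info;
 *	int ret;
 *
 *	ret = rte_eth_dev_info_get(port_id, &info);
 *	if (ret == 0)
 *		ret = rte_eth_tx_queue_setup(port_id, 0, 1024,
 *					     rte_eth_dev_socket_id(port_id),
 *					     &info.default_txconf);
 */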
3064
3065static void
3066hns3_tx_free_useless_buffer(struct hns3_tx_queue *txq)
3067{
3068 uint16_t tx_next_clean = txq->next_to_clean;
3069 uint16_t tx_next_use = txq->next_to_use;
3070 uint16_t tx_bd_ready = txq->tx_bd_ready;
3071 uint16_t tx_bd_max = txq->nb_tx_desc;
3072 struct hns3_entry *tx_bak_pkt = &txq->sw_ring[tx_next_clean];
3073 struct hns3_desc *desc = &txq->tx_ring[tx_next_clean];
3074 struct rte_mbuf *mbuf;
3075
3076 while ((!(desc->tx.tp_fe_sc_vld_ra_ri &
3077 rte_cpu_to_le_16(BIT(HNS3_TXD_VLD_B)))) &&
3078 tx_next_use != tx_next_clean) {
3079 mbuf = tx_bak_pkt->mbuf;
3080 if (mbuf) {
3081 rte_pktmbuf_free_seg(mbuf);
3082 tx_bak_pkt->mbuf = NULL;
3083 }
3084
3085 desc++;
3086 tx_bak_pkt++;
3087 tx_next_clean++;
3088 tx_bd_ready++;
3089
3090 if (tx_next_clean >= tx_bd_max) {
3091 tx_next_clean = 0;
3092 desc = txq->tx_ring;
3093 tx_bak_pkt = txq->sw_ring;
3094 }
3095 }
3096
3097 txq->next_to_clean = tx_next_clean;
3098 txq->tx_bd_ready = tx_bd_ready;
3099}
3100
3101int
3102hns3_config_gro(struct hns3_hw *hw, bool en)
3103{
3104 struct hns3_cfg_gro_status_cmd *req;
3105 struct hns3_cmd_desc desc;
3106 int ret;
3107
3108 hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_GRO_GENERIC_CONFIG, false);
3109 req = (struct hns3_cfg_gro_status_cmd *)desc.data;
3110
3111 req->gro_en = rte_cpu_to_le_16(en ? 1 : 0);
3112
3113 ret = hns3_cmd_send(hw, &desc, 1);
3114 if (ret)
3115 hns3_err(hw, "%s hardware GRO failed, ret = %d",
3116 en ? "enable" : "disable", ret);
3117
3118 return ret;
3119}
3120
3121int
3122hns3_restore_gro_conf(struct hns3_hw *hw)
3123{
3124 uint64_t offloads;
3125 bool gro_en;
3126 int ret;
3127
3128 offloads = hw->data->dev_conf.rxmode.offloads;
3129 gro_en = offloads & DEV_RX_OFFLOAD_TCP_LRO ? true : false;
3130 ret = hns3_config_gro(hw, gro_en);
3131 if (ret)
3132 hns3_err(hw, "restore hardware GRO to %s failed, ret = %d",
3133 gro_en ? "enabled" : "disabled", ret);
3134
3135 return ret;
3136}
3137
3138static inline bool
3139hns3_pkt_is_tso(struct rte_mbuf *m)
3140{
3141 return (m->tso_segsz != 0 && m->ol_flags & PKT_TX_TCP_SEG);
3142}
3143
3144static void
3145hns3_set_tso(struct hns3_desc *desc, uint32_t paylen, struct rte_mbuf *rxm)
3146{
3147 if (!hns3_pkt_is_tso(rxm))
3148 return;
3149
3150 if (paylen <= rxm->tso_segsz)
3151 return;
3152
3153 desc->tx.type_cs_vlan_tso_len |= rte_cpu_to_le_32(BIT(HNS3_TXD_TSO_B));
3154 desc->tx.mss = rte_cpu_to_le_16(rxm->tso_segsz);
3155}
3156
3157static inline void
3158hns3_fill_per_desc(struct hns3_desc *desc, struct rte_mbuf *rxm)
3159{
3160 desc->addr = rte_mbuf_data_iova(rxm);
3161 desc->tx.send_size = rte_cpu_to_le_16(rte_pktmbuf_data_len(rxm));
3162 desc->tx.tp_fe_sc_vld_ra_ri |= rte_cpu_to_le_16(BIT(HNS3_TXD_VLD_B));
3163}
3164
3165static void
3166hns3_fill_first_desc(struct hns3_tx_queue *txq, struct hns3_desc *desc,
3167 struct rte_mbuf *rxm)
3168{
3169 uint64_t ol_flags = rxm->ol_flags;
3170 uint32_t hdr_len;
3171 uint32_t paylen;
3172
3173 hdr_len = rxm->l2_len + rxm->l3_len + rxm->l4_len;
3174 hdr_len += (ol_flags & PKT_TX_TUNNEL_MASK) ?
3175 rxm->outer_l2_len + rxm->outer_l3_len : 0;
3176 paylen = rxm->pkt_len - hdr_len;
3177 desc->tx.paylen_fd_dop_ol4cs |= rte_cpu_to_le_32(paylen);
3178 hns3_set_tso(desc, paylen, rxm);

	/*
	 * The hardware can insert at most two VLAN tags per packet. When the
	 * PVID is not handled by the software shift path, a tag requested via
	 * PKT_TX_VLAN_PKT/PKT_TX_QINQ_PKT is written to the outer VLAN field
	 * of the descriptor; when the PVID occupies the outer position
	 * (pvid_sw_shift_en), or for the inner tag of a QinQ request, the
	 * inner VLAN field is used instead.
	 */
3192 if (!txq->pvid_sw_shift_en && ol_flags & (PKT_TX_VLAN_PKT |
3193 PKT_TX_QINQ_PKT)) {
3194 desc->tx.ol_type_vlan_len_msec |=
3195 rte_cpu_to_le_32(BIT(HNS3_TXD_OVLAN_B));
3196 if (ol_flags & PKT_TX_QINQ_PKT)
3197 desc->tx.outer_vlan_tag =
3198 rte_cpu_to_le_16(rxm->vlan_tci_outer);
3199 else
3200 desc->tx.outer_vlan_tag =
3201 rte_cpu_to_le_16(rxm->vlan_tci);
3202 }
3203
3204 if (ol_flags & PKT_TX_QINQ_PKT ||
3205 ((ol_flags & PKT_TX_VLAN_PKT) && txq->pvid_sw_shift_en)) {
3206 desc->tx.type_cs_vlan_tso_len |=
3207 rte_cpu_to_le_32(BIT(HNS3_TXD_VLAN_B));
3208 desc->tx.vlan_tag = rte_cpu_to_le_16(rxm->vlan_tci);
3209 }
3210
3211 if (ol_flags & PKT_TX_IEEE1588_TMST)
3212 desc->tx.tp_fe_sc_vld_ra_ri |=
3213 rte_cpu_to_le_16(BIT(HNS3_TXD_TSYN_B));
3214}
3215
3216static inline int
3217hns3_tx_alloc_mbufs(struct rte_mempool *mb_pool, uint16_t nb_new_buf,
3218 struct rte_mbuf **alloc_mbuf)
3219{
3220#define MAX_NON_TSO_BD_PER_PKT 18
3221 struct rte_mbuf *pkt_segs[MAX_NON_TSO_BD_PER_PKT];
3222 uint16_t i;
3223
3224
3225 if (rte_mempool_get_bulk(mb_pool, (void **)pkt_segs, nb_new_buf))
3226 return -ENOMEM;
3227
3228 for (i = 0; i < nb_new_buf - 1; i++)
3229 pkt_segs[i]->next = pkt_segs[i + 1];
3230
3231 pkt_segs[nb_new_buf - 1]->next = NULL;
3232 pkt_segs[0]->nb_segs = nb_new_buf;
3233 *alloc_mbuf = pkt_segs[0];
3234
3235 return 0;
3236}
3237
3238static inline void
3239hns3_pktmbuf_copy_hdr(struct rte_mbuf *new_pkt, struct rte_mbuf *old_pkt)
3240{
3241 new_pkt->ol_flags = old_pkt->ol_flags;
3242 new_pkt->pkt_len = rte_pktmbuf_pkt_len(old_pkt);
3243 new_pkt->outer_l2_len = old_pkt->outer_l2_len;
3244 new_pkt->outer_l3_len = old_pkt->outer_l3_len;
3245 new_pkt->l2_len = old_pkt->l2_len;
3246 new_pkt->l3_len = old_pkt->l3_len;
3247 new_pkt->l4_len = old_pkt->l4_len;
3248 new_pkt->vlan_tci_outer = old_pkt->vlan_tci_outer;
3249 new_pkt->vlan_tci = old_pkt->vlan_tci;
3250}
3251
3252static int
3253hns3_reassemble_tx_pkts(struct rte_mbuf *tx_pkt, struct rte_mbuf **new_pkt,
3254 uint8_t max_non_tso_bd_num)
3255{
3256 struct rte_mempool *mb_pool;
3257 struct rte_mbuf *new_mbuf;
3258 struct rte_mbuf *temp_new;
3259 struct rte_mbuf *temp;
3260 uint16_t last_buf_len;
3261 uint16_t nb_new_buf;
3262 uint16_t buf_size;
3263 uint16_t buf_len;
3264 uint16_t len_s;
3265 uint16_t len_d;
3266 uint16_t len;
3267 int ret;
3268 char *s;
3269 char *d;
3270
3271 mb_pool = tx_pkt->pool;
3272 buf_size = tx_pkt->buf_len - RTE_PKTMBUF_HEADROOM;
3273 nb_new_buf = (rte_pktmbuf_pkt_len(tx_pkt) - 1) / buf_size + 1;
3274 if (nb_new_buf > max_non_tso_bd_num)
3275 return -EINVAL;
3276
3277 last_buf_len = rte_pktmbuf_pkt_len(tx_pkt) % buf_size;
3278 if (last_buf_len == 0)
3279 last_buf_len = buf_size;
3280
3281
3282 ret = hns3_tx_alloc_mbufs(mb_pool, nb_new_buf, &new_mbuf);
3283 if (ret)
3284 return ret;
3285
3286
3287 temp = tx_pkt;
3288 s = rte_pktmbuf_mtod(temp, char *);
3289 len_s = rte_pktmbuf_data_len(temp);
3290 temp_new = new_mbuf;
3291 while (temp != NULL && temp_new != NULL) {
3292 d = rte_pktmbuf_mtod(temp_new, char *);
3293 buf_len = temp_new->next == NULL ? last_buf_len : buf_size;
3294 len_d = buf_len;
3295
3296 while (len_d) {
3297 len = RTE_MIN(len_s, len_d);
3298 memcpy(d, s, len);
3299 s = s + len;
3300 d = d + len;
3301 len_d = len_d - len;
3302 len_s = len_s - len;
3303
3304 if (len_s == 0) {
3305 temp = temp->next;
3306 if (temp == NULL)
3307 break;
3308 s = rte_pktmbuf_mtod(temp, char *);
3309 len_s = rte_pktmbuf_data_len(temp);
3310 }
3311 }
3312
3313 temp_new->data_len = buf_len;
3314 temp_new = temp_new->next;
3315 }
3316 hns3_pktmbuf_copy_hdr(new_mbuf, tx_pkt);
3317
3318
3319 rte_pktmbuf_free(tx_pkt);
3320
3321 *new_pkt = new_mbuf;
3322
3323 return 0;
3324}
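
/*
 * Example: a worked instance of the sizing logic above. With a mbuf data
 * room of 2048 bytes (buf_size) and a 5000 byte packet:
 * nb_new_buf = (5000 - 1) / 2048 + 1 = 3 and last_buf_len = 5000 % 2048 =
 * 904, so the chain is copied into new segments of 2048, 2048 and 904 bytes
 * before being handed to the hardware.
 */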
3325
3326static void
3327hns3_parse_outer_params(struct rte_mbuf *m, uint32_t *ol_type_vlan_len_msec)
3328{
3329 uint32_t tmp = *ol_type_vlan_len_msec;
3330 uint64_t ol_flags = m->ol_flags;
3331
3332
3333 if (ol_flags & PKT_TX_OUTER_IPV4) {
3334 if (ol_flags & PKT_TX_OUTER_IP_CKSUM)
3335 tmp |= hns3_gen_field_val(HNS3_TXD_OL3T_M,
3336 HNS3_TXD_OL3T_S, HNS3_OL3T_IPV4_CSUM);
3337 else
3338 tmp |= hns3_gen_field_val(HNS3_TXD_OL3T_M,
3339 HNS3_TXD_OL3T_S, HNS3_OL3T_IPV4_NO_CSUM);
3340 } else if (ol_flags & PKT_TX_OUTER_IPV6) {
3341 tmp |= hns3_gen_field_val(HNS3_TXD_OL3T_M, HNS3_TXD_OL3T_S,
3342 HNS3_OL3T_IPV6);
3343 }
3344
3345 tmp |= hns3_gen_field_val(HNS3_TXD_L3LEN_M, HNS3_TXD_L3LEN_S,
3346 m->outer_l3_len >> HNS3_L3_LEN_UNIT);
3347 *ol_type_vlan_len_msec = tmp;
3348}
3349
3350static int
3351hns3_parse_inner_params(struct rte_mbuf *m, uint32_t *ol_type_vlan_len_msec,
3352 uint32_t *type_cs_vlan_tso_len)
3353{
3354#define HNS3_NVGRE_HLEN 8
3355 uint32_t tmp_outer = *ol_type_vlan_len_msec;
3356 uint32_t tmp_inner = *type_cs_vlan_tso_len;
3357 uint64_t ol_flags = m->ol_flags;
3358 uint16_t inner_l2_len;
3359
3360 switch (ol_flags & PKT_TX_TUNNEL_MASK) {
3361 case PKT_TX_TUNNEL_VXLAN_GPE:
3362 case PKT_TX_TUNNEL_GENEVE:
3363 case PKT_TX_TUNNEL_VXLAN:
		/* MAC-in-UDP tunnels: VXLAN, VXLAN-GPE and GENEVE. */
3365 tmp_outer |= hns3_gen_field_val(HNS3_TXD_TUNTYPE_M,
3366 HNS3_TXD_TUNTYPE_S, HNS3_TUN_MAC_IN_UDP);

		/*
		 * For these tunnels the mbuf l2_len covers the outer L4
		 * header, the tunnel header and the inner L2 header, while
		 * the Tx descriptor expects the tunnel header to be counted
		 * in the outer L4 length. Report the VXLAN header size as
		 * the outer L4 length and subtract it from l2_len to get the
		 * real inner L2 length.
		 */
3375 tmp_outer |= hns3_gen_field_val(HNS3_TXD_L4LEN_M,
3376 HNS3_TXD_L4LEN_S,
3377 (uint8_t)RTE_ETHER_VXLAN_HLEN >>
3378 HNS3_L4_LEN_UNIT);
3379
3380 inner_l2_len = m->l2_len - RTE_ETHER_VXLAN_HLEN;
3381 break;
3382 case PKT_TX_TUNNEL_GRE:
3383 tmp_outer |= hns3_gen_field_val(HNS3_TXD_TUNTYPE_M,
3384 HNS3_TXD_TUNTYPE_S, HNS3_TUN_NVGRE);

		/*
		 * For NVGRE the 8-byte GRE header is reported as the outer
		 * L4 length and removed from l2_len in the same way.
		 */
3389 tmp_outer |= hns3_gen_field_val(HNS3_TXD_L4LEN_M,
3390 HNS3_TXD_L4LEN_S,
3391 (uint8_t)HNS3_NVGRE_HLEN >> HNS3_L4_LEN_UNIT);
3392
3393 inner_l2_len = m->l2_len - HNS3_NVGRE_HLEN;
3394 break;
3395 default:
		/* Other tunnel types are not supported by the hardware. */
3397 return -EINVAL;
3398 }
3399
3400 tmp_inner |= hns3_gen_field_val(HNS3_TXD_L2LEN_M, HNS3_TXD_L2LEN_S,
3401 inner_l2_len >> HNS3_L2_LEN_UNIT);
3402
3403 tmp_outer |= hns3_gen_field_val(HNS3_TXD_L2LEN_M, HNS3_TXD_L2LEN_S,
3404 m->outer_l2_len >> HNS3_L2_LEN_UNIT);
3405
3406 *type_cs_vlan_tso_len = tmp_inner;
3407 *ol_type_vlan_len_msec = tmp_outer;
3408
3409 return 0;
3410}
3411
3412static int
3413hns3_parse_tunneling_params(struct hns3_tx_queue *txq, struct rte_mbuf *m,
3414 uint16_t tx_desc_id)
3415{
3416 struct hns3_desc *tx_ring = txq->tx_ring;
3417 struct hns3_desc *desc = &tx_ring[tx_desc_id];
3418 uint64_t ol_flags = m->ol_flags;
3419 uint32_t tmp_outer = 0;
3420 uint32_t tmp_inner = 0;
3421 uint32_t tmp_ol4cs;
3422 int ret;
3423
	/*
	 * Only tunnel packets need the outer length/checksum fields of the
	 * Tx descriptor. For non-tunnel packets it is enough to record the
	 * L2 header length here; the L3/L4 checksum enable bits are filled
	 * in later by hns3_txd_enable_checksum().
	 */
3431 if (!(ol_flags & PKT_TX_TUNNEL_MASK)) {
3432
3433
3434
3435
3436
3437 tmp_inner |= hns3_gen_field_val(HNS3_TXD_L2LEN_M,
3438 HNS3_TXD_L2LEN_S, m->l2_len >> HNS3_L2_LEN_UNIT);
3439 } else {
		/*
		 * If no outer checksum offload was requested, the application
		 * may have left outer_l2_len/outer_l3_len unset even though
		 * this is a tunnel packet. Recover the outer header lengths
		 * with rte_net_get_ptype() and fix up l2_len so the
		 * descriptor lengths computed below are correct.
		 */
3446 if (unlikely(!(ol_flags &
3447 (PKT_TX_OUTER_IP_CKSUM | PKT_TX_OUTER_UDP_CKSUM)) &&
3448 m->outer_l2_len == 0)) {
3449 struct rte_net_hdr_lens hdr_len;
3450 (void)rte_net_get_ptype(m, &hdr_len,
3451 RTE_PTYPE_L2_MASK | RTE_PTYPE_L3_MASK);
3452 m->outer_l3_len = hdr_len.l3_len;
3453 m->outer_l2_len = hdr_len.l2_len;
3454 m->l2_len = m->l2_len - hdr_len.l2_len - hdr_len.l3_len;
3455 }
3456 hns3_parse_outer_params(m, &tmp_outer);
3457 ret = hns3_parse_inner_params(m, &tmp_outer, &tmp_inner);
3458 if (ret)
3459 return -EINVAL;
3460 }
3461
3462 desc->tx.ol_type_vlan_len_msec = rte_cpu_to_le_32(tmp_outer);
3463 desc->tx.type_cs_vlan_tso_len = rte_cpu_to_le_32(tmp_inner);
3464 tmp_ol4cs = ol_flags & PKT_TX_OUTER_UDP_CKSUM ?
3465 BIT(HNS3_TXD_OL4CS_B) : 0;
3466 desc->tx.paylen_fd_dop_ol4cs = rte_cpu_to_le_32(tmp_ol4cs);
3467
3468 return 0;
3469}
3470
3471static void
3472hns3_parse_l3_cksum_params(struct rte_mbuf *m, uint32_t *type_cs_vlan_tso_len)
3473{
3474 uint64_t ol_flags = m->ol_flags;
3475 uint32_t l3_type;
3476 uint32_t tmp;
3477
3478 tmp = *type_cs_vlan_tso_len;
3479 if (ol_flags & PKT_TX_IPV4)
3480 l3_type = HNS3_L3T_IPV4;
3481 else if (ol_flags & PKT_TX_IPV6)
3482 l3_type = HNS3_L3T_IPV6;
3483 else
3484 l3_type = HNS3_L3T_NONE;
3485
3486
3487 tmp |= hns3_gen_field_val(HNS3_TXD_L3LEN_M, HNS3_TXD_L3LEN_S,
3488 m->l3_len >> HNS3_L3_LEN_UNIT);
3489
3490 tmp |= hns3_gen_field_val(HNS3_TXD_L3T_M, HNS3_TXD_L3T_S, l3_type);
3491
3492
3493 if (ol_flags & PKT_TX_IP_CKSUM)
3494 tmp |= BIT(HNS3_TXD_L3CS_B);
3495 *type_cs_vlan_tso_len = tmp;
3496}
3497
3498static void
3499hns3_parse_l4_cksum_params(struct rte_mbuf *m, uint32_t *type_cs_vlan_tso_len)
3500{
3501 uint64_t ol_flags = m->ol_flags;
3502 uint32_t tmp;
3503
3504 switch (ol_flags & (PKT_TX_L4_MASK | PKT_TX_TCP_SEG)) {
3505 case PKT_TX_TCP_CKSUM | PKT_TX_TCP_SEG:
3506 case PKT_TX_TCP_CKSUM:
3507 case PKT_TX_TCP_SEG:
3508 tmp = *type_cs_vlan_tso_len;
3509 tmp |= hns3_gen_field_val(HNS3_TXD_L4T_M, HNS3_TXD_L4T_S,
3510 HNS3_L4T_TCP);
3511 break;
3512 case PKT_TX_UDP_CKSUM:
3513 tmp = *type_cs_vlan_tso_len;
3514 tmp |= hns3_gen_field_val(HNS3_TXD_L4T_M, HNS3_TXD_L4T_S,
3515 HNS3_L4T_UDP);
3516 break;
3517 case PKT_TX_SCTP_CKSUM:
3518 tmp = *type_cs_vlan_tso_len;
3519 tmp |= hns3_gen_field_val(HNS3_TXD_L4T_M, HNS3_TXD_L4T_S,
3520 HNS3_L4T_SCTP);
3521 break;
3522 default:
3523 return;
3524 }
3525 tmp |= BIT(HNS3_TXD_L4CS_B);
3526 tmp |= hns3_gen_field_val(HNS3_TXD_L4LEN_M, HNS3_TXD_L4LEN_S,
3527 m->l4_len >> HNS3_L4_LEN_UNIT);
3528 *type_cs_vlan_tso_len = tmp;
3529}
3530
3531static void
3532hns3_txd_enable_checksum(struct hns3_tx_queue *txq, struct rte_mbuf *m,
3533 uint16_t tx_desc_id)
3534{
3535 struct hns3_desc *tx_ring = txq->tx_ring;
3536 struct hns3_desc *desc = &tx_ring[tx_desc_id];
3537 uint32_t value = 0;
3538
3539 hns3_parse_l3_cksum_params(m, &value);
3540 hns3_parse_l4_cksum_params(m, &value);
3541
3542 desc->tx.type_cs_vlan_tso_len |= rte_cpu_to_le_32(value);
3543}
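
/*
 * Example: the checksum bits filled in above are driven purely by the mbuf
 * metadata set by the application. A minimal sketch for a plain IPv4/TCP
 * packet (the port is assumed to be configured with
 * DEV_TX_OFFLOAD_IPV4_CKSUM and DEV_TX_OFFLOAD_TCP_CKSUM; 'm' is an
 * application mbuf):
 *
 *	m->l2_len = sizeof(struct rte_ether_hdr);
 *	m->l3_len = sizeof(struct rte_ipv4_hdr);
 *	m->l4_len = sizeof(struct rte_tcp_hdr);
 *	m->ol_flags |= PKT_TX_IPV4 | PKT_TX_IP_CKSUM | PKT_TX_TCP_CKSUM;
 */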
3544
3545static bool
3546hns3_pkt_need_linearized(struct rte_mbuf *tx_pkts, uint32_t bd_num,
3547 uint32_t max_non_tso_bd_num)
3548{
3549 struct rte_mbuf *m_first = tx_pkts;
3550 struct rte_mbuf *m_last = tx_pkts;
3551 uint32_t tot_len = 0;
3552 uint32_t hdr_len;
3553 uint32_t i;
3554
	/*
	 * Hardware restriction for TSO packets: the first
	 * max_non_tso_bd_num buffers must carry at least one MSS plus the
	 * packet headers, and every following window of
	 * (max_non_tso_bd_num - 1) consecutive buffers must carry at least
	 * one MSS. If any window falls short, the mbuf chain has to be
	 * linearized before it can be sent.
	 */
3562 if (bd_num <= max_non_tso_bd_num)
3563 return false;
3564
3565 for (i = 0; m_last && i < max_non_tso_bd_num - 1;
3566 i++, m_last = m_last->next)
3567 tot_len += m_last->data_len;
3568
3569 if (!m_last)
3570 return true;
3571
3572
3573 hdr_len = tx_pkts->l2_len + tx_pkts->l3_len + tx_pkts->l4_len;
3574 hdr_len += (tx_pkts->ol_flags & PKT_TX_TUNNEL_MASK) ?
3575 tx_pkts->outer_l2_len + tx_pkts->outer_l3_len : 0;
3576 if (tot_len + m_last->data_len < tx_pkts->tso_segsz + hdr_len)
3577 return true;
3578
	/* Slide the window over the rest of the chain, one buffer at a time. */
3583 for (i = 0; m_last && i < bd_num - max_non_tso_bd_num; i++) {
3584 tot_len -= m_first->data_len;
3585 tot_len += m_last->data_len;
3586
3587 if (tot_len < tx_pkts->tso_segsz)
3588 return true;
3589
3590 m_first = m_first->next;
3591 m_last = m_last->next;
3592 }
3593
3594 return false;
3595}
3596
3597static bool
3598hns3_outer_ipv4_cksum_prepared(struct rte_mbuf *m, uint64_t ol_flags,
3599 uint32_t *l4_proto)
3600{
3601 struct rte_ipv4_hdr *ipv4_hdr;
3602 ipv4_hdr = rte_pktmbuf_mtod_offset(m, struct rte_ipv4_hdr *,
3603 m->outer_l2_len);
3604 if (ol_flags & PKT_TX_OUTER_IP_CKSUM)
3605 ipv4_hdr->hdr_checksum = 0;
3606 if (ol_flags & PKT_TX_OUTER_UDP_CKSUM) {
3607 struct rte_udp_hdr *udp_hdr;
3608
3609
3610
3611
3612 if (ol_flags & PKT_TX_TCP_SEG)
3613 return true;
3614 udp_hdr = rte_pktmbuf_mtod_offset(m, struct rte_udp_hdr *,
3615 m->outer_l2_len + m->outer_l3_len);
3616 udp_hdr->dgram_cksum = rte_ipv4_phdr_cksum(ipv4_hdr, ol_flags);
3617
3618 return true;
3619 }
3620 *l4_proto = ipv4_hdr->next_proto_id;
3621 return false;
3622}
3623
3624static bool
3625hns3_outer_ipv6_cksum_prepared(struct rte_mbuf *m, uint64_t ol_flags,
3626 uint32_t *l4_proto)
3627{
3628 struct rte_ipv6_hdr *ipv6_hdr;
3629 ipv6_hdr = rte_pktmbuf_mtod_offset(m, struct rte_ipv6_hdr *,
3630 m->outer_l2_len);
3631 if (ol_flags & PKT_TX_OUTER_UDP_CKSUM) {
3632 struct rte_udp_hdr *udp_hdr;
3633
3634
3635
3636
3637 if (ol_flags & PKT_TX_TCP_SEG)
3638 return true;
3639 udp_hdr = rte_pktmbuf_mtod_offset(m, struct rte_udp_hdr *,
3640 m->outer_l2_len + m->outer_l3_len);
3641 udp_hdr->dgram_cksum = rte_ipv6_phdr_cksum(ipv6_hdr, ol_flags);
3642
3643 return true;
3644 }
3645 *l4_proto = ipv6_hdr->proto;
3646 return false;
3647}
3648
3649static void
3650hns3_outer_header_cksum_prepare(struct rte_mbuf *m)
3651{
3652 uint64_t ol_flags = m->ol_flags;
3653 uint32_t paylen, hdr_len, l4_proto;
3654 struct rte_udp_hdr *udp_hdr;
3655
3656 if (!(ol_flags & (PKT_TX_OUTER_IPV4 | PKT_TX_OUTER_IPV6)))
3657 return;
3658
3659 if (ol_flags & PKT_TX_OUTER_IPV4) {
3660 if (hns3_outer_ipv4_cksum_prepared(m, ol_flags, &l4_proto))
3661 return;
3662 } else {
3663 if (hns3_outer_ipv6_cksum_prepared(m, ol_flags, &l4_proto))
3664 return;
3665 }
3666
3667
3668 if (l4_proto == IPPROTO_UDP && (ol_flags & PKT_TX_TCP_SEG)) {
3669 hdr_len = m->l2_len + m->l3_len + m->l4_len;
3670 hdr_len += m->outer_l2_len + m->outer_l3_len;
3671 paylen = m->pkt_len - hdr_len;
3672 if (paylen <= m->tso_segsz)
3673 return;
3674 udp_hdr = rte_pktmbuf_mtod_offset(m, struct rte_udp_hdr *,
3675 m->outer_l2_len +
3676 m->outer_l3_len);
3677 udp_hdr->dgram_cksum = 0;
3678 }
3679}
3680
3681static int
3682hns3_check_tso_pkt_valid(struct rte_mbuf *m)
3683{
3684 uint32_t tmp_data_len_sum = 0;
3685 uint16_t nb_buf = m->nb_segs;
3686 uint32_t paylen, hdr_len;
3687 struct rte_mbuf *m_seg;
3688 int i;
3689
3690 if (nb_buf > HNS3_MAX_TSO_BD_PER_PKT)
3691 return -EINVAL;
3692
3693 hdr_len = m->l2_len + m->l3_len + m->l4_len;
3694 hdr_len += (m->ol_flags & PKT_TX_TUNNEL_MASK) ?
3695 m->outer_l2_len + m->outer_l3_len : 0;
3696 if (hdr_len > HNS3_MAX_TSO_HDR_SIZE)
3697 return -EINVAL;
3698
3699 paylen = m->pkt_len - hdr_len;
3700 if (paylen > HNS3_MAX_BD_PAYLEN)
3701 return -EINVAL;
3702
	/*
	 * The hardware requires the complete packet headers to be carried by
	 * the first HNS3_MAX_TSO_HDR_BD_NUM buffers; verify that their
	 * combined data length covers hdr_len.
	 */
3708 m_seg = m;
3709 for (i = 0; m_seg != NULL && i < HNS3_MAX_TSO_HDR_BD_NUM && i < nb_buf;
3710 i++, m_seg = m_seg->next) {
3711 tmp_data_len_sum += m_seg->data_len;
3712 }
3713
3714 if (hdr_len > tmp_data_len_sum)
3715 return -EINVAL;
3716
3717 return 0;
3718}
3719
3720#ifdef RTE_LIBRTE_ETHDEV_DEBUG
3721static inline int
3722hns3_vld_vlan_chk(struct hns3_tx_queue *txq, struct rte_mbuf *m)
3723{
3724 struct rte_ether_hdr *eh;
3725 struct rte_vlan_hdr *vh;
3726
3727 if (!txq->pvid_sw_shift_en)
3728 return 0;
3729
	/*
	 * When the PVID is inserted via the software shift path
	 * (pvid_sw_shift_en), the hardware can only handle two VLAN layers
	 * in the Tx direction and one of them is taken by the PVID. Packets
	 * requesting QinQ insertion, packets that already carry a tag and
	 * also request hardware VLAN insertion, and packets that already
	 * carry two tags would be discarded by hardware, so reject them in
	 * the prepare callback (debug builds only) to inform the user.
	 */
3744 if (m->ol_flags & PKT_TX_QINQ_PKT)
3745 return -EINVAL;
3746
3747 eh = rte_pktmbuf_mtod(m, struct rte_ether_hdr *);
3748 if (eh->ether_type == rte_cpu_to_be_16(RTE_ETHER_TYPE_VLAN)) {
3749 if (m->ol_flags & PKT_TX_VLAN_PKT)
3750 return -EINVAL;
3751
3752
3753 vh = (struct rte_vlan_hdr *)(eh + 1);
3754 if (vh->eth_proto == rte_cpu_to_be_16(RTE_ETHER_TYPE_VLAN))
3755 return -EINVAL;
3756 }
3757
3758 return 0;
3759}
3760#endif
3761
3762static uint16_t
3763hns3_udp_cksum_help(struct rte_mbuf *m)
3764{
3765 uint64_t ol_flags = m->ol_flags;
3766 uint16_t cksum = 0;
3767 uint32_t l4_len;
3768
3769 if (ol_flags & PKT_TX_IPV4) {
3770 struct rte_ipv4_hdr *ipv4_hdr = rte_pktmbuf_mtod_offset(m,
3771 struct rte_ipv4_hdr *, m->l2_len);
3772 l4_len = rte_be_to_cpu_16(ipv4_hdr->total_length) - m->l3_len;
3773 } else {
3774 struct rte_ipv6_hdr *ipv6_hdr = rte_pktmbuf_mtod_offset(m,
3775 struct rte_ipv6_hdr *, m->l2_len);
3776 l4_len = rte_be_to_cpu_16(ipv6_hdr->payload_len);
3777 }
3778
3779 rte_raw_cksum_mbuf(m, m->l2_len + m->l3_len, l4_len, &cksum);
3780
3781 cksum = ~cksum;

	/* RFC 768: an all-zero computed UDP checksum is transmitted as all ones. */
3786 if (cksum == 0)
3787 cksum = 0xffff;
3788
3789 return (uint16_t)cksum;
3790}
3791
3792static bool
3793hns3_validate_tunnel_cksum(struct hns3_tx_queue *tx_queue, struct rte_mbuf *m)
3794{
3795 uint64_t ol_flags = m->ol_flags;
3796 struct rte_udp_hdr *udp_hdr;
3797 uint16_t dst_port;
3798
3799 if (tx_queue->udp_cksum_mode == HNS3_SPECIAL_PORT_HW_CKSUM_MODE ||
3800 ol_flags & PKT_TX_TUNNEL_MASK ||
3801 (ol_flags & PKT_TX_L4_MASK) != PKT_TX_UDP_CKSUM)
3802 return true;
3803
	/*
	 * In this checksum mode the hardware treats a UDP packet whose
	 * destination port matches one of the well known tunnel ports
	 * (VXLAN/VXLAN-GPE/GENEVE) as a tunnel packet and would compute the
	 * UDP checksum incorrectly, so calculate it in software here and
	 * drop the offload request.
	 */
3810 udp_hdr = rte_pktmbuf_mtod_offset(m, struct rte_udp_hdr *,
3811 m->l2_len + m->l3_len);
3812 dst_port = rte_be_to_cpu_16(udp_hdr->dst_port);
3813 switch (dst_port) {
3814 case RTE_VXLAN_DEFAULT_PORT:
3815 case RTE_VXLAN_GPE_DEFAULT_PORT:
3816 case RTE_GENEVE_DEFAULT_PORT:
3817 udp_hdr->dgram_cksum = hns3_udp_cksum_help(m);
3818 m->ol_flags = ol_flags & ~PKT_TX_L4_MASK;
3819 return false;
3820 default:
3821 return true;
3822 }
3823}
3824
3825static int
3826hns3_prep_pkt_proc(struct hns3_tx_queue *tx_queue, struct rte_mbuf *m)
3827{
3828 int ret;
3829
3830#ifdef RTE_LIBRTE_ETHDEV_DEBUG
3831 ret = rte_validate_tx_offload(m);
3832 if (ret != 0) {
3833 rte_errno = -ret;
3834 return ret;
3835 }
3836
3837 ret = hns3_vld_vlan_chk(tx_queue, m);
3838 if (ret != 0) {
3839 rte_errno = EINVAL;
3840 return ret;
3841 }
3842#endif
3843 if (hns3_pkt_is_tso(m)) {
3844 if (hns3_pkt_need_linearized(m, m->nb_segs,
3845 tx_queue->max_non_tso_bd_num) ||
3846 hns3_check_tso_pkt_valid(m)) {
3847 rte_errno = EINVAL;
3848 return -EINVAL;
3849 }
3850
3851 if (tx_queue->tso_mode != HNS3_TSO_SW_CAL_PSEUDO_H_CSUM) {
			/*
			 * In this TSO mode the hardware calculates the
			 * pseudo-header checksums itself, so only the outer
			 * header has to be prepared and the generic
			 * rte_net_intel_cksum_prepare() fix-up below can be
			 * skipped.
			 */
3858 hns3_outer_header_cksum_prepare(m);
3859 return 0;
3860 }
3861 }
3862
3863 ret = rte_net_intel_cksum_prepare(m);
3864 if (ret != 0) {
3865 rte_errno = -ret;
3866 return ret;
3867 }
3868
3869 if (!hns3_validate_tunnel_cksum(tx_queue, m))
3870 return 0;
3871
3872 hns3_outer_header_cksum_prepare(m);
3873
3874 return 0;
3875}
3876
3877uint16_t
3878hns3_prep_pkts(__rte_unused void *tx_queue, struct rte_mbuf **tx_pkts,
3879 uint16_t nb_pkts)
3880{
3881 struct rte_mbuf *m;
3882 uint16_t i;
3883
3884 for (i = 0; i < nb_pkts; i++) {
3885 m = tx_pkts[i];
3886 if (hns3_prep_pkt_proc(tx_queue, m))
3887 return i;
3888 }
3889
3890 return i;
3891}
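
/*
 * Example: hns3_prep_pkts() is installed as dev->tx_pkt_prepare, so an
 * application typically pairs it with the transmit burst as below
 * (port_id/queue_id, 'pkts' and 'nb' are assumptions):
 *
 *	uint16_t n;
 *
 *	n = rte_eth_tx_prepare(port_id, queue_id, pkts, nb);
 *	// packets from index n onwards failed preparation, see rte_errno
 *	n = rte_eth_tx_burst(port_id, queue_id, pkts, n);
 */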
3892
3893static int
3894hns3_parse_cksum(struct hns3_tx_queue *txq, uint16_t tx_desc_id,
3895 struct rte_mbuf *m)
3896{
3897 struct hns3_desc *tx_ring = txq->tx_ring;
3898 struct hns3_desc *desc = &tx_ring[tx_desc_id];
3899
3900
3901 if (m->ol_flags & HNS3_TX_CKSUM_OFFLOAD_MASK) {
3902
3903 if (hns3_parse_tunneling_params(txq, m, tx_desc_id)) {
3904 txq->dfx_stats.unsupported_tunnel_pkt_cnt++;
3905 return -EINVAL;
3906 }
3907
3908 hns3_txd_enable_checksum(txq, m, tx_desc_id);
3909 } else {
3910
3911 desc->tx.type_cs_vlan_tso_len = 0;
3912 desc->tx.ol_type_vlan_len_msec = 0;
3913 }
3914
3915 return 0;
3916}
3917
3918static int
3919hns3_check_non_tso_pkt(uint16_t nb_buf, struct rte_mbuf **m_seg,
3920 struct rte_mbuf *tx_pkt, struct hns3_tx_queue *txq)
3921{
3922 uint8_t max_non_tso_bd_num;
3923 struct rte_mbuf *new_pkt;
3924 int ret;
3925
3926 if (hns3_pkt_is_tso(*m_seg))
3927 return 0;
3928
	/*
	 * Non-TSO packets longer than HNS3_MAX_FRAME_LEN cannot be sent by
	 * the hardware and are rejected here.
	 */
3933 if (unlikely(rte_pktmbuf_pkt_len(tx_pkt) > HNS3_MAX_FRAME_LEN)) {
3934 txq->dfx_stats.over_length_pkt_cnt++;
3935 return -EINVAL;
3936 }
3937
3938 max_non_tso_bd_num = txq->max_non_tso_bd_num;
3939 if (unlikely(nb_buf > max_non_tso_bd_num)) {
3940 txq->dfx_stats.exceed_limit_bd_pkt_cnt++;
3941 ret = hns3_reassemble_tx_pkts(tx_pkt, &new_pkt,
3942 max_non_tso_bd_num);
3943 if (ret) {
3944 txq->dfx_stats.exceed_limit_bd_reassem_fail++;
3945 return ret;
3946 }
3947 *m_seg = new_pkt;
3948 }
3949
3950 return 0;
3951}
3952
3953static inline void
3954hns3_tx_free_buffer_simple(struct hns3_tx_queue *txq)
3955{
3956 struct hns3_entry *tx_entry;
3957 struct hns3_desc *desc;
3958 uint16_t tx_next_clean;
3959 int i;
3960
3961 while (1) {
3962 if (HNS3_GET_TX_QUEUE_PEND_BD_NUM(txq) < txq->tx_rs_thresh)
3963 break;

		/*
		 * A group of tx_rs_thresh mbufs can only be recycled after
		 * hardware has cleared the valid bit of every descriptor in
		 * the group; walk the group backwards starting from its last
		 * descriptor and bail out if any is still pending.
		 */
3969 tx_next_clean = (txq->next_to_clean + txq->tx_rs_thresh - 1) %
3970 txq->nb_tx_desc;
3971 desc = &txq->tx_ring[tx_next_clean];
3972 for (i = 0; i < txq->tx_rs_thresh; i++) {
3973 if (rte_le_to_cpu_16(desc->tx.tp_fe_sc_vld_ra_ri) &
3974 BIT(HNS3_TXD_VLD_B))
3975 return;
3976 desc--;
3977 }
3978
3979 tx_entry = &txq->sw_ring[txq->next_to_clean];
3980
3981 for (i = 0; i < txq->tx_rs_thresh; i++)
3982 rte_prefetch0((tx_entry + i)->mbuf);
3983 for (i = 0; i < txq->tx_rs_thresh; i++, tx_entry++) {
3984 rte_mempool_put(tx_entry->mbuf->pool, tx_entry->mbuf);
3985 tx_entry->mbuf = NULL;
3986 }
3987
3988 txq->next_to_clean = (tx_next_clean + 1) % txq->nb_tx_desc;
3989 txq->tx_bd_ready += txq->tx_rs_thresh;
3990 }
3991}
3992
3993static inline void
3994hns3_tx_backup_1mbuf(struct hns3_entry *tx_entry, struct rte_mbuf **pkts)
3995{
3996 tx_entry->mbuf = pkts[0];
3997}
3998
3999static inline void
4000hns3_tx_backup_4mbuf(struct hns3_entry *tx_entry, struct rte_mbuf **pkts)
4001{
4002 hns3_tx_backup_1mbuf(&tx_entry[0], &pkts[0]);
4003 hns3_tx_backup_1mbuf(&tx_entry[1], &pkts[1]);
4004 hns3_tx_backup_1mbuf(&tx_entry[2], &pkts[2]);
4005 hns3_tx_backup_1mbuf(&tx_entry[3], &pkts[3]);
4006}
4007
4008static inline void
4009hns3_tx_setup_4bd(struct hns3_desc *txdp, struct rte_mbuf **pkts)
4010{
4011#define PER_LOOP_NUM 4
4012 const uint16_t bd_flag = BIT(HNS3_TXD_VLD_B) | BIT(HNS3_TXD_FE_B);
4013 uint64_t dma_addr;
4014 uint32_t i;
4015
4016 for (i = 0; i < PER_LOOP_NUM; i++, txdp++, pkts++) {
4017 dma_addr = rte_mbuf_data_iova(*pkts);
4018 txdp->addr = rte_cpu_to_le_64(dma_addr);
4019 txdp->tx.send_size = rte_cpu_to_le_16((*pkts)->data_len);
4020 txdp->tx.paylen_fd_dop_ol4cs = 0;
4021 txdp->tx.type_cs_vlan_tso_len = 0;
4022 txdp->tx.ol_type_vlan_len_msec = 0;
4023 txdp->tx.tp_fe_sc_vld_ra_ri = rte_cpu_to_le_16(bd_flag);
4024 }
4025}
4026
4027static inline void
4028hns3_tx_setup_1bd(struct hns3_desc *txdp, struct rte_mbuf **pkts)
4029{
4030 const uint16_t bd_flag = BIT(HNS3_TXD_VLD_B) | BIT(HNS3_TXD_FE_B);
4031 uint64_t dma_addr;
4032
4033 dma_addr = rte_mbuf_data_iova(*pkts);
4034 txdp->addr = rte_cpu_to_le_64(dma_addr);
4035 txdp->tx.send_size = rte_cpu_to_le_16((*pkts)->data_len);
4036 txdp->tx.paylen_fd_dop_ol4cs = 0;
4037 txdp->tx.type_cs_vlan_tso_len = 0;
4038 txdp->tx.ol_type_vlan_len_msec = 0;
4039 txdp->tx.tp_fe_sc_vld_ra_ri = rte_cpu_to_le_16(bd_flag);
4040}
4041
4042static inline void
4043hns3_tx_fill_hw_ring(struct hns3_tx_queue *txq,
4044 struct rte_mbuf **pkts,
4045 uint16_t nb_pkts)
4046{
4047#define PER_LOOP_NUM 4
4048#define PER_LOOP_MASK (PER_LOOP_NUM - 1)
4049 struct hns3_desc *txdp = &txq->tx_ring[txq->next_to_use];
4050 struct hns3_entry *tx_entry = &txq->sw_ring[txq->next_to_use];
4051 const uint32_t mainpart = (nb_pkts & ((uint32_t)~PER_LOOP_MASK));
4052 const uint32_t leftover = (nb_pkts & ((uint32_t)PER_LOOP_MASK));
4053 uint32_t i;
4054
4055 for (i = 0; i < mainpart; i += PER_LOOP_NUM) {
4056 hns3_tx_backup_4mbuf(tx_entry + i, pkts + i);
4057 hns3_tx_setup_4bd(txdp + i, pkts + i);
4058
4059
4060 uint32_t j;
4061 for (j = 0; j < PER_LOOP_NUM; j++)
4062 txq->basic_stats.bytes += pkts[i + j]->pkt_len;
4063 }
4064 if (unlikely(leftover > 0)) {
4065 for (i = 0; i < leftover; i++) {
4066 hns3_tx_backup_1mbuf(tx_entry + mainpart + i,
4067 pkts + mainpart + i);
4068 hns3_tx_setup_1bd(txdp + mainpart + i,
4069 pkts + mainpart + i);
4070
4071
4072 txq->basic_stats.bytes += pkts[mainpart + i]->pkt_len;
4073 }
4074 }
4075}
4076
4077uint16_t
4078hns3_xmit_pkts_simple(void *tx_queue,
4079 struct rte_mbuf **tx_pkts,
4080 uint16_t nb_pkts)
4081{
4082 struct hns3_tx_queue *txq = tx_queue;
4083 uint16_t nb_tx = 0;
4084
4085 hns3_tx_free_buffer_simple(txq);
4086
4087 nb_pkts = RTE_MIN(txq->tx_bd_ready, nb_pkts);
4088 if (unlikely(nb_pkts == 0)) {
4089 if (txq->tx_bd_ready == 0)
4090 txq->dfx_stats.queue_full_cnt++;
4091 return 0;
4092 }
4093
4094 txq->tx_bd_ready -= nb_pkts;
4095 if (txq->next_to_use + nb_pkts > txq->nb_tx_desc) {
4096 nb_tx = txq->nb_tx_desc - txq->next_to_use;
4097 hns3_tx_fill_hw_ring(txq, tx_pkts, nb_tx);
4098 txq->next_to_use = 0;
4099 }
4100
4101 hns3_tx_fill_hw_ring(txq, tx_pkts + nb_tx, nb_pkts - nb_tx);
4102 txq->next_to_use += nb_pkts - nb_tx;
4103
4104 hns3_write_txq_tail_reg(txq, nb_pkts);
4105
4106 return nb_pkts;
4107}
4108
4109uint16_t
4110hns3_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
4111{
4112 struct hns3_tx_queue *txq = tx_queue;
4113 struct hns3_entry *tx_bak_pkt;
4114 struct hns3_desc *tx_ring;
4115 struct rte_mbuf *tx_pkt;
4116 struct rte_mbuf *m_seg;
4117 struct hns3_desc *desc;
4118 uint32_t nb_hold = 0;
4119 uint16_t tx_next_use;
4120 uint16_t tx_pkt_num;
4121 uint16_t tx_bd_max;
4122 uint16_t nb_buf;
4123 uint16_t nb_tx;
4124 uint16_t i;
4125
4126
4127 hns3_tx_free_useless_buffer(txq);
4128
4129 tx_next_use = txq->next_to_use;
4130 tx_bd_max = txq->nb_tx_desc;
4131 tx_pkt_num = nb_pkts;
4132 tx_ring = txq->tx_ring;
4133
4134
4135 tx_bak_pkt = &txq->sw_ring[tx_next_use];
4136 for (nb_tx = 0; nb_tx < tx_pkt_num; nb_tx++) {
4137 tx_pkt = *tx_pkts++;
4138
4139 nb_buf = tx_pkt->nb_segs;
4140
4141 if (nb_buf > txq->tx_bd_ready) {
4142 txq->dfx_stats.queue_full_cnt++;
4143 if (nb_tx == 0)
4144 return 0;
4145
4146 goto end_of_tx;
4147 }
4148
		/*
		 * Packets shorter than the minimum frame length supported by
		 * the hardware must be zero-padded before being sent.
		 */
4154 if (unlikely(rte_pktmbuf_pkt_len(tx_pkt) <
4155 txq->min_tx_pkt_len)) {
4156 uint16_t add_len;
4157 char *appended;
4158
4159 add_len = txq->min_tx_pkt_len -
4160 rte_pktmbuf_pkt_len(tx_pkt);
4161 appended = rte_pktmbuf_append(tx_pkt, add_len);
4162 if (appended == NULL) {
4163 txq->dfx_stats.pkt_padding_fail_cnt++;
4164 break;
4165 }
4166
4167 memset(appended, 0, add_len);
4168 }
4169
4170 m_seg = tx_pkt;
4171
4172 if (hns3_check_non_tso_pkt(nb_buf, &m_seg, tx_pkt, txq))
4173 goto end_of_tx;
4174
4175 if (hns3_parse_cksum(txq, tx_next_use, m_seg))
4176 goto end_of_tx;
4177
4178 i = 0;
4179 desc = &tx_ring[tx_next_use];

		/*
		 * VLAN, payload length and TSO information only need to be
		 * filled into the first descriptor of the packet.
		 */
4186 hns3_fill_first_desc(txq, desc, m_seg);
4187
4188 do {
4189 desc = &tx_ring[tx_next_use];

			/*
			 * Fill the DMA address, data length and valid bit of
			 * every descriptor used by this packet.
			 */
4194 hns3_fill_per_desc(desc, m_seg);
4195 tx_bak_pkt->mbuf = m_seg;
4196 m_seg = m_seg->next;
4197 tx_next_use++;
4198 tx_bak_pkt++;
4199 if (tx_next_use >= tx_bd_max) {
4200 tx_next_use = 0;
4201 tx_bak_pkt = txq->sw_ring;
4202 }
4203
4204 i++;
4205 } while (m_seg != NULL);
4206
		/* Mark the last descriptor of the packet with the FE (frame end) bit. */
4208 desc->tx.tp_fe_sc_vld_ra_ri |=
4209 rte_cpu_to_le_16(BIT(HNS3_TXD_FE_B));
4210
4211
4212 txq->basic_stats.bytes += tx_pkt->pkt_len;
4213 nb_hold += i;
4214 txq->next_to_use = tx_next_use;
4215 txq->tx_bd_ready -= i;
4216 }
4217
4218end_of_tx:
4219
4220 if (likely(nb_tx))
4221 hns3_write_txq_tail_reg(txq, nb_hold);
4222
4223 return nb_tx;
4224}
4225
4226int __rte_weak
4227hns3_tx_check_vec_support(__rte_unused struct rte_eth_dev *dev)
4228{
4229 return -ENOTSUP;
4230}
4231
4232uint16_t __rte_weak
4233hns3_xmit_pkts_vec(__rte_unused void *tx_queue,
4234 __rte_unused struct rte_mbuf **tx_pkts,
4235 __rte_unused uint16_t nb_pkts)
4236{
4237 return 0;
4238}
4239
4240uint16_t __rte_weak
hns3_xmit_pkts_vec_sve(__rte_unused void *tx_queue,
		       __rte_unused struct rte_mbuf **tx_pkts,
		       __rte_unused uint16_t nb_pkts)
4244{
4245 return 0;
4246}
4247
4248int
4249hns3_tx_burst_mode_get(struct rte_eth_dev *dev, __rte_unused uint16_t queue_id,
4250 struct rte_eth_burst_mode *mode)
4251{
4252 eth_tx_burst_t pkt_burst = dev->tx_pkt_burst;
4253 const char *info = NULL;
4254
4255 if (pkt_burst == hns3_xmit_pkts_simple)
4256 info = "Scalar Simple";
4257 else if (pkt_burst == hns3_xmit_pkts)
4258 info = "Scalar";
4259 else if (pkt_burst == hns3_xmit_pkts_vec)
4260 info = "Vector Neon";
4261 else if (pkt_burst == hns3_xmit_pkts_vec_sve)
4262 info = "Vector Sve";
4263
4264 if (info == NULL)
4265 return -EINVAL;
4266
4267 snprintf(mode->info, sizeof(mode->info), "%s", info);
4268
4269 return 0;
4270}
4271
4272static bool
4273hns3_tx_check_simple_support(struct rte_eth_dev *dev)
4274{
4275 uint64_t offloads = dev->data->dev_conf.txmode.offloads;
4276
4277 struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private);
4278 if (hns3_dev_ptp_supported(hw))
4279 return false;
4280
4281 return (offloads == (offloads & DEV_TX_OFFLOAD_MBUF_FAST_FREE));
4282}
4283
4284static bool
4285hns3_get_tx_prep_needed(struct rte_eth_dev *dev)
4286{
4287#ifdef RTE_LIBRTE_ETHDEV_DEBUG
4288 RTE_SET_USED(dev);
4289
4290 return true;
4291#else
4292#define HNS3_DEV_TX_CSKUM_TSO_OFFLOAD_MASK (\
4293 DEV_TX_OFFLOAD_IPV4_CKSUM | \
4294 DEV_TX_OFFLOAD_TCP_CKSUM | \
4295 DEV_TX_OFFLOAD_UDP_CKSUM | \
4296 DEV_TX_OFFLOAD_SCTP_CKSUM | \
4297 DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM | \
4298 DEV_TX_OFFLOAD_OUTER_UDP_CKSUM | \
4299 DEV_TX_OFFLOAD_TCP_TSO | \
4300 DEV_TX_OFFLOAD_VXLAN_TNL_TSO | \
4301 DEV_TX_OFFLOAD_GRE_TNL_TSO | \
4302 DEV_TX_OFFLOAD_GENEVE_TNL_TSO)
4303
 uint64_t tx_offload = dev->data->dev_conf.txmode.offloads;

 if (tx_offload & HNS3_DEV_TX_CSUM_TSO_OFFLOAD_MASK)
4306 return true;
4307
4308 return false;
4309#endif
4310}
4311
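/*
 * Select the Tx burst function. The user's tx_func_hint is honoured first
 * (vector, SVE, simple or common) when the corresponding path is supported;
 * otherwise fall back to the Neon vector path, then the simple path, and
 * finally the common scalar path. *prep is set to hns3_prep_pkts only for
 * the common path and only when Tx prepare is needed.
 */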
4312static eth_tx_burst_t
4313hns3_get_tx_function(struct rte_eth_dev *dev, eth_tx_prep_t *prep)
4314{
4315 struct hns3_adapter *hns = dev->data->dev_private;
4316 bool vec_allowed, sve_allowed, simple_allowed;
4317 bool vec_support, tx_prepare_needed;
4318
4319 vec_support = hns3_tx_check_vec_support(dev) == 0;
4320 vec_allowed = vec_support && hns3_get_default_vec_support();
4321 sve_allowed = vec_support && hns3_get_sve_support();
4322 simple_allowed = hns3_tx_check_simple_support(dev);
4323 tx_prepare_needed = hns3_get_tx_prep_needed(dev);
4324
4325 *prep = NULL;
4326
4327 if (hns->tx_func_hint == HNS3_IO_FUNC_HINT_VEC && vec_allowed)
4328 return hns3_xmit_pkts_vec;
4329 if (hns->tx_func_hint == HNS3_IO_FUNC_HINT_SVE && sve_allowed)
4330 return hns3_xmit_pkts_vec_sve;
4331 if (hns->tx_func_hint == HNS3_IO_FUNC_HINT_SIMPLE && simple_allowed)
4332 return hns3_xmit_pkts_simple;
4333 if (hns->tx_func_hint == HNS3_IO_FUNC_HINT_COMMON) {
4334 if (tx_prepare_needed)
4335 *prep = hns3_prep_pkts;
4336 return hns3_xmit_pkts;
4337 }
4338
4339 if (vec_allowed)
4340 return hns3_xmit_pkts_vec;
4341 if (simple_allowed)
4342 return hns3_xmit_pkts_simple;
4343
4344 if (tx_prepare_needed)
4345 *prep = hns3_prep_pkts;
4346 return hns3_xmit_pkts;
4347}
4348
4349static uint16_t
4350hns3_dummy_rxtx_burst(void *dpdk_txq __rte_unused,
4351 struct rte_mbuf **pkts __rte_unused,
4352 uint16_t pkts_n __rte_unused)
4353{
4354 return 0;
4355}
4356
4357static void
4358hns3_trace_rxtx_function(struct rte_eth_dev *dev)
4359{
4360 struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private);
4361 struct rte_eth_burst_mode rx_mode;
4362 struct rte_eth_burst_mode tx_mode;
4363
4364 memset(&rx_mode, 0, sizeof(rx_mode));
4365 memset(&tx_mode, 0, sizeof(tx_mode));
4366 (void)hns3_rx_burst_mode_get(dev, 0, &rx_mode);
4367 (void)hns3_tx_burst_mode_get(dev, 0, &tx_mode);
4368
4369 hns3_dbg(hw, "using rx_pkt_burst: %s, tx_pkt_burst: %s.",
4370 rx_mode.info, tx_mode.info);
4371}
4372
void
hns3_set_rxtx_function(struct rte_eth_dev *eth_dev)
4374{
4375 struct hns3_adapter *hns = eth_dev->data->dev_private;
4376 eth_tx_prep_t prep = NULL;
4377
4378 if (hns->hw.adapter_state == HNS3_NIC_STARTED &&
4379 __atomic_load_n(&hns->hw.reset.resetting, __ATOMIC_RELAXED) == 0) {
4380 eth_dev->rx_pkt_burst = hns3_get_rx_function(eth_dev);
4381 eth_dev->rx_descriptor_status = hns3_dev_rx_descriptor_status;
4382 eth_dev->tx_pkt_burst = hns3_get_tx_function(eth_dev, &prep);
4383 eth_dev->tx_pkt_prepare = prep;
4384 eth_dev->tx_descriptor_status = hns3_dev_tx_descriptor_status;
4385 hns3_trace_rxtx_function(eth_dev);
4386 } else {
4387 eth_dev->rx_pkt_burst = hns3_dummy_rxtx_burst;
4388 eth_dev->tx_pkt_burst = hns3_dummy_rxtx_burst;
4389 eth_dev->tx_pkt_prepare = NULL;
4390 }
4391}
4392
4393void
4394hns3_rxq_info_get(struct rte_eth_dev *dev, uint16_t queue_id,
4395 struct rte_eth_rxq_info *qinfo)
4396{
4397 struct hns3_rx_queue *rxq = dev->data->rx_queues[queue_id];
4398
4399 qinfo->mp = rxq->mb_pool;
4400 qinfo->nb_desc = rxq->nb_rx_desc;
4401 qinfo->scattered_rx = dev->data->scattered_rx;
4402
4403 qinfo->rx_buf_size = rxq->rx_buf_len;

 /*
  * If there are no available Rx buffer descriptors, incoming packets
  * are always dropped by the hardware, so rx_drop_en is reported as 1
  * regardless of the configuration.
  */
4409 qinfo->conf.rx_drop_en = 1;
4410 qinfo->conf.offloads = dev->data->dev_conf.rxmode.offloads;
4411 qinfo->conf.rx_free_thresh = rxq->rx_free_thresh;
4412 qinfo->conf.rx_deferred_start = rxq->rx_deferred_start;
4413}
4414
4415void
4416hns3_txq_info_get(struct rte_eth_dev *dev, uint16_t queue_id,
4417 struct rte_eth_txq_info *qinfo)
4418{
4419 struct hns3_tx_queue *txq = dev->data->tx_queues[queue_id];
4420
4421 qinfo->nb_desc = txq->nb_tx_desc;
4422 qinfo->conf.offloads = dev->data->dev_conf.txmode.offloads;
4423 qinfo->conf.tx_rs_thresh = txq->tx_rs_thresh;
4424 qinfo->conf.tx_free_thresh = txq->tx_free_thresh;
4425 qinfo->conf.tx_deferred_start = txq->tx_deferred_start;
4426}
4427
4428int
4429hns3_dev_rx_queue_start(struct rte_eth_dev *dev, uint16_t rx_queue_id)
4430{
4431 struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private);
4432 struct hns3_rx_queue *rxq = dev->data->rx_queues[rx_queue_id];
4433 struct hns3_adapter *hns = HNS3_DEV_HW_TO_ADAPTER(hw);
4434 int ret;
4435
4436 if (!hns3_dev_indep_txrx_supported(hw))
4437 return -ENOTSUP;
4438
4439 rte_spinlock_lock(&hw->lock);
4440 ret = hns3_reset_queue(hw, rx_queue_id, HNS3_RING_TYPE_RX);
4441 if (ret) {
4442 hns3_err(hw, "fail to reset Rx queue %u, ret = %d.",
4443 rx_queue_id, ret);
4444 rte_spinlock_unlock(&hw->lock);
4445 return ret;
4446 }
4447
4448 ret = hns3_init_rxq(hns, rx_queue_id);
4449 if (ret) {
4450 hns3_err(hw, "fail to init Rx queue %u, ret = %d.",
4451 rx_queue_id, ret);
4452 rte_spinlock_unlock(&hw->lock);
4453 return ret;
4454 }
4455
4456 hns3_enable_rxq(rxq, true);
4457 dev->data->rx_queue_state[rx_queue_id] = RTE_ETH_QUEUE_STATE_STARTED;
4458 rte_spinlock_unlock(&hw->lock);
4459
4460 return ret;
4461}
4462
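/*
 * Reset the software state of a stopped Rx queue: clear the ring indexes,
 * rearm bookkeeping and segment pointers, zero the descriptor ring and
 * re-initialize the vector-path state.
 */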
4463static void
4464hns3_reset_sw_rxq(struct hns3_rx_queue *rxq)
4465{
4466 rxq->next_to_use = 0;
4467 rxq->rx_rearm_start = 0;
4468 rxq->rx_free_hold = 0;
4469 rxq->rx_rearm_nb = 0;
4470 rxq->pkt_first_seg = NULL;
4471 rxq->pkt_last_seg = NULL;
4472 memset(&rxq->rx_ring[0], 0, rxq->nb_rx_desc * sizeof(struct hns3_desc));
4473 hns3_rxq_vec_setup(rxq);
4474}
4475
4476int
4477hns3_dev_rx_queue_stop(struct rte_eth_dev *dev, uint16_t rx_queue_id)
4478{
4479 struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private);
4480 struct hns3_rx_queue *rxq = dev->data->rx_queues[rx_queue_id];
4481
4482 if (!hns3_dev_indep_txrx_supported(hw))
4483 return -ENOTSUP;
4484
4485 rte_spinlock_lock(&hw->lock);
4486 hns3_enable_rxq(rxq, false);
4487
4488 hns3_rx_queue_release_mbufs(rxq);
4489
4490 hns3_reset_sw_rxq(rxq);
4491 dev->data->rx_queue_state[rx_queue_id] = RTE_ETH_QUEUE_STATE_STOPPED;
4492 rte_spinlock_unlock(&hw->lock);
4493
4494 return 0;
4495}
4496
4497int
4498hns3_dev_tx_queue_start(struct rte_eth_dev *dev, uint16_t tx_queue_id)
4499{
4500 struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private);
4501 struct hns3_tx_queue *txq = dev->data->tx_queues[tx_queue_id];
4502 int ret;
4503
4504 if (!hns3_dev_indep_txrx_supported(hw))
4505 return -ENOTSUP;
4506
4507 rte_spinlock_lock(&hw->lock);
4508 ret = hns3_reset_queue(hw, tx_queue_id, HNS3_RING_TYPE_TX);
4509 if (ret) {
4510 hns3_err(hw, "fail to reset Tx queue %u, ret = %d.",
4511 tx_queue_id, ret);
4512 rte_spinlock_unlock(&hw->lock);
4513 return ret;
4514 }
4515
4516 hns3_init_txq(txq);
4517 hns3_enable_txq(txq, true);
4518 dev->data->tx_queue_state[tx_queue_id] = RTE_ETH_QUEUE_STATE_STARTED;
4519 rte_spinlock_unlock(&hw->lock);
4520
4521 return ret;
4522}
4523
4524int
4525hns3_dev_tx_queue_stop(struct rte_eth_dev *dev, uint16_t tx_queue_id)
4526{
4527 struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private);
4528 struct hns3_tx_queue *txq = dev->data->tx_queues[tx_queue_id];
4529
4530 if (!hns3_dev_indep_txrx_supported(hw))
4531 return -ENOTSUP;
4532
4533 rte_spinlock_lock(&hw->lock);
4534 hns3_enable_txq(txq, false);
4535 hns3_tx_queue_release_mbufs(txq);
4536

 /*
  * All mbufs in sw_ring have been released and the pointers in sw_ring
  * have been set to NULL. If this queue is still called by the upper
  * layer, stale software state could cause those NULL pointers to be
  * freed again, so reinitialize the Tx queue to avoid it.
  */
4543 hns3_init_txq(txq);
4544 dev->data->tx_queue_state[tx_queue_id] = RTE_ETH_QUEUE_STATE_STOPPED;
4545 rte_spinlock_unlock(&hw->lock);
4546
4547 return 0;
4548}
4549
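/*
 * Free the mbufs of Tx descriptors that the hardware has finished with,
 * starting from next_to_clean. Cleaning stops at next_to_use, at the first
 * descriptor whose valid bit is still set, or after free_cnt descriptors
 * (0 or a value larger than the ring size means the whole ring). Returns
 * the number of descriptors cleaned.
 */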
4550static int
4551hns3_tx_done_cleanup_full(struct hns3_tx_queue *txq, uint32_t free_cnt)
4552{
4553 uint16_t next_to_clean = txq->next_to_clean;
4554 uint16_t next_to_use = txq->next_to_use;
4555 uint16_t tx_bd_ready = txq->tx_bd_ready;
4556 struct hns3_entry *tx_pkt = &txq->sw_ring[next_to_clean];
4557 struct hns3_desc *desc = &txq->tx_ring[next_to_clean];
4558 uint32_t idx;
4559
4560 if (free_cnt == 0 || free_cnt > txq->nb_tx_desc)
4561 free_cnt = txq->nb_tx_desc;
4562
4563 for (idx = 0; idx < free_cnt; idx++) {
4564 if (next_to_clean == next_to_use)
4565 break;
4566
4567 if (desc->tx.tp_fe_sc_vld_ra_ri &
4568 rte_cpu_to_le_16(BIT(HNS3_TXD_VLD_B)))
4569 break;
4570
4571 if (tx_pkt->mbuf != NULL) {
4572 rte_pktmbuf_free_seg(tx_pkt->mbuf);
4573 tx_pkt->mbuf = NULL;
4574 }
4575
4576 next_to_clean++;
4577 tx_bd_ready++;
4578 tx_pkt++;
4579 desc++;
4580 if (next_to_clean == txq->nb_tx_desc) {
4581 tx_pkt = txq->sw_ring;
4582 desc = txq->tx_ring;
4583 next_to_clean = 0;
4584 }
4585 }
4586
4587 if (idx > 0) {
4588 txq->next_to_clean = next_to_clean;
4589 txq->tx_bd_ready = tx_bd_ready;
4590 }
4591
4592 return (int)idx;
4593}
4594
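/*
 * Tx done cleanup is only implemented for the common scalar Tx path. It is
 * a no-op while the dummy burst function is installed (device not started
 * or resetting) and is not supported for the other paths.
 */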
4595int
4596hns3_tx_done_cleanup(void *txq, uint32_t free_cnt)
4597{
4598 struct hns3_tx_queue *q = (struct hns3_tx_queue *)txq;
4599 struct rte_eth_dev *dev = &rte_eth_devices[q->port_id];
4600
4601 if (dev->tx_pkt_burst == hns3_xmit_pkts)
4602 return hns3_tx_done_cleanup_full(q, free_cnt);
4603 else if (dev->tx_pkt_burst == hns3_dummy_rxtx_burst)
4604 return 0;
4605 else
4606 return -ENOTSUP;
4607}
4608
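/*
 * Report the status of the Rx descriptor at 'offset' from the next
 * descriptor to be processed. Descriptors still held by the driver are
 * reported as UNAVAIL; otherwise the descriptor is DONE if the hardware
 * has set its valid bit and AVAIL if not.
 */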
4609int
4610hns3_dev_rx_descriptor_status(void *rx_queue, uint16_t offset)
4611{
4612 volatile struct hns3_desc *rxdp;
4613 struct hns3_rx_queue *rxq;
4614 struct rte_eth_dev *dev;
4615 uint32_t bd_base_info;
4616 uint16_t desc_id;
4617
4618 rxq = (struct hns3_rx_queue *)rx_queue;
4619 if (offset >= rxq->nb_rx_desc)
4620 return -EINVAL;
4621
4622 desc_id = (rxq->next_to_use + offset) % rxq->nb_rx_desc;
4623 rxdp = &rxq->rx_ring[desc_id];
4624 bd_base_info = rte_le_to_cpu_32(rxdp->rx.bd_base_info);
4625 dev = &rte_eth_devices[rxq->port_id];
4626 if (dev->rx_pkt_burst == hns3_recv_pkts_simple ||
4627 dev->rx_pkt_burst == hns3_recv_scattered_pkts) {
4628 if (offset >= rxq->nb_rx_desc - rxq->rx_free_hold)
4629 return RTE_ETH_RX_DESC_UNAVAIL;
4630 } else if (dev->rx_pkt_burst == hns3_recv_pkts_vec ||
4631 dev->rx_pkt_burst == hns3_recv_pkts_vec_sve) {
4632 if (offset >= rxq->nb_rx_desc - rxq->rx_rearm_nb)
4633 return RTE_ETH_RX_DESC_UNAVAIL;
4634 } else {
4635 return RTE_ETH_RX_DESC_UNAVAIL;
4636 }
4637
4638 if (!(bd_base_info & BIT(HNS3_RXD_VLD_B)))
4639 return RTE_ETH_RX_DESC_AVAIL;
4640 else
4641 return RTE_ETH_RX_DESC_DONE;
4642}
4643
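/*
 * Report the status of the Tx descriptor at 'offset' from next_to_use:
 * FULL while its valid bit is still set (the hardware has not sent it yet),
 * DONE otherwise. Only supported when one of the hns3 Tx burst functions
 * is in use.
 */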
4644int
4645hns3_dev_tx_descriptor_status(void *tx_queue, uint16_t offset)
4646{
4647 volatile struct hns3_desc *txdp;
4648 struct hns3_tx_queue *txq;
4649 struct rte_eth_dev *dev;
4650 uint16_t desc_id;
4651
4652 txq = (struct hns3_tx_queue *)tx_queue;
4653 if (offset >= txq->nb_tx_desc)
4654 return -EINVAL;
4655
4656 dev = &rte_eth_devices[txq->port_id];
4657 if (dev->tx_pkt_burst != hns3_xmit_pkts_simple &&
4658 dev->tx_pkt_burst != hns3_xmit_pkts &&
4659 dev->tx_pkt_burst != hns3_xmit_pkts_vec_sve &&
4660 dev->tx_pkt_burst != hns3_xmit_pkts_vec)
4661 return RTE_ETH_TX_DESC_UNAVAIL;
4662
4663 desc_id = (txq->next_to_use + offset) % txq->nb_tx_desc;
4664 txdp = &txq->tx_ring[desc_id];
4665 if (txdp->tx.tp_fe_sc_vld_ra_ri & rte_cpu_to_le_16(BIT(HNS3_TXD_VLD_B)))
4666 return RTE_ETH_TX_DESC_FULL;
4667 else
4668 return RTE_ETH_TX_DESC_DONE;
4669}
4670
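/*
 * Return the number of used descriptors on the Rx ring: the filled-BD count
 * reported by the hardware FBDNUM register minus the BDs the driver has
 * already processed but not yet returned to the hardware.
 */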
4671uint32_t
4672hns3_rx_queue_count(struct rte_eth_dev *dev, uint16_t rx_queue_id)
4673{
 /*
  * Number of BDs that have been processed by the driver but have not
  * yet been returned to the hardware.
  */
4678 uint32_t driver_hold_bd_num;
4679 struct hns3_rx_queue *rxq;
4680 uint32_t fbd_num;
4681
4682 rxq = dev->data->rx_queues[rx_queue_id];
4683 fbd_num = hns3_read_dev(rxq, HNS3_RING_RX_FBDNUM_REG);
4684 if (dev->rx_pkt_burst == hns3_recv_pkts_vec ||
4685 dev->rx_pkt_burst == hns3_recv_pkts_vec_sve)
4686 driver_hold_bd_num = rxq->rx_rearm_nb;
4687 else
4688 driver_hold_bd_num = rxq->rx_free_hold;
4689
4690 if (fbd_num <= driver_hold_bd_num)
4691 return 0;
4692 else
4693 return fbd_num - driver_hold_bd_num;
4694}
4695
4696void
4697hns3_enable_rxd_adv_layout(struct hns3_hw *hw)
4698{
 /*
  * If the hardware supports the advanced Rx descriptor layout, the
  * driver enables it by default.
  */
4703 if (hns3_dev_rxd_adv_layout_supported(hw))
4704 hns3_write_dev(hw, HNS3_RXD_ADV_LAYOUT_EN_REG, 1);
4705}
4706