1
2
3
4
5#include <stdint.h>
6#include <string.h>
7#include <stdio.h>
8#include <errno.h>
9#include <unistd.h>
10
11#include <ethdev_driver.h>
12#include <ethdev_pci.h>
13#include <rte_memcpy.h>
14#include <rte_string_fns.h>
15#include <rte_malloc.h>
16#include <rte_atomic.h>
17#include <rte_branch_prediction.h>
18#include <rte_pci.h>
19#include <rte_bus_pci.h>
20#include <rte_ether.h>
21#include <rte_common.h>
22#include <rte_cycles.h>
23#include <rte_spinlock.h>
24#include <rte_byteorder.h>
25#include <rte_dev.h>
26#include <rte_memory.h>
27#include <rte_eal.h>
28#include <rte_io.h>
29
30#include "rte_avp_common.h"
31#include "rte_avp_fifo.h"
32
33#include "avp_logs.h"
34
35static int avp_dev_create(struct rte_pci_device *pci_dev,
36 struct rte_eth_dev *eth_dev);
37
38static int avp_dev_configure(struct rte_eth_dev *dev);
39static int avp_dev_start(struct rte_eth_dev *dev);
40static int avp_dev_stop(struct rte_eth_dev *dev);
41static int avp_dev_close(struct rte_eth_dev *dev);
42static int avp_dev_info_get(struct rte_eth_dev *dev,
43 struct rte_eth_dev_info *dev_info);
44static int avp_vlan_offload_set(struct rte_eth_dev *dev, int mask);
45static int avp_dev_link_update(struct rte_eth_dev *dev, int wait_to_complete);
46static int avp_dev_promiscuous_enable(struct rte_eth_dev *dev);
47static int avp_dev_promiscuous_disable(struct rte_eth_dev *dev);
48
49static int avp_dev_rx_queue_setup(struct rte_eth_dev *dev,
50 uint16_t rx_queue_id,
51 uint16_t nb_rx_desc,
52 unsigned int socket_id,
53 const struct rte_eth_rxconf *rx_conf,
54 struct rte_mempool *pool);
55
56static int avp_dev_tx_queue_setup(struct rte_eth_dev *dev,
57 uint16_t tx_queue_id,
58 uint16_t nb_tx_desc,
59 unsigned int socket_id,
60 const struct rte_eth_txconf *tx_conf);
61
62static uint16_t avp_recv_scattered_pkts(void *rx_queue,
63 struct rte_mbuf **rx_pkts,
64 uint16_t nb_pkts);
65
66static uint16_t avp_recv_pkts(void *rx_queue,
67 struct rte_mbuf **rx_pkts,
68 uint16_t nb_pkts);
69
70static uint16_t avp_xmit_scattered_pkts(void *tx_queue,
71 struct rte_mbuf **tx_pkts,
72 uint16_t nb_pkts);
73
74static uint16_t avp_xmit_pkts(void *tx_queue,
75 struct rte_mbuf **tx_pkts,
76 uint16_t nb_pkts);
77
78static void avp_dev_rx_queue_release(struct rte_eth_dev *dev, uint16_t qid);
79static void avp_dev_tx_queue_release(struct rte_eth_dev *dev, uint16_t qid);
80
81static int avp_dev_stats_get(struct rte_eth_dev *dev,
82 struct rte_eth_stats *stats);
83static int avp_dev_stats_reset(struct rte_eth_dev *dev);
84
85
86#define AVP_MAX_RX_BURST 64
87#define AVP_MAX_TX_BURST 64
88#define AVP_MAX_MAC_ADDRS 1
89#define AVP_MIN_RX_BUFSIZE RTE_ETHER_MIN_LEN
90
91
92
93
94
95
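/* Delay, in microseconds, between polls for a host response */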
96#define AVP_REQUEST_DELAY_USECS (5000)
97
98
99
100
101
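/* Maximum number of polls for a host response before timing out */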
102#define AVP_MAX_REQUEST_RETRY (100)
103
104
105#define AVP_DPDK_DRIVER_VERSION RTE_AVP_CURRENT_GUEST_VERSION
106
107
108
109
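/* The set of PCI devices supported by this driver */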
110static const struct rte_pci_id pci_id_avp_map[] = {
111 { .vendor_id = RTE_AVP_PCI_VENDOR_ID,
112 .device_id = RTE_AVP_PCI_DEVICE_ID,
113 .subsystem_vendor_id = RTE_AVP_PCI_SUB_VENDOR_ID,
114 .subsystem_device_id = RTE_AVP_PCI_SUB_DEVICE_ID,
115 .class_id = RTE_CLASS_ANY_ID,
116 },
117
118 { .vendor_id = 0,
119 },
120};
121
122
123
124
125static const struct eth_dev_ops avp_eth_dev_ops = {
126 .dev_configure = avp_dev_configure,
127 .dev_start = avp_dev_start,
128 .dev_stop = avp_dev_stop,
129 .dev_close = avp_dev_close,
130 .dev_infos_get = avp_dev_info_get,
131 .vlan_offload_set = avp_vlan_offload_set,
132 .stats_get = avp_dev_stats_get,
133 .stats_reset = avp_dev_stats_reset,
134 .link_update = avp_dev_link_update,
135 .promiscuous_enable = avp_dev_promiscuous_enable,
136 .promiscuous_disable = avp_dev_promiscuous_disable,
137 .rx_queue_setup = avp_dev_rx_queue_setup,
138 .rx_queue_release = avp_dev_rx_queue_release,
139 .tx_queue_setup = avp_dev_tx_queue_setup,
140 .tx_queue_release = avp_dev_tx_queue_release,
141};
142
143
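/* Per-device state flags (stored in avp_dev.flags) */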
144#define AVP_F_PROMISC (1 << 1)
145#define AVP_F_CONFIGURED (1 << 2)
146#define AVP_F_LINKUP (1 << 3)
147#define AVP_F_DETACHED (1 << 4)
148
149
150
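/* Marker value used to detect an already-initialized avp_dev structure */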
151#define AVP_ETHDEV_MAGIC 0x92972862
152
153
154
155
156
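/* Per-device AVP state embedded in the ethdev private data area */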
157struct avp_dev {
158 uint32_t magic;
159 uint64_t device_id;
160 struct rte_ether_addr ethaddr;
161 struct rte_eth_dev_data *dev_data;
162
163 volatile uint32_t flags;
164 uint16_t port_id;
165 struct rte_mempool *pool;
166 unsigned int guest_mbuf_size;
167 unsigned int host_mbuf_size;
168 unsigned int max_rx_pkt_len;
169 uint32_t host_features;
170 uint32_t features;
171 unsigned int num_tx_queues;
172 unsigned int max_tx_queues;
173 unsigned int num_rx_queues;
174 unsigned int max_rx_queues;
175
176 struct rte_avp_fifo *tx_q[RTE_AVP_MAX_QUEUES];
177 struct rte_avp_fifo *rx_q[RTE_AVP_MAX_QUEUES];
178 struct rte_avp_fifo *alloc_q[RTE_AVP_MAX_QUEUES];
179
180 struct rte_avp_fifo *free_q[RTE_AVP_MAX_QUEUES];
181
182
183
184 rte_spinlock_t lock;
185
186
187 struct rte_avp_fifo *req_q;
188 struct rte_avp_fifo *resp_q;
189 void *host_sync_addr;
190 void *sync_addr;
191 void *host_mbuf_addr;
192 void *mbuf_addr;
193} __rte_cache_aligned;
194
195
196struct avp_adapter {
197 struct avp_dev avp;
198} __rte_cache_aligned;
199
200
201
202#define AVP_WRITE32(_value, _addr) rte_write32_relaxed((_value), (_addr))
203
204
205#define AVP_READ32(_addr) rte_read32_relaxed((_addr))
206
207
208#define AVP_DEV_PRIVATE_TO_HW(adapter) \
209 (&((struct avp_adapter *)adapter)->avp)
210
211
212
213
214
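/* Per-queue context used by the Rx/Tx burst handlers */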
215struct avp_queue {
216 struct rte_eth_dev_data *dev_data;
217
218 struct avp_dev *avp;
219 uint16_t queue_id;
220
221 uint16_t queue_base;
222
223 uint16_t queue_limit;
224
225
226 uint64_t packets;
227 uint64_t bytes;
228 uint64_t errors;
229};
230
231
232
233
234
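/*
 * Send a request to the host over the request FIFO and poll for its
 * response, retrying up to AVP_MAX_REQUEST_RETRY times.
 */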
235static int
236avp_dev_process_request(struct avp_dev *avp, struct rte_avp_request *request)
237{
238 unsigned int retry = AVP_MAX_REQUEST_RETRY;
239 void *resp_addr = NULL;
240 unsigned int count;
241 int ret;
242
243 PMD_DRV_LOG(DEBUG, "Sending request %u to host\n", request->req_id);
244
245 request->result = -ENOTSUP;
246
247
248 while (avp_fifo_get(avp->resp_q, (void **)&resp_addr, 1))
249 PMD_DRV_LOG(DEBUG, "Discarding stale response\n");
250
251 rte_memcpy(avp->sync_addr, request, sizeof(*request));
252 count = avp_fifo_put(avp->req_q, &avp->host_sync_addr, 1);
253 if (count < 1) {
254 PMD_DRV_LOG(ERR, "Cannot send request %u to host\n",
255 request->req_id);
256 ret = -EBUSY;
257 goto done;
258 }
259
260 while (retry--) {
261
262 usleep(AVP_REQUEST_DELAY_USECS);
263
264 count = avp_fifo_count(avp->resp_q);
265 if (count >= 1) {
266
267 break;
268 }
269
270 if (retry == 0) {
271 PMD_DRV_LOG(ERR, "Timeout while waiting for a response for %u\n",
272 request->req_id);
273 ret = -ETIME;
274 goto done;
275 }
276 }
277
278
279 count = avp_fifo_get(avp->resp_q, (void **)&resp_addr, 1);
280 if ((count != 1) || (resp_addr != avp->host_sync_addr)) {
281 PMD_DRV_LOG(ERR, "Invalid response from host, count=%u resp=%p host_sync_addr=%p\n",
282 count, resp_addr, avp->host_sync_addr);
283 ret = -ENODATA;
284 goto done;
285 }
286
287
288 rte_memcpy(request, avp->sync_addr, sizeof(*request));
289 ret = 0;
290
291 PMD_DRV_LOG(DEBUG, "Result %d received for request %u\n",
292 request->result, request->req_id);
293
294done:
295 return ret;
296}
297
298static int
299avp_dev_ctrl_set_link_state(struct rte_eth_dev *eth_dev, unsigned int state)
300{
301 struct avp_dev *avp = AVP_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private);
302 struct rte_avp_request request;
303 int ret;
304
305
306 memset(&request, 0, sizeof(request));
307 request.req_id = RTE_AVP_REQ_CFG_NETWORK_IF;
308 request.if_up = state;
309
310 ret = avp_dev_process_request(avp, &request);
311
312 return ret == 0 ? request.result : ret;
313}
314
315static int
316avp_dev_ctrl_set_config(struct rte_eth_dev *eth_dev,
317 struct rte_avp_device_config *config)
318{
319 struct avp_dev *avp = AVP_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private);
320 struct rte_avp_request request;
321 int ret;
322
323
324 memset(&request, 0, sizeof(request));
325 request.req_id = RTE_AVP_REQ_CFG_DEVICE;
326 memcpy(&request.config, config, sizeof(request.config));
327
328 ret = avp_dev_process_request(avp, &request);
329
330 return ret == 0 ? request.result : ret;
331}
332
333static int
334avp_dev_ctrl_shutdown(struct rte_eth_dev *eth_dev)
335{
336 struct avp_dev *avp = AVP_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private);
337 struct rte_avp_request request;
338 int ret;
339
340
341 memset(&request, 0, sizeof(request));
342 request.req_id = RTE_AVP_REQ_SHUTDOWN_DEVICE;
343
344 ret = avp_dev_process_request(avp, &request);
345
346 return ret == 0 ? request.result : ret;
347}
348
349
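/* Translate a host mbuf buffer address into a guest virtual address */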
350static inline void *
351avp_dev_translate_buffer(struct avp_dev *avp, void *host_mbuf_address)
352{
353 return RTE_PTR_ADD(RTE_PTR_SUB(host_mbuf_address,
354 (uintptr_t)avp->host_mbuf_addr),
355 (uintptr_t)avp->mbuf_addr);
356}
357
358
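/*
 * Translate a host physical address into a guest virtual address using the
 * memory map published in the memmap BAR.
 */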
359static void *
360avp_dev_translate_address(struct rte_eth_dev *eth_dev,
361 rte_iova_t host_phys_addr)
362{
363 struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
364 struct rte_mem_resource *resource;
365 struct rte_avp_memmap_info *info;
366 struct rte_avp_memmap *map;
367 off_t offset;
368 void *addr;
369 unsigned int i;
370
371 addr = pci_dev->mem_resource[RTE_AVP_PCI_MEMORY_BAR].addr;
372 resource = &pci_dev->mem_resource[RTE_AVP_PCI_MEMMAP_BAR];
373 info = (struct rte_avp_memmap_info *)resource->addr;
374
375 offset = 0;
376 for (i = 0; i < info->nb_maps; i++) {
377
378 map = &info->maps[i];
379
380 if ((host_phys_addr >= map->phys_addr) &&
381 (host_phys_addr < (map->phys_addr + map->length))) {
382
383 offset += (host_phys_addr - map->phys_addr);
384 addr = RTE_PTR_ADD(addr, (uintptr_t)offset);
385
386 PMD_DRV_LOG(DEBUG, "Translating host physical 0x%" PRIx64 " to guest virtual 0x%p\n",
387 host_phys_addr, addr);
388
389 return addr;
390 }
391 offset += map->length;
392 }
393
394 return NULL;
395}
396
397
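/* Return 0 if the host device version is supported by this driver */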
398static int
399avp_dev_version_check(uint32_t version)
400{
401 uint32_t driver = RTE_AVP_STRIP_MINOR_VERSION(AVP_DPDK_DRIVER_VERSION);
402 uint32_t device = RTE_AVP_STRIP_MINOR_VERSION(version);
403
404 if (device <= driver) {
405
406 return 0;
407 }
408
409 return 1;
410}
411
412
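/* Verify that all required PCI BAR resources are present and valid */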
413static int
414avp_dev_check_regions(struct rte_eth_dev *eth_dev)
415{
416 struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
417 struct rte_avp_memmap_info *memmap;
418 struct rte_avp_device_info *info;
419 struct rte_mem_resource *resource;
420 unsigned int i;
421
422
423 for (i = 0; i < PCI_MAX_RESOURCE; i++) {
424 resource = &pci_dev->mem_resource[i];
425 if ((resource->phys_addr == 0) || (resource->len == 0))
426 continue;
427
428 PMD_DRV_LOG(DEBUG, "resource[%u]: phys=0x%" PRIx64 " len=%" PRIu64 " addr=%p\n",
429 i, resource->phys_addr,
430 resource->len, resource->addr);
431
432 switch (i) {
433 case RTE_AVP_PCI_MEMMAP_BAR:
434 memmap = (struct rte_avp_memmap_info *)resource->addr;
435 if ((memmap->magic != RTE_AVP_MEMMAP_MAGIC) ||
436 (memmap->version != RTE_AVP_MEMMAP_VERSION)) {
437 PMD_DRV_LOG(ERR, "Invalid memmap magic 0x%08x and version %u\n",
438 memmap->magic, memmap->version);
439 return -EINVAL;
440 }
441 break;
442
443 case RTE_AVP_PCI_DEVICE_BAR:
444 info = (struct rte_avp_device_info *)resource->addr;
445 if ((info->magic != RTE_AVP_DEVICE_MAGIC) ||
446 avp_dev_version_check(info->version)) {
447 PMD_DRV_LOG(ERR, "Invalid device info magic 0x%08x or version 0x%08x > 0x%08x\n",
448 info->magic, info->version,
449 AVP_DPDK_DRIVER_VERSION);
450 return -EINVAL;
451 }
452 break;
453
454 case RTE_AVP_PCI_MEMORY_BAR:
455 case RTE_AVP_PCI_MMIO_BAR:
456 if (resource->addr == NULL) {
457 PMD_DRV_LOG(ERR, "Missing address space for BAR%u\n",
458 i);
459 return -EINVAL;
460 }
461 break;
462
463 case RTE_AVP_PCI_MSIX_BAR:
464 default:
465
466 break;
467 }
468 }
469
470 return 0;
471}
472
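/*
 * Detach the device from the host (e.g. ahead of a VM live migration) by
 * sending a shutdown request and marking the device as detached.
 */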
473static int
474avp_dev_detach(struct rte_eth_dev *eth_dev)
475{
476 struct avp_dev *avp = AVP_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private);
477 int ret;
478
479 PMD_DRV_LOG(NOTICE, "Detaching port %u from AVP device 0x%" PRIx64 "\n",
480 eth_dev->data->port_id, avp->device_id);
481
482 rte_spinlock_lock(&avp->lock);
483
484 if (avp->flags & AVP_F_DETACHED) {
485 PMD_DRV_LOG(NOTICE, "port %u already detached\n",
486 eth_dev->data->port_id);
487 ret = 0;
488 goto unlock;
489 }
490
491
492 ret = avp_dev_ctrl_shutdown(eth_dev);
493 if (ret < 0) {
494 PMD_DRV_LOG(ERR, "Failed to send/recv shutdown to host, ret=%d\n",
495 ret);
496 avp->flags &= ~AVP_F_DETACHED;
497 goto unlock;
498 }
499
500 avp->flags |= AVP_F_DETACHED;
501 rte_wmb();
502
503
504 rte_delay_ms(1);
505
506 ret = 0;
507
508unlock:
509 rte_spinlock_unlock(&avp->lock);
510 return ret;
511}
512
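/* Map an ethdev Rx queue to its range of backing host queue ids */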
513static void
514_avp_set_rx_queue_mappings(struct rte_eth_dev *eth_dev, uint16_t rx_queue_id)
515{
516 struct avp_dev *avp =
517 AVP_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private);
518 struct avp_queue *rxq;
519 uint16_t queue_count;
520 uint16_t remainder;
521
522 rxq = (struct avp_queue *)eth_dev->data->rx_queues[rx_queue_id];
523
524
525
526
527
528
529
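	/*
	 * Distribute the host Rx queues as evenly as possible across the
	 * configured ethdev Rx queues; the first "remainder" queues each
	 * service one extra host queue.
	 */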
530 queue_count = avp->num_rx_queues / eth_dev->data->nb_rx_queues;
531 remainder = avp->num_rx_queues % eth_dev->data->nb_rx_queues;
532 if (rx_queue_id < remainder) {
533
534 rxq->queue_base = rx_queue_id * (queue_count + 1);
535 rxq->queue_limit = rxq->queue_base + (queue_count + 1) - 1;
536 } else {
537
538 rxq->queue_base = ((remainder * (queue_count + 1)) +
539 ((rx_queue_id - remainder) * queue_count));
540 rxq->queue_limit = rxq->queue_base + queue_count - 1;
541 }
542
543 PMD_DRV_LOG(DEBUG, "rxq %u at %p base %u limit %u\n",
544 rx_queue_id, rxq, rxq->queue_base, rxq->queue_limit);
545
546 rxq->queue_id = rxq->queue_base;
547}
548
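/* Determine how many host Tx/Rx queues to request for the current configuration */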
549static void
550_avp_set_queue_counts(struct rte_eth_dev *eth_dev)
551{
552 struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
553 struct avp_dev *avp = AVP_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private);
554 struct rte_avp_device_info *host_info;
555 void *addr;
556
557 addr = pci_dev->mem_resource[RTE_AVP_PCI_DEVICE_BAR].addr;
558 host_info = (struct rte_avp_device_info *)addr;
559
560
561
562
563
564
565 avp->num_tx_queues = eth_dev->data->nb_tx_queues;
566
567
568
569
570
571
572
573
574 avp->num_rx_queues = RTE_MAX(host_info->min_rx_queues,
575 eth_dev->data->nb_rx_queues);
576
577 PMD_DRV_LOG(DEBUG, "Requesting %u Tx and %u Rx queues from host\n",
578 avp->num_tx_queues, avp->num_rx_queues);
579}
580
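/*
 * Re-attach to the host device after a live migration completes, re-creating
 * the device and re-applying the previous configuration if necessary.
 */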
581static int
582avp_dev_attach(struct rte_eth_dev *eth_dev)
583{
584 struct avp_dev *avp = AVP_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private);
585 struct rte_avp_device_config config;
586 unsigned int i;
587 int ret;
588
589 PMD_DRV_LOG(NOTICE, "Attaching port %u to AVP device 0x%" PRIx64 "\n",
590 eth_dev->data->port_id, avp->device_id);
591
592 rte_spinlock_lock(&avp->lock);
593
594 if (!(avp->flags & AVP_F_DETACHED)) {
595 PMD_DRV_LOG(NOTICE, "port %u already attached\n",
596 eth_dev->data->port_id);
597 ret = 0;
598 goto unlock;
599 }
600
601
602
603
604
605 avp->flags |= AVP_F_DETACHED;
606 rte_wmb();
607
608
609
610
611
612 ret = avp_dev_create(RTE_ETH_DEV_TO_PCI(eth_dev), eth_dev);
613 if (ret < 0) {
614 PMD_DRV_LOG(ERR, "Failed to re-create AVP device, ret=%d\n",
615 ret);
616 goto unlock;
617 }
618
619 if (avp->flags & AVP_F_CONFIGURED) {
620
621
622
623
624
625
626
627 _avp_set_queue_counts(eth_dev);
628 for (i = 0; i < eth_dev->data->nb_rx_queues; i++)
629 _avp_set_rx_queue_mappings(eth_dev, i);
630
631
632
633
634
635 memset(&config, 0, sizeof(config));
636 config.device_id = avp->device_id;
637 config.driver_type = RTE_AVP_DRIVER_TYPE_DPDK;
638 config.driver_version = AVP_DPDK_DRIVER_VERSION;
639 config.features = avp->features;
640 config.num_tx_queues = avp->num_tx_queues;
641 config.num_rx_queues = avp->num_rx_queues;
642 config.if_up = !!(avp->flags & AVP_F_LINKUP);
643
644 ret = avp_dev_ctrl_set_config(eth_dev, &config);
645 if (ret < 0) {
646 PMD_DRV_LOG(ERR, "Config request failed by host, ret=%d\n",
647 ret);
648 goto unlock;
649 }
650 }
651
652 rte_wmb();
653 avp->flags &= ~AVP_F_DETACHED;
654
655 ret = 0;
656
657unlock:
658 rte_spinlock_unlock(&avp->lock);
659 return ret;
660}
661
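/* Handle device interrupts; used primarily for live migration notifications */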
662static void
663avp_dev_interrupt_handler(void *data)
664{
665 struct rte_eth_dev *eth_dev = data;
666 struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
667 void *registers = pci_dev->mem_resource[RTE_AVP_PCI_MMIO_BAR].addr;
668 uint32_t status, value;
669 int ret;
670
671 if (registers == NULL)
672 rte_panic("no mapped MMIO register space\n");
673
674
675
676
677
678 status = AVP_READ32(
679 RTE_PTR_ADD(registers,
680 RTE_AVP_INTERRUPT_STATUS_OFFSET));
681
682 if (status & RTE_AVP_MIGRATION_INTERRUPT_MASK) {
683
684 value = AVP_READ32(
685 RTE_PTR_ADD(registers,
686 RTE_AVP_MIGRATION_STATUS_OFFSET));
687 switch (value) {
688 case RTE_AVP_MIGRATION_DETACHED:
689 ret = avp_dev_detach(eth_dev);
690 break;
691 case RTE_AVP_MIGRATION_ATTACHED:
692 ret = avp_dev_attach(eth_dev);
693 break;
694 default:
695 PMD_DRV_LOG(ERR, "unexpected migration status, status=%u\n",
696 value);
697 ret = -EINVAL;
698 }
699
700
701 value = (ret == 0 ? value : RTE_AVP_MIGRATION_ERROR);
702 AVP_WRITE32(value,
703 RTE_PTR_ADD(registers,
704 RTE_AVP_MIGRATION_ACK_OFFSET));
705
706 PMD_DRV_LOG(NOTICE, "AVP migration interrupt handled\n");
707 }
708
709 if (status & ~RTE_AVP_MIGRATION_INTERRUPT_MASK)
710 PMD_DRV_LOG(WARNING, "AVP unexpected interrupt, status=0x%08x\n",
711 status);
712
713
714 ret = rte_intr_ack(pci_dev->intr_handle);
715 if (ret < 0) {
716 PMD_DRV_LOG(ERR, "Failed to re-enable UIO interrupts, ret=%d\n",
717 ret);
718
719 }
720}
721
722static int
723avp_dev_enable_interrupts(struct rte_eth_dev *eth_dev)
724{
725 struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
726 void *registers = pci_dev->mem_resource[RTE_AVP_PCI_MMIO_BAR].addr;
727 int ret;
728
729 if (registers == NULL)
730 return -EINVAL;
731
732
733 ret = rte_intr_enable(pci_dev->intr_handle);
734 if (ret < 0) {
735 PMD_DRV_LOG(ERR, "Failed to enable UIO interrupts, ret=%d\n",
736 ret);
737 return ret;
738 }
739
740
741 AVP_WRITE32(RTE_AVP_APP_INTERRUPTS_MASK,
742 RTE_PTR_ADD(registers, RTE_AVP_INTERRUPT_MASK_OFFSET));
743
744 return 0;
745}
746
747static int
748avp_dev_disable_interrupts(struct rte_eth_dev *eth_dev)
749{
750 struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
751 void *registers = pci_dev->mem_resource[RTE_AVP_PCI_MMIO_BAR].addr;
752 int ret;
753
754 if (registers == NULL)
755 return 0;
756
757
758 AVP_WRITE32(RTE_AVP_NO_INTERRUPTS_MASK,
759 RTE_PTR_ADD(registers, RTE_AVP_INTERRUPT_MASK_OFFSET));
760
761
762 ret = rte_intr_disable(pci_dev->intr_handle);
763 if (ret < 0) {
764 PMD_DRV_LOG(ERR, "Failed to disable UIO interrupts, ret=%d\n",
765 ret);
766 return ret;
767 }
768
769 return 0;
770}
771
772static int
773avp_dev_setup_interrupts(struct rte_eth_dev *eth_dev)
774{
775 struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
776 int ret;
777
778
779 ret = rte_intr_callback_register(pci_dev->intr_handle,
780 avp_dev_interrupt_handler,
781 (void *)eth_dev);
782 if (ret < 0) {
783 PMD_DRV_LOG(ERR, "Failed to register UIO interrupt callback, ret=%d\n",
784 ret);
785 return ret;
786 }
787
788
789 return avp_dev_enable_interrupts(eth_dev);
790}
791
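/* Return non-zero if the host has signalled that a migration detach is pending */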
792static int
793avp_dev_migration_pending(struct rte_eth_dev *eth_dev)
794{
795 struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
796 void *registers = pci_dev->mem_resource[RTE_AVP_PCI_MMIO_BAR].addr;
797 uint32_t value;
798
799 if (registers == NULL)
800 return 0;
801
802 value = AVP_READ32(RTE_PTR_ADD(registers,
803 RTE_AVP_MIGRATION_STATUS_OFFSET));
804 if (value == RTE_AVP_MIGRATION_DETACHED) {
805
806 AVP_WRITE32(value,
807 RTE_PTR_ADD(registers,
808 RTE_AVP_MIGRATION_ACK_OFFSET));
809 return 1;
810 }
811 return 0;
812}
813
814
815
816
817
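/*
 * Create/initialize the AVP device from the information published in the
 * PCI BARs (queue addresses, sync and mbuf address translations).
 */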
818static int
819avp_dev_create(struct rte_pci_device *pci_dev,
820 struct rte_eth_dev *eth_dev)
821{
822 struct avp_dev *avp = AVP_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private);
823 struct rte_avp_device_info *host_info;
824 struct rte_mem_resource *resource;
825 unsigned int i;
826
827 resource = &pci_dev->mem_resource[RTE_AVP_PCI_DEVICE_BAR];
828 if (resource->addr == NULL) {
829 PMD_DRV_LOG(ERR, "BAR%u is not mapped\n",
830 RTE_AVP_PCI_DEVICE_BAR);
831 return -EFAULT;
832 }
833 host_info = (struct rte_avp_device_info *)resource->addr;
834
835 if ((host_info->magic != RTE_AVP_DEVICE_MAGIC) ||
836 avp_dev_version_check(host_info->version)) {
837 PMD_DRV_LOG(ERR, "Invalid AVP PCI device, magic 0x%08x version 0x%08x > 0x%08x\n",
838 host_info->magic, host_info->version,
839 AVP_DPDK_DRIVER_VERSION);
840 return -EINVAL;
841 }
842
843 PMD_DRV_LOG(DEBUG, "AVP host device is v%u.%u.%u\n",
844 RTE_AVP_GET_RELEASE_VERSION(host_info->version),
845 RTE_AVP_GET_MAJOR_VERSION(host_info->version),
846 RTE_AVP_GET_MINOR_VERSION(host_info->version));
847
848 PMD_DRV_LOG(DEBUG, "AVP host supports %u to %u TX queue(s)\n",
849 host_info->min_tx_queues, host_info->max_tx_queues);
850 PMD_DRV_LOG(DEBUG, "AVP host supports %u to %u RX queue(s)\n",
851 host_info->min_rx_queues, host_info->max_rx_queues);
852 PMD_DRV_LOG(DEBUG, "AVP host supports features 0x%08x\n",
853 host_info->features);
854
855 if (avp->magic != AVP_ETHDEV_MAGIC) {
856
857
858
859
860 memset(avp, 0, sizeof(*avp));
861 avp->magic = AVP_ETHDEV_MAGIC;
862 avp->dev_data = eth_dev->data;
863 avp->port_id = eth_dev->data->port_id;
864 avp->host_mbuf_size = host_info->mbuf_size;
865 avp->host_features = host_info->features;
866 rte_spinlock_init(&avp->lock);
867 memcpy(&avp->ethaddr.addr_bytes[0],
868 host_info->ethaddr, RTE_ETHER_ADDR_LEN);
869
870 avp->max_tx_queues =
871 RTE_MIN(host_info->max_tx_queues, RTE_AVP_MAX_QUEUES);
872 avp->max_rx_queues =
873 RTE_MIN(host_info->max_rx_queues, RTE_AVP_MAX_QUEUES);
874 } else {
875
876
877
878 if ((host_info->features & avp->features) != avp->features) {
879 PMD_DRV_LOG(ERR, "AVP host features mismatched; 0x%08x, host=0x%08x\n",
880 avp->features, host_info->features);
881
882 }
883 }
884
885
886 avp->device_id = host_info->device_id;
887
888
889 PMD_DRV_LOG(DEBUG, "AVP first host tx queue at 0x%" PRIx64 "\n",
890 host_info->tx_phys);
891 PMD_DRV_LOG(DEBUG, "AVP first host alloc queue at 0x%" PRIx64 "\n",
892 host_info->alloc_phys);
893 for (i = 0; i < avp->max_tx_queues; i++) {
894 avp->tx_q[i] = avp_dev_translate_address(eth_dev,
895 host_info->tx_phys + (i * host_info->tx_size));
896
897 avp->alloc_q[i] = avp_dev_translate_address(eth_dev,
898 host_info->alloc_phys + (i * host_info->alloc_size));
899 }
900
901 PMD_DRV_LOG(DEBUG, "AVP first host rx queue at 0x%" PRIx64 "\n",
902 host_info->rx_phys);
903 PMD_DRV_LOG(DEBUG, "AVP first host free queue at 0x%" PRIx64 "\n",
904 host_info->free_phys);
905 for (i = 0; i < avp->max_rx_queues; i++) {
906 avp->rx_q[i] = avp_dev_translate_address(eth_dev,
907 host_info->rx_phys + (i * host_info->rx_size));
908 avp->free_q[i] = avp_dev_translate_address(eth_dev,
909 host_info->free_phys + (i * host_info->free_size));
910 }
911
912 PMD_DRV_LOG(DEBUG, "AVP host request queue at 0x%" PRIx64 "\n",
913 host_info->req_phys);
914 PMD_DRV_LOG(DEBUG, "AVP host response queue at 0x%" PRIx64 "\n",
915 host_info->resp_phys);
916 PMD_DRV_LOG(DEBUG, "AVP host sync address at 0x%" PRIx64 "\n",
917 host_info->sync_phys);
918 PMD_DRV_LOG(DEBUG, "AVP host mbuf address at 0x%" PRIx64 "\n",
919 host_info->mbuf_phys);
920 avp->req_q = avp_dev_translate_address(eth_dev, host_info->req_phys);
921 avp->resp_q = avp_dev_translate_address(eth_dev, host_info->resp_phys);
922 avp->sync_addr =
923 avp_dev_translate_address(eth_dev, host_info->sync_phys);
924 avp->mbuf_addr =
925 avp_dev_translate_address(eth_dev, host_info->mbuf_phys);
926
927
928
929
930
931 avp->host_mbuf_addr = host_info->mbuf_va;
932 avp->host_sync_addr = host_info->sync_va;
933
934
935
936
937 avp->max_rx_pkt_len = host_info->max_rx_pkt_len;
938 PMD_DRV_LOG(DEBUG, "AVP host max receive packet length is %u\n",
939 host_info->max_rx_pkt_len);
940
941 return 0;
942}
943
944
945
946
947
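/*
 * Per-port initialization; validates the PCI BARs, sets up interrupts,
 * creates the AVP device and registers its MAC address.
 */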
948static int
949eth_avp_dev_init(struct rte_eth_dev *eth_dev)
950{
951 struct avp_dev *avp =
952 AVP_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private);
953 struct rte_pci_device *pci_dev;
954 int ret;
955
956 pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
957 eth_dev->dev_ops = &avp_eth_dev_ops;
958 eth_dev->rx_pkt_burst = &avp_recv_pkts;
959 eth_dev->tx_pkt_burst = &avp_xmit_pkts;
960
961 if (rte_eal_process_type() != RTE_PROC_PRIMARY) {
962
963
964
965
966
967
968 if (eth_dev->data->scattered_rx) {
969 PMD_DRV_LOG(NOTICE, "AVP device configured for chained mbufs\n");
970 eth_dev->rx_pkt_burst = avp_recv_scattered_pkts;
971 eth_dev->tx_pkt_burst = avp_xmit_scattered_pkts;
972 }
973 return 0;
974 }
975
976 rte_eth_copy_pci_info(eth_dev, pci_dev);
977 eth_dev->data->dev_flags |= RTE_ETH_DEV_AUTOFILL_QUEUE_XSTATS;
978
979
980 if (avp_dev_migration_pending(eth_dev)) {
981 PMD_DRV_LOG(ERR, "VM live migration operation in progress\n");
982 return -EBUSY;
983 }
984
985
986 ret = avp_dev_check_regions(eth_dev);
987 if (ret < 0) {
988 PMD_DRV_LOG(ERR, "Failed to validate BAR resources, ret=%d\n",
989 ret);
990 return ret;
991 }
992
993
994 ret = avp_dev_setup_interrupts(eth_dev);
995 if (ret < 0) {
996 PMD_DRV_LOG(ERR, "Failed to enable interrupts, ret=%d\n", ret);
997 return ret;
998 }
999
1000
1001 ret = avp_dev_create(pci_dev, eth_dev);
1002 if (ret < 0) {
1003 PMD_DRV_LOG(ERR, "Failed to create device, ret=%d\n", ret);
1004 return ret;
1005 }
1006
1007
1008 eth_dev->data->mac_addrs = rte_zmalloc("avp_ethdev",
1009 RTE_ETHER_ADDR_LEN, 0);
1010 if (eth_dev->data->mac_addrs == NULL) {
1011 PMD_DRV_LOG(ERR, "Failed to allocate %d bytes needed to store MAC addresses\n",
1012 RTE_ETHER_ADDR_LEN);
1013 return -ENOMEM;
1014 }
1015
1016
	rte_ether_addr_copy(&avp->ethaddr, &eth_dev->data->mac_addrs[0]);
1018
1019 return 0;
1020}
1021
1022static int
1023eth_avp_dev_uninit(struct rte_eth_dev *eth_dev)
1024{
1025 if (rte_eal_process_type() != RTE_PROC_PRIMARY)
1026 return -EPERM;
1027
1028 if (eth_dev->data == NULL)
1029 return 0;
1030
1031 avp_dev_close(eth_dev);
1032
1033 return 0;
1034}
1035
1036static int
1037eth_avp_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
1038 struct rte_pci_device *pci_dev)
1039{
1040 return rte_eth_dev_pci_generic_probe(pci_dev, sizeof(struct avp_adapter),
1041 eth_avp_dev_init);
1042}
1043
1044static int
1045eth_avp_pci_remove(struct rte_pci_device *pci_dev)
1046{
1047 return rte_eth_dev_pci_generic_remove(pci_dev,
1048 eth_avp_dev_uninit);
1049}
1050
1051static struct rte_pci_driver rte_avp_pmd = {
1052 .id_table = pci_id_avp_map,
1053 .drv_flags = RTE_PCI_DRV_NEED_MAPPING,
1054 .probe = eth_avp_pci_probe,
1055 .remove = eth_avp_pci_remove,
1056};
1057
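/* Return non-zero if chained (scattered) mbuf support must be enabled */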
1058static int
1059avp_dev_enable_scattered(struct rte_eth_dev *eth_dev,
1060 struct avp_dev *avp)
1061{
1062 unsigned int max_rx_pktlen;
1063
1064 max_rx_pktlen = eth_dev->data->mtu + RTE_ETHER_HDR_LEN +
1065 RTE_ETHER_CRC_LEN;
1066
1067 if (max_rx_pktlen > avp->guest_mbuf_size ||
1068 max_rx_pktlen > avp->host_mbuf_size) {
1069
1070
1071
1072
1073
1074
1075 return 1;
1076 }
1077
1078 if ((avp->max_rx_pkt_len > avp->guest_mbuf_size) ||
1079 (avp->max_rx_pkt_len > avp->host_mbuf_size)) {
1080
1081
1082
1083
1084
1085 return 1;
1086 }
1087
1088 return 0;
1089}
1090
1091static int
1092avp_dev_rx_queue_setup(struct rte_eth_dev *eth_dev,
1093 uint16_t rx_queue_id,
1094 uint16_t nb_rx_desc,
1095 unsigned int socket_id,
1096 const struct rte_eth_rxconf *rx_conf,
1097 struct rte_mempool *pool)
1098{
1099 struct avp_dev *avp = AVP_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private);
1100 struct rte_pktmbuf_pool_private *mbp_priv;
1101 struct avp_queue *rxq;
1102
1103 if (rx_queue_id >= eth_dev->data->nb_rx_queues) {
1104 PMD_DRV_LOG(ERR, "RX queue id is out of range: rx_queue_id=%u, nb_rx_queues=%u\n",
1105 rx_queue_id, eth_dev->data->nb_rx_queues);
1106 return -EINVAL;
1107 }
1108
1109
1110 avp->pool = pool;
1111
1112
1113 mbp_priv = rte_mempool_get_priv(pool);
1114 avp->guest_mbuf_size = (uint16_t)(mbp_priv->mbuf_data_room_size);
1115 avp->guest_mbuf_size -= RTE_PKTMBUF_HEADROOM;
1116
1117 if (avp_dev_enable_scattered(eth_dev, avp)) {
1118 if (!eth_dev->data->scattered_rx) {
1119 PMD_DRV_LOG(NOTICE, "AVP device configured for chained mbufs\n");
1120 eth_dev->data->scattered_rx = 1;
1121 eth_dev->rx_pkt_burst = avp_recv_scattered_pkts;
1122 eth_dev->tx_pkt_burst = avp_xmit_scattered_pkts;
1123 }
1124 }
1125
1126 PMD_DRV_LOG(DEBUG, "AVP max_rx_pkt_len=(%u,%u) mbuf_size=(%u,%u)\n",
1127 avp->max_rx_pkt_len,
1128 eth_dev->data->mtu + RTE_ETHER_HDR_LEN + RTE_ETHER_CRC_LEN,
1129 avp->host_mbuf_size,
1130 avp->guest_mbuf_size);
1131
1132
1133 rxq = rte_zmalloc_socket("ethdev RX queue", sizeof(struct avp_queue),
1134 RTE_CACHE_LINE_SIZE, socket_id);
1135 if (rxq == NULL) {
1136 PMD_DRV_LOG(ERR, "Failed to allocate new Rx queue object\n");
1137 return -ENOMEM;
1138 }
1139
1140
1141 rxq->avp = avp;
1142 rxq->dev_data = eth_dev->data;
1143 eth_dev->data->rx_queues[rx_queue_id] = (void *)rxq;
1144
1145
1146 _avp_set_rx_queue_mappings(eth_dev, rx_queue_id);
1147
1148 PMD_DRV_LOG(DEBUG, "Rx queue %u setup at %p\n", rx_queue_id, rxq);
1149
1150 (void)nb_rx_desc;
1151 (void)rx_conf;
1152 return 0;
1153}
1154
1155static int
1156avp_dev_tx_queue_setup(struct rte_eth_dev *eth_dev,
1157 uint16_t tx_queue_id,
1158 uint16_t nb_tx_desc,
1159 unsigned int socket_id,
1160 const struct rte_eth_txconf *tx_conf)
1161{
1162 struct avp_dev *avp = AVP_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private);
1163 struct avp_queue *txq;
1164
1165 if (tx_queue_id >= eth_dev->data->nb_tx_queues) {
1166 PMD_DRV_LOG(ERR, "TX queue id is out of range: tx_queue_id=%u, nb_tx_queues=%u\n",
1167 tx_queue_id, eth_dev->data->nb_tx_queues);
1168 return -EINVAL;
1169 }
1170
1171
1172 txq = rte_zmalloc_socket("ethdev TX queue", sizeof(struct avp_queue),
1173 RTE_CACHE_LINE_SIZE, socket_id);
1174 if (txq == NULL) {
1175 PMD_DRV_LOG(ERR, "Failed to allocate new Tx queue object\n");
1176 return -ENOMEM;
1177 }
1178
1179
1180 txq->queue_id = tx_queue_id;
1181 txq->queue_base = tx_queue_id;
1182 txq->queue_limit = tx_queue_id;
1183
1184
1185 txq->avp = avp;
1186 txq->dev_data = eth_dev->data;
1187 eth_dev->data->tx_queues[tx_queue_id] = (void *)txq;
1188
1189 PMD_DRV_LOG(DEBUG, "Tx queue %u setup at %p\n", tx_queue_id, txq);
1190
1191 (void)nb_tx_desc;
1192 (void)tx_conf;
1193 return 0;
1194}
1195
1196static inline int
1197_avp_cmp_ether_addr(struct rte_ether_addr *a, struct rte_ether_addr *b)
1198{
1199 uint16_t *_a = (uint16_t *)&a->addr_bytes[0];
1200 uint16_t *_b = (uint16_t *)&b->addr_bytes[0];
1201 return (_a[0] ^ _b[0]) | (_a[1] ^ _b[1]) | (_a[2] ^ _b[2]);
1202}
1203
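/*
 * Return 0 if the packet should be accepted (addressed to us, broadcast,
 * multicast, or promiscuous mode enabled); -1 otherwise.
 */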
1204static inline int
1205_avp_mac_filter(struct avp_dev *avp, struct rte_mbuf *m)
1206{
1207 struct rte_ether_hdr *eth = rte_pktmbuf_mtod(m, struct rte_ether_hdr *);
1208
	if (likely(_avp_cmp_ether_addr(&avp->ethaddr, &eth->dst_addr) == 0)) {
1210
1211 return 0;
1212 }
1213
	if (likely(rte_is_broadcast_ether_addr(&eth->dst_addr))) {
1215
1216 return 0;
1217 }
1218
	if (likely(rte_is_multicast_ether_addr(&eth->dst_addr))) {
1220
1221 return 0;
1222 }
1223
1224 if (avp->flags & AVP_F_PROMISC) {
1225
1226 return 0;
1227 }
1228
1229 return -1;
1230}
1231
1232#ifdef RTE_LIBRTE_AVP_DEBUG_BUFFERS
1233static inline void
1234__avp_dev_buffer_sanity_check(struct avp_dev *avp, struct rte_avp_desc *buf)
1235{
1236 struct rte_avp_desc *first_buf;
1237 struct rte_avp_desc *pkt_buf;
1238 unsigned int pkt_len;
1239 unsigned int nb_segs;
1240 void *pkt_data;
1241 unsigned int i;
1242
1243 first_buf = avp_dev_translate_buffer(avp, buf);
1244
1245 i = 0;
1246 pkt_len = 0;
1247 nb_segs = first_buf->nb_segs;
1248 do {
1249
1250 pkt_buf = avp_dev_translate_buffer(avp, buf);
1251 if (pkt_buf == NULL)
1252 rte_panic("bad buffer: segment %u has an invalid address %p\n",
1253 i, buf);
1254 pkt_data = avp_dev_translate_buffer(avp, pkt_buf->data);
1255 if (pkt_data == NULL)
1256 rte_panic("bad buffer: segment %u has a NULL data pointer\n",
1257 i);
1258 if (pkt_buf->data_len == 0)
1259 rte_panic("bad buffer: segment %u has 0 data length\n",
1260 i);
1261 pkt_len += pkt_buf->data_len;
1262 nb_segs--;
1263 i++;
1264
1265 } while (nb_segs && (buf = pkt_buf->next) != NULL);
1266
1267 if (nb_segs != 0)
1268 rte_panic("bad buffer: expected %u segments found %u\n",
1269 first_buf->nb_segs, (first_buf->nb_segs - nb_segs));
1270 if (pkt_len != first_buf->pkt_len)
1271 rte_panic("bad buffer: expected length %u found %u\n",
1272 first_buf->pkt_len, pkt_len);
1273}
1274
1275#define avp_dev_buffer_sanity_check(a, b) \
1276 __avp_dev_buffer_sanity_check((a), (b))
1277
1278#else
1279
1280#define avp_dev_buffer_sanity_check(a, b) do {} while (0)
1281
1282#endif
1283
1284
1285
1286
1287
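/* Copy a chain of host buffers into a chain of guest mbufs */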
1288static inline struct rte_mbuf *
1289avp_dev_copy_from_buffers(struct avp_dev *avp,
1290 struct rte_avp_desc *buf,
1291 struct rte_mbuf **mbufs,
1292 unsigned int count)
1293{
1294 struct rte_mbuf *m_previous = NULL;
1295 struct rte_avp_desc *pkt_buf;
1296 unsigned int total_length = 0;
1297 unsigned int copy_length;
1298 unsigned int src_offset;
1299 struct rte_mbuf *m;
1300 uint16_t ol_flags;
1301 uint16_t vlan_tci;
1302 void *pkt_data;
1303 unsigned int i;
1304
1305 avp_dev_buffer_sanity_check(avp, buf);
1306
1307
1308 pkt_buf = avp_dev_translate_buffer(avp, buf);
1309 pkt_data = avp_dev_translate_buffer(avp, pkt_buf->data);
1310 total_length = pkt_buf->pkt_len;
1311 src_offset = 0;
1312
1313 if (pkt_buf->ol_flags & RTE_AVP_RX_VLAN_PKT) {
1314 ol_flags = RTE_MBUF_F_RX_VLAN;
1315 vlan_tci = pkt_buf->vlan_tci;
1316 } else {
1317 ol_flags = 0;
1318 vlan_tci = 0;
1319 }
1320
1321 for (i = 0; (i < count) && (buf != NULL); i++) {
1322
1323 m = mbufs[i];
1324
1325 if (m_previous != NULL)
1326 m_previous->next = m;
1327
1328 m_previous = m;
1329
1330 do {
1331
1332
1333
1334
1335 copy_length = RTE_MIN((avp->guest_mbuf_size -
1336 rte_pktmbuf_data_len(m)),
1337 (pkt_buf->data_len -
1338 src_offset));
1339 rte_memcpy(RTE_PTR_ADD(rte_pktmbuf_mtod(m, void *),
1340 rte_pktmbuf_data_len(m)),
1341 RTE_PTR_ADD(pkt_data, src_offset),
1342 copy_length);
1343 rte_pktmbuf_data_len(m) += copy_length;
1344 src_offset += copy_length;
1345
1346 if (likely(src_offset == pkt_buf->data_len)) {
1347
1348 buf = pkt_buf->next;
1349 if (buf != NULL) {
1350 pkt_buf = avp_dev_translate_buffer(
1351 avp, buf);
1352 pkt_data = avp_dev_translate_buffer(
1353 avp, pkt_buf->data);
1354 src_offset = 0;
1355 }
1356 }
1357
1358 if (unlikely(rte_pktmbuf_data_len(m) ==
1359 avp->guest_mbuf_size)) {
1360
1361 break;
1362 }
1363
1364 } while (buf != NULL);
1365 }
1366
1367 m = mbufs[0];
1368 m->ol_flags = ol_flags;
1369 m->nb_segs = count;
1370 rte_pktmbuf_pkt_len(m) = total_length;
1371 m->vlan_tci = vlan_tci;
1372
1373 __rte_mbuf_sanity_check(m, 1);
1374
1375 return m;
1376}
1377
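/* Receive burst handler for packets that may span multiple mbufs */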
1378static uint16_t
1379avp_recv_scattered_pkts(void *rx_queue,
1380 struct rte_mbuf **rx_pkts,
1381 uint16_t nb_pkts)
1382{
1383 struct avp_queue *rxq = (struct avp_queue *)rx_queue;
1384 struct rte_avp_desc *avp_bufs[AVP_MAX_RX_BURST];
1385 struct rte_mbuf *mbufs[RTE_AVP_MAX_MBUF_SEGMENTS];
1386 struct avp_dev *avp = rxq->avp;
1387 struct rte_avp_desc *pkt_buf;
1388 struct rte_avp_fifo *free_q;
1389 struct rte_avp_fifo *rx_q;
1390 struct rte_avp_desc *buf;
1391 unsigned int count, avail, n;
1392 unsigned int guest_mbuf_size;
1393 struct rte_mbuf *m;
1394 unsigned int required;
1395 unsigned int buf_len;
1396 unsigned int port_id;
1397 unsigned int i;
1398
1399 if (unlikely(avp->flags & AVP_F_DETACHED)) {
1400
1401 return 0;
1402 }
1403
1404 guest_mbuf_size = avp->guest_mbuf_size;
1405 port_id = avp->port_id;
1406 rx_q = avp->rx_q[rxq->queue_id];
1407 free_q = avp->free_q[rxq->queue_id];
1408
1409
1410 rxq->queue_id = (rxq->queue_id < rxq->queue_limit) ?
1411 (rxq->queue_id + 1) : rxq->queue_base;
1412
1413
1414 count = avp_fifo_free_count(free_q);
1415
1416
1417 avail = avp_fifo_count(rx_q);
1418
1419
1420 count = RTE_MIN(count, avail);
1421 count = RTE_MIN(count, nb_pkts);
1422 count = RTE_MIN(count, (unsigned int)AVP_MAX_RX_BURST);
1423
1424 if (unlikely(count == 0)) {
1425
1426 return 0;
1427 }
1428
1429
1430 n = avp_fifo_get(rx_q, (void **)&avp_bufs, count);
1431 PMD_RX_LOG(DEBUG, "Receiving %u packets from Rx queue at %p\n",
1432 count, rx_q);
1433
1434 count = 0;
1435 for (i = 0; i < n; i++) {
1436
1437 if (i + 1 < n) {
1438 pkt_buf = avp_dev_translate_buffer(avp,
1439 avp_bufs[i + 1]);
1440 rte_prefetch0(pkt_buf);
1441 }
1442 buf = avp_bufs[i];
1443
1444
1445 pkt_buf = avp_dev_translate_buffer(avp, buf);
1446 buf_len = pkt_buf->pkt_len;
1447
1448
1449 required = (buf_len + guest_mbuf_size - 1) / guest_mbuf_size;
1450 if (rte_pktmbuf_alloc_bulk(avp->pool, mbufs, required)) {
1451 rxq->dev_data->rx_mbuf_alloc_failed++;
1452 continue;
1453 }
1454
1455
1456 m = avp_dev_copy_from_buffers(avp, buf, mbufs, required);
1457
1458
1459 m->port = port_id;
1460
1461 if (_avp_mac_filter(avp, m) != 0) {
1462
1463 rte_pktmbuf_free(m);
1464 continue;
1465 }
1466
1467
1468 rx_pkts[count++] = m;
1469 rxq->bytes += buf_len;
1470 }
1471
1472 rxq->packets += count;
1473
1474
1475 avp_fifo_put(free_q, (void **)&avp_bufs[0], n);
1476
1477 return count;
1478}
1479
1480
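/* Receive burst handler for packets that fit within a single mbuf */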
1481static uint16_t
1482avp_recv_pkts(void *rx_queue,
1483 struct rte_mbuf **rx_pkts,
1484 uint16_t nb_pkts)
1485{
1486 struct avp_queue *rxq = (struct avp_queue *)rx_queue;
1487 struct rte_avp_desc *avp_bufs[AVP_MAX_RX_BURST];
1488 struct avp_dev *avp = rxq->avp;
1489 struct rte_avp_desc *pkt_buf;
1490 struct rte_avp_fifo *free_q;
1491 struct rte_avp_fifo *rx_q;
1492 unsigned int count, avail, n;
1493 unsigned int pkt_len;
1494 struct rte_mbuf *m;
1495 char *pkt_data;
1496 unsigned int i;
1497
1498 if (unlikely(avp->flags & AVP_F_DETACHED)) {
1499
1500 return 0;
1501 }
1502
1503 rx_q = avp->rx_q[rxq->queue_id];
1504 free_q = avp->free_q[rxq->queue_id];
1505
1506
1507 rxq->queue_id = (rxq->queue_id < rxq->queue_limit) ?
1508 (rxq->queue_id + 1) : rxq->queue_base;
1509
1510
1511 count = avp_fifo_free_count(free_q);
1512
1513
1514 avail = avp_fifo_count(rx_q);
1515
1516
1517 count = RTE_MIN(count, avail);
1518 count = RTE_MIN(count, nb_pkts);
1519 count = RTE_MIN(count, (unsigned int)AVP_MAX_RX_BURST);
1520
1521 if (unlikely(count == 0)) {
1522
1523 return 0;
1524 }
1525
1526
1527 n = avp_fifo_get(rx_q, (void **)&avp_bufs, count);
1528 PMD_RX_LOG(DEBUG, "Receiving %u packets from Rx queue at %p\n",
1529 count, rx_q);
1530
1531 count = 0;
1532 for (i = 0; i < n; i++) {
1533
1534 if (i < n - 1) {
1535 pkt_buf = avp_dev_translate_buffer(avp,
1536 avp_bufs[i + 1]);
1537 rte_prefetch0(pkt_buf);
1538 }
1539
1540
1541 pkt_buf = avp_dev_translate_buffer(avp, avp_bufs[i]);
1542 pkt_data = avp_dev_translate_buffer(avp, pkt_buf->data);
1543 pkt_len = pkt_buf->pkt_len;
1544
1545 if (unlikely((pkt_len > avp->guest_mbuf_size) ||
1546 (pkt_buf->nb_segs > 1))) {
1547
1548
1549
1550
1551 rxq->errors++;
1552 continue;
1553 }
1554
1555
1556 m = rte_pktmbuf_alloc(avp->pool);
1557 if (unlikely(m == NULL)) {
1558 rxq->dev_data->rx_mbuf_alloc_failed++;
1559 continue;
1560 }
1561
1562
1563 m->data_off = RTE_PKTMBUF_HEADROOM;
1564 rte_memcpy(rte_pktmbuf_mtod(m, void *), pkt_data, pkt_len);
1565
1566
1567 rte_pktmbuf_data_len(m) = pkt_len;
1568 rte_pktmbuf_pkt_len(m) = pkt_len;
1569 m->port = avp->port_id;
1570
1571 if (pkt_buf->ol_flags & RTE_AVP_RX_VLAN_PKT) {
1572 m->ol_flags = RTE_MBUF_F_RX_VLAN;
1573 m->vlan_tci = pkt_buf->vlan_tci;
1574 }
1575
1576 if (_avp_mac_filter(avp, m) != 0) {
1577
1578 rte_pktmbuf_free(m);
1579 continue;
1580 }
1581
1582
1583 rx_pkts[count++] = m;
1584 rxq->bytes += pkt_len;
1585 }
1586
1587 rxq->packets += count;
1588
1589
1590 avp_fifo_put(free_q, (void **)&avp_bufs[0], n);
1591
1592 return count;
1593}
1594
1595
1596
1597
1598
1599
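/* Copy a guest mbuf chain into a chain of host buffers */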
1600static inline uint16_t
1601avp_dev_copy_to_buffers(struct avp_dev *avp,
1602 struct rte_mbuf *mbuf,
1603 struct rte_avp_desc **buffers,
1604 unsigned int count)
1605{
1606 struct rte_avp_desc *previous_buf = NULL;
1607 struct rte_avp_desc *first_buf = NULL;
1608 struct rte_avp_desc *pkt_buf;
1609 struct rte_avp_desc *buf;
1610 size_t total_length;
1611 struct rte_mbuf *m;
1612 size_t copy_length;
1613 size_t src_offset;
1614 char *pkt_data;
1615 unsigned int i;
1616
1617 __rte_mbuf_sanity_check(mbuf, 1);
1618
1619 m = mbuf;
1620 src_offset = 0;
1621 total_length = rte_pktmbuf_pkt_len(m);
1622 for (i = 0; (i < count) && (m != NULL); i++) {
1623
1624 buf = buffers[i];
1625
1626 if (i < count - 1) {
1627
1628 pkt_buf = avp_dev_translate_buffer(avp, buffers[i + 1]);
1629 rte_prefetch0(pkt_buf);
1630 }
1631
1632
1633 pkt_buf = avp_dev_translate_buffer(avp, buf);
1634 pkt_data = avp_dev_translate_buffer(avp, pkt_buf->data);
1635
1636
1637 if (previous_buf != NULL)
1638 previous_buf->next = buf;
1639 else
1640 first_buf = pkt_buf;
1641
1642 previous_buf = pkt_buf;
1643
1644 do {
1645
1646
1647
1648
1649 copy_length = RTE_MIN((avp->host_mbuf_size -
1650 pkt_buf->data_len),
1651 (rte_pktmbuf_data_len(m) -
1652 src_offset));
1653 rte_memcpy(RTE_PTR_ADD(pkt_data, pkt_buf->data_len),
1654 RTE_PTR_ADD(rte_pktmbuf_mtod(m, void *),
1655 src_offset),
1656 copy_length);
1657 pkt_buf->data_len += copy_length;
1658 src_offset += copy_length;
1659
1660 if (likely(src_offset == rte_pktmbuf_data_len(m))) {
1661
1662 m = m->next;
1663 src_offset = 0;
1664 }
1665
1666 if (unlikely(pkt_buf->data_len ==
1667 avp->host_mbuf_size)) {
1668
1669 break;
1670 }
1671
1672 } while (m != NULL);
1673 }
1674
1675 first_buf->nb_segs = count;
1676 first_buf->pkt_len = total_length;
1677
1678 if (mbuf->ol_flags & RTE_MBUF_F_TX_VLAN) {
1679 first_buf->ol_flags |= RTE_AVP_TX_VLAN_PKT;
1680 first_buf->vlan_tci = mbuf->vlan_tci;
1681 }
1682
1683 avp_dev_buffer_sanity_check(avp, buffers[0]);
1684
1685 return total_length;
1686}
1687
1688
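/* Transmit burst handler for packets that may span multiple host buffers */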
1689static uint16_t
1690avp_xmit_scattered_pkts(void *tx_queue,
1691 struct rte_mbuf **tx_pkts,
1692 uint16_t nb_pkts)
1693{
1694 struct rte_avp_desc *avp_bufs[(AVP_MAX_TX_BURST *
1695 RTE_AVP_MAX_MBUF_SEGMENTS)] = {};
1696 struct avp_queue *txq = (struct avp_queue *)tx_queue;
1697 struct rte_avp_desc *tx_bufs[AVP_MAX_TX_BURST];
1698 struct avp_dev *avp = txq->avp;
1699 struct rte_avp_fifo *alloc_q;
1700 struct rte_avp_fifo *tx_q;
1701 unsigned int count, avail, n;
1702 unsigned int orig_nb_pkts;
1703 struct rte_mbuf *m;
1704 unsigned int required;
1705 unsigned int segments;
1706 unsigned int tx_bytes;
1707 unsigned int i;
1708
1709 orig_nb_pkts = nb_pkts;
1710 if (unlikely(avp->flags & AVP_F_DETACHED)) {
1711
1712
1713 txq->errors += nb_pkts;
1714 return 0;
1715 }
1716
1717 tx_q = avp->tx_q[txq->queue_id];
1718 alloc_q = avp->alloc_q[txq->queue_id];
1719
1720
1721 if (unlikely(nb_pkts > AVP_MAX_TX_BURST))
1722 nb_pkts = AVP_MAX_TX_BURST;
1723
1724
1725 avail = avp_fifo_count(alloc_q);
1726 if (unlikely(avail > (AVP_MAX_TX_BURST *
1727 RTE_AVP_MAX_MBUF_SEGMENTS)))
1728 avail = AVP_MAX_TX_BURST * RTE_AVP_MAX_MBUF_SEGMENTS;
1729
1730
1731 count = avp_fifo_free_count(tx_q);
1732
1733
1734 nb_pkts = RTE_MIN(count, nb_pkts);
1735
1736
1737 count = 0;
1738 segments = 0;
1739 for (i = 0; i < nb_pkts; i++) {
1740 m = tx_pkts[i];
1741 if (likely(i < (unsigned int)nb_pkts - 1)) {
1742
1743 rte_prefetch0(tx_pkts[i + 1]);
1744 }
1745 required = (rte_pktmbuf_pkt_len(m) + avp->host_mbuf_size - 1) /
1746 avp->host_mbuf_size;
1747
1748 if (unlikely((required == 0) ||
1749 (required > RTE_AVP_MAX_MBUF_SEGMENTS)))
1750 break;
1751 else if (unlikely(required + segments > avail))
1752 break;
1753 segments += required;
1754 count++;
1755 }
1756 nb_pkts = count;
1757
1758 if (unlikely(nb_pkts == 0)) {
1759
1760 txq->errors += orig_nb_pkts;
1761 return 0;
1762 }
1763
1764 PMD_TX_LOG(DEBUG, "Sending %u packets on Tx queue at %p\n",
1765 nb_pkts, tx_q);
1766
1767
1768 n = avp_fifo_get(alloc_q, (void **)&avp_bufs, segments);
1769 if (unlikely(n != segments)) {
1770 PMD_TX_LOG(DEBUG, "Failed to allocate buffers "
1771 "n=%u, segments=%u, orig=%u\n",
1772 n, segments, orig_nb_pkts);
1773 txq->errors += orig_nb_pkts;
1774 return 0;
1775 }
1776
1777 tx_bytes = 0;
1778 count = 0;
1779 for (i = 0; i < nb_pkts; i++) {
1780
1781 m = tx_pkts[i];
1782
1783
1784 required = (rte_pktmbuf_pkt_len(m) + avp->host_mbuf_size - 1) /
1785 avp->host_mbuf_size;
1786
1787 tx_bytes += avp_dev_copy_to_buffers(avp, m,
1788 &avp_bufs[count], required);
1789 tx_bufs[i] = avp_bufs[count];
1790 count += required;
1791
1792
1793 rte_pktmbuf_free(m);
1794 }
1795
1796 txq->packets += nb_pkts;
1797 txq->bytes += tx_bytes;
1798
1799#ifdef RTE_LIBRTE_AVP_DEBUG_BUFFERS
1800 for (i = 0; i < nb_pkts; i++)
1801 avp_dev_buffer_sanity_check(avp, tx_bufs[i]);
1802#endif
1803
1804
1805 n = avp_fifo_put(tx_q, (void **)&tx_bufs[0], nb_pkts);
1806 if (unlikely(n != orig_nb_pkts))
1807 txq->errors += (orig_nb_pkts - n);
1808
1809 return n;
1810}
1811
1812
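/* Transmit burst handler for packets that fit within a single host buffer */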
1813static uint16_t
1814avp_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
1815{
1816 struct avp_queue *txq = (struct avp_queue *)tx_queue;
1817 struct rte_avp_desc *avp_bufs[AVP_MAX_TX_BURST];
1818 struct avp_dev *avp = txq->avp;
1819 struct rte_avp_desc *pkt_buf;
1820 struct rte_avp_fifo *alloc_q;
1821 struct rte_avp_fifo *tx_q;
1822 unsigned int count, avail, n;
1823 struct rte_mbuf *m;
1824 unsigned int pkt_len;
1825 unsigned int tx_bytes;
1826 char *pkt_data;
1827 unsigned int i;
1828
1829 if (unlikely(avp->flags & AVP_F_DETACHED)) {
1830
1831
1832 txq->errors++;
1833 return 0;
1834 }
1835
1836 tx_q = avp->tx_q[txq->queue_id];
1837 alloc_q = avp->alloc_q[txq->queue_id];
1838
1839
1840 if (unlikely(nb_pkts > AVP_MAX_TX_BURST))
1841 nb_pkts = AVP_MAX_TX_BURST;
1842
1843
1844 avail = avp_fifo_count(alloc_q);
1845
1846
1847 count = avp_fifo_free_count(tx_q);
1848
1849
1850 count = RTE_MIN(count, avail);
1851 count = RTE_MIN(count, nb_pkts);
1852
1853 if (unlikely(count == 0)) {
1854
1855 txq->errors += nb_pkts;
1856 return 0;
1857 }
1858
1859 PMD_TX_LOG(DEBUG, "Sending %u packets on Tx queue at %p\n",
1860 count, tx_q);
1861
1862
1863 n = avp_fifo_get(alloc_q, (void **)&avp_bufs, count);
1864 if (unlikely(n != count)) {
1865 txq->errors++;
1866 return 0;
1867 }
1868
1869 tx_bytes = 0;
1870 for (i = 0; i < count; i++) {
1871
1872 if (i < count - 1) {
1873 pkt_buf = avp_dev_translate_buffer(avp,
1874 avp_bufs[i + 1]);
1875 rte_prefetch0(pkt_buf);
1876 }
1877
1878
1879 m = tx_pkts[i];
1880
1881
1882 pkt_buf = avp_dev_translate_buffer(avp, avp_bufs[i]);
1883 pkt_data = avp_dev_translate_buffer(avp, pkt_buf->data);
1884 pkt_len = rte_pktmbuf_pkt_len(m);
1885
1886 if (unlikely((pkt_len > avp->guest_mbuf_size) ||
1887 (pkt_len > avp->host_mbuf_size))) {
1888
1889
1890
1891
1892
1893
1894
1895
1896
1897 txq->errors++;
1898 pkt_len = RTE_MIN(avp->guest_mbuf_size,
1899 avp->host_mbuf_size);
1900 }
1901
1902
1903 rte_memcpy(pkt_data, rte_pktmbuf_mtod(m, void *), pkt_len);
1904 pkt_buf->pkt_len = pkt_len;
1905 pkt_buf->data_len = pkt_len;
1906 pkt_buf->nb_segs = 1;
1907 pkt_buf->next = NULL;
1908
1909 if (m->ol_flags & RTE_MBUF_F_TX_VLAN) {
1910 pkt_buf->ol_flags |= RTE_AVP_TX_VLAN_PKT;
1911 pkt_buf->vlan_tci = m->vlan_tci;
1912 }
1913
1914 tx_bytes += pkt_len;
1915
1916
1917 rte_pktmbuf_free(m);
1918 }
1919
1920 txq->packets += count;
1921 txq->bytes += tx_bytes;
1922
1923
1924 n = avp_fifo_put(tx_q, (void **)&avp_bufs[0], count);
1925
1926 return n;
1927}
1928
1929static void
1930avp_dev_rx_queue_release(struct rte_eth_dev *eth_dev, uint16_t rx_queue_id)
1931{
1932 if (eth_dev->data->rx_queues[rx_queue_id] != NULL) {
1933 rte_free(eth_dev->data->rx_queues[rx_queue_id]);
1934 eth_dev->data->rx_queues[rx_queue_id] = NULL;
1935 }
1936}
1937
1938static void
1939avp_dev_rx_queue_release_all(struct rte_eth_dev *eth_dev)
1940{
1941 struct avp_dev *avp = AVP_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private);
1942 struct rte_eth_dev_data *data = avp->dev_data;
1943 unsigned int i;
1944
1945 for (i = 0; i < avp->num_rx_queues; i++) {
1946 if (data->rx_queues[i]) {
1947 rte_free(data->rx_queues[i]);
1948 data->rx_queues[i] = NULL;
1949 }
1950 }
1951}
1952
1953static void
1954avp_dev_tx_queue_release(struct rte_eth_dev *eth_dev, uint16_t tx_queue_id)
1955{
1956 if (eth_dev->data->tx_queues[tx_queue_id] != NULL) {
1957 rte_free(eth_dev->data->tx_queues[tx_queue_id]);
1958 eth_dev->data->tx_queues[tx_queue_id] = NULL;
1959 }
1960}
1961
1962static void
1963avp_dev_tx_queue_release_all(struct rte_eth_dev *eth_dev)
1964{
1965 struct avp_dev *avp = AVP_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private);
1966 struct rte_eth_dev_data *data = avp->dev_data;
1967 unsigned int i;
1968
1969 for (i = 0; i < avp->num_tx_queues; i++) {
1970 if (data->tx_queues[i]) {
1971 rte_free(data->tx_queues[i]);
1972 data->tx_queues[i] = NULL;
1973 }
1974 }
1975}
1976
1977static int
1978avp_dev_configure(struct rte_eth_dev *eth_dev)
1979{
1980 struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
1981 struct avp_dev *avp = AVP_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private);
1982 struct rte_avp_device_info *host_info;
1983 struct rte_avp_device_config config;
1984 int mask = 0;
1985 void *addr;
1986 int ret;
1987
1988 rte_spinlock_lock(&avp->lock);
1989 if (avp->flags & AVP_F_DETACHED) {
1990 PMD_DRV_LOG(ERR, "Operation not supported during VM live migration\n");
1991 ret = -ENOTSUP;
1992 goto unlock;
1993 }
1994
1995 addr = pci_dev->mem_resource[RTE_AVP_PCI_DEVICE_BAR].addr;
1996 host_info = (struct rte_avp_device_info *)addr;
1997
1998
1999 _avp_set_queue_counts(eth_dev);
2000
2001 mask = (RTE_ETH_VLAN_STRIP_MASK |
2002 RTE_ETH_VLAN_FILTER_MASK |
2003 RTE_ETH_VLAN_EXTEND_MASK);
2004 ret = avp_vlan_offload_set(eth_dev, mask);
2005 if (ret < 0) {
2006 PMD_DRV_LOG(ERR, "VLAN offload set failed by host, ret=%d\n",
2007 ret);
2008 goto unlock;
2009 }
2010
2011
2012 memset(&config, 0, sizeof(config));
2013 config.device_id = host_info->device_id;
2014 config.driver_type = RTE_AVP_DRIVER_TYPE_DPDK;
2015 config.driver_version = AVP_DPDK_DRIVER_VERSION;
2016 config.features = avp->features;
2017 config.num_tx_queues = avp->num_tx_queues;
2018 config.num_rx_queues = avp->num_rx_queues;
2019
2020 ret = avp_dev_ctrl_set_config(eth_dev, &config);
2021 if (ret < 0) {
2022 PMD_DRV_LOG(ERR, "Config request failed by host, ret=%d\n",
2023 ret);
2024 goto unlock;
2025 }
2026
2027 avp->flags |= AVP_F_CONFIGURED;
2028 ret = 0;
2029
2030unlock:
2031 rte_spinlock_unlock(&avp->lock);
2032 return ret;
2033}
2034
2035static int
2036avp_dev_start(struct rte_eth_dev *eth_dev)
2037{
2038 struct avp_dev *avp = AVP_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private);
2039 int ret;
2040
2041 rte_spinlock_lock(&avp->lock);
2042 if (avp->flags & AVP_F_DETACHED) {
2043 PMD_DRV_LOG(ERR, "Operation not supported during VM live migration\n");
2044 ret = -ENOTSUP;
2045 goto unlock;
2046 }
2047
2048
2049 ret = avp_dev_ctrl_set_link_state(eth_dev, 1);
2050 if (ret < 0) {
2051 PMD_DRV_LOG(ERR, "Link state change failed by host, ret=%d\n",
2052 ret);
2053 goto unlock;
2054 }
2055
2056
2057 avp->flags |= AVP_F_LINKUP;
2058
2059 ret = 0;
2060
2061unlock:
2062 rte_spinlock_unlock(&avp->lock);
2063 return ret;
2064}
2065
2066static int
2067avp_dev_stop(struct rte_eth_dev *eth_dev)
2068{
2069 struct avp_dev *avp = AVP_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private);
2070 int ret;
2071
2072 rte_spinlock_lock(&avp->lock);
2073 if (avp->flags & AVP_F_DETACHED) {
2074 PMD_DRV_LOG(ERR, "Operation not supported during VM live migration\n");
2075 ret = -ENOTSUP;
2076 goto unlock;
2077 }
2078
2079
2080 avp->flags &= ~AVP_F_LINKUP;
2081
2082
2083 ret = avp_dev_ctrl_set_link_state(eth_dev, 0);
2084 if (ret < 0) {
2085 PMD_DRV_LOG(ERR, "Link state change failed by host, ret=%d\n",
2086 ret);
2087 }
2088
2089unlock:
2090 rte_spinlock_unlock(&avp->lock);
2091 return ret;
2092}
2093
2094static int
2095avp_dev_close(struct rte_eth_dev *eth_dev)
2096{
2097 struct avp_dev *avp = AVP_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private);
2098 int ret;
2099
2100 if (rte_eal_process_type() != RTE_PROC_PRIMARY)
2101 return 0;
2102
2103 rte_spinlock_lock(&avp->lock);
2104 if (avp->flags & AVP_F_DETACHED) {
2105 PMD_DRV_LOG(ERR, "Operation not supported during VM live migration\n");
2106 goto unlock;
2107 }
2108
2109
2110 avp->flags &= ~AVP_F_LINKUP;
2111 avp->flags &= ~AVP_F_CONFIGURED;
2112
2113 ret = avp_dev_disable_interrupts(eth_dev);
2114 if (ret < 0) {
2115 PMD_DRV_LOG(ERR, "Failed to disable interrupts\n");
2116
2117 }
2118
2119
2120 ret = avp_dev_ctrl_shutdown(eth_dev);
2121 if (ret < 0) {
2122 PMD_DRV_LOG(ERR, "Device shutdown failed by host, ret=%d\n",
2123 ret);
2124
2125 }
2126
2127
2128 avp_dev_rx_queue_release_all(eth_dev);
2129 avp_dev_tx_queue_release_all(eth_dev);
2130
2131unlock:
2132 rte_spinlock_unlock(&avp->lock);
2133 return 0;
2134}
2135
2136static int
2137avp_dev_link_update(struct rte_eth_dev *eth_dev,
2138 __rte_unused int wait_to_complete)
2139{
2140 struct avp_dev *avp = AVP_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private);
	struct rte_eth_link *link = &eth_dev->data->dev_link;
2142
2143 link->link_speed = RTE_ETH_SPEED_NUM_10G;
2144 link->link_duplex = RTE_ETH_LINK_FULL_DUPLEX;
2145 link->link_status = !!(avp->flags & AVP_F_LINKUP);
2146
2147 return -1;
2148}
2149
2150static int
2151avp_dev_promiscuous_enable(struct rte_eth_dev *eth_dev)
2152{
2153 struct avp_dev *avp = AVP_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private);
2154
2155 rte_spinlock_lock(&avp->lock);
2156 if ((avp->flags & AVP_F_PROMISC) == 0) {
2157 avp->flags |= AVP_F_PROMISC;
2158 PMD_DRV_LOG(DEBUG, "Promiscuous mode enabled on %u\n",
2159 eth_dev->data->port_id);
2160 }
2161 rte_spinlock_unlock(&avp->lock);
2162
2163 return 0;
2164}
2165
2166static int
2167avp_dev_promiscuous_disable(struct rte_eth_dev *eth_dev)
2168{
2169 struct avp_dev *avp = AVP_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private);
2170
2171 rte_spinlock_lock(&avp->lock);
2172 if ((avp->flags & AVP_F_PROMISC) != 0) {
2173 avp->flags &= ~AVP_F_PROMISC;
2174 PMD_DRV_LOG(DEBUG, "Promiscuous mode disabled on %u\n",
2175 eth_dev->data->port_id);
2176 }
2177 rte_spinlock_unlock(&avp->lock);
2178
2179 return 0;
2180}
2181
2182static int
2183avp_dev_info_get(struct rte_eth_dev *eth_dev,
2184 struct rte_eth_dev_info *dev_info)
2185{
2186 struct avp_dev *avp = AVP_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private);
2187
2188 dev_info->max_rx_queues = avp->max_rx_queues;
2189 dev_info->max_tx_queues = avp->max_tx_queues;
2190 dev_info->min_rx_bufsize = AVP_MIN_RX_BUFSIZE;
2191 dev_info->max_rx_pktlen = avp->max_rx_pkt_len;
2192 dev_info->max_mac_addrs = AVP_MAX_MAC_ADDRS;
2193 if (avp->host_features & RTE_AVP_FEATURE_VLAN_OFFLOAD) {
2194 dev_info->rx_offload_capa = RTE_ETH_RX_OFFLOAD_VLAN_STRIP;
2195 dev_info->tx_offload_capa = RTE_ETH_TX_OFFLOAD_VLAN_INSERT;
2196 }
2197
2198 return 0;
2199}
2200
2201static int
2202avp_vlan_offload_set(struct rte_eth_dev *eth_dev, int mask)
2203{
2204 struct avp_dev *avp = AVP_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private);
	struct rte_eth_conf *dev_conf = &eth_dev->data->dev_conf;
2206 uint64_t offloads = dev_conf->rxmode.offloads;
2207
2208 if (mask & RTE_ETH_VLAN_STRIP_MASK) {
2209 if (avp->host_features & RTE_AVP_FEATURE_VLAN_OFFLOAD) {
2210 if (offloads & RTE_ETH_RX_OFFLOAD_VLAN_STRIP)
2211 avp->features |= RTE_AVP_FEATURE_VLAN_OFFLOAD;
2212 else
2213 avp->features &= ~RTE_AVP_FEATURE_VLAN_OFFLOAD;
2214 } else {
2215 PMD_DRV_LOG(ERR, "VLAN strip offload not supported\n");
2216 }
2217 }
2218
2219 if (mask & RTE_ETH_VLAN_FILTER_MASK) {
2220 if (offloads & RTE_ETH_RX_OFFLOAD_VLAN_FILTER)
2221 PMD_DRV_LOG(ERR, "VLAN filter offload not supported\n");
2222 }
2223
2224 if (mask & RTE_ETH_VLAN_EXTEND_MASK) {
2225 if (offloads & RTE_ETH_RX_OFFLOAD_VLAN_EXTEND)
2226 PMD_DRV_LOG(ERR, "VLAN extend offload not supported\n");
2227 }
2228
2229 return 0;
2230}
2231
2232static int
2233avp_dev_stats_get(struct rte_eth_dev *eth_dev, struct rte_eth_stats *stats)
2234{
2235 struct avp_dev *avp = AVP_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private);
2236 unsigned int i;
2237
2238 for (i = 0; i < avp->num_rx_queues; i++) {
2239 struct avp_queue *rxq = avp->dev_data->rx_queues[i];
2240
2241 if (rxq) {
2242 stats->ipackets += rxq->packets;
2243 stats->ibytes += rxq->bytes;
2244 stats->ierrors += rxq->errors;
2245
2246 stats->q_ipackets[i] += rxq->packets;
2247 stats->q_ibytes[i] += rxq->bytes;
2248 stats->q_errors[i] += rxq->errors;
2249 }
2250 }
2251
2252 for (i = 0; i < avp->num_tx_queues; i++) {
2253 struct avp_queue *txq = avp->dev_data->tx_queues[i];
2254
2255 if (txq) {
2256 stats->opackets += txq->packets;
2257 stats->obytes += txq->bytes;
2258 stats->oerrors += txq->errors;
2259
2260 stats->q_opackets[i] += txq->packets;
2261 stats->q_obytes[i] += txq->bytes;
2262 }
2263 }
2264
2265 return 0;
2266}
2267
2268static int
2269avp_dev_stats_reset(struct rte_eth_dev *eth_dev)
2270{
2271 struct avp_dev *avp = AVP_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private);
2272 unsigned int i;
2273
2274 for (i = 0; i < avp->num_rx_queues; i++) {
2275 struct avp_queue *rxq = avp->dev_data->rx_queues[i];
2276
2277 if (rxq) {
2278 rxq->bytes = 0;
2279 rxq->packets = 0;
2280 rxq->errors = 0;
2281 }
2282 }
2283
2284 for (i = 0; i < avp->num_tx_queues; i++) {
2285 struct avp_queue *txq = avp->dev_data->tx_queues[i];
2286
2287 if (txq) {
2288 txq->bytes = 0;
2289 txq->packets = 0;
2290 txq->errors = 0;
2291 }
2292 }
2293
2294 return 0;
2295}
2296
2297RTE_PMD_REGISTER_PCI(net_avp, rte_avp_pmd);
2298RTE_PMD_REGISTER_PCI_TABLE(net_avp, pci_id_avp_map);
2299RTE_LOG_REGISTER_SUFFIX(avp_logtype_driver, driver, NOTICE);
2300