1
2
3
4
5
6
7
8#include <unistd.h>
9#include <stdio.h>
10#include <sys/types.h>
11#include <string.h>
12#include <stdlib.h>
13#include <fcntl.h>
14#include <errno.h>
15#include <sys/ioctl.h>
16#include <sys/stat.h>
17#include <sys/mman.h>
18#include <sys/vfs.h>
19#include <libgen.h>
20#include <dirent.h>
21#include <sys/eventfd.h>
22
23#include <eal_filesystem.h>
24#include <rte_mbuf.h>
25#include <ethdev_driver.h>
26#include <rte_malloc.h>
27#include <rte_memcpy.h>
28#include <rte_string_fns.h>
29#include <rte_cycles.h>
30#include <rte_kvargs.h>
31#include <rte_dev.h>
32#include <rte_bus.h>
33#include <rte_eal_memconfig.h>
34
35#include "rte_fslmc.h"
36#include "fslmc_vfio.h"
37#include "fslmc_logs.h"
38#include <mc/fsl_dpmng.h>
39
40#include "portal/dpaa2_hw_pvt.h"
41#include "portal/dpaa2_hw_dpio.h"
42
/* Maximum allowed length for the DPRC container name */
#define FSLMC_CONTAINER_MAX_LEN 8


/* Singleton VFIO group/container pair used by this bus */
static struct fslmc_vfio_group vfio_group;
static struct fslmc_vfio_container vfio_container;
/* fd of the DPRC container device (VFIO_GROUP_GET_DEVICE_FD result) */
static int container_device_fd;
/* Container name taken from the DPRC environment variable (strdup'd) */
char *fslmc_container;
/* RTE_VFIO_NOIOMMU or VFIO_TYPE1_IOMMU; set in fslmc_get_container_group() */
static int fslmc_iommu_type;
/* CPU address used for MSI generation; set in vfio_map_irq_region() */
static uint32_t *msi_intr_vaddr;
/* Table of mapped MC portal addresses, indexed by portal index */
void *(*rte_mcp_ptr_list);
53
54void *
55dpaa2_get_mcp_ptr(int portal_idx)
56{
57 if (rte_mcp_ptr_list)
58 return rte_mcp_ptr_list[portal_idx];
59 else
60 return NULL;
61}
62
/* List of DPAA2 object handlers registered via rte_fslmc_object_register();
 * consumed in fslmc_process_iodevices() to create matching devices.
 */
static struct rte_dpaa2_object_list dpaa2_obj_list =
	TAILQ_HEAD_INITIALIZER(dpaa2_obj_list);
65
66
/*
 * Register a DPAA2 object-type handler with the bus.  Object drivers call
 * this at init time; the entry's create() hook is invoked later from
 * fslmc_process_iodevices() for each device of the matching type.
 */
void
rte_fslmc_object_register(struct rte_dpaa2_object *object)
{
	RTE_VERIFY(object);

	TAILQ_INSERT_TAIL(&dpaa2_obj_list, object, next);
}
74
75int
76fslmc_get_container_group(int *groupid)
77{
78 int ret;
79 char *container;
80
81 if (!fslmc_container) {
82 container = getenv("DPRC");
83 if (container == NULL) {
84 DPAA2_BUS_DEBUG("DPAA2: DPRC not available");
85 return -EINVAL;
86 }
87
88 if (strlen(container) >= FSLMC_CONTAINER_MAX_LEN) {
89 DPAA2_BUS_ERR("Invalid container name: %s", container);
90 return -1;
91 }
92
93 fslmc_container = strdup(container);
94 if (!fslmc_container) {
95 DPAA2_BUS_ERR("Mem alloc failure; Container name");
96 return -ENOMEM;
97 }
98 }
99
100 fslmc_iommu_type = (rte_vfio_noiommu_is_enabled() == 1) ?
101 RTE_VFIO_NOIOMMU : VFIO_TYPE1_IOMMU;
102
103
104 ret = rte_vfio_get_group_num(SYSFS_FSL_MC_DEVICES,
105 fslmc_container, groupid);
106 if (ret <= 0) {
107 DPAA2_BUS_ERR("Unable to find %s IOMMU group", fslmc_container);
108 return -1;
109 }
110
111 DPAA2_BUS_DEBUG("Container: %s has VFIO iommu group id = %d",
112 fslmc_container, *groupid);
113
114 return 0;
115}
116
/*
 * Attach the file-scope vfio_group to a VFIO container and program the
 * IOMMU type selected in fslmc_get_container_group().  On success the
 * group and container structs are cross-linked.
 *
 * Returns 0 on success, -1 / -errno / -EINVAL on failure.
 */
static int
vfio_connect_container(void)
{
	int fd, ret;

	/* NOTE(review): this bails out when the container is already in
	 * use, but the log text ("No container available") reads inverted
	 * for that condition — confirm intent. */
	if (vfio_container.used) {
		DPAA2_BUS_DEBUG("No container available");
		return -1;
	}

	/* Probe: if the group can be attached to the existing container
	 * fd, reuse it and return. */
	if (!ioctl(vfio_group.fd, VFIO_GROUP_SET_CONTAINER,
		&vfio_container.fd)) {
		DPAA2_BUS_DEBUG(
		"Container pre-exists with FD[0x%x] for this group",
		vfio_container.fd);
		vfio_group.container = &vfio_container;
		return 0;
	}

	/* Open a fresh VFIO container fd via EAL. */
	fd = rte_vfio_get_container_fd();
	if (fd < 0) {
		DPAA2_BUS_ERR("Failed to open VFIO container");
		return -errno;
	}

	/* Check whether the selected IOMMU type is supported. */
	if (ioctl(fd, VFIO_CHECK_EXTENSION, fslmc_iommu_type)) {
		/* Connect the group to the new container... */
		ret = ioctl(vfio_group.fd, VFIO_GROUP_SET_CONTAINER, &fd);
		if (ret) {
			DPAA2_BUS_ERR("Failed to setup group container");
			close(fd);
			return -errno;
		}

		/* ...then select the IOMMU model for the container. */
		ret = ioctl(fd, VFIO_SET_IOMMU, fslmc_iommu_type);
		if (ret) {
			DPAA2_BUS_ERR("Failed to setup VFIO iommu");
			close(fd);
			return -errno;
		}
	} else {
		DPAA2_BUS_ERR("No supported IOMMU available");
		close(fd);
		return -EINVAL;
	}

	/* Record the association for later DMA map/unmap calls. */
	vfio_container.used = 1;
	vfio_container.fd = fd;
	vfio_container.group = &vfio_group;
	vfio_group.container = &vfio_container;

	return 0;
}
173
/*
 * mmap a fixed 4 KiB region at offset 0x6030000 of the container device
 * and program a VFIO DMA mapping for it at iova 0x6030000; the CPU
 * pointer at offset 64 into the mapping is saved in msi_intr_vaddr.
 * NOTE(review): 0x6030000 is presumably the SoC's MSI doorbell/ITS
 * region — confirm against the platform memory map.
 */
static int vfio_map_irq_region(struct fslmc_vfio_group *group)
{
	int ret;
	unsigned long *vaddr = NULL;
	struct vfio_iommu_type1_dma_map map = {
		.argsz = sizeof(map),
		.flags = VFIO_DMA_MAP_FLAG_READ | VFIO_DMA_MAP_FLAG_WRITE,
		.vaddr = 0x6030000,
		.iova = 0x6030000,
		.size = 0x1000,
	};

	/* The mmap is performed against the container device fd. */
	vaddr = (unsigned long *)mmap(NULL, 0x1000, PROT_WRITE |
		PROT_READ, MAP_SHARED, container_device_fd, 0x6030000);
	if (vaddr == MAP_FAILED) {
		DPAA2_BUS_INFO("Unable to map region (errno = %d)", errno);
		return -errno;
	}

	msi_intr_vaddr = (uint32_t *)((char *)(vaddr) + 64);
	/* Replace the placeholder vaddr with the actual mapping address. */
	map.vaddr = (unsigned long)vaddr;
	ret = ioctl(group->container->fd, VFIO_IOMMU_MAP_DMA, &map);
	if (ret == 0)
		return 0;

	DPAA2_BUS_ERR("Unable to map DMA address (errno = %d)", errno);
	return -errno;
}
202
203static int fslmc_map_dma(uint64_t vaddr, rte_iova_t iovaddr, size_t len);
204static int fslmc_unmap_dma(uint64_t vaddr, rte_iova_t iovaddr, size_t len);
205
/*
 * EAL memory event callback: keeps VFIO DMA mappings in sync with memory
 * hotplug.  Walks the memsegs covering [addr, addr+len) and maps each on
 * RTE_MEM_EVENT_ALLOC, or unmaps on dealloc.  Stops early on the first
 * map/unmap failure (callback has no way to report an error).
 */
static void
fslmc_memevent_cb(enum rte_mem_event type, const void *addr, size_t len,
		void *arg __rte_unused)
{
	struct rte_memseg_list *msl;
	struct rte_memseg *ms;
	size_t cur_len = 0, map_len = 0;
	uint64_t virt_addr;
	rte_iova_t iova_addr;
	int ret;

	msl = rte_mem_virt2memseg_list(addr);

	while (cur_len < len) {
		const void *va = RTE_PTR_ADD(addr, cur_len);

		/* NOTE(review): ms is dereferenced without a NULL check;
		 * confirm the event range is always memseg-backed. */
		ms = rte_mem_virt2memseg(va, msl);
		iova_addr = ms->iova;
		virt_addr = ms->addr_64;
		map_len = ms->len;

		DPAA2_BUS_DEBUG("Request for %s, va=%p, "
				"virt_addr=0x%" PRIx64 ", "
				"iova=0x%" PRIx64 ", map_len=%zu",
				type == RTE_MEM_EVENT_ALLOC ?
					"alloc" : "dealloc",
				va, virt_addr, iova_addr, map_len);

		/* Segments without a valid IOVA cannot be programmed. */
		if (iova_addr == RTE_BAD_IOVA) {
			DPAA2_BUS_DEBUG("Segment has invalid iova, skipping\n");
			cur_len += map_len;
			continue;
		}

		if (type == RTE_MEM_EVENT_ALLOC)
			ret = fslmc_map_dma(virt_addr, iova_addr, map_len);
		else
			ret = fslmc_unmap_dma(virt_addr, iova_addr, map_len);

		if (ret != 0) {
			DPAA2_BUS_ERR("DMA Mapping/Unmapping failed. "
					"Map=%d, addr=%p, len=%zu, err:(%d)",
					type, va, map_len, ret);
			return;
		}

		cur_len += map_len;
	}

	if (type == RTE_MEM_EVENT_ALLOC)
		DPAA2_BUS_DEBUG("Total Mapped: addr=%p, len=%zu",
				addr, len);
	else
		DPAA2_BUS_DEBUG("Total Unmapped: addr=%p, len=%zu",
				addr, len);
}
263
264static int
265fslmc_map_dma(uint64_t vaddr, rte_iova_t iovaddr __rte_unused, size_t len)
266{
267 struct fslmc_vfio_group *group;
268 struct vfio_iommu_type1_dma_map dma_map = {
269 .argsz = sizeof(struct vfio_iommu_type1_dma_map),
270 .flags = VFIO_DMA_MAP_FLAG_READ | VFIO_DMA_MAP_FLAG_WRITE,
271 };
272 int ret;
273
274 if (fslmc_iommu_type == RTE_VFIO_NOIOMMU) {
275 DPAA2_BUS_DEBUG("Running in NOIOMMU mode");
276 return 0;
277 }
278
279 dma_map.size = len;
280 dma_map.vaddr = vaddr;
281
282#ifdef RTE_LIBRTE_DPAA2_USE_PHYS_IOVA
283 dma_map.iova = iovaddr;
284#else
285 dma_map.iova = dma_map.vaddr;
286#endif
287
288
289 group = &vfio_group;
290
291 if (!group->container) {
292 DPAA2_BUS_ERR("Container is not connected ");
293 return -1;
294 }
295
296 DPAA2_BUS_DEBUG("--> Map address: 0x%"PRIx64", size: %"PRIu64"",
297 (uint64_t)dma_map.vaddr, (uint64_t)dma_map.size);
298 ret = ioctl(group->container->fd, VFIO_IOMMU_MAP_DMA, &dma_map);
299 if (ret) {
300 DPAA2_BUS_ERR("VFIO_IOMMU_MAP_DMA API(errno = %d)",
301 errno);
302 return -1;
303 }
304
305 return 0;
306}
307
308static int
309fslmc_unmap_dma(uint64_t vaddr, uint64_t iovaddr __rte_unused, size_t len)
310{
311 struct fslmc_vfio_group *group;
312 struct vfio_iommu_type1_dma_unmap dma_unmap = {
313 .argsz = sizeof(struct vfio_iommu_type1_dma_unmap),
314 .flags = 0,
315 };
316 int ret;
317
318 if (fslmc_iommu_type == RTE_VFIO_NOIOMMU) {
319 DPAA2_BUS_DEBUG("Running in NOIOMMU mode");
320 return 0;
321 }
322
323 dma_unmap.size = len;
324 dma_unmap.iova = vaddr;
325
326
327 group = &vfio_group;
328
329 if (!group->container) {
330 DPAA2_BUS_ERR("Container is not connected ");
331 return -1;
332 }
333
334 DPAA2_BUS_DEBUG("--> Unmap address: 0x%"PRIx64", size: %"PRIu64"",
335 (uint64_t)dma_unmap.iova, (uint64_t)dma_unmap.size);
336 ret = ioctl(group->container->fd, VFIO_IOMMU_UNMAP_DMA, &dma_unmap);
337 if (ret) {
338 DPAA2_BUS_ERR("VFIO_IOMMU_UNMAP_DMA API(errno = %d)",
339 errno);
340 return -1;
341 }
342
343 return 0;
344}
345
346static int
347fslmc_dmamap_seg(const struct rte_memseg_list *msl __rte_unused,
348 const struct rte_memseg *ms, void *arg)
349{
350 int *n_segs = arg;
351 int ret;
352
353
354 if (ms->iova == RTE_BAD_IOVA)
355 return 0;
356
357 ret = fslmc_map_dma(ms->addr_64, ms->iova, ms->len);
358 if (ret)
359 DPAA2_BUS_ERR("Unable to VFIO map (addr=%p, len=%zu)",
360 ms->addr, ms->len);
361 else
362 (*n_segs)++;
363
364 return ret;
365}
366
367int
368rte_fslmc_vfio_mem_dmamap(uint64_t vaddr, uint64_t iova, uint64_t size)
369{
370 int ret;
371 struct fslmc_vfio_group *group;
372 struct vfio_iommu_type1_dma_map dma_map = {
373 .argsz = sizeof(struct vfio_iommu_type1_dma_map),
374 .flags = VFIO_DMA_MAP_FLAG_READ | VFIO_DMA_MAP_FLAG_WRITE,
375 };
376
377 if (fslmc_iommu_type == RTE_VFIO_NOIOMMU) {
378 DPAA2_BUS_DEBUG("Running in NOIOMMU mode");
379 return 0;
380 }
381
382
383 group = &vfio_group;
384 if (!group->container) {
385 DPAA2_BUS_ERR("Container is not connected");
386 return -1;
387 }
388
389 dma_map.size = size;
390 dma_map.vaddr = vaddr;
391 dma_map.iova = iova;
392
393 DPAA2_BUS_DEBUG("VFIOdmamap 0x%"PRIx64":0x%"PRIx64",size 0x%"PRIx64"\n",
394 (uint64_t)dma_map.vaddr, (uint64_t)dma_map.iova,
395 (uint64_t)dma_map.size);
396 ret = ioctl(group->container->fd, VFIO_IOMMU_MAP_DMA,
397 &dma_map);
398 if (ret) {
399 printf("Unable to map DMA address (errno = %d)\n",
400 errno);
401 return ret;
402 }
403
404 return 0;
405}
406
/*
 * Map all existing memsegs into the VFIO container, install a memory
 * event callback so future alloc/free events keep the mappings in sync,
 * and map the MSI IRQ region.  The memory read lock is held throughout
 * so no allocation can race the initial walk.
 */
int rte_fslmc_vfio_dmamap(void)
{
	int i = 0, ret;

	/* Lock before parsing and registering callback to memory event */
	rte_mcfg_mem_read_lock();

	if (rte_memseg_walk(fslmc_dmamap_seg, &i) < 0) {
		rte_mcfg_mem_read_unlock();
		return -1;
	}

	ret = rte_mem_event_callback_register("fslmc_memevent_clb",
			fslmc_memevent_cb, NULL);
	if (ret && rte_errno == ENOTSUP)
		DPAA2_BUS_DEBUG("Memory event callbacks not supported");
	else if (ret)
		DPAA2_BUS_DEBUG("Unable to install memory handler");
	else
		DPAA2_BUS_DEBUG("Installed memory callback handler");

	DPAA2_BUS_DEBUG("Total %d segments found.", i);

	/*
	 * Map the MSI doorbell region.
	 * NOTE(review): the return value is ignored; a failure here leaves
	 * msi_intr_vaddr unset — confirm this is intentional best-effort.
	 */
	vfio_map_irq_region(&vfio_group);

	/* Existing segments are mapped and the hotplug callback is
	 * installed; safe to release the lock.
	 */
	rte_mcfg_mem_read_unlock();

	return 0;
}
443
444static int
445fslmc_vfio_setup_device(const char *sysfs_base, const char *dev_addr,
446 int *vfio_dev_fd, struct vfio_device_info *device_info)
447{
448 struct vfio_group_status group_status = {
449 .argsz = sizeof(group_status)
450 };
451 int vfio_group_fd, vfio_container_fd, iommu_group_no, ret;
452
453
454 ret = rte_vfio_get_group_num(sysfs_base, dev_addr, &iommu_group_no);
455 if (ret < 0)
456 return -1;
457
458
459 vfio_group_fd = rte_vfio_get_group_fd(iommu_group_no);
460 if (vfio_group_fd < 0 && vfio_group_fd != -ENOENT)
461 return -1;
462
463
464
465
466
467 if (vfio_group_fd == -ENOENT) {
468 RTE_LOG(WARNING, EAL, " %s not managed by VFIO driver, skipping\n",
469 dev_addr);
470 return 1;
471 }
472
473
474 vfio_container_fd = rte_vfio_get_container_fd();
475 if (vfio_container_fd < 0) {
476 DPAA2_BUS_ERR("Failed to open VFIO container");
477 return -errno;
478 }
479
480
481 ret = ioctl(vfio_group_fd, VFIO_GROUP_GET_STATUS, &group_status);
482 if (ret) {
483 DPAA2_BUS_ERR(" %s cannot get group status, "
484 "error %i (%s)\n", dev_addr,
485 errno, strerror(errno));
486 close(vfio_group_fd);
487 rte_vfio_clear_group(vfio_group_fd);
488 return -1;
489 } else if (!(group_status.flags & VFIO_GROUP_FLAGS_VIABLE)) {
490 DPAA2_BUS_ERR(" %s VFIO group is not viable!\n", dev_addr);
491 close(vfio_group_fd);
492 rte_vfio_clear_group(vfio_group_fd);
493 return -1;
494 }
495
496
497
498
499
500 if (!(group_status.flags & VFIO_GROUP_FLAGS_CONTAINER_SET)) {
501
502
503 ret = ioctl(vfio_group_fd, VFIO_GROUP_SET_CONTAINER,
504 &vfio_container_fd);
505 if (ret) {
506 DPAA2_BUS_ERR(" %s cannot add VFIO group to container, "
507 "error %i (%s)\n", dev_addr,
508 errno, strerror(errno));
509 close(vfio_group_fd);
510 close(vfio_container_fd);
511 rte_vfio_clear_group(vfio_group_fd);
512 return -1;
513 }
514
515
516
517
518
519 if (ioctl(vfio_container_fd, VFIO_CHECK_EXTENSION,
520 fslmc_iommu_type)) {
521 ret = ioctl(vfio_container_fd, VFIO_SET_IOMMU,
522 fslmc_iommu_type);
523 if (ret) {
524 DPAA2_BUS_ERR("Failed to setup VFIO iommu");
525 close(vfio_group_fd);
526 close(vfio_container_fd);
527 return -errno;
528 }
529 } else {
530 DPAA2_BUS_ERR("No supported IOMMU available");
531 close(vfio_group_fd);
532 close(vfio_container_fd);
533 return -EINVAL;
534 }
535 }
536
537
538 *vfio_dev_fd = ioctl(vfio_group_fd, VFIO_GROUP_GET_DEVICE_FD, dev_addr);
539 if (*vfio_dev_fd < 0) {
540
541
542
543
544 DPAA2_BUS_WARN("Getting a vfio_dev_fd for %s failed", dev_addr);
545 close(vfio_group_fd);
546 close(vfio_container_fd);
547 rte_vfio_clear_group(vfio_group_fd);
548 return -1;
549 }
550
551
552 ret = ioctl(*vfio_dev_fd, VFIO_DEVICE_GET_INFO, device_info);
553 if (ret) {
554 DPAA2_BUS_ERR(" %s cannot get device info, error %i (%s)",
555 dev_addr, errno, strerror(errno));
556 close(*vfio_dev_fd);
557 close(vfio_group_fd);
558 close(vfio_container_fd);
559 rte_vfio_clear_group(vfio_group_fd);
560 return -1;
561 }
562
563 return 0;
564}
565
566static intptr_t vfio_map_mcp_obj(const char *mcp_obj)
567{
568 intptr_t v_addr = (intptr_t)MAP_FAILED;
569 int32_t ret, mc_fd;
570 struct vfio_group_status status = { .argsz = sizeof(status) };
571
572 struct vfio_device_info d_info = { .argsz = sizeof(d_info) };
573 struct vfio_region_info reg_info = { .argsz = sizeof(reg_info) };
574
575 fslmc_vfio_setup_device(SYSFS_FSL_MC_DEVICES, mcp_obj,
576 &mc_fd, &d_info);
577
578
579 ret = ioctl(mc_fd, VFIO_DEVICE_GET_REGION_INFO, ®_info);
580 if (ret < 0) {
581 DPAA2_BUS_ERR("Error in VFIO getting REGION_INFO");
582 goto MC_FAILURE;
583 }
584
585 v_addr = (size_t)mmap(NULL, reg_info.size,
586 PROT_WRITE | PROT_READ, MAP_SHARED,
587 mc_fd, reg_info.offset);
588
589MC_FAILURE:
590 close(mc_fd);
591
592 return v_addr;
593}
594
595#define IRQ_SET_BUF_LEN (sizeof(struct vfio_irq_set) + sizeof(int))
596
597int rte_dpaa2_intr_enable(struct rte_intr_handle *intr_handle, int index)
598{
599 int len, ret;
600 char irq_set_buf[IRQ_SET_BUF_LEN];
601 struct vfio_irq_set *irq_set;
602 int *fd_ptr, vfio_dev_fd;
603
604 len = sizeof(irq_set_buf);
605
606 irq_set = (struct vfio_irq_set *)irq_set_buf;
607 irq_set->argsz = len;
608 irq_set->count = 1;
609 irq_set->flags =
610 VFIO_IRQ_SET_DATA_EVENTFD | VFIO_IRQ_SET_ACTION_TRIGGER;
611 irq_set->index = index;
612 irq_set->start = 0;
613 fd_ptr = (int *)&irq_set->data;
614 *fd_ptr = rte_intr_fd_get(intr_handle);
615
616 vfio_dev_fd = rte_intr_dev_fd_get(intr_handle);
617 ret = ioctl(vfio_dev_fd, VFIO_DEVICE_SET_IRQS, irq_set);
618 if (ret) {
619 DPAA2_BUS_ERR("Error:dpaa2 SET IRQs fd=%d, err = %d(%s)",
620 rte_intr_fd_get(intr_handle), errno,
621 strerror(errno));
622 return ret;
623 }
624
625 return ret;
626}
627
628int rte_dpaa2_intr_disable(struct rte_intr_handle *intr_handle, int index)
629{
630 struct vfio_irq_set *irq_set;
631 char irq_set_buf[IRQ_SET_BUF_LEN];
632 int len, ret, vfio_dev_fd;
633
634 len = sizeof(struct vfio_irq_set);
635
636 irq_set = (struct vfio_irq_set *)irq_set_buf;
637 irq_set->argsz = len;
638 irq_set->flags = VFIO_IRQ_SET_DATA_NONE | VFIO_IRQ_SET_ACTION_TRIGGER;
639 irq_set->index = index;
640 irq_set->start = 0;
641 irq_set->count = 0;
642
643 vfio_dev_fd = rte_intr_dev_fd_get(intr_handle);
644 ret = ioctl(vfio_dev_fd, VFIO_DEVICE_SET_IRQS, irq_set);
645 if (ret)
646 DPAA2_BUS_ERR(
647 "Error disabling dpaa2 interrupts for fd %d",
648 rte_intr_fd_get(intr_handle));
649
650 return ret;
651}
652
653
/*
 * Find the first IRQ of the device that supports eventfd signalling,
 * create an eventfd for it and record it in @intr_handle as a VFIO-MSI
 * interrupt.  Returns 0 on success, -1 when no usable IRQ exists or
 * probing fails, -rte_errno when updating the handle fails.
 */
int
rte_dpaa2_vfio_setup_intr(struct rte_intr_handle *intr_handle,
			  int vfio_dev_fd,
			  int num_irqs)
{
	int i, ret;

	/* Probe each IRQ index in turn. */
	for (i = 0; i < num_irqs; i++) {
		struct vfio_irq_info irq_info = { .argsz = sizeof(irq_info) };
		int fd = -1;

		irq_info.index = i;

		ret = ioctl(vfio_dev_fd, VFIO_DEVICE_GET_IRQ_INFO, &irq_info);
		if (ret < 0) {
			DPAA2_BUS_ERR("Cannot get IRQ(%d) info, error %i (%s)",
				      i, errno, strerror(errno));
			return -1;
		}

		/* This vector cannot signal through an eventfd; try the
		 * next index.
		 */
		if ((irq_info.flags & VFIO_IRQ_INFO_EVENTFD) == 0)
			continue;

		/* set up an eventfd for interrupts */
		fd = eventfd(0, EFD_NONBLOCK | EFD_CLOEXEC);
		if (fd < 0) {
			DPAA2_BUS_ERR("Cannot set up eventfd, error %i (%s)",
				      errno, strerror(errno));
			return -1;
		}

		/* NOTE(review): if any setter below fails, the eventfd is
		 * leaked — consider close(fd) before returning. */
		if (rte_intr_fd_set(intr_handle, fd))
			return -rte_errno;

		if (rte_intr_type_set(intr_handle, RTE_INTR_HANDLE_VFIO_MSI))
			return -rte_errno;

		if (rte_intr_dev_fd_set(intr_handle, vfio_dev_fd))
			return -rte_errno;

		/* Only the first eventfd-capable IRQ is set up. */
		return 0;
	}

	/* No suitable interrupt vector was found. */
	return -1;
}
705
706
707
708
709
710static int
711fslmc_process_iodevices(struct rte_dpaa2_device *dev)
712{
713 int dev_fd;
714 struct vfio_device_info device_info = { .argsz = sizeof(device_info) };
715 struct rte_dpaa2_object *object = NULL;
716
717 fslmc_vfio_setup_device(SYSFS_FSL_MC_DEVICES, dev->device.name,
718 &dev_fd, &device_info);
719
720 switch (dev->dev_type) {
721 case DPAA2_ETH:
722 rte_dpaa2_vfio_setup_intr(dev->intr_handle, dev_fd,
723 device_info.num_irqs);
724 break;
725 case DPAA2_CON:
726 case DPAA2_IO:
727 case DPAA2_CI:
728 case DPAA2_BPOOL:
729 case DPAA2_DPRTC:
730 case DPAA2_MUX:
731 TAILQ_FOREACH(object, &dpaa2_obj_list, next) {
732 if (dev->dev_type == object->dev_type)
733 object->create(dev_fd, &device_info,
734 dev->object_id);
735 else
736 continue;
737 }
738 break;
739 default:
740 break;
741 }
742
743 DPAA2_BUS_LOG(DEBUG, "Device (%s) abstracted from VFIO",
744 dev->device.name);
745 return 0;
746}
747
/*
 * Allocate the MC portal table and map the DPMCP object @dev through
 * VFIO.  In the primary process the MC firmware version is validated
 * against MC_VER_MAJOR/MC_VER_MINOR before the portal is published in
 * rte_mcp_ptr_list; secondaries publish it immediately.
 */
static int
fslmc_process_mcp(struct rte_dpaa2_device *dev)
{
	int ret;
	intptr_t v_addr;
	struct fsl_mc_io dpmng = {0};
	struct mc_version mc_ver_info = {0};

	/* Table holds portal pointers up to index MC_PORTAL_INDEX. */
	rte_mcp_ptr_list = malloc(sizeof(void *) * (MC_PORTAL_INDEX + 1));
	if (!rte_mcp_ptr_list) {
		DPAA2_BUS_ERR("Unable to allocate MC portal memory");
		ret = -ENOMEM;
		goto cleanup;
	}

	v_addr = vfio_map_mcp_obj(dev->device.name);
	if (v_addr == (intptr_t)MAP_FAILED) {
		DPAA2_BUS_ERR("Error mapping region (errno = %d)", errno);
		ret = -1;
		goto cleanup;
	}

	/* Point the MC I/O context at the mapped portal registers. */
	dpmng.regs = (void *)v_addr;

	/* Version check is done by the primary only; secondaries publish
	 * the portal and return.
	 */
	if (rte_eal_process_type() == RTE_PROC_SECONDARY) {
		rte_mcp_ptr_list[MC_PORTAL_INDEX] = (void *)v_addr;
		return 0;
	}

	if (mc_get_version(&dpmng, CMD_PRI_LOW, &mc_ver_info)) {
		DPAA2_BUS_ERR("Unable to obtain MC version");
		ret = -1;
		goto cleanup;
	}

	/* Major must match exactly; minor must be at least MC_VER_MINOR. */
	if ((mc_ver_info.major != MC_VER_MAJOR) ||
	    (mc_ver_info.minor < MC_VER_MINOR)) {
		DPAA2_BUS_ERR("DPAA2 MC version not compatible!"
			      " Expected %d.%d.x, Detected %d.%d.%d",
			      MC_VER_MAJOR, MC_VER_MINOR,
			      mc_ver_info.major, mc_ver_info.minor,
			      mc_ver_info.revision);
		ret = -1;
		goto cleanup;
	}
	rte_mcp_ptr_list[MC_PORTAL_INDEX] = (void *)v_addr;

	return 0;

cleanup:
	/* NOTE(review): the mmap from vfio_map_mcp_obj() is not unmapped
	 * on this error path — confirm whether that is acceptable. */
	if (rte_mcp_ptr_list) {
		free(rte_mcp_ptr_list);
		rte_mcp_ptr_list = NULL;
	}

	return ret;
}
809
/*
 * Walk the devices scanned on the fslmc bus and bring them up through
 * VFIO.  Pass 1 picks one MC portal (DPMCP) and maps it, removing every
 * DPMCP node from the bus list.  Pass 2 initializes the remaining
 * device types.  Blocked devices and devices not needed by the current
 * process type are dropped from the list.
 */
int
fslmc_vfio_process_group(void)
{
	int ret;
	int found_mportal = 0;
	struct rte_dpaa2_device *dev, *dev_temp;
	bool is_dpmcp_in_blocklist = false, is_dpio_in_blocklist = false;
	int dpmcp_count = 0, dpio_count = 0, current_device;

	/* Count DPMCP/DPIO devices and note whether any are blocklisted. */
	RTE_TAILQ_FOREACH_SAFE(dev, &rte_fslmc_bus.device_list, next,
		dev_temp) {
		if (dev->dev_type == DPAA2_MPORTAL) {
			dpmcp_count++;
			if (dev->device.devargs &&
			    dev->device.devargs->policy == RTE_DEV_BLOCKED)
				is_dpmcp_in_blocklist = true;
		}
		if (dev->dev_type == DPAA2_IO) {
			dpio_count++;
			if (dev->device.devargs &&
			    dev->device.devargs->policy == RTE_DEV_BLOCKED)
				is_dpio_in_blocklist = true;
		}
	}

	/* Pass 1: map exactly one MC portal. */
	current_device = 0;
	RTE_TAILQ_FOREACH_SAFE(dev, &rte_fslmc_bus.device_list, next,
		dev_temp) {
		if (dev->dev_type == DPAA2_MPORTAL) {
			current_device++;
			if (dev->device.devargs &&
			    dev->device.devargs->policy == RTE_DEV_BLOCKED) {
				DPAA2_BUS_LOG(DEBUG, "%s Blocked, skipping",
					      dev->device.name);
				TAILQ_REMOVE(&rte_fslmc_bus.device_list,
						dev, next);
				continue;
			}

			/* Secondaries keep only the last DPMCP.
			 * NOTE(review): when dpmcp_count == 1 the sole
			 * portal is also removed for secondaries — confirm
			 * this relies on the primary's portal. */
			if (rte_eal_process_type() == RTE_PROC_SECONDARY &&
			    !is_dpmcp_in_blocklist) {
				if (dpmcp_count == 1 ||
				    current_device != dpmcp_count) {
					TAILQ_REMOVE(&rte_fslmc_bus.device_list,
							dev, next);
					continue;
				}
			}

			if (!found_mportal) {
				ret = fslmc_process_mcp(dev);
				if (ret) {
					DPAA2_BUS_ERR("Unable to map MC Portal");
					return -1;
				}
				found_mportal = 1;
			}

			/* Portal nodes are consumed here and removed from
			 * the bus list. */
			TAILQ_REMOVE(&rte_fslmc_bus.device_list, dev, next);
			free(dev);
			dev = NULL;
		}
	}

	/* Without an MC portal no other device can be operated. */
	if (!found_mportal) {
		DPAA2_BUS_ERR("No MC Portal device found. Not continuing");
		return -1;
	}

	/* Pass 2: set up the remaining devices per their type. */
	current_device = 0;
	RTE_TAILQ_FOREACH_SAFE(dev, &rte_fslmc_bus.device_list, next,
		dev_temp) {
		if (dev->dev_type == DPAA2_IO)
			current_device++;
		if (dev->device.devargs &&
		    dev->device.devargs->policy == RTE_DEV_BLOCKED) {
			DPAA2_BUS_LOG(DEBUG, "%s Blocked, skipping",
				      dev->device.name);
			TAILQ_REMOVE(&rte_fslmc_bus.device_list, dev, next);
			continue;
		}
		/* Secondary processes only need ETH/CRYPTO/QDMA/IO. */
		if (rte_eal_process_type() == RTE_PROC_SECONDARY &&
		    dev->dev_type != DPAA2_ETH &&
		    dev->dev_type != DPAA2_CRYPTO &&
		    dev->dev_type != DPAA2_QDMA &&
		    dev->dev_type != DPAA2_IO) {
			TAILQ_REMOVE(&rte_fslmc_bus.device_list, dev, next);
			continue;
		}
		switch (dev->dev_type) {
		case DPAA2_ETH:
		case DPAA2_CRYPTO:
		case DPAA2_QDMA:
			ret = fslmc_process_iodevices(dev);
			if (ret) {
				DPAA2_BUS_DEBUG("Dev (%s) init failed",
						dev->device.name);
				return ret;
			}
			break;
		case DPAA2_CON:
		case DPAA2_CI:
		case DPAA2_BPOOL:
		case DPAA2_DPRTC:
		case DPAA2_MUX:
			/* These types are set up by the primary only;
			 * secondaries skip them.
			 */
			if (rte_eal_process_type() == RTE_PROC_SECONDARY)
				continue;

			ret = fslmc_process_iodevices(dev);
			if (ret) {
				DPAA2_BUS_DEBUG("Dev (%s) init failed",
						dev->device.name);
				return -1;
			}

			break;
		case DPAA2_IO:
			/* With several unblocked DPIOs, the last one is
			 * reserved for secondaries: the primary skips it
			 * and a secondary uses only it.
			 */
			if (!is_dpio_in_blocklist && dpio_count > 1) {
				if (rte_eal_process_type() == RTE_PROC_SECONDARY
				    && current_device != dpio_count) {
					TAILQ_REMOVE(&rte_fslmc_bus.device_list,
							dev, next);
					break;
				}
				if (rte_eal_process_type() == RTE_PROC_PRIMARY
				    && current_device == dpio_count) {
					TAILQ_REMOVE(&rte_fslmc_bus.device_list,
							dev, next);
					break;
				}
			}

			ret = fslmc_process_iodevices(dev);
			if (ret) {
				DPAA2_BUS_DEBUG("Dev (%s) init failed",
						dev->device.name);
				return -1;
			}

			break;
		case DPAA2_UNKNOWN:
		default:
			/* Unknown device types are dropped and freed. */
			DPAA2_BUS_DEBUG("Found unknown device (%s)",
					dev->device.name);
			TAILQ_REMOVE(&rte_fslmc_bus.device_list, dev, next);
			free(dev);
			dev = NULL;
		}
	}

	return 0;
}
976
/*
 * Resolve the DPRC container, open its VFIO group, connect the group to
 * a container, and obtain the container device fd.  Idempotent: returns
 * 0 immediately if container_device_fd is already set.
 */
int
fslmc_vfio_setup_group(void)
{
	int groupid;
	int ret;
	struct vfio_group_status status = { .argsz = sizeof(status) };

	/* Already done by a previous call. */
	if (container_device_fd)
		return 0;

	ret = fslmc_get_container_group(&groupid);
	if (ret)
		return ret;

	/* NOTE(review): relies on vfio_group.groupid's zero-initialized
	 * state; a legitimate group id of 0 would be treated as already
	 * set up — confirm. */
	if (vfio_group.groupid == groupid) {
		DPAA2_BUS_ERR("groupid already exists %d", groupid);
		return 0;
	}

	/* Get the group fd from EAL. */
	ret = rte_vfio_get_group_fd(groupid);
	if (ret < 0)
		return ret;
	vfio_group.fd = ret;

	/* Check group viability. */
	ret = ioctl(vfio_group.fd, VFIO_GROUP_GET_STATUS, &status);
	if (ret) {
		DPAA2_BUS_ERR("VFIO error getting group status");
		close(vfio_group.fd);
		rte_vfio_clear_group(vfio_group.fd);
		return ret;
	}

	if (!(status.flags & VFIO_GROUP_FLAGS_VIABLE)) {
		DPAA2_BUS_ERR("VFIO group not viable");
		close(vfio_group.fd);
		rte_vfio_clear_group(vfio_group.fd);
		return -EPERM;
	}

	vfio_group.groupid = groupid;

	/* If the group is not yet bound to a container, connect it. */
	if (!(status.flags & VFIO_GROUP_FLAGS_CONTAINER_SET)) {
		ret = vfio_connect_container();
		if (ret) {
			DPAA2_BUS_ERR(
				"Error connecting container with groupid %d",
				groupid);
			close(vfio_group.fd);
			rte_vfio_clear_group(vfio_group.fd);
			return ret;
		}
	}

	/* Get the device fd of the DPRC container itself. */
	ret = ioctl(vfio_group.fd, VFIO_GROUP_GET_DEVICE_FD, fslmc_container);
	if (ret < 0) {
		DPAA2_BUS_ERR("Error getting device %s fd from group %d",
			      fslmc_container, vfio_group.groupid);
		close(vfio_group.fd);
		rte_vfio_clear_group(vfio_group.fd);
		return ret;
	}
	container_device_fd = ret;
	DPAA2_BUS_DEBUG("VFIO Container FD is [0x%X]",
			container_device_fd);

	return 0;
}
1053