/*
 * Legacy KVM PCI device assignment ("kvm-pci-assign") for x86 hosts.
 */
#include "qemu/osdep.h"
#include "qapi/error.h"
#include <sys/mman.h>
#include "hw/hw.h"
#include "hw/i386/pc.h"
#include "qemu/error-report.h"
#include "ui/console.h"
#include "hw/loader.h"
#include "monitor/monitor.h"
#include "qemu/range.h"
#include "sysemu/sysemu.h"
#include "hw/pci/pci.h"
#include "hw/pci/msi.h"
#include "kvm_i386.h"
#include "hw/pci/pci-assign.h"

#define MSIX_PAGE_SIZE 0x1000

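/* BAR resource type flags as reported by the Linux kernel in the sysfs
 * "resource" file; the values mirror linux/ioport.h. */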
#define IORESOURCE_IO       0x00000100
#define IORESOURCE_MEM      0x00000200
#define IORESOURCE_IRQ      0x00000400
#define IORESOURCE_DMA      0x00000800
#define IORESOURCE_PREFETCH 0x00002000
#define IORESOURCE_MEM_64   0x00100000

typedef struct PCIRegion {
    int type;
    int valid;
    uint64_t base_addr;
    uint64_t size;
    int resource_fd;
} PCIRegion;

typedef struct PCIDevRegions {
    uint8_t bus, dev, func;
    int irq;
    uint16_t region_number;

    PCIRegion regions[PCI_NUM_REGIONS - 1];
    int config_fd;
} PCIDevRegions;

typedef struct AssignedDevRegion {
    MemoryRegion container;
    MemoryRegion real_iomem;
    union {
        uint8_t *r_virtbase;
        uint32_t r_baseport;
    } u;
    pcibus_t e_size;
    pcibus_t r_size;
    PCIRegion *region;
} AssignedDevRegion;

#define ASSIGNED_DEVICE_PREFER_MSI_BIT  0
#define ASSIGNED_DEVICE_SHARE_INTX_BIT  1

#define ASSIGNED_DEVICE_PREFER_MSI_MASK (1 << ASSIGNED_DEVICE_PREFER_MSI_BIT)
#define ASSIGNED_DEVICE_SHARE_INTX_MASK (1 << ASSIGNED_DEVICE_SHARE_INTX_BIT)

typedef struct MSIXTableEntry {
    uint32_t addr_lo;
    uint32_t addr_hi;
    uint32_t data;
    uint32_t ctrl;
} MSIXTableEntry;

typedef enum AssignedIRQType {
    ASSIGNED_IRQ_NONE = 0,
    ASSIGNED_IRQ_INTX_HOST_INTX,
    ASSIGNED_IRQ_INTX_HOST_MSI,
    ASSIGNED_IRQ_MSI,
    ASSIGNED_IRQ_MSIX
} AssignedIRQType;

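/* Per-device state for legacy KVM device assignment: the emulated PCIDevice,
 * the host device's sysfs-backed regions and config space fd, the current
 * interrupt delivery mode (INTx, host MSI, guest MSI or MSI-X), and bitmaps
 * selecting which config space bytes are emulated versus passed through. */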
typedef struct AssignedDevice {
    PCIDevice dev;
    PCIHostDeviceAddress host;
    uint32_t dev_id;
    uint32_t features;
    int intpin;
    AssignedDevRegion v_addrs[PCI_NUM_REGIONS - 1];
    PCIDevRegions real_device;
    PCIINTxRoute intx_route;
    AssignedIRQType assigned_irq_type;
    struct {
#define ASSIGNED_DEVICE_CAP_MSI (1 << 0)
#define ASSIGNED_DEVICE_CAP_MSIX (1 << 1)
        uint32_t available;
#define ASSIGNED_DEVICE_MSI_ENABLED (1 << 0)
#define ASSIGNED_DEVICE_MSIX_ENABLED (1 << 1)
#define ASSIGNED_DEVICE_MSIX_MASKED (1 << 2)
        uint32_t state;
    } cap;
    uint8_t emulate_config_read[PCI_CONFIG_SPACE_SIZE];
    uint8_t emulate_config_write[PCI_CONFIG_SPACE_SIZE];
    int msi_virq_nr;
    int *msi_virq;
    MSIXTableEntry *msix_table;
    hwaddr msix_table_addr;
    uint16_t msix_max;
    MemoryRegion mmio;
    char *configfd_name;
    int32_t bootindex;
} AssignedDevice;

#define TYPE_PCI_ASSIGN "kvm-pci-assign"
#define PCI_ASSIGN(obj) OBJECT_CHECK(AssignedDevice, (obj), TYPE_PCI_ASSIGN)

134static void assigned_dev_update_irq_routing(PCIDevice *dev);
135
136static void assigned_dev_load_option_rom(AssignedDevice *dev);
137
138static void assigned_dev_unregister_msix_mmio(AssignedDevice *dev);
139
140static uint64_t assigned_dev_ioport_rw(AssignedDevRegion *dev_region,
141 hwaddr addr, int size,
142 uint64_t *data)
143{
144 uint64_t val = 0;
145 int fd = dev_region->region->resource_fd;
146
147 if (data) {
148 DEBUG("pwrite data=%" PRIx64 ", size=%d, e_phys=" TARGET_FMT_plx
149 ", addr="TARGET_FMT_plx"\n", *data, size, addr, addr);
150 if (pwrite(fd, data, size, addr) != size) {
151 error_report("%s - pwrite failed %s", __func__, strerror(errno));
152 }
153 } else {
154 if (pread(fd, &val, size, addr) != size) {
155 error_report("%s - pread failed %s", __func__, strerror(errno));
156 val = (1UL << (size * 8)) - 1;
157 }
158 DEBUG("pread val=%" PRIx64 ", size=%d, e_phys=" TARGET_FMT_plx
159 ", addr=" TARGET_FMT_plx "\n", val, size, addr, addr);
160 }
161 return val;
162}
163
164static void assigned_dev_ioport_write(void *opaque, hwaddr addr,
165 uint64_t data, unsigned size)
166{
167 assigned_dev_ioport_rw(opaque, addr, size, &data);
168}
169
170static uint64_t assigned_dev_ioport_read(void *opaque,
171 hwaddr addr, unsigned size)
172{
173 return assigned_dev_ioport_rw(opaque, addr, size, NULL);
174}
175
176static uint32_t slow_bar_readb(void *opaque, hwaddr addr)
177{
178 AssignedDevRegion *d = opaque;
179 uint8_t *in = d->u.r_virtbase + addr;
180 uint32_t r;
181
182 r = *in;
183 DEBUG("addr=0x" TARGET_FMT_plx " val=0x%08x\n", addr, r);
184
185 return r;
186}
187
188static uint32_t slow_bar_readw(void *opaque, hwaddr addr)
189{
190 AssignedDevRegion *d = opaque;
191 uint16_t *in = (uint16_t *)(d->u.r_virtbase + addr);
192 uint32_t r;
193
194 r = *in;
195 DEBUG("addr=0x" TARGET_FMT_plx " val=0x%08x\n", addr, r);
196
197 return r;
198}
199
200static uint32_t slow_bar_readl(void *opaque, hwaddr addr)
201{
202 AssignedDevRegion *d = opaque;
203 uint32_t *in = (uint32_t *)(d->u.r_virtbase + addr);
204 uint32_t r;
205
206 r = *in;
207 DEBUG("addr=0x" TARGET_FMT_plx " val=0x%08x\n", addr, r);
208
209 return r;
210}
211
212static void slow_bar_writeb(void *opaque, hwaddr addr, uint32_t val)
213{
214 AssignedDevRegion *d = opaque;
215 uint8_t *out = d->u.r_virtbase + addr;
216
217 DEBUG("addr=0x" TARGET_FMT_plx " val=0x%02x\n", addr, val);
218 *out = val;
219}
220
221static void slow_bar_writew(void *opaque, hwaddr addr, uint32_t val)
222{
223 AssignedDevRegion *d = opaque;
224 uint16_t *out = (uint16_t *)(d->u.r_virtbase + addr);
225
226 DEBUG("addr=0x" TARGET_FMT_plx " val=0x%04x\n", addr, val);
227 *out = val;
228}
229
230static void slow_bar_writel(void *opaque, hwaddr addr, uint32_t val)
231{
232 AssignedDevRegion *d = opaque;
233 uint32_t *out = (uint32_t *)(d->u.r_virtbase + addr);
234
235 DEBUG("addr=0x" TARGET_FMT_plx " val=0x%08x\n", addr, val);
236 *out = val;
237}
238
239static const MemoryRegionOps slow_bar_ops = {
240 .old_mmio = {
241 .read = { slow_bar_readb, slow_bar_readw, slow_bar_readl, },
242 .write = { slow_bar_writeb, slow_bar_writew, slow_bar_writel, },
243 },
244 .endianness = DEVICE_NATIVE_ENDIAN,
245};
246
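/*
 * Wrap a mmapped host BAR in a container MemoryRegion.  If the BAR also
 * holds the device's MSI-X vector table, overlay the emulated MSI-X page
 * (dev->mmio) at the table's offset with higher priority, so guest reads
 * and writes of the vector table are trapped instead of reaching the
 * hardware mapping directly.
 */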
static void assigned_dev_iomem_setup(PCIDevice *pci_dev, int region_num,
                                     pcibus_t e_size)
{
    AssignedDevice *r_dev = PCI_ASSIGN(pci_dev);
    AssignedDevRegion *region = &r_dev->v_addrs[region_num];
    PCIRegion *real_region = &r_dev->real_device.regions[region_num];

    if (e_size > 0) {
        memory_region_init(&region->container, OBJECT(pci_dev),
                           "assigned-dev-container", e_size);
        memory_region_add_subregion(&region->container, 0, &region->real_iomem);

        if (real_region->base_addr <= r_dev->msix_table_addr &&
            real_region->base_addr + real_region->size >
            r_dev->msix_table_addr) {
            uint64_t offset = r_dev->msix_table_addr - real_region->base_addr;

            memory_region_add_subregion_overlap(&region->container,
                                                offset,
                                                &r_dev->mmio,
                                                1);
        }
    }
}

static const MemoryRegionOps assigned_dev_ioport_ops = {
    .read = assigned_dev_ioport_read,
    .write = assigned_dev_ioport_write,
    .endianness = DEVICE_NATIVE_ENDIAN,
};

static void assigned_dev_ioport_setup(PCIDevice *pci_dev, int region_num,
                                      pcibus_t size)
{
    AssignedDevice *r_dev = PCI_ASSIGN(pci_dev);
    AssignedDevRegion *region = &r_dev->v_addrs[region_num];

    region->e_size = size;
    memory_region_init(&region->container, OBJECT(pci_dev),
                       "assigned-dev-container", size);
    memory_region_init_io(&region->real_iomem, OBJECT(pci_dev),
                          &assigned_dev_ioport_ops, r_dev->v_addrs + region_num,
                          "assigned-dev-iomem", size);
    memory_region_add_subregion(&region->container, 0, &region->real_iomem);
}
293
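/*
 * Raw access to the host device's PCI config space via the sysfs "config"
 * file (or a config fd handed in through the "configfd" property).  Reads
 * and writes are retried on EINTR/EAGAIN; any other short transfer is
 * treated as fatal.
 */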
294static uint32_t assigned_dev_pci_read(PCIDevice *d, int pos, int len)
295{
296 AssignedDevice *pci_dev = PCI_ASSIGN(d);
297 uint32_t val;
298 ssize_t ret;
299 int fd = pci_dev->real_device.config_fd;
300
301again:
302 ret = pread(fd, &val, len, pos);
303 if (ret != len) {
304 if ((ret < 0) && (errno == EINTR || errno == EAGAIN)) {
305 goto again;
306 }
307
308 hw_error("pci read failed, ret = %zd errno = %d\n", ret, errno);
309 }
310
311 return val;
312}
313
314static uint8_t assigned_dev_pci_read_byte(PCIDevice *d, int pos)
315{
316 return (uint8_t)assigned_dev_pci_read(d, pos, 1);
317}
318
319static void assigned_dev_pci_write(PCIDevice *d, int pos, uint32_t val, int len)
320{
321 AssignedDevice *pci_dev = PCI_ASSIGN(d);
322 ssize_t ret;
323 int fd = pci_dev->real_device.config_fd;
324
325again:
326 ret = pwrite(fd, &val, len, pos);
327 if (ret != len) {
328 if ((ret < 0) && (errno == EINTR || errno == EAGAIN)) {
329 goto again;
330 }
331
332 hw_error("pci write failed, ret = %zd errno = %d\n", ret, errno);
333 }
334}
335
336static void assigned_dev_emulate_config_read(AssignedDevice *dev,
337 uint32_t offset, uint32_t len)
338{
339 memset(dev->emulate_config_read + offset, 0xff, len);
340}
341
342static void assigned_dev_direct_config_read(AssignedDevice *dev,
343 uint32_t offset, uint32_t len)
344{
345 memset(dev->emulate_config_read + offset, 0, len);
346}
347
348static void assigned_dev_direct_config_write(AssignedDevice *dev,
349 uint32_t offset, uint32_t len)
350{
351 memset(dev->emulate_config_write + offset, 0, len);
352}
353
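/*
 * Walk the host device's capability list and return the config space
 * offset of capability 'cap', starting at 'start' (or at
 * PCI_CAPABILITY_LIST when 'start' is 0).  The walk is bounded to 48
 * entries so a corrupted list cannot loop forever.
 */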
354static uint8_t pci_find_cap_offset(PCIDevice *d, uint8_t cap, uint8_t start)
355{
356 int id;
357 int max_cap = 48;
358 int pos = start ? start : PCI_CAPABILITY_LIST;
359 int status;
360
361 status = assigned_dev_pci_read_byte(d, PCI_STATUS);
362 if ((status & PCI_STATUS_CAP_LIST) == 0) {
363 return 0;
364 }
365
366 while (max_cap--) {
367 pos = assigned_dev_pci_read_byte(d, pos);
368 if (pos < 0x40) {
369 break;
370 }
371
372 pos &= ~3;
373 id = assigned_dev_pci_read_byte(d, pos + PCI_CAP_LIST_ID);
374
375 if (id == 0xff) {
376 break;
377 }
378 if (id == cap) {
379 return pos;
380 }
381
382 pos += PCI_CAP_LIST_NEXT;
383 }
384 return 0;
385}
386
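/*
 * Map every valid host BAR into the guest.  Memory BARs are mmapped from
 * the sysfs resource<N> file and exposed as RAM-backed regions when their
 * size is a multiple of 4 KiB, or through slow_bar_ops otherwise; I/O port
 * BARs are serviced with pread()/pwrite() on the resource file for every
 * guest access.
 */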
387static void assigned_dev_register_regions(PCIRegion *io_regions,
388 unsigned long regions_num,
389 AssignedDevice *pci_dev,
390 Error **errp)
391{
392 uint32_t i;
393 PCIRegion *cur_region = io_regions;
394
395 for (i = 0; i < regions_num; i++, cur_region++) {
396 if (!cur_region->valid) {
397 continue;
398 }
399
400
401 if (cur_region->type & IORESOURCE_MEM) {
402 int t = PCI_BASE_ADDRESS_SPACE_MEMORY;
403 if (cur_region->type & IORESOURCE_PREFETCH) {
404 t |= PCI_BASE_ADDRESS_MEM_PREFETCH;
405 }
406 if (cur_region->type & IORESOURCE_MEM_64) {
407 t |= PCI_BASE_ADDRESS_MEM_TYPE_64;
408 }
409
410
411 pci_dev->v_addrs[i].u.r_virtbase = mmap(NULL, cur_region->size,
412 PROT_WRITE | PROT_READ,
413 MAP_SHARED,
414 cur_region->resource_fd,
415 (off_t)0);
416
417 if (pci_dev->v_addrs[i].u.r_virtbase == MAP_FAILED) {
418 pci_dev->v_addrs[i].u.r_virtbase = NULL;
419 error_setg_errno(errp, errno, "Couldn't mmap 0x%" PRIx64 "!",
420 cur_region->base_addr);
421 return;
422 }
423
424 pci_dev->v_addrs[i].r_size = cur_region->size;
425 pci_dev->v_addrs[i].e_size = 0;
426
427
428 pci_dev->v_addrs[i].u.r_virtbase +=
429 (cur_region->base_addr & 0xFFF);
430
431 if (cur_region->size & 0xFFF) {
432 error_report("PCI region %d at address 0x%" PRIx64 " has "
433 "size 0x%" PRIx64 ", which is not a multiple of "
434 "4K. You might experience some performance hit "
435 "due to that.",
436 i, cur_region->base_addr, cur_region->size);
437 memory_region_init_io(&pci_dev->v_addrs[i].real_iomem,
438 OBJECT(pci_dev), &slow_bar_ops,
439 &pci_dev->v_addrs[i],
440 "assigned-dev-slow-bar",
441 cur_region->size);
442 } else {
443 void *virtbase = pci_dev->v_addrs[i].u.r_virtbase;
444 char name[32];
445 snprintf(name, sizeof(name), "%s.bar%d",
446 object_get_typename(OBJECT(pci_dev)), i);
447 memory_region_init_ram_ptr(&pci_dev->v_addrs[i].real_iomem,
448 OBJECT(pci_dev), name,
449 cur_region->size, virtbase);
450 vmstate_register_ram(&pci_dev->v_addrs[i].real_iomem,
451 &pci_dev->dev.qdev);
452 }
453
454 assigned_dev_iomem_setup(&pci_dev->dev, i, cur_region->size);
455 pci_register_bar((PCIDevice *) pci_dev, i, t,
456 &pci_dev->v_addrs[i].container);
457 continue;
458 } else {
459
460 uint32_t val;
461 int ret;
462
463
464
465
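            /*
             * Probe the resource file: port I/O accesses are only valid at
             * sizes 1, 2 and 4, so a deliberately bogus 3-byte read must
             * fail with EINVAL on kernels that support ioport resources.
             * Any other errno means the kernel cannot handle this region,
             * so hide it from the guest.
             */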
466 ret = pread(pci_dev->v_addrs[i].region->resource_fd, &val, 3, 0);
467 if (ret >= 0) {
468 error_report("Unexpected return from I/O port read: %d", ret);
469 abort();
470 } else if (errno != EINVAL) {
471 error_report("Kernel doesn't support ioport resource "
472 "access, hiding this region.");
473 close(pci_dev->v_addrs[i].region->resource_fd);
474 cur_region->valid = 0;
475 continue;
476 }
477
478 pci_dev->v_addrs[i].u.r_baseport = cur_region->base_addr;
479 pci_dev->v_addrs[i].r_size = cur_region->size;
480 pci_dev->v_addrs[i].e_size = 0;
481
482 assigned_dev_ioport_setup(&pci_dev->dev, i, cur_region->size);
483 pci_register_bar((PCIDevice *) pci_dev, i,
484 PCI_BASE_ADDRESS_SPACE_IO,
485 &pci_dev->v_addrs[i].container);
486 }
487 }
488
489
490}
491
492static void get_real_id(const char *devpath, const char *idname, uint16_t *val,
493 Error **errp)
494{
495 FILE *f;
496 char name[128];
497 long id;
498
499 snprintf(name, sizeof(name), "%s%s", devpath, idname);
500 f = fopen(name, "r");
501 if (f == NULL) {
502 error_setg_file_open(errp, errno, name);
503 return;
504 }
505 if (fscanf(f, "%li\n", &id) == 1) {
506 *val = id;
507 } else {
508 error_setg(errp, "Failed to parse contents of '%s'", name);
509 }
510 fclose(f);
511}
512
513static void get_real_vendor_id(const char *devpath, uint16_t *val,
514 Error **errp)
515{
516 get_real_id(devpath, "vendor", val, errp);
517}
518
519static void get_real_device_id(const char *devpath, uint16_t *val,
520 Error **errp)
521{
522 get_real_id(devpath, "device", val, errp);
523}
524
525static void get_real_device(AssignedDevice *pci_dev, Error **errp)
526{
527 char dir[128], name[128];
528 int fd, r = 0;
529 FILE *f;
530 uint64_t start, end, size, flags;
531 uint16_t id;
532 PCIRegion *rp;
533 PCIDevRegions *dev = &pci_dev->real_device;
534 Error *local_err = NULL;
535
536 dev->region_number = 0;
537
538 snprintf(dir, sizeof(dir), "/sys/bus/pci/devices/%04x:%02x:%02x.%x/",
539 pci_dev->host.domain, pci_dev->host.bus,
540 pci_dev->host.slot, pci_dev->host.function);
541
542 snprintf(name, sizeof(name), "%sconfig", dir);
543
544 if (pci_dev->configfd_name && *pci_dev->configfd_name) {
545 dev->config_fd = monitor_fd_param(cur_mon, pci_dev->configfd_name,
546 &local_err);
547 if (local_err) {
548 error_propagate(errp, local_err);
549 return;
550 }
551 } else {
552 dev->config_fd = open(name, O_RDWR);
553
554 if (dev->config_fd == -1) {
555 error_setg_file_open(errp, errno, name);
556 return;
557 }
558 }
559again:
560 r = read(dev->config_fd, pci_dev->dev.config,
561 pci_config_size(&pci_dev->dev));
562 if (r < 0) {
563 if (errno == EINTR || errno == EAGAIN) {
564 goto again;
565 }
566 error_setg_errno(errp, errno, "read(\"%s\")",
567 (pci_dev->configfd_name && *pci_dev->configfd_name) ?
568 pci_dev->configfd_name : name);
569 return;
570 }
571
572
573 if (pci_dev->dev.cap_present & QEMU_PCI_CAP_MULTIFUNCTION) {
574 pci_dev->dev.config[PCI_HEADER_TYPE] |= PCI_HEADER_TYPE_MULTI_FUNCTION;
575 } else {
576 pci_dev->dev.config[PCI_HEADER_TYPE] &= ~PCI_HEADER_TYPE_MULTI_FUNCTION;
577 }
578
579
580
581
582 memset(&pci_dev->dev.config[PCI_BASE_ADDRESS_0], 0, 24);
583 memset(&pci_dev->dev.config[PCI_ROM_ADDRESS], 0, 4);
584
585 snprintf(name, sizeof(name), "%sresource", dir);
586
587 f = fopen(name, "r");
588 if (f == NULL) {
589 error_setg_file_open(errp, errno, name);
590 return;
591 }
592
593 for (r = 0; r < PCI_ROM_SLOT; r++) {
594 if (fscanf(f, "%" SCNi64 " %" SCNi64 " %" SCNi64 "\n",
595 &start, &end, &flags) != 3) {
596 break;
597 }
598
599 rp = dev->regions + r;
600 rp->valid = 0;
601 rp->resource_fd = -1;
602 size = end - start + 1;
603 flags &= IORESOURCE_IO | IORESOURCE_MEM | IORESOURCE_PREFETCH
604 | IORESOURCE_MEM_64;
605 if (size == 0 || (flags & ~IORESOURCE_PREFETCH) == 0) {
606 continue;
607 }
608 if (flags & IORESOURCE_MEM) {
609 flags &= ~IORESOURCE_IO;
610 } else {
611 flags &= ~IORESOURCE_PREFETCH;
612 }
613 snprintf(name, sizeof(name), "%sresource%d", dir, r);
614 fd = open(name, O_RDWR);
615 if (fd == -1) {
616 continue;
617 }
618 rp->resource_fd = fd;
619
620 rp->type = flags;
621 rp->valid = 1;
622 rp->base_addr = start;
623 rp->size = size;
624 pci_dev->v_addrs[r].region = rp;
625 DEBUG("region %d size %" PRIu64 " start 0x%" PRIx64
626 " type %d resource_fd %d\n",
627 r, rp->size, start, rp->type, rp->resource_fd);
628 }
629
630 fclose(f);
631
632
633 get_real_vendor_id(dir, &id, &local_err);
634 if (local_err) {
635 error_propagate(errp, local_err);
636 return;
637 }
638 pci_dev->dev.config[0] = id & 0xff;
639 pci_dev->dev.config[1] = (id & 0xff00) >> 8;
640
641
642 get_real_device_id(dir, &id, &local_err);
643 if (local_err) {
644 error_propagate(errp, local_err);
645 return;
646 }
647 pci_dev->dev.config[2] = id & 0xff;
648 pci_dev->dev.config[3] = (id & 0xff00) >> 8;
649
650 pci_word_test_and_clear_mask(pci_dev->emulate_config_write + PCI_COMMAND,
651 PCI_COMMAND_MASTER | PCI_COMMAND_INTX_DISABLE);
652
653 dev->region_number = r;
654}
655
656static void free_msi_virqs(AssignedDevice *dev)
657{
658 int i;
659
660 for (i = 0; i < dev->msi_virq_nr; i++) {
661 if (dev->msi_virq[i] >= 0) {
662 kvm_irqchip_release_virq(kvm_state, dev->msi_virq[i]);
663 dev->msi_virq[i] = -1;
664 }
665 }
666 g_free(dev->msi_virq);
667 dev->msi_virq = NULL;
668 dev->msi_virq_nr = 0;
669}
670
static void free_assigned_device(AssignedDevice *dev)
{
    int i;

    if (dev->cap.available & ASSIGNED_DEVICE_CAP_MSIX) {
        assigned_dev_unregister_msix_mmio(dev);
    }
    for (i = 0; i < dev->real_device.region_number; i++) {
        PCIRegion *pci_region = &dev->real_device.regions[i];
        AssignedDevRegion *region = &dev->v_addrs[i];

        if (!pci_region->valid) {
            continue;
        }
        if (pci_region->type & IORESOURCE_IO) {
            if (region->u.r_baseport) {
                memory_region_del_subregion(&region->container,
                                            &region->real_iomem);
            }
        } else if (pci_region->type & IORESOURCE_MEM) {
            if (region->u.r_virtbase) {
                memory_region_del_subregion(&region->container,
                                            &region->real_iomem);

                if (pci_region->base_addr <= dev->msix_table_addr &&
                    pci_region->base_addr + pci_region->size >
                    dev->msix_table_addr) {
                    memory_region_del_subregion(&region->container,
                                                &dev->mmio);
                }
                if (munmap(region->u.r_virtbase,
                           (pci_region->size + 0xFFF) & 0xFFFFF000)) {
                    error_report("Failed to unmap assigned device region: %s",
                                 strerror(errno));
                }
            }
        }
        if (pci_region->resource_fd >= 0) {
            close(pci_region->resource_fd);
        }
    }

    if (dev->real_device.config_fd >= 0) {
        close(dev->real_device.config_fd);
    }

    free_msi_virqs(dev);
}
720
721
722
723
724
725
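/*
 * Called when KVM refuses the assignment with -EBUSY: look up which host
 * driver currently owns the device in sysfs and build a hint telling the
 * user how to rebind it to pci-stub before retrying.
 */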
726static char *assign_failed_examine(const AssignedDevice *dev)
727{
728 char name[PATH_MAX], dir[PATH_MAX], driver[PATH_MAX] = {}, *ns;
729 uint16_t vendor_id, device_id;
730 int r;
731 Error *local_err = NULL;
732
733 snprintf(dir, sizeof(dir), "/sys/bus/pci/devices/%04x:%02x:%02x.%01x/",
734 dev->host.domain, dev->host.bus, dev->host.slot,
735 dev->host.function);
736
737 snprintf(name, sizeof(name), "%sdriver", dir);
738
739 r = readlink(name, driver, sizeof(driver));
740 if ((r <= 0) || r >= sizeof(driver)) {
741 goto fail;
742 }
743
744 driver[r] = 0;
745 ns = strrchr(driver, '/');
746 if (!ns) {
747 goto fail;
748 }
749
750 ns++;
751
752 if ((get_real_vendor_id(dir, &vendor_id, &local_err), local_err) ||
753 (get_real_device_id(dir, &device_id, &local_err), local_err)) {
754
755
756
757 error_free(local_err);
758 goto fail;
759 }
760
761 return g_strdup_printf(
762 "*** The driver '%s' is occupying your device %04x:%02x:%02x.%x.\n"
763 "***\n"
764 "*** You can try the following commands to free it:\n"
765 "***\n"
766 "*** $ echo \"%04x %04x\" > /sys/bus/pci/drivers/pci-stub/new_id\n"
767 "*** $ echo \"%04x:%02x:%02x.%x\" > /sys/bus/pci/drivers/%s/unbind\n"
768 "*** $ echo \"%04x:%02x:%02x.%x\" > /sys/bus/pci/drivers/"
769 "pci-stub/bind\n"
770 "*** $ echo \"%04x %04x\" > /sys/bus/pci/drivers/pci-stub/remove_id\n"
771 "***\n",
772 ns, dev->host.domain, dev->host.bus, dev->host.slot,
773 dev->host.function, vendor_id, device_id,
774 dev->host.domain, dev->host.bus, dev->host.slot, dev->host.function,
775 ns, dev->host.domain, dev->host.bus, dev->host.slot,
776 dev->host.function, vendor_id, device_id);
777
778fail:
779 return g_strdup("Couldn't find out why.\n");
780}
781
782static void assign_device(AssignedDevice *dev, Error **errp)
783{
784 uint32_t flags = KVM_DEV_ASSIGN_ENABLE_IOMMU;
785 int r;
786
787
788 if (!kvm_check_extension(kvm_state, KVM_CAP_PCI_SEGMENT) &&
789 dev->host.domain) {
790 error_setg(errp, "Can't assign device inside non-zero PCI segment "
791 "as this KVM module doesn't support it.");
792 return;
793 }
794
795 if (!kvm_check_extension(kvm_state, KVM_CAP_IOMMU)) {
796 error_setg(errp, "No IOMMU found. Unable to assign device \"%s\"",
797 dev->dev.qdev.id);
798 return;
799 }
800
801 if (dev->features & ASSIGNED_DEVICE_SHARE_INTX_MASK &&
802 kvm_has_intx_set_mask()) {
803 flags |= KVM_DEV_ASSIGN_PCI_2_3;
804 }
805
806 r = kvm_device_pci_assign(kvm_state, &dev->host, flags, &dev->dev_id);
807 if (r < 0) {
808 switch (r) {
809 case -EBUSY: {
810 char *cause;
811
812 cause = assign_failed_examine(dev);
813 error_setg_errno(errp, -r, "Failed to assign device \"%s\"",
814 dev->dev.qdev.id);
815 error_append_hint(errp, "%s", cause);
816 g_free(cause);
817 break;
818 }
819 default:
820 error_setg_errno(errp, -r, "Failed to assign device \"%s\"",
821 dev->dev.qdev.id);
822 break;
823 }
824 }
825}
826
827static void verify_irqchip_in_kernel(Error **errp)
828{
829 if (kvm_irqchip_in_kernel()) {
830 return;
831 }
832 error_setg(errp, "pci-assign requires KVM with in-kernel irqchip enabled");
833}
834
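/*
 * (Re)route the device's legacy INTx to the guest.  Any previously
 * assigned interrupt is torn down first; the host side then uses either
 * the real INTx line or, with the prefer_msi feature, a host MSI.  If the
 * host cannot share the INTx line, fall back to host MSI automatically.
 */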
835static int assign_intx(AssignedDevice *dev, Error **errp)
836{
837 AssignedIRQType new_type;
838 PCIINTxRoute intx_route;
839 bool intx_host_msi;
840 int r;
841 Error *local_err = NULL;
842
843
844 if (assigned_dev_pci_read_byte(&dev->dev, PCI_INTERRUPT_PIN) == 0) {
845 pci_device_set_intx_routing_notifier(&dev->dev, NULL);
846 return 0;
847 }
848
849 verify_irqchip_in_kernel(&local_err);
850 if (local_err) {
851 error_propagate(errp, local_err);
852 return -ENOTSUP;
853 }
854
855 pci_device_set_intx_routing_notifier(&dev->dev,
856 assigned_dev_update_irq_routing);
857
858 intx_route = pci_device_route_intx_to_irq(&dev->dev, dev->intpin);
859 assert(intx_route.mode != PCI_INTX_INVERTED);
860
861 if (!pci_intx_route_changed(&dev->intx_route, &intx_route)) {
862 return 0;
863 }
864
865 switch (dev->assigned_irq_type) {
866 case ASSIGNED_IRQ_INTX_HOST_INTX:
867 case ASSIGNED_IRQ_INTX_HOST_MSI:
868 intx_host_msi = dev->assigned_irq_type == ASSIGNED_IRQ_INTX_HOST_MSI;
869 r = kvm_device_intx_deassign(kvm_state, dev->dev_id, intx_host_msi);
870 break;
871 case ASSIGNED_IRQ_MSI:
872 r = kvm_device_msi_deassign(kvm_state, dev->dev_id);
873 break;
874 case ASSIGNED_IRQ_MSIX:
875 r = kvm_device_msix_deassign(kvm_state, dev->dev_id);
876 break;
877 default:
878 r = 0;
879 break;
880 }
881 if (r) {
882 perror("assign_intx: deassignment of previous interrupt failed");
883 }
884 dev->assigned_irq_type = ASSIGNED_IRQ_NONE;
885
886 if (intx_route.mode == PCI_INTX_DISABLED) {
887 dev->intx_route = intx_route;
888 return 0;
889 }
890
891retry:
892 if (dev->features & ASSIGNED_DEVICE_PREFER_MSI_MASK &&
893 dev->cap.available & ASSIGNED_DEVICE_CAP_MSI) {
894 intx_host_msi = true;
895 new_type = ASSIGNED_IRQ_INTX_HOST_MSI;
896 } else {
897 intx_host_msi = false;
898 new_type = ASSIGNED_IRQ_INTX_HOST_INTX;
899 }
900
901 r = kvm_device_intx_assign(kvm_state, dev->dev_id, intx_host_msi,
902 intx_route.irq);
903 if (r < 0) {
904 if (r == -EIO && !(dev->features & ASSIGNED_DEVICE_PREFER_MSI_MASK) &&
905 dev->cap.available & ASSIGNED_DEVICE_CAP_MSI) {
906
907
908 error_report("Host-side INTx sharing not supported, "
909 "using MSI instead");
910 error_printf("Some devices do not work properly in this mode.\n");
911 dev->features |= ASSIGNED_DEVICE_PREFER_MSI_MASK;
912 goto retry;
913 }
914 error_setg_errno(errp, -r, "Failed to assign irq for \"%s\"",
915 dev->dev.qdev.id);
916 error_append_hint(errp, "Perhaps you are assigning a device "
917 "that shares an IRQ with another device?\n");
918 return r;
919 }
920
921 dev->intx_route = intx_route;
922 dev->assigned_irq_type = new_type;
923 return r;
924}
925
926static void deassign_device(AssignedDevice *dev)
927{
928 int r;
929
930 r = kvm_device_pci_deassign(kvm_state, dev->dev_id);
931 assert(r == 0);
932}
933
934
935
936
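/*
 * INTx routing notifier: invoked whenever the guest chipset changes the
 * pin-to-IRQ routing.  If re-assignment fails the device is unplugged.
 */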
937static void assigned_dev_update_irq_routing(PCIDevice *dev)
938{
939 AssignedDevice *assigned_dev = PCI_ASSIGN(dev);
940 Error *err = NULL;
941 int r;
942
943 r = assign_intx(assigned_dev, &err);
944 if (r < 0) {
945 error_report_err(err);
946 err = NULL;
947 qdev_unplug(&dev->qdev, &err);
948 assert(!err);
949 }
950}
951
952static void assigned_dev_update_msi(PCIDevice *pci_dev)
953{
954 AssignedDevice *assigned_dev = PCI_ASSIGN(pci_dev);
955 uint8_t ctrl_byte = pci_get_byte(pci_dev->config + pci_dev->msi_cap +
956 PCI_MSI_FLAGS);
957 int r;
958
959
960
961
962 if (assigned_dev->assigned_irq_type == ASSIGNED_IRQ_MSI ||
963 (ctrl_byte & PCI_MSI_FLAGS_ENABLE)) {
964 r = kvm_device_msi_deassign(kvm_state, assigned_dev->dev_id);
965
966 if (r && r != -ENXIO) {
967 perror("assigned_dev_update_msi: deassign irq");
968 }
969
970 free_msi_virqs(assigned_dev);
971
972 assigned_dev->assigned_irq_type = ASSIGNED_IRQ_NONE;
973 pci_device_set_intx_routing_notifier(pci_dev, NULL);
974 }
975
976 if (ctrl_byte & PCI_MSI_FLAGS_ENABLE) {
977 MSIMessage msg = msi_get_message(pci_dev, 0);
978 int virq;
979
980 virq = kvm_irqchip_add_msi_route(kvm_state, msg, pci_dev);
981 if (virq < 0) {
982 perror("assigned_dev_update_msi: kvm_irqchip_add_msi_route");
983 return;
984 }
985
986 assigned_dev->msi_virq = g_malloc(sizeof(*assigned_dev->msi_virq));
987 assigned_dev->msi_virq_nr = 1;
988 assigned_dev->msi_virq[0] = virq;
989 if (kvm_device_msi_assign(kvm_state, assigned_dev->dev_id, virq) < 0) {
990 perror("assigned_dev_update_msi: kvm_device_msi_assign");
991 }
992
993 assigned_dev->intx_route.mode = PCI_INTX_DISABLED;
994 assigned_dev->intx_route.irq = -1;
995 assigned_dev->assigned_irq_type = ASSIGNED_IRQ_MSI;
996 } else {
997 Error *local_err = NULL;
998
999 assign_intx(assigned_dev, &local_err);
1000 if (local_err) {
1001 error_report_err(local_err);
1002 }
1003 }
1004}
1005
1006static void assigned_dev_update_msi_msg(PCIDevice *pci_dev)
1007{
1008 AssignedDevice *assigned_dev = PCI_ASSIGN(pci_dev);
1009 uint8_t ctrl_byte = pci_get_byte(pci_dev->config + pci_dev->msi_cap +
1010 PCI_MSI_FLAGS);
1011
1012 if (assigned_dev->assigned_irq_type != ASSIGNED_IRQ_MSI ||
1013 !(ctrl_byte & PCI_MSI_FLAGS_ENABLE)) {
1014 return;
1015 }
1016
1017 kvm_irqchip_update_msi_route(kvm_state, assigned_dev->msi_virq[0],
1018 msi_get_message(pci_dev, 0), pci_dev);
1019}
1020
1021static bool assigned_dev_msix_masked(MSIXTableEntry *entry)
1022{
1023 return (entry->ctrl & cpu_to_le32(0x1)) != 0;
1024}
1025
1026
1027
1028
1029
1030
1031
1032
1033
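/*
 * Vector entries with a data value of 0 are treated as unused here: this
 * legacy code cannot program a vector that the guest has not yet filled
 * in, so such entries are skipped when building the host-side MSI routes.
 */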
1034static bool assigned_dev_msix_skipped(MSIXTableEntry *entry)
1035{
1036 return !entry->data;
1037}
1038
1039static int assigned_dev_update_msix_mmio(PCIDevice *pci_dev)
1040{
1041 AssignedDevice *adev = PCI_ASSIGN(pci_dev);
1042 uint16_t entries_nr = 0;
1043 int i, r = 0;
1044 MSIXTableEntry *entry = adev->msix_table;
1045 MSIMessage msg;
1046
1047
1048 for (i = 0; i < adev->msix_max; i++, entry++) {
1049 if (assigned_dev_msix_skipped(entry)) {
1050 continue;
1051 }
1052 entries_nr++;
1053 }
1054
1055 DEBUG("MSI-X entries: %d\n", entries_nr);
1056
1057
1058 if (!entries_nr) {
1059 return 0;
1060 }
1061
1062 r = kvm_device_msix_init_vectors(kvm_state, adev->dev_id, entries_nr);
1063 if (r != 0) {
1064 error_report("fail to set MSI-X entry number for MSIX! %s",
1065 strerror(-r));
1066 return r;
1067 }
1068
1069 free_msi_virqs(adev);
1070
1071 adev->msi_virq_nr = adev->msix_max;
1072 adev->msi_virq = g_malloc(adev->msix_max * sizeof(*adev->msi_virq));
1073
1074 entry = adev->msix_table;
1075 for (i = 0; i < adev->msix_max; i++, entry++) {
1076 adev->msi_virq[i] = -1;
1077
1078 if (assigned_dev_msix_skipped(entry)) {
1079 continue;
1080 }
1081
1082 msg.address = entry->addr_lo | ((uint64_t)entry->addr_hi << 32);
1083 msg.data = entry->data;
1084 r = kvm_irqchip_add_msi_route(kvm_state, msg, pci_dev);
1085 if (r < 0) {
1086 return r;
1087 }
1088 adev->msi_virq[i] = r;
1089
1090 DEBUG("MSI-X vector %d, gsi %d, addr %08x_%08x, data %08x\n", i,
1091 r, entry->addr_hi, entry->addr_lo, entry->data);
1092
1093 r = kvm_device_msix_set_vector(kvm_state, adev->dev_id, i,
1094 adev->msi_virq[i]);
1095 if (r) {
1096 error_report("fail to set MSI-X entry! %s", strerror(-r));
1097 break;
1098 }
1099 }
1100
1101 return r;
1102}
1103
1104static void assigned_dev_update_msix(PCIDevice *pci_dev)
1105{
1106 AssignedDevice *assigned_dev = PCI_ASSIGN(pci_dev);
1107 uint16_t ctrl_word = pci_get_word(pci_dev->config + pci_dev->msix_cap +
1108 PCI_MSIX_FLAGS);
1109 int r;
1110
1111
1112
1113
1114 if ((assigned_dev->assigned_irq_type == ASSIGNED_IRQ_MSIX) ||
1115 (ctrl_word & PCI_MSIX_FLAGS_ENABLE)) {
1116 r = kvm_device_msix_deassign(kvm_state, assigned_dev->dev_id);
1117
1118 if (r && r != -ENXIO) {
1119 perror("assigned_dev_update_msix: deassign irq");
1120 }
1121
1122 free_msi_virqs(assigned_dev);
1123
1124 assigned_dev->assigned_irq_type = ASSIGNED_IRQ_NONE;
1125 pci_device_set_intx_routing_notifier(pci_dev, NULL);
1126 }
1127
1128 if (ctrl_word & PCI_MSIX_FLAGS_ENABLE) {
1129 if (assigned_dev_update_msix_mmio(pci_dev) < 0) {
1130 perror("assigned_dev_update_msix_mmio");
1131 return;
1132 }
1133
1134 if (assigned_dev->msi_virq_nr > 0) {
1135 if (kvm_device_msix_assign(kvm_state, assigned_dev->dev_id) < 0) {
1136 perror("assigned_dev_enable_msix: assign irq");
1137 return;
1138 }
1139 }
1140 assigned_dev->intx_route.mode = PCI_INTX_DISABLED;
1141 assigned_dev->intx_route.irq = -1;
1142 assigned_dev->assigned_irq_type = ASSIGNED_IRQ_MSIX;
1143 } else {
1144 Error *local_err = NULL;
1145
1146 assign_intx(assigned_dev, &local_err);
1147 if (local_err) {
1148 error_report_err(local_err);
1149 }
1150 }
1151}
1152
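/*
 * Config space reads return a byte-wise mix of emulated and real values:
 * bytes whose emulate_config_read mask is set come from the emulated
 * config space, all others are read from the host device.
 */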
1153static uint32_t assigned_dev_pci_read_config(PCIDevice *pci_dev,
1154 uint32_t address, int len)
1155{
1156 AssignedDevice *assigned_dev = PCI_ASSIGN(pci_dev);
1157 uint32_t virt_val = pci_default_read_config(pci_dev, address, len);
1158 uint32_t real_val, emulate_mask, full_emulation_mask;
1159
1160 emulate_mask = 0;
1161 memcpy(&emulate_mask, assigned_dev->emulate_config_read + address, len);
1162 emulate_mask = le32_to_cpu(emulate_mask);
1163
1164 full_emulation_mask = 0xffffffff >> (32 - len * 8);
1165
1166 if (emulate_mask != full_emulation_mask) {
1167 real_val = assigned_dev_pci_read(pci_dev, address, len);
1168 return (virt_val & emulate_mask) | (real_val & ~emulate_mask);
1169 } else {
1170 return virt_val;
1171 }
1172}
1173
1174static void assigned_dev_pci_write_config(PCIDevice *pci_dev, uint32_t address,
1175 uint32_t val, int len)
1176{
1177 AssignedDevice *assigned_dev = PCI_ASSIGN(pci_dev);
1178 uint16_t old_cmd = pci_get_word(pci_dev->config + PCI_COMMAND);
1179 uint32_t emulate_mask, full_emulation_mask;
1180 int ret;
1181
1182 pci_default_write_config(pci_dev, address, val, len);
1183
1184 if (kvm_has_intx_set_mask() &&
1185 range_covers_byte(address, len, PCI_COMMAND + 1)) {
1186 bool intx_masked = (pci_get_word(pci_dev->config + PCI_COMMAND) &
1187 PCI_COMMAND_INTX_DISABLE);
1188
1189 if (intx_masked != !!(old_cmd & PCI_COMMAND_INTX_DISABLE)) {
1190 ret = kvm_device_intx_set_mask(kvm_state, assigned_dev->dev_id,
1191 intx_masked);
1192 if (ret) {
1193 perror("assigned_dev_pci_write_config: set intx mask");
1194 }
1195 }
1196 }
1197 if (assigned_dev->cap.available & ASSIGNED_DEVICE_CAP_MSI) {
1198 if (range_covers_byte(address, len,
1199 pci_dev->msi_cap + PCI_MSI_FLAGS)) {
1200 assigned_dev_update_msi(pci_dev);
1201 } else if (ranges_overlap(address, len,
1202 pci_dev->msi_cap + PCI_MSI_ADDRESS_LO, 6)) {
1203 assigned_dev_update_msi_msg(pci_dev);
1204 }
1205 }
1206 if (assigned_dev->cap.available & ASSIGNED_DEVICE_CAP_MSIX) {
1207 if (range_covers_byte(address, len,
1208 pci_dev->msix_cap + PCI_MSIX_FLAGS + 1)) {
1209 assigned_dev_update_msix(pci_dev);
1210 }
1211 }
1212
1213 emulate_mask = 0;
1214 memcpy(&emulate_mask, assigned_dev->emulate_config_write + address, len);
1215 emulate_mask = le32_to_cpu(emulate_mask);
1216
1217 full_emulation_mask = 0xffffffff >> (32 - len * 8);
1218
1219 if (emulate_mask != full_emulation_mask) {
1220 if (emulate_mask) {
1221 val &= ~emulate_mask;
1222 val |= assigned_dev_pci_read(pci_dev, address, len) & emulate_mask;
1223 }
1224 assigned_dev_pci_write(pci_dev, address, val, len);
1225 }
1226}
1227
1228static void assigned_dev_setup_cap_read(AssignedDevice *dev, uint32_t offset,
1229 uint32_t len)
1230{
1231 assigned_dev_direct_config_read(dev, offset, len);
1232 assigned_dev_emulate_config_read(dev, offset + PCI_CAP_LIST_NEXT, 1);
1233}
1234
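/*
 * Rebuild the capability list for the guest.  Only a known-safe subset of
 * the host's capabilities (MSI, MSI-X, PM, PCI Express, PCI-X, VPD and
 * vendor-specific) is re-exported, each at its original config offset and
 * with host-controlled or unsupported fields masked out.
 */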
1235static int assigned_device_pci_cap_init(PCIDevice *pci_dev, Error **errp)
1236{
1237 AssignedDevice *dev = PCI_ASSIGN(pci_dev);
1238 PCIRegion *pci_region = dev->real_device.regions;
1239 int ret, pos;
1240 Error *local_err = NULL;
1241
1242
1243 pci_set_byte(pci_dev->config + PCI_CAPABILITY_LIST, 0);
1244 pci_set_word(pci_dev->config + PCI_STATUS,
1245 pci_get_word(pci_dev->config + PCI_STATUS) &
1246 ~PCI_STATUS_CAP_LIST);
1247
1248
1249
1250 pos = pci_find_cap_offset(pci_dev, PCI_CAP_ID_MSI, 0);
1251 if (pos != 0 && kvm_check_extension(kvm_state, KVM_CAP_ASSIGN_DEV_IRQ)) {
1252 verify_irqchip_in_kernel(&local_err);
1253 if (local_err) {
1254 error_propagate(errp, local_err);
1255 return -ENOTSUP;
1256 }
1257 dev->cap.available |= ASSIGNED_DEVICE_CAP_MSI;
1258
1259 ret = pci_add_capability2(pci_dev, PCI_CAP_ID_MSI, pos, 10,
1260 &local_err);
1261 if (ret < 0) {
1262 error_propagate(errp, local_err);
1263 return ret;
1264 }
1265 pci_dev->msi_cap = pos;
1266
1267 pci_set_word(pci_dev->config + pos + PCI_MSI_FLAGS,
1268 pci_get_word(pci_dev->config + pos + PCI_MSI_FLAGS) &
1269 PCI_MSI_FLAGS_QMASK);
1270 pci_set_long(pci_dev->config + pos + PCI_MSI_ADDRESS_LO, 0);
1271 pci_set_word(pci_dev->config + pos + PCI_MSI_DATA_32, 0);
1272
1273
1274 pci_set_word(pci_dev->wmask + pos + PCI_MSI_FLAGS,
1275 PCI_MSI_FLAGS_QSIZE | PCI_MSI_FLAGS_ENABLE);
1276 pci_set_long(pci_dev->wmask + pos + PCI_MSI_ADDRESS_LO, 0xfffffffc);
1277 pci_set_word(pci_dev->wmask + pos + PCI_MSI_DATA_32, 0xffff);
1278 }
1279
1280 pos = pci_find_cap_offset(pci_dev, PCI_CAP_ID_MSIX, 0);
1281 if (pos != 0 && kvm_device_msix_supported(kvm_state)) {
1282 int bar_nr;
1283 uint32_t msix_table_entry;
1284 uint16_t msix_max;
1285
1286 verify_irqchip_in_kernel(&local_err);
1287 if (local_err) {
1288 error_propagate(errp, local_err);
1289 return -ENOTSUP;
1290 }
1291 dev->cap.available |= ASSIGNED_DEVICE_CAP_MSIX;
1292 ret = pci_add_capability2(pci_dev, PCI_CAP_ID_MSIX, pos, 12,
1293 &local_err);
1294 if (ret < 0) {
1295 error_propagate(errp, local_err);
1296 return ret;
1297 }
1298 pci_dev->msix_cap = pos;
1299
1300 msix_max = (pci_get_word(pci_dev->config + pos + PCI_MSIX_FLAGS) &
1301 PCI_MSIX_FLAGS_QSIZE) + 1;
1302 msix_max = MIN(msix_max, KVM_MAX_MSIX_PER_DEV);
1303 pci_set_word(pci_dev->config + pos + PCI_MSIX_FLAGS, msix_max - 1);
1304
1305
1306 pci_set_word(pci_dev->wmask + pos + PCI_MSIX_FLAGS,
1307 PCI_MSIX_FLAGS_ENABLE | PCI_MSIX_FLAGS_MASKALL);
1308
1309 msix_table_entry = pci_get_long(pci_dev->config + pos + PCI_MSIX_TABLE);
1310 bar_nr = msix_table_entry & PCI_MSIX_FLAGS_BIRMASK;
1311 msix_table_entry &= ~PCI_MSIX_FLAGS_BIRMASK;
1312 dev->msix_table_addr = pci_region[bar_nr].base_addr + msix_table_entry;
1313 dev->msix_max = msix_max;
1314 }
1315
1316
1317 pos = pci_find_cap_offset(pci_dev, PCI_CAP_ID_PM, 0);
1318 if (pos) {
1319 uint16_t pmc;
1320
1321 ret = pci_add_capability2(pci_dev, PCI_CAP_ID_PM, pos, PCI_PM_SIZEOF,
1322 &local_err);
1323 if (ret < 0) {
1324 error_propagate(errp, local_err);
1325 return ret;
1326 }
1327
1328 assigned_dev_setup_cap_read(dev, pos, PCI_PM_SIZEOF);
1329
1330 pmc = pci_get_word(pci_dev->config + pos + PCI_CAP_FLAGS);
1331 pmc &= (PCI_PM_CAP_VER_MASK | PCI_PM_CAP_DSI);
1332 pci_set_word(pci_dev->config + pos + PCI_CAP_FLAGS, pmc);
1333
1334
1335
1336 pci_set_word(pci_dev->config + pos + PCI_PM_CTRL,
1337 PCI_PM_CTRL_NO_SOFT_RESET);
1338
1339 pci_set_byte(pci_dev->config + pos + PCI_PM_PPB_EXTENSIONS, 0);
1340 pci_set_byte(pci_dev->config + pos + PCI_PM_DATA_REGISTER, 0);
1341 }
1342
1343 pos = pci_find_cap_offset(pci_dev, PCI_CAP_ID_EXP, 0);
1344 if (pos) {
1345 uint8_t version, size = 0;
1346 uint16_t type, devctl, lnksta;
1347 uint32_t devcap, lnkcap;
1348
1349 version = pci_get_byte(pci_dev->config + pos + PCI_EXP_FLAGS);
1350 version &= PCI_EXP_FLAGS_VERS;
1351 if (version == 1) {
1352 size = 0x14;
1353 } else if (version == 2) {
1354
1355
1356
1357
1358
1359
1360 size = MIN(0x3c, PCI_CONFIG_SPACE_SIZE - pos);
1361 if (size < 0x34) {
1362 error_setg(errp, "Invalid size PCIe cap-id 0x%x",
1363 PCI_CAP_ID_EXP);
1364 return -EINVAL;
1365 } else if (size != 0x3c) {
1366 error_report("WARNING, %s: PCIe cap-id 0x%x has "
1367 "non-standard size 0x%x; std size should be 0x3c",
1368 __func__, PCI_CAP_ID_EXP, size);
1369 }
1370 } else if (version == 0) {
1371 uint16_t vid, did;
1372 vid = pci_get_word(pci_dev->config + PCI_VENDOR_ID);
1373 did = pci_get_word(pci_dev->config + PCI_DEVICE_ID);
1374 if (vid == PCI_VENDOR_ID_INTEL && did == 0x10ed) {
1375
1376
1377
1378
1379 size = 0x3c;
1380 }
1381 }
1382
1383 if (size == 0) {
1384 error_setg(errp, "Unsupported PCI express capability version %d",
1385 version);
1386 return -EINVAL;
1387 }
1388
1389 ret = pci_add_capability2(pci_dev, PCI_CAP_ID_EXP, pos, size,
1390 &local_err);
1391 if (ret < 0) {
1392 error_propagate(errp, local_err);
1393 return ret;
1394 }
1395
1396 assigned_dev_setup_cap_read(dev, pos, size);
1397
1398 type = pci_get_word(pci_dev->config + pos + PCI_EXP_FLAGS);
1399 type = (type & PCI_EXP_FLAGS_TYPE) >> 4;
1400 if (type != PCI_EXP_TYPE_ENDPOINT &&
1401 type != PCI_EXP_TYPE_LEG_END && type != PCI_EXP_TYPE_RC_END) {
1402 error_setg(errp, "Device assignment only supports endpoint "
1403 "assignment, device type %d", type);
1404 return -EINVAL;
1405 }
1406
1407
1408
1409
1410
1411 devcap = pci_get_long(pci_dev->config + pos + PCI_EXP_DEVCAP);
1412 devcap &= ~PCI_EXP_DEVCAP_FLR;
1413 pci_set_long(pci_dev->config + pos + PCI_EXP_DEVCAP, devcap);
1414
1415
1416
1417
1418
1419 devctl = pci_get_word(pci_dev->config + pos + PCI_EXP_DEVCTL);
1420 devctl = (devctl & (PCI_EXP_DEVCTL_READRQ | PCI_EXP_DEVCTL_PAYLOAD)) |
1421 PCI_EXP_DEVCTL_RELAX_EN | PCI_EXP_DEVCTL_NOSNOOP_EN;
1422 pci_set_word(pci_dev->config + pos + PCI_EXP_DEVCTL, devctl);
1423 devctl = PCI_EXP_DEVCTL_BCR_FLR | PCI_EXP_DEVCTL_AUX_PME;
1424 pci_set_word(pci_dev->wmask + pos + PCI_EXP_DEVCTL, ~devctl);
1425
1426
1427 pci_set_word(pci_dev->config + pos + PCI_EXP_DEVSTA, 0);
1428
1429
1430 lnkcap = pci_get_long(pci_dev->config + pos + PCI_EXP_LNKCAP);
1431 lnkcap &= (PCI_EXP_LNKCAP_SLS | PCI_EXP_LNKCAP_MLW |
1432 PCI_EXP_LNKCAP_ASPMS | PCI_EXP_LNKCAP_L0SEL |
1433 PCI_EXP_LNKCAP_L1EL);
1434 pci_set_long(pci_dev->config + pos + PCI_EXP_LNKCAP, lnkcap);
1435
1436
1437
1438
1439 lnksta = pci_get_word(pci_dev->config + pos + PCI_EXP_LNKSTA);
1440 lnksta &= (PCI_EXP_LNKSTA_CLS | PCI_EXP_LNKSTA_NLW);
1441 pci_set_word(pci_dev->config + pos + PCI_EXP_LNKSTA, lnksta);
1442
1443 if (version >= 2) {
1444
1445 pci_set_long(pci_dev->config + pos + PCI_EXP_SLTCAP, 0);
1446 pci_set_word(pci_dev->config + pos + PCI_EXP_SLTCTL, 0);
1447 pci_set_word(pci_dev->config + pos + PCI_EXP_SLTSTA, 0);
1448
1449
1450 pci_set_word(pci_dev->config + pos + PCI_EXP_RTCTL, 0);
1451 pci_set_word(pci_dev->config + pos + PCI_EXP_RTCAP, 0);
1452 pci_set_long(pci_dev->config + pos + PCI_EXP_RTSTA, 0);
1453
1454
1455
1456 }
1457 }
1458
1459 pos = pci_find_cap_offset(pci_dev, PCI_CAP_ID_PCIX, 0);
1460 if (pos) {
1461 uint16_t cmd;
1462 uint32_t status;
1463
1464
1465 ret = pci_add_capability2(pci_dev, PCI_CAP_ID_PCIX, pos, 8,
1466 &local_err);
1467 if (ret < 0) {
1468 error_propagate(errp, local_err);
1469 return ret;
1470 }
1471
1472 assigned_dev_setup_cap_read(dev, pos, 8);
1473
1474
1475 cmd = pci_get_word(pci_dev->config + pos + PCI_X_CMD);
1476 cmd &= (PCI_X_CMD_DPERR_E | PCI_X_CMD_ERO | PCI_X_CMD_MAX_READ |
1477 PCI_X_CMD_MAX_SPLIT);
1478 pci_set_word(pci_dev->config + pos + PCI_X_CMD, cmd);
1479
1480
1481
1482 status = pci_get_long(pci_dev->config + pos + PCI_X_STATUS);
1483 status &= ~(PCI_X_STATUS_BUS | PCI_X_STATUS_DEVFN);
1484 status |= pci_requester_id(pci_dev);
1485 status &= ~(PCI_X_STATUS_SPL_DISC | PCI_X_STATUS_UNX_SPL |
1486 PCI_X_STATUS_SPL_ERR);
1487 pci_set_long(pci_dev->config + pos + PCI_X_STATUS, status);
1488 }
1489
1490 pos = pci_find_cap_offset(pci_dev, PCI_CAP_ID_VPD, 0);
1491 if (pos) {
1492
1493 ret = pci_add_capability2(pci_dev, PCI_CAP_ID_VPD, pos, 8,
1494 &local_err);
1495 if (ret < 0) {
1496 error_propagate(errp, local_err);
1497 return ret;
1498 }
1499
1500 assigned_dev_setup_cap_read(dev, pos, 8);
1501
1502
1503 assigned_dev_direct_config_write(dev, pos + 2, 6);
1504 }
1505
1506
1507 for (pos = 0; (pos = pci_find_cap_offset(pci_dev, PCI_CAP_ID_VNDR, pos));
1508 pos += PCI_CAP_LIST_NEXT) {
1509 uint8_t len = pci_get_byte(pci_dev->config + pos + PCI_CAP_FLAGS);
1510
1511 ret = pci_add_capability2(pci_dev, PCI_CAP_ID_VNDR, pos, len,
1512 &local_err);
1513 if (ret < 0) {
1514 error_propagate(errp, local_err);
1515 return ret;
1516 }
1517
1518 assigned_dev_setup_cap_read(dev, pos, len);
1519
1520
1521 assigned_dev_direct_config_write(dev, pos + 2, len - 2);
1522 }
1523
1524
1525
1526 if ((pci_get_word(pci_dev->config + PCI_STATUS) & PCI_STATUS_CAP_LIST) !=
1527 (assigned_dev_pci_read_byte(pci_dev, PCI_STATUS) &
1528 PCI_STATUS_CAP_LIST)) {
1529 dev->emulate_config_read[PCI_STATUS] |= PCI_STATUS_CAP_LIST;
1530 }
1531
1532 return 0;
1533}
1534
1535static uint64_t
1536assigned_dev_msix_mmio_read(void *opaque, hwaddr addr,
1537 unsigned size)
1538{
1539 AssignedDevice *adev = opaque;
1540 uint64_t val;
1541
1542 memcpy(&val, (void *)((uint8_t *)adev->msix_table + addr), size);
1543
1544 return val;
1545}
1546
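/*
 * Guest writes to the emulated MSI-X table.  When MSI-X is enabled and an
 * entry goes from masked to unmasked, either rebuild the whole vector
 * setup (if no KVM virq exists for it yet) or update the existing MSI
 * route with the new address/data.  Transitions to the masked state are
 * deliberately left alone by this legacy code path.
 */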
1547static void assigned_dev_msix_mmio_write(void *opaque, hwaddr addr,
1548 uint64_t val, unsigned size)
1549{
1550 AssignedDevice *adev = opaque;
1551 PCIDevice *pdev = &adev->dev;
1552 uint16_t ctrl;
1553 MSIXTableEntry orig;
1554 int i = addr >> 4;
1555
1556 if (i >= adev->msix_max) {
1557 return;
1558 }
1559
1560 ctrl = pci_get_word(pdev->config + pdev->msix_cap + PCI_MSIX_FLAGS);
1561
1562 DEBUG("write to MSI-X table offset 0x%lx, val 0x%lx\n", addr, val);
1563
1564 if (ctrl & PCI_MSIX_FLAGS_ENABLE) {
1565 orig = adev->msix_table[i];
1566 }
1567
1568 memcpy((uint8_t *)adev->msix_table + addr, &val, size);
1569
1570 if (ctrl & PCI_MSIX_FLAGS_ENABLE) {
1571 MSIXTableEntry *entry = &adev->msix_table[i];
1572
1573 if (!assigned_dev_msix_masked(&orig) &&
1574 assigned_dev_msix_masked(entry)) {
1575
1576
1577
1578
1579
1580
1581
1582
1583
1584
1585
1586 } else if (assigned_dev_msix_masked(&orig) &&
1587 !assigned_dev_msix_masked(entry)) {
1588
1589 if (i >= adev->msi_virq_nr || adev->msi_virq[i] < 0) {
1590
1591 assigned_dev_update_msix(pdev);
1592 return;
1593 } else {
1594
1595 MSIMessage msg;
1596 int ret;
1597
1598 msg.address = entry->addr_lo |
1599 ((uint64_t)entry->addr_hi << 32);
1600 msg.data = entry->data;
1601
1602 ret = kvm_irqchip_update_msi_route(kvm_state,
1603 adev->msi_virq[i], msg,
1604 pdev);
1605 if (ret) {
1606 error_report("Error updating irq routing entry (%d)", ret);
1607 }
1608 }
1609 }
1610 }
1611}
1612
1613static const MemoryRegionOps assigned_dev_msix_mmio_ops = {
1614 .read = assigned_dev_msix_mmio_read,
1615 .write = assigned_dev_msix_mmio_write,
1616 .endianness = DEVICE_NATIVE_ENDIAN,
1617 .valid = {
1618 .min_access_size = 4,
1619 .max_access_size = 8,
1620 },
1621 .impl = {
1622 .min_access_size = 4,
1623 .max_access_size = 8,
1624 },
1625};
1626
1627static void assigned_dev_msix_reset(AssignedDevice *dev)
1628{
1629 MSIXTableEntry *entry;
1630 int i;
1631
1632 if (!dev->msix_table) {
1633 return;
1634 }
1635
1636 memset(dev->msix_table, 0, MSIX_PAGE_SIZE);
1637
1638 for (i = 0, entry = dev->msix_table; i < dev->msix_max; i++, entry++) {
1639 entry->ctrl = cpu_to_le32(0x1);
1640 }
1641}
1642
1643static void assigned_dev_register_msix_mmio(AssignedDevice *dev, Error **errp)
1644{
1645 dev->msix_table = mmap(NULL, MSIX_PAGE_SIZE, PROT_READ|PROT_WRITE,
1646 MAP_ANONYMOUS|MAP_PRIVATE, 0, 0);
1647 if (dev->msix_table == MAP_FAILED) {
1648 error_setg_errno(errp, errno, "failed to allocate msix_table");
1649 dev->msix_table = NULL;
1650 return;
1651 }
1652
1653 assigned_dev_msix_reset(dev);
1654
1655 memory_region_init_io(&dev->mmio, OBJECT(dev), &assigned_dev_msix_mmio_ops,
1656 dev, "assigned-dev-msix", MSIX_PAGE_SIZE);
1657}
1658
1659static void assigned_dev_unregister_msix_mmio(AssignedDevice *dev)
1660{
1661 if (!dev->msix_table) {
1662 return;
1663 }
1664
1665 if (munmap(dev->msix_table, MSIX_PAGE_SIZE) == -1) {
1666 error_report("error unmapping msix_table! %s", strerror(errno));
1667 }
1668 dev->msix_table = NULL;
1669}
1670
1671static const VMStateDescription vmstate_assigned_device = {
1672 .name = "pci-assign",
1673 .unmigratable = 1,
1674};
1675
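/*
 * Device reset: make sure MSI/MSI-X are disabled and deassigned, ask the
 * host kernel to reset the function via the sysfs "reset" attribute if it
 * is available, and clear the command register so the device stops bus
 * mastering into guest memory.
 */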
1676static void reset_assigned_device(DeviceState *dev)
1677{
1678 PCIDevice *pci_dev = PCI_DEVICE(dev);
1679 AssignedDevice *adev = PCI_ASSIGN(pci_dev);
1680 char reset_file[64];
1681 const char reset[] = "1";
1682 int fd, ret;
1683
1684
1685
1686
1687
1688
1689
1690
1691 if (adev->assigned_irq_type == ASSIGNED_IRQ_MSIX) {
1692 uint16_t ctrl = pci_get_word(pci_dev->config +
1693 pci_dev->msix_cap + PCI_MSIX_FLAGS);
1694
1695 pci_set_word(pci_dev->config + pci_dev->msix_cap + PCI_MSIX_FLAGS,
1696 ctrl & ~PCI_MSIX_FLAGS_ENABLE);
1697 assigned_dev_update_msix(pci_dev);
1698 } else if (adev->assigned_irq_type == ASSIGNED_IRQ_MSI) {
1699 uint8_t ctrl = pci_get_byte(pci_dev->config +
1700 pci_dev->msi_cap + PCI_MSI_FLAGS);
1701
1702 pci_set_byte(pci_dev->config + pci_dev->msi_cap + PCI_MSI_FLAGS,
1703 ctrl & ~PCI_MSI_FLAGS_ENABLE);
1704 assigned_dev_update_msi(pci_dev);
1705 }
1706
1707 snprintf(reset_file, sizeof(reset_file),
1708 "/sys/bus/pci/devices/%04x:%02x:%02x.%01x/reset",
1709 adev->host.domain, adev->host.bus, adev->host.slot,
1710 adev->host.function);
1711
1712
1713
1714
1715
1716
1717
1718 fd = open(reset_file, O_WRONLY);
1719 if (fd != -1) {
1720 ret = write(fd, reset, strlen(reset));
1721 (void)ret;
1722 close(fd);
1723 }
1724
1725
1726
1727
1728
1729 assigned_dev_pci_write_config(pci_dev, PCI_COMMAND, 0, 1);
1730}
1731
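/*
 * Realize: read the host device's config space and BARs from sysfs, build
 * the emulated capability list, map the BARs, register the device with
 * KVM's legacy assignment API, and finally hook up INTx routing and the
 * option ROM.
 */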
1732static void assigned_realize(struct PCIDevice *pci_dev, Error **errp)
1733{
1734 AssignedDevice *dev = PCI_ASSIGN(pci_dev);
1735 uint8_t e_intx;
1736 int r;
1737 Error *local_err = NULL;
1738
1739 if (!kvm_enabled()) {
1740 error_setg(&local_err, "pci-assign requires KVM support");
1741 goto exit_with_error;
1742 }
1743
1744 if (!dev->host.domain && !dev->host.bus && !dev->host.slot &&
1745 !dev->host.function) {
1746 error_setg(&local_err, "no host device specified");
1747 goto exit_with_error;
1748 }
1749
1750
1751
1752
1753
1754 assigned_dev_emulate_config_read(dev, 0, PCI_CONFIG_SPACE_SIZE);
1755 assigned_dev_direct_config_read(dev, PCI_STATUS, 2);
1756 assigned_dev_direct_config_read(dev, PCI_REVISION_ID, 1);
1757 assigned_dev_direct_config_read(dev, PCI_CLASS_PROG, 3);
1758 assigned_dev_direct_config_read(dev, PCI_CACHE_LINE_SIZE, 1);
1759 assigned_dev_direct_config_read(dev, PCI_LATENCY_TIMER, 1);
1760 assigned_dev_direct_config_read(dev, PCI_BIST, 1);
1761 assigned_dev_direct_config_read(dev, PCI_CARDBUS_CIS, 4);
1762 assigned_dev_direct_config_read(dev, PCI_SUBSYSTEM_VENDOR_ID, 2);
1763 assigned_dev_direct_config_read(dev, PCI_SUBSYSTEM_ID, 2);
1764 assigned_dev_direct_config_read(dev, PCI_CAPABILITY_LIST + 1, 7);
1765 assigned_dev_direct_config_read(dev, PCI_MIN_GNT, 1);
1766 assigned_dev_direct_config_read(dev, PCI_MAX_LAT, 1);
1767 memcpy(dev->emulate_config_write, dev->emulate_config_read,
1768 sizeof(dev->emulate_config_read));
1769
1770 get_real_device(dev, &local_err);
1771 if (local_err) {
1772 goto out;
1773 }
1774
1775 if (assigned_device_pci_cap_init(pci_dev, &local_err) < 0) {
1776 goto out;
1777 }
1778
1779
1780 if (dev->cap.available & ASSIGNED_DEVICE_CAP_MSIX) {
1781 assigned_dev_register_msix_mmio(dev, &local_err);
1782 if (local_err) {
1783 goto out;
1784 }
1785 }
1786
1787
1788 assigned_dev_register_regions(dev->real_device.regions,
1789 dev->real_device.region_number, dev,
1790 &local_err);
1791 if (local_err) {
1792 goto out;
1793 }
1794
1795
1796 e_intx = dev->dev.config[PCI_INTERRUPT_PIN] - 1;
1797 dev->intpin = e_intx;
1798 dev->intx_route.mode = PCI_INTX_DISABLED;
1799 dev->intx_route.irq = -1;
1800
1801
1802 assign_device(dev, &local_err);
1803 if (local_err) {
1804 goto out;
1805 }
1806
1807
1808 r = assign_intx(dev, &local_err);
1809 if (r < 0) {
1810 goto assigned_out;
1811 }
1812
1813 assigned_dev_load_option_rom(dev);
1814
1815 return;
1816
1817assigned_out:
1818 deassign_device(dev);
1819
1820out:
1821 free_assigned_device(dev);
1822
1823exit_with_error:
1824 assert(local_err);
1825 error_propagate(errp, local_err);
1826}
1827
1828static void assigned_exitfn(struct PCIDevice *pci_dev)
1829{
1830 AssignedDevice *dev = PCI_ASSIGN(pci_dev);
1831
1832 deassign_device(dev);
1833 free_assigned_device(dev);
1834}
1835
1836static void assigned_dev_instance_init(Object *obj)
1837{
1838 PCIDevice *pci_dev = PCI_DEVICE(obj);
1839 AssignedDevice *d = PCI_ASSIGN(pci_dev);
1840
1841 device_add_bootindex_property(obj, &d->bootindex,
1842 "bootindex", NULL,
1843 &pci_dev->qdev, NULL);
1844}
1845
1846static Property assigned_dev_properties[] = {
1847 DEFINE_PROP_PCI_HOST_DEVADDR("host", AssignedDevice, host),
1848 DEFINE_PROP_BIT("prefer_msi", AssignedDevice, features,
1849 ASSIGNED_DEVICE_PREFER_MSI_BIT, false),
1850 DEFINE_PROP_BIT("share_intx", AssignedDevice, features,
1851 ASSIGNED_DEVICE_SHARE_INTX_BIT, true),
1852 DEFINE_PROP_STRING("configfd", AssignedDevice, configfd_name),
1853 DEFINE_PROP_END_OF_LIST(),
1854};
1855
1856static void assign_class_init(ObjectClass *klass, void *data)
1857{
1858 PCIDeviceClass *k = PCI_DEVICE_CLASS(klass);
1859 DeviceClass *dc = DEVICE_CLASS(klass);
1860
1861 k->realize = assigned_realize;
1862 k->exit = assigned_exitfn;
1863 k->config_read = assigned_dev_pci_read_config;
1864 k->config_write = assigned_dev_pci_write_config;
1865 dc->props = assigned_dev_properties;
1866 dc->vmsd = &vmstate_assigned_device;
1867 dc->reset = reset_assigned_device;
1868 set_bit(DEVICE_CATEGORY_MISC, dc->categories);
1869 dc->desc = "KVM-based PCI passthrough";
1870}
1871
1872static const TypeInfo assign_info = {
1873 .name = TYPE_PCI_ASSIGN,
1874 .parent = TYPE_PCI_DEVICE,
1875 .instance_size = sizeof(AssignedDevice),
1876 .class_init = assign_class_init,
1877 .instance_init = assigned_dev_instance_init,
1878};
1879
1880static void assign_register_types(void)
1881{
1882 type_register_static(&assign_info);
1883}
1884
1885type_init(assign_register_types)
1886
1887static void assigned_dev_load_option_rom(AssignedDevice *dev)
1888{
1889 int size = 0;
1890
1891 pci_assign_dev_load_option_rom(&dev->dev, OBJECT(dev), &size,
1892 dev->host.domain, dev->host.bus,
1893 dev->host.slot, dev->host.function);
1894
1895 if (!size) {
1896 error_report("pci-assign: Invalid ROM.");
1897 }
1898}
1899