#include "qemu/osdep.h"
#include "qapi/error.h"
#include <sys/ioctl.h>

#include "hw/pci/pci.h"
#include "hw/qdev-properties.h"
#include "hw/xen/xen.h"
#include "hw/i386/pc.h"
#include "hw/xen/xen-legacy-backend.h"
#include "xen_pt.h"
#include "qemu/range.h"
#include "exec/address-spaces.h"

#define XEN_PT_NR_IRQS (256)
static uint8_t xen_pt_mapped_machine_irq[XEN_PT_NR_IRQS] = {0};

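/* Log helper: prefixes messages with the device's bus:slot.func when a
 * device is given. */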
void xen_pt_log(const PCIDevice *d, const char *f, ...)
{
    va_list ap;

    va_start(ap, f);
    if (d) {
        fprintf(stderr, "[%02x:%02x.%d] ", pci_dev_bus_num(d),
                PCI_SLOT(d->devfn), PCI_FUNC(d->devfn));
    }
    vfprintf(stderr, f, ap);
    va_end(ap);
}

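/*
 * Validate a guest config space access: the offset must stay within the
 * standard 256-byte config space, the length must be 1, 2 or 4 bytes, and
 * the access must be naturally aligned.
 */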
static int xen_pt_pci_config_access_check(PCIDevice *d, uint32_t addr, int len)
{
    /* check offset range */
    if (addr > 0xFF) {
        XEN_PT_ERR(d, "Failed to access register with offset exceeding 0xFF. "
                   "(addr: 0x%02x, len: %d)\n", addr, len);
        return -1;
    }

    /* check access length */
    if ((len != 1) && (len != 2) && (len != 4)) {
        XEN_PT_ERR(d, "Failed to access register with invalid access length. "
                   "(addr: 0x%02x, len: %d)\n", addr, len);
        return -1;
    }

    /* check access alignment */
    if (addr & (len - 1)) {
        XEN_PT_ERR(d, "Failed to access register with invalid access size "
                   "alignment. (addr: 0x%02x, len: %d)\n", addr, len);
        return -1;
    }

    return 0;
}

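/* Convert a BAR offset in config space to a BAR index (PCI_ROM_SLOT for the
 * expansion ROM BAR). */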
int xen_pt_bar_offset_to_index(uint32_t offset)
{
    int index = 0;

    /* check Expansion ROM BAR */
    if (offset == PCI_ROM_ADDRESS) {
        return PCI_ROM_SLOT;
    }

    /* calculate BAR index */
    index = (offset - PCI_BASE_ADDRESS_0) >> 2;
    if (index >= PCI_NUM_REGIONS) {
        return -1;
    }

    return index;
}

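/*
 * Config space read handler: read the real device's config space, then let
 * the emulated registers that overlap the access patch their bytes into the
 * value returned to the guest.
 */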
static uint32_t xen_pt_pci_read_config(PCIDevice *d, uint32_t addr, int len)
{
    XenPCIPassthroughState *s = XEN_PT_DEVICE(d);
    uint32_t val = 0;
    XenPTRegGroup *reg_grp_entry = NULL;
    XenPTReg *reg_entry = NULL;
    int rc = 0;
    int emul_len = 0;
    uint32_t find_addr = addr;

    if (xen_pt_pci_config_access_check(d, addr, len)) {
        goto exit;
    }

    /* find register group entry */
    reg_grp_entry = xen_pt_find_reg_grp(s, addr);
    if (reg_grp_entry) {
        /* check 0-Hardwired register group */
        if (reg_grp_entry->reg_grp->grp_type == XEN_PT_GRP_TYPE_HARDWIRED) {
            /* no need to emulate, just return 0 */
            val = 0;
            goto exit;
        }
    }

    /* read the real device's register value */
    rc = xen_host_pci_get_block(&s->real_device, addr, (uint8_t *)&val, len);
    if (rc < 0) {
        XEN_PT_ERR(d, "pci_read_block failed. return value: %d.\n", rc);
        memset(&val, 0xff, len);
    }

    /* just return the real register value for
     * passthrough type register group */
    if (reg_grp_entry == NULL) {
        goto exit;
    }

    /* adjust the read value to the appropriate CFC-CFF window */
    val <<= (addr & 3) << 3;
    emul_len = len;

    /* loop around the guest requested size */
    while (emul_len > 0) {
        /* find register entry to be emulated */
        reg_entry = xen_pt_find_reg(reg_grp_entry, find_addr);
        if (reg_entry) {
            XenPTRegInfo *reg = reg_entry->reg;
            uint32_t real_offset = reg_grp_entry->base_offset + reg->offset;
            uint32_t valid_mask = 0xFFFFFFFF >> ((4 - emul_len) << 3);
            uint8_t *ptr_val = NULL;

            valid_mask <<= (find_addr - real_offset) << 3;
            ptr_val = (uint8_t *)&val + (real_offset & 3);

            /* do emulation based on register size */
            switch (reg->size) {
            case 1:
                if (reg->u.b.read) {
                    rc = reg->u.b.read(s, reg_entry, ptr_val, valid_mask);
                }
                break;
            case 2:
                if (reg->u.w.read) {
                    rc = reg->u.w.read(s, reg_entry,
                                       (uint16_t *)ptr_val, valid_mask);
                }
                break;
            case 4:
                if (reg->u.dw.read) {
                    rc = reg->u.dw.read(s, reg_entry,
                                        (uint32_t *)ptr_val, valid_mask);
                }
                break;
            }

            if (rc < 0) {
                xen_shutdown_fatal_error("Internal error: Invalid read "
                                         "emulation. (%s, rc: %d)\n",
                                         __func__, rc);
                return 0;
            }

            /* calculate next address to find */
            emul_len -= reg->size;
            if (emul_len > 0) {
                find_addr = real_offset + reg->size;
            }
        } else {
            /* nothing to do for a passthrough type register,
             * continue with the next byte */
            emul_len--;
            find_addr++;
        }
    }

    /* need to shift back before returning to the PCI bus emulator */
    val >>= ((addr & 3) << 3);

exit:
    XEN_PT_LOG_CONFIG(d, addr, val, len);
    return val;
}

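/*
 * Config space write handler: run the emulated registers' write handlers to
 * decide which bits may reach the hardware, then write the surviving bytes
 * (wb_mask) back to the real device.  Writes to unknown fields are blocked
 * unless the "permissive" property is set.
 */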
static void xen_pt_pci_write_config(PCIDevice *d, uint32_t addr,
                                    uint32_t val, int len)
{
    XenPCIPassthroughState *s = XEN_PT_DEVICE(d);
    int index = 0;
    XenPTRegGroup *reg_grp_entry = NULL;
    int rc = 0;
    uint32_t read_val = 0, wb_mask;
    int emul_len = 0;
    XenPTReg *reg_entry = NULL;
    uint32_t find_addr = addr;
    XenPTRegInfo *reg = NULL;
    bool wp_flag = false;

    if (xen_pt_pci_config_access_check(d, addr, len)) {
        return;
    }

    XEN_PT_LOG_CONFIG(d, addr, val, len);

    /* check for a write to an unused BAR register */
    index = xen_pt_bar_offset_to_index(addr);
    if ((index >= 0) && (val != 0)) {
        uint32_t chk = val;

        if (index == PCI_ROM_SLOT) {
            chk |= (uint32_t)~PCI_ROM_ADDRESS_MASK;
        }

        if ((chk != XEN_PT_BAR_ALLF) &&
            (s->bases[index].bar_flag == XEN_PT_BAR_FLAG_UNUSED)) {
            XEN_PT_WARN(d, "Guest attempt to set address to unused "
                        "Base Address Register. (addr: 0x%02x, len: %d)\n",
                        addr, len);
        }
    }

    /* find register group entry */
    reg_grp_entry = xen_pt_find_reg_grp(s, addr);
    if (reg_grp_entry) {
        /* check 0-Hardwired register group */
        if (reg_grp_entry->reg_grp->grp_type == XEN_PT_GRP_TYPE_HARDWIRED) {
            /* ignore silently */
            XEN_PT_WARN(d, "Access to 0-Hardwired register. "
                        "(addr: 0x%02x, len: %d)\n", addr, len);
            return;
        }
    }

    rc = xen_host_pci_get_block(&s->real_device, addr,
                                (uint8_t *)&read_val, len);
    if (rc < 0) {
        XEN_PT_ERR(d, "pci_read_block failed. return value: %d.\n", rc);
        memset(&read_val, 0xff, len);
        wb_mask = 0;
    } else {
        wb_mask = 0xFFFFFFFF >> ((4 - len) << 3);
    }

    /* passthrough type register group: write back unchanged (unless blocked
     * in non-permissive mode) */
    if (reg_grp_entry == NULL) {
        if (!s->permissive) {
            wb_mask = 0;
            wp_flag = true;
        }
        goto out;
    }

    memory_region_transaction_begin();
    pci_default_write_config(d, addr, val, len);

    /* adjust the read and write values to the appropriate CFC-CFF window */
    read_val <<= (addr & 3) << 3;
    val <<= (addr & 3) << 3;
    emul_len = len;

    /* loop around the guest requested size */
    while (emul_len > 0) {
        /* find register entry to be emulated */
        reg_entry = xen_pt_find_reg(reg_grp_entry, find_addr);
        if (reg_entry) {
            reg = reg_entry->reg;
            uint32_t real_offset = reg_grp_entry->base_offset + reg->offset;
            uint32_t valid_mask = 0xFFFFFFFF >> ((4 - emul_len) << 3);
            uint8_t *ptr_val = NULL;
            uint32_t wp_mask = reg->emu_mask | reg->ro_mask;

            valid_mask <<= (find_addr - real_offset) << 3;
            ptr_val = (uint8_t *)&val + (real_offset & 3);
            if (!s->permissive) {
                wp_mask |= reg->res_mask;
            }
            if (wp_mask == (0xFFFFFFFF >> ((4 - reg->size) << 3))) {
                wb_mask &= ~((wp_mask >> ((find_addr - real_offset) << 3))
                             << ((len - emul_len) << 3));
            }

            /* do emulation based on register size */
            switch (reg->size) {
            case 1:
                if (reg->u.b.write) {
                    rc = reg->u.b.write(s, reg_entry, ptr_val,
                                        read_val >> ((real_offset & 3) << 3),
                                        valid_mask);
                }
                break;
            case 2:
                if (reg->u.w.write) {
                    rc = reg->u.w.write(s, reg_entry, (uint16_t *)ptr_val,
                                        (read_val >> ((real_offset & 3) << 3)),
                                        valid_mask);
                }
                break;
            case 4:
                if (reg->u.dw.write) {
                    rc = reg->u.dw.write(s, reg_entry, (uint32_t *)ptr_val,
                                         (read_val >> ((real_offset & 3) << 3)),
                                         valid_mask);
                }
                break;
            }

            if (rc < 0) {
                xen_shutdown_fatal_error("Internal error: Invalid write"
                                         " emulation. (%s, rc: %d)\n",
                                         __func__, rc);
                return;
            }

            /* calculate next address to find */
            emul_len -= reg->size;
            if (emul_len > 0) {
                find_addr = real_offset + reg->size;
            }
        } else {
            /* nothing to do for a passthrough type register,
             * continue with the next byte */
            if (!s->permissive) {
                wb_mask &= ~(0xff << ((len - emul_len) << 3));
                /* BAR offsets also land here; don't raise the permissive
                 * warning for them, bogus BAR writes were already warned
                 * about above. */
                if (index < 0) {
                    wp_flag = true;
                }
            }
            emul_len--;
            find_addr++;
        }
    }

    /* need to shift back before writing to the real device */
    val >>= (addr & 3) << 3;

    memory_region_transaction_commit();

out:
    if (wp_flag && !s->permissive_warned) {
        s->permissive_warned = true;
        xen_pt_log(d, "Write-back to unknown field 0x%02x (partially) inhibited (0x%0*x)\n",
                   addr, len * 2, wb_mask);
        xen_pt_log(d, "If the device doesn't work, try enabling permissive mode\n");
        xen_pt_log(d, "(unsafe) and if it helps report the problem to xen-devel\n");
    }
    for (index = 0; wb_mask; index += len) {
        /* unknown regs are passed through */
        while (!(wb_mask & 0xff)) {
            index++;
            wb_mask >>= 8;
        }
        len = 0;
        do {
            len++;
            wb_mask >>= 8;
        } while (wb_mask & 0xff);
        rc = xen_host_pci_set_block(&s->real_device, addr + index,
                                    (uint8_t *)&val + index, len);

        if (rc < 0) {
            XEN_PT_ERR(d, "xen_host_pci_set_block failed. return value: %d.\n",
                       rc);
        }
    }
}

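/*
 * Guest BAR accesses are mapped directly onto the device by Xen, so these
 * MemoryRegionOps callbacks should never run; they only log an error.
 */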
static uint64_t xen_pt_bar_read(void *o, hwaddr addr,
                                unsigned size)
{
    PCIDevice *d = o;

    XEN_PT_ERR(d, "Should not read BAR through QEMU. @0x"TARGET_FMT_plx"\n",
               addr);
    return 0;
}

static void xen_pt_bar_write(void *o, hwaddr addr, uint64_t val,
                             unsigned size)
{
    PCIDevice *d = o;

    XEN_PT_ERR(d, "Should not write BAR through QEMU. @0x"TARGET_FMT_plx"\n",
               addr);
}

static const MemoryRegionOps ops = {
    .endianness = DEVICE_NATIVE_ENDIAN,
    .read = xen_pt_bar_read,
    .write = xen_pt_bar_write,
};

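/*
 * Register the host device's BARs (and expansion ROM, if present) as QEMU
 * PCI regions, and accumulate the PCI_COMMAND bits (I/O, memory) implied by
 * the host BARs into *cmd.
 */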
static int xen_pt_register_regions(XenPCIPassthroughState *s, uint16_t *cmd)
{
    int i = 0;
    XenHostPCIDevice *d = &s->real_device;

    /* Register PIO/MMIO BARs */
    for (i = 0; i < PCI_ROM_SLOT; i++) {
        XenHostPCIIORegion *r = &d->io_regions[i];
        uint8_t type;

        if (r->base_addr == 0 || r->size == 0) {
            continue;
        }

        s->bases[i].access.u = r->base_addr;

        if (r->type & XEN_HOST_PCI_REGION_TYPE_IO) {
            type = PCI_BASE_ADDRESS_SPACE_IO;
            *cmd |= PCI_COMMAND_IO;
        } else {
            type = PCI_BASE_ADDRESS_SPACE_MEMORY;
            if (r->type & XEN_HOST_PCI_REGION_TYPE_PREFETCH) {
                type |= PCI_BASE_ADDRESS_MEM_PREFETCH;
            }
            if (r->type & XEN_HOST_PCI_REGION_TYPE_MEM_64) {
                type |= PCI_BASE_ADDRESS_MEM_TYPE_64;
            }
            *cmd |= PCI_COMMAND_MEMORY;
        }

        memory_region_init_io(&s->bar[i], OBJECT(s), &ops, &s->dev,
                              "xen-pci-pt-bar", r->size);
        pci_register_bar(&s->dev, i, type, &s->bar[i]);

        XEN_PT_LOG(&s->dev, "IO region %i registered (size=0x%08"PRIx64
                   " base_addr=0x%08"PRIx64" type: %#x)\n",
                   i, r->size, r->base_addr, type);
    }

    /* Register expansion ROM address */
    if (d->rom.base_addr && d->rom.size) {
        uint32_t bar_data = 0;

        /* Restore the ROM BAR address if it was cleared on the host. */
        if (xen_host_pci_get_long(d, PCI_ROM_ADDRESS, &bar_data)) {
            return 0;
        }
        if ((bar_data & PCI_ROM_ADDRESS_MASK) == 0) {
            bar_data |= d->rom.base_addr & PCI_ROM_ADDRESS_MASK;
            xen_host_pci_set_long(d, PCI_ROM_ADDRESS, bar_data);
        }

        s->bases[PCI_ROM_SLOT].access.maddr = d->rom.base_addr;

        memory_region_init_io(&s->rom, OBJECT(s), &ops, &s->dev,
                              "xen-pci-pt-rom", d->rom.size);
        pci_register_bar(&s->dev, PCI_ROM_SLOT, PCI_BASE_ADDRESS_MEM_PREFETCH,
                         &s->rom);

        XEN_PT_LOG(&s->dev, "Expansion ROM registered (size=0x%08"PRIx64
                   " base_addr=0x%08"PRIx64")\n",
                   d->rom.size, d->rom.base_addr);
    }

    xen_pt_register_vga_regions(d);
    return 0;
}

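/* Map a MemoryRegion back to the BAR index it was registered for,
 * or -1 if it is not one of this device's regions. */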
static int xen_pt_bar_from_region(XenPCIPassthroughState *s, MemoryRegion *mr)
{
    int i = 0;

    for (i = 0; i < PCI_NUM_REGIONS - 1; i++) {
        if (mr == &s->bar[i]) {
            return i;
        }
    }
    if (mr == &s->rom) {
        return PCI_ROM_SLOT;
    }
    return -1;
}

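/*
 * Check whether the region described by CheckBarArgs (addr, size, type)
 * overlaps an I/O region of another PCI device on the bus; used as a
 * pci_for_each_device() callback.
 */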
struct CheckBarArgs {
    XenPCIPassthroughState *s;
    pcibus_t addr;
    pcibus_t size;
    uint8_t type;
    bool rc;
};

static void xen_pt_check_bar_overlap(PCIBus *bus, PCIDevice *d, void *opaque)
{
    struct CheckBarArgs *arg = opaque;
    XenPCIPassthroughState *s = arg->s;
    uint8_t type = arg->type;
    int i;

    /* skip ourselves */
    if (d->devfn == s->dev.devfn) {
        return;
    }

    /* check for overlap with this device's regions */
    for (i = 0; i < PCI_NUM_REGIONS; i++) {
        const PCIIORegion *r = &d->io_regions[i];

        if (!r->size) {
            continue;
        }
        if ((type & PCI_BASE_ADDRESS_SPACE_IO)
            != (r->type & PCI_BASE_ADDRESS_SPACE_IO)) {
            continue;
        }

        if (ranges_overlap(arg->addr, arg->size, r->addr, r->size)) {
            XEN_PT_WARN(&s->dev,
                        "Overlapped to device [%02x:%02x.%d] Region: %i"
                        " (addr: %#"FMT_PCIBUS", len: %#"FMT_PCIBUS")\n",
                        pci_bus_num(bus), PCI_SLOT(d->devfn),
                        PCI_FUNC(d->devfn), i, r->addr, r->size);
            arg->rc = true;
        }
    }
}

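/*
 * Called from the memory/io listeners when a section belonging to one of
 * this device's BARs (or the MSI-X table) is mapped or unmapped: set up or
 * tear down the Xen ioport/memory mapping between guest and machine
 * addresses accordingly.
 */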
static void xen_pt_region_update(XenPCIPassthroughState *s,
                                 MemoryRegionSection *sec, bool adding)
{
    PCIDevice *d = &s->dev;
    MemoryRegion *mr = sec->mr;
    int bar = -1;
    int rc;
    int op = adding ? DPCI_ADD_MAPPING : DPCI_REMOVE_MAPPING;
    struct CheckBarArgs args = {
        .s = s,
        .addr = sec->offset_within_address_space,
        .size = int128_get64(sec->size),
        .rc = false,
    };

    bar = xen_pt_bar_from_region(s, mr);
    if (bar == -1 && (!s->msix || &s->msix->mmio != mr)) {
        return;
    }

    if (s->msix && &s->msix->mmio == mr) {
        if (adding) {
            s->msix->mmio_base_addr = sec->offset_within_address_space;
            rc = xen_pt_msix_update_remap(s, s->msix->bar_index);
        }
        return;
    }

    args.type = d->io_regions[bar].type;
    pci_for_each_device(pci_get_bus(d), pci_dev_bus_num(d),
                        xen_pt_check_bar_overlap, &args);
    if (args.rc) {
        XEN_PT_WARN(d, "Region: %d (addr: %#"FMT_PCIBUS
                    ", len: %#"FMT_PCIBUS") is overlapped.\n",
                    bar, sec->offset_within_address_space,
                    int128_get64(sec->size));
    }

    if (d->io_regions[bar].type & PCI_BASE_ADDRESS_SPACE_IO) {
        uint32_t guest_port = sec->offset_within_address_space;
        uint32_t machine_port = s->bases[bar].access.pio_base;
        uint32_t size = int128_get64(sec->size);

        rc = xc_domain_ioport_mapping(xen_xc, xen_domid,
                                      guest_port, machine_port, size,
                                      op);
        if (rc) {
            XEN_PT_ERR(d, "%s ioport mapping failed! (err: %i)\n",
                       adding ? "create new" : "remove old", errno);
        }
    } else {
        pcibus_t guest_addr = sec->offset_within_address_space;
        pcibus_t machine_addr = s->bases[bar].access.maddr
            + sec->offset_within_region;
        pcibus_t size = int128_get64(sec->size);

        rc = xc_domain_memory_mapping(xen_xc, xen_domid,
                                      XEN_PFN(guest_addr + XC_PAGE_SIZE - 1),
                                      XEN_PFN(machine_addr + XC_PAGE_SIZE - 1),
                                      XEN_PFN(size + XC_PAGE_SIZE - 1),
                                      op);
        if (rc) {
            XEN_PT_ERR(d, "%s mem mapping failed! (err: %i)\n",
                       adding ? "create new" : "remove old", errno);
        }
    }
}

static void xen_pt_region_add(MemoryListener *l, MemoryRegionSection *sec)
{
    XenPCIPassthroughState *s = container_of(l, XenPCIPassthroughState,
                                             memory_listener);

    memory_region_ref(sec->mr);
    xen_pt_region_update(s, sec, true);
}

static void xen_pt_region_del(MemoryListener *l, MemoryRegionSection *sec)
{
    XenPCIPassthroughState *s = container_of(l, XenPCIPassthroughState,
                                             memory_listener);

    xen_pt_region_update(s, sec, false);
    memory_region_unref(sec->mr);
}

static void xen_pt_io_region_add(MemoryListener *l, MemoryRegionSection *sec)
{
    XenPCIPassthroughState *s = container_of(l, XenPCIPassthroughState,
                                             io_listener);

    memory_region_ref(sec->mr);
    xen_pt_region_update(s, sec, true);
}

static void xen_pt_io_region_del(MemoryListener *l, MemoryRegionSection *sec)
{
    XenPCIPassthroughState *s = container_of(l, XenPCIPassthroughState,
                                             io_listener);

    xen_pt_region_update(s, sec, false);
    memory_region_unref(sec->mr);
}

static const MemoryListener xen_pt_memory_listener = {
    .region_add = xen_pt_region_add,
    .region_del = xen_pt_region_del,
    .priority = 10,
};

static const MemoryListener xen_pt_io_listener = {
    .region_add = xen_pt_io_region_add,
    .region_del = xen_pt_io_region_del,
    .priority = 10,
};

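/* IGD passthrough: create the special ISA bridge on the guest bus, keyed off
 * the passed-through GPU's device ID. */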
static void
xen_igd_passthrough_isa_bridge_create(XenPCIPassthroughState *s,
                                      XenHostPCIDevice *dev)
{
    uint16_t gpu_dev_id;
    PCIDevice *d = &s->dev;

    gpu_dev_id = dev->device_id;
    igd_passthrough_isa_bridge_create(pci_get_bus(d), gpu_dev_id);
}

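/*
 * Tear down the passthrough device: unbind and unmap the INTx pirq (the
 * shared mapping is refcounted in xen_pt_mapped_machine_irq), disable
 * MSI/MSI-X, delete the emulated config space, and release the host device.
 */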
static void xen_pt_destroy(PCIDevice *d)
{
    XenPCIPassthroughState *s = XEN_PT_DEVICE(d);
    XenHostPCIDevice *host_dev = &s->real_device;
    uint8_t machine_irq = s->machine_irq;
    uint8_t intx;
    int rc;

    if (machine_irq && !xen_host_pci_device_closed(&s->real_device)) {
        intx = xen_pt_pci_intx(s);
        rc = xc_domain_unbind_pt_irq(xen_xc, xen_domid, machine_irq,
                                     PT_IRQ_TYPE_PCI,
                                     pci_dev_bus_num(d),
                                     PCI_SLOT(s->dev.devfn),
                                     intx,
                                     0 /* isa_irq */);
        if (rc < 0) {
            XEN_PT_ERR(d, "unbinding of interrupt INT%c failed."
                       " (machine irq: %i, err: %d)"
                       " But bravely continuing on..\n",
                       'a' + intx, machine_irq, errno);
        }
    }

    /* disable MSI/MSI-X before releasing the device */
    if (s->msi) {
        xen_pt_msi_disable(s);
    }
    if (s->msix) {
        xen_pt_msix_disable(s);
    }

    if (machine_irq) {
        xen_pt_mapped_machine_irq[machine_irq]--;

        if (xen_pt_mapped_machine_irq[machine_irq] == 0) {
            rc = xc_physdev_unmap_pirq(xen_xc, xen_domid, machine_irq);

            if (rc < 0) {
                XEN_PT_ERR(d, "unmapping of interrupt %i failed. (err: %d)"
                           " But bravely continuing on..\n",
                           machine_irq, errno);
            }
        }
        s->machine_irq = 0;
    }

    /* delete all emulated config registers */
    xen_pt_config_delete(s);

    xen_pt_unregister_vga_regions(host_dev);

    if (s->listener_set) {
        memory_listener_unregister(&s->memory_listener);
        memory_listener_unregister(&s->io_listener);
        s->listener_set = false;
    }
    if (!xen_host_pci_device_closed(&s->real_device)) {
        xen_host_pci_device_put(&s->real_device);
    }
}

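/*
 * Realize handler: open the host PCI device, build the emulated config
 * space, register BARs, and map/bind the INTx interrupt via pirq.  On
 * failure the device is torn down again with xen_pt_destroy().
 */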
static void xen_pt_realize(PCIDevice *d, Error **errp)
{
    XenPCIPassthroughState *s = XEN_PT_DEVICE(d);
    int i, rc = 0;
    uint8_t machine_irq = 0, scratch;
    uint16_t cmd = 0;
    int pirq = XEN_PT_UNASSIGNED_PIRQ;
    Error *err = NULL;

    /* register the real device */
    XEN_PT_LOG(d, "Assigning real physical device %02x:%02x.%d"
               " to devfn %#x\n",
               s->hostaddr.bus, s->hostaddr.slot, s->hostaddr.function,
               s->dev.devfn);

    xen_host_pci_device_get(&s->real_device,
                            s->hostaddr.domain, s->hostaddr.bus,
                            s->hostaddr.slot, s->hostaddr.function,
                            &err);
    if (err) {
        error_append_hint(&err, "Failed to \"open\" the real pci device");
        error_propagate(errp, err);
        return;
    }

    s->is_virtfn = s->real_device.is_virtfn;
    if (s->is_virtfn) {
        XEN_PT_LOG(d, "%04x:%02x:%02x.%d is a SR-IOV Virtual Function\n",
                   s->real_device.domain, s->real_device.bus,
                   s->real_device.dev, s->real_device.func);
    }

    /* initialize the virtualized PCI configuration space (256 bytes) */
    memset(d->config, 0, PCI_CONFIG_SPACE_SIZE);

    s->memory_listener = xen_pt_memory_listener;
    s->io_listener = xen_pt_io_listener;

    /* Set up the VGA BIOS for passthrough GFX */
    if ((s->real_device.domain == 0) && (s->real_device.bus == 0) &&
        (s->real_device.dev == 2) && (s->real_device.func == 0)) {
        if (!is_igd_vga_passthrough(&s->real_device)) {
            error_setg(errp, "Need to enable igd-passthru if you're trying"
                       " to passthrough IGD GFX");
            xen_host_pci_device_put(&s->real_device);
            return;
        }

        xen_pt_setup_vga(s, &s->real_device, &err);
        if (err) {
            error_append_hint(&err, "Setup VGA BIOS of passthrough"
                              " GFX failed");
            error_propagate(errp, err);
            xen_host_pci_device_put(&s->real_device);
            return;
        }

        /* Register the ISA bridge for passthrough GFX. */
        xen_igd_passthrough_isa_bridge_create(s, &s->real_device);
    }

    /* Handle the real device's MMIO/PIO BARs */
    xen_pt_register_regions(s, &cmd);

    /* reinitialize each config register to be emulated */
    xen_pt_config_init(s, &err);
    if (err) {
        error_append_hint(&err, "PCI Config space initialisation failed");
        error_propagate(errp, err);
        rc = -1;
        goto err_out;
    }

    /* Bind the interrupt */
    rc = xen_host_pci_get_byte(&s->real_device, PCI_INTERRUPT_PIN, &scratch);
    if (rc) {
        error_setg_errno(errp, errno, "Failed to read PCI_INTERRUPT_PIN");
        goto err_out;
    }
    if (!scratch) {
        XEN_PT_LOG(d, "no pin interrupt\n");
        goto out;
    }

    machine_irq = s->real_device.irq;
    if (machine_irq == 0) {
        XEN_PT_LOG(d, "machine irq is 0\n");
        cmd |= PCI_COMMAND_INTX_DISABLE;
        goto out;
    }

    rc = xc_physdev_map_pirq(xen_xc, xen_domid, machine_irq, &pirq);
    if (rc < 0) {
        error_setg_errno(errp, errno, "Mapping machine irq %u to"
                         " pirq %i failed", machine_irq, pirq);

        /* Disable PCI INTx assertion */
        cmd |= PCI_COMMAND_INTX_DISABLE;
        machine_irq = 0;
        s->machine_irq = 0;
    } else {
        machine_irq = pirq;
        s->machine_irq = pirq;
        xen_pt_mapped_machine_irq[machine_irq]++;
    }

    /* bind machine_irq to the device */
    if (machine_irq != 0) {
        uint8_t e_intx = xen_pt_pci_intx(s);

        rc = xc_domain_bind_pt_pci_irq(xen_xc, xen_domid, machine_irq,
                                       pci_dev_bus_num(d),
                                       PCI_SLOT(d->devfn),
                                       e_intx);
        if (rc < 0) {
            error_setg_errno(errp, errno, "Binding of interrupt %u failed",
                             e_intx);

            /* Disable PCI INTx assertion */
            cmd |= PCI_COMMAND_INTX_DISABLE;
            xen_pt_mapped_machine_irq[machine_irq]--;

            if (xen_pt_mapped_machine_irq[machine_irq] == 0) {
                if (xc_physdev_unmap_pirq(xen_xc, xen_domid, machine_irq)) {
                    error_setg_errno(errp, errno, "Unmapping of machine"
                                     " interrupt %u failed", machine_irq);
                }
            }
            s->machine_irq = 0;
        }
    }

out:
    if (cmd) {
        uint16_t val;

        rc = xen_host_pci_get_word(&s->real_device, PCI_COMMAND, &val);
        if (rc) {
            error_setg_errno(errp, errno, "Failed to read PCI_COMMAND");
            goto err_out;
        } else {
            val |= cmd;
            rc = xen_host_pci_set_word(&s->real_device, PCI_COMMAND, val);
            if (rc) {
                error_setg_errno(errp, errno, "Failed to write PCI_COMMAND"
                                 " val = 0x%x", val);
                goto err_out;
            }
        }
    }

    memory_listener_register(&s->memory_listener, &address_space_memory);
    memory_listener_register(&s->io_listener, &address_space_io);
    s->listener_set = true;
    XEN_PT_LOG(d,
               "Real physical device %02x:%02x.%d registered successfully\n",
               s->hostaddr.bus, s->hostaddr.slot, s->hostaddr.function);

    return;

err_out:
    for (i = 0; i < PCI_ROM_SLOT; i++) {
        object_unparent(OBJECT(&s->bar[i]));
    }
    object_unparent(OBJECT(&s->rom));

    xen_pt_destroy(d);
    assert(rc);
}

static void xen_pt_unregister_device(PCIDevice *d)
{
    xen_pt_destroy(d);
}

static Property xen_pci_passthrough_properties[] = {
    DEFINE_PROP_PCI_HOST_DEVADDR("hostaddr", XenPCIPassthroughState, hostaddr),
    DEFINE_PROP_BOOL("permissive", XenPCIPassthroughState, permissive, false),
    DEFINE_PROP_END_OF_LIST(),
};

static void xen_pci_passthrough_instance_init(Object *obj)
{
    /* Mark the device as PCI Express capable by default. */
    PCI_DEVICE(obj)->cap_present |= QEMU_PCI_CAP_EXPRESS;
}

static void xen_pci_passthrough_class_init(ObjectClass *klass, void *data)
{
    DeviceClass *dc = DEVICE_CLASS(klass);
    PCIDeviceClass *k = PCI_DEVICE_CLASS(klass);

    k->realize = xen_pt_realize;
    k->exit = xen_pt_unregister_device;
    k->config_read = xen_pt_pci_read_config;
    k->config_write = xen_pt_pci_write_config;
    set_bit(DEVICE_CATEGORY_MISC, dc->categories);
    dc->desc = "Assign a host PCI device with Xen";
    dc->props = xen_pci_passthrough_properties;
}

static void xen_pci_passthrough_finalize(Object *obj)
{
    XenPCIPassthroughState *s = XEN_PT_DEVICE(obj);

    xen_pt_msix_delete(s);
}

static const TypeInfo xen_pci_passthrough_info = {
    .name = TYPE_XEN_PT_DEVICE,
    .parent = TYPE_PCI_DEVICE,
    .instance_size = sizeof(XenPCIPassthroughState),
    .instance_finalize = xen_pci_passthrough_finalize,
    .class_init = xen_pci_passthrough_class_init,
    .instance_init = xen_pci_passthrough_instance_init,
    .interfaces = (InterfaceInfo[]) {
        { INTERFACE_CONVENTIONAL_PCI_DEVICE },
        { INTERFACE_PCIE_DEVICE },
        { },
    },
};

static void xen_pci_passthrough_register_types(void)
{
    type_register_static(&xen_pci_passthrough_info);
}

type_init(xen_pci_passthrough_register_types)