qemu/hw/vfio/pci.h
/*
 * vfio based device assignment support - PCI devices
 *
 * Copyright Red Hat, Inc. 2012-2015
 *
 * Authors:
 *  Alex Williamson <alex.williamson@redhat.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2.  See
 * the COPYING file in the top-level directory.
 */
#ifndef HW_VFIO_VFIO_PCI_H
#define HW_VFIO_VFIO_PCI_H

#include "exec/memory.h"
#include "hw/pci/pci.h"
#include "hw/vfio/vfio-common.h"
#include "qemu/event_notifier.h"
#include "qemu/queue.h"
#include "qemu/timer.h"
#include "qom/object.h"
#include "sysemu/kvm.h"

#define PCI_ANY_ID (~0)

struct VFIOPCIDevice;

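/*
 * ioeventfd installed by a quirk to accelerate a frequently written device
 * register.  Roughly: a KVM ioeventfd matches the guest write in the kernel
 * and, when the 'vfio' flag is set, the write is also completed in the
 * kernel via the vfio driver's VFIO_DEVICE_IOEVENTFD interface, so the hot
 * path need not bounce through QEMU.
 */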
typedef struct VFIOIOEventFD {
    QLIST_ENTRY(VFIOIOEventFD) next;
    MemoryRegion *mr;
    hwaddr addr;
    unsigned size;
    uint64_t data;
    EventNotifier e;
    VFIORegion *region;
    hwaddr region_addr;
    bool dynamic; /* Added at runtime, removed on device reset */
    bool vfio;
} VFIOIOEventFD;

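/*
 * A device-specific quirk: one or more MemoryRegions overlaid on a BAR or
 * VGA range to intercept and adjust problematic accesses, plus any
 * ioeventfds the quirk installs and an optional reset hook.
 */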
typedef struct VFIOQuirk {
    QLIST_ENTRY(VFIOQuirk) next;
    void *data;
    QLIST_HEAD(, VFIOIOEventFD) ioeventfds;
    int nr_mem;
    MemoryRegion *mem;
    void (*reset)(struct VFIOPCIDevice *vdev, struct VFIOQuirk *quirk);
} VFIOQuirk;

typedef struct VFIOBAR {
    VFIORegion region;
    MemoryRegion *mr;
    size_t size;
    uint8_t type;
    bool ioport;
    bool mem64;
    QLIST_HEAD(, VFIOQuirk) quirks;
} VFIOBAR;

typedef struct VFIOVGARegion {
    MemoryRegion mem;
    off_t offset;
    int nr;
    QLIST_HEAD(, VFIOQuirk) quirks;
} VFIOVGARegion;

typedef struct VFIOVGA {
    off_t fd_offset;
    int fd;
    VFIOVGARegion region[QEMU_PCI_VGA_NUM_REGIONS];
} VFIOVGA;

typedef struct VFIOINTx {
    bool pending; /* interrupt pending */
    bool kvm_accel; /* set when QEMU bypass through KVM enabled */
    uint8_t pin; /* which pin to pull for qemu_set_irq */
    EventNotifier interrupt; /* eventfd triggered on interrupt */
    EventNotifier unmask; /* eventfd for unmask on QEMU bypass */
    PCIINTxRoute route; /* routing info for QEMU bypass */
    uint32_t mmap_timeout; /* delay to re-enable mmaps after interrupt */
    QEMUTimer *mmap_timer; /* enable mmaps after periods w/o interrupts */
} VFIOINTx;

typedef struct VFIOMSIVector {
    /*
     * Two interrupt paths are configured per vector.  The first is only used
     * for interrupts injected via QEMU.  This is typically the non-accel path,
     * but may also be used when we want QEMU to handle masking and pending
     * bits.  The KVM path bypasses QEMU and is therefore higher performance,
     * but requires masking at the device.  virq is used to track the MSI route
     * through KVM, thus kvm_interrupt is only available when virq is set to a
     * valid (>= 0) value.
     */
    EventNotifier interrupt;
    EventNotifier kvm_interrupt;
    struct VFIOPCIDevice *vdev; /* back pointer to device */
    int virq;
    bool use;
} VFIOMSIVector;
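
/*
 * Sketch of how the two notifiers are typically consumed (see pci.c):
 * 'interrupt' is drained by a QEMU fd handler that injects the MSI/MSI-X
 * message, while 'kvm_interrupt' is attached as a KVM irqfd
 * (kvm_irqchip_add_irqfd_notifier_gsi()) on the route identified by 'virq',
 * so the host kernel can inject the interrupt without a userspace exit.
 */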

enum {
    VFIO_INT_NONE = 0,
    VFIO_INT_INTx = 1,
    VFIO_INT_MSI  = 2,
    VFIO_INT_MSIX = 3,
};

/* Cache of MSI-X setup */
typedef struct VFIOMSIXInfo {
    uint8_t table_bar;
    uint8_t pba_bar;
    uint16_t entries;
    uint32_t table_offset;
    uint32_t pba_offset;
    unsigned long *pending;
} VFIOMSIXInfo;

#define TYPE_VFIO_PCI "vfio-pci"
OBJECT_DECLARE_SIMPLE_TYPE(VFIOPCIDevice, VFIO_PCI)

struct VFIOPCIDevice {
    PCIDevice pdev;
    VFIODevice vbasedev;
    VFIOINTx intx;
    unsigned int config_size;
    uint8_t *emulated_config_bits; /* QEMU emulated bits, little-endian */
    off_t config_offset; /* Offset of config space region within device fd */
    unsigned int rom_size;
    off_t rom_offset; /* Offset of ROM region within device fd */
    void *rom;
    int msi_cap_size;
    VFIOMSIVector *msi_vectors;
    VFIOMSIXInfo *msix;
    int nr_vectors; /* Number of MSI/MSIX vectors currently in use */
    int interrupt; /* Current interrupt type */
    VFIOBAR bars[PCI_NUM_REGIONS - 1]; /* No ROM */
    VFIOVGA *vga; /* 0xa0000, 0x3b0, 0x3c0 */
    void *igd_opregion;
    PCIHostDeviceAddress host;
    EventNotifier err_notifier;
    EventNotifier req_notifier;
    int (*resetfn)(struct VFIOPCIDevice *);
    uint32_t vendor_id;
    uint32_t device_id;
    uint32_t sub_vendor_id;
    uint32_t sub_device_id;
    uint32_t features;
#define VFIO_FEATURE_ENABLE_VGA_BIT 0
#define VFIO_FEATURE_ENABLE_VGA (1 << VFIO_FEATURE_ENABLE_VGA_BIT)
#define VFIO_FEATURE_ENABLE_REQ_BIT 1
#define VFIO_FEATURE_ENABLE_REQ (1 << VFIO_FEATURE_ENABLE_REQ_BIT)
#define VFIO_FEATURE_ENABLE_IGD_OPREGION_BIT 2
#define VFIO_FEATURE_ENABLE_IGD_OPREGION \
                                (1 << VFIO_FEATURE_ENABLE_IGD_OPREGION_BIT)
    OnOffAuto display;
    uint32_t display_xres;
    uint32_t display_yres;
    int32_t bootindex;
    uint32_t igd_gms;
    OffAutoPCIBAR msix_relo;
    uint8_t pm_cap;
    uint8_t nv_gpudirect_clique;
    bool pci_aer;
    bool req_enabled;
    bool has_flr;
    bool has_pm_reset;
    bool rom_read_failed;
    bool no_kvm_intx;
    bool no_kvm_msi;
    bool no_kvm_msix;
    bool no_geforce_quirks;
    bool no_kvm_ioeventfd;
    bool no_vfio_ioeventfd;
    bool enable_ramfb;
    bool defer_kvm_irq_routing;
    VFIODisplay *dpy;
    Notifier irqchip_change_notifier;
};
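
/*
 * The struct above backs the user-visible "vfio-pci" device, e.g.
 * "-device vfio-pci,host=0000:01:00.0"; most of the bool knobs are exposed
 * as experimental "x-" properties in pci.c.
 */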

/* Use uint32_t for vendor & device so PCI_ANY_ID expands and cannot match hw */
static inline bool vfio_pci_is(VFIOPCIDevice *vdev, uint32_t vendor, uint32_t device)
{
    return (vendor == PCI_ANY_ID || vendor == vdev->vendor_id) &&
           (device == PCI_ANY_ID || device == vdev->device_id);
}
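
/*
 * e.g. match any NVIDIA device regardless of device ID:
 *
 *     if (vfio_pci_is(vdev, PCI_VENDOR_ID_NVIDIA, PCI_ANY_ID)) {
 *         ...
 *     }
 */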

static inline bool vfio_is_vga(VFIOPCIDevice *vdev)
{
    PCIDevice *pdev = &vdev->pdev;
    uint16_t class = pci_get_word(pdev->config + PCI_CLASS_DEVICE);

    return class == PCI_CLASS_DISPLAY_VGA;
}

uint32_t vfio_pci_read_config(PCIDevice *pdev, uint32_t addr, int len);
void vfio_pci_write_config(PCIDevice *pdev,
                           uint32_t addr, uint32_t val, int len);

uint64_t vfio_vga_read(void *opaque, hwaddr addr, unsigned size);
void vfio_vga_write(void *opaque, hwaddr addr, uint64_t data, unsigned size);

bool vfio_opt_rom_in_denylist(VFIOPCIDevice *vdev);
void vfio_vga_quirk_setup(VFIOPCIDevice *vdev);
void vfio_vga_quirk_exit(VFIOPCIDevice *vdev);
void vfio_vga_quirk_finalize(VFIOPCIDevice *vdev);
void vfio_bar_quirk_setup(VFIOPCIDevice *vdev, int nr);
void vfio_bar_quirk_exit(VFIOPCIDevice *vdev, int nr);
void vfio_bar_quirk_finalize(VFIOPCIDevice *vdev, int nr);
void vfio_setup_resetfn_quirk(VFIOPCIDevice *vdev);
int vfio_add_virt_caps(VFIOPCIDevice *vdev, Error **errp);
void vfio_quirk_reset(VFIOPCIDevice *vdev);
VFIOQuirk *vfio_quirk_alloc(int nr_mem);
void vfio_probe_igd_bar4_quirk(VFIOPCIDevice *vdev, int nr);

extern const PropertyInfo qdev_prop_nv_gpudirect_clique;

int vfio_populate_vga(VFIOPCIDevice *vdev, Error **errp);

int vfio_pci_igd_opregion_init(VFIOPCIDevice *vdev,
                               struct vfio_region_info *info,
                               Error **errp);
int vfio_pci_nvidia_v100_ram_init(VFIOPCIDevice *vdev, Error **errp);
int vfio_pci_nvlink2_init(VFIOPCIDevice *vdev, Error **errp);

void vfio_display_reset(VFIOPCIDevice *vdev);
int vfio_display_probe(VFIOPCIDevice *vdev, Error **errp);
void vfio_display_finalize(VFIOPCIDevice *vdev);

#endif /* HW_VFIO_VFIO_PCI_H */