qemu/linux-headers/linux/vfio.h
   1/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
   2/*
   3 * VFIO API definition
   4 *
   5 * Copyright (C) 2012 Red Hat, Inc.  All rights reserved.
   6 *     Author: Alex Williamson <alex.williamson@redhat.com>
   7 *
   8 * This program is free software; you can redistribute it and/or modify
   9 * it under the terms of the GNU General Public License version 2 as
  10 * published by the Free Software Foundation.
  11 */
  12#ifndef VFIO_H
  13#define VFIO_H
  14
  15#include <linux/types.h>
  16#include <linux/ioctl.h>
  17
  18#define VFIO_API_VERSION        0
  19
  20
  21/* Kernel & User level defines for VFIO IOCTLs. */
  22
  23/* Extensions */
  24
  25#define VFIO_TYPE1_IOMMU                1
  26#define VFIO_SPAPR_TCE_IOMMU            2
  27#define VFIO_TYPE1v2_IOMMU              3
  28/*
  29 * IOMMU enforces DMA cache coherence (ex. PCIe NoSnoop stripping).  This
  30 * capability is subject to change as groups are added or removed.
  31 */
  32#define VFIO_DMA_CC_IOMMU               4
  33
  34/* Check if EEH is supported */
  35#define VFIO_EEH                        5
  36
  37/* Two-stage IOMMU */
  38#define VFIO_TYPE1_NESTING_IOMMU        6       /* Implies v2 */
  39
  40#define VFIO_SPAPR_TCE_v2_IOMMU         7
  41
  42/*
  43 * The No-IOMMU IOMMU offers no translation or isolation for devices and
  44 * supports no ioctls outside of VFIO_CHECK_EXTENSION.  Use of VFIO's No-IOMMU
  45 * code will taint the host kernel and should be used with extreme caution.
  46 */
  47#define VFIO_NOIOMMU_IOMMU              8
  48
  49/* Supports VFIO_DMA_UNMAP_FLAG_ALL */
  50#define VFIO_UNMAP_ALL                  9
  51
  52/* Supports the vaddr flag for DMA map and unmap */
  53#define VFIO_UPDATE_VADDR               10
  54
  55/*
  56 * The IOCTL interface is designed for extensibility by embedding the
  57 * structure length (argsz) and flags into structures passed between
  58 * kernel and userspace.  We therefore use the _IO() macro for these
  59 * defines to avoid implicitly embedding a size into the ioctl request.
  60 * As structure fields are added, argsz will increase to match and flag
  61 * bits will be defined to indicate additional fields with valid data.
  62 * It's *always* the caller's responsibility to indicate the size of
  63 * the structure passed by setting argsz appropriately.
  64 */
  65
  66#define VFIO_TYPE       (';')
  67#define VFIO_BASE       100
  68
  69/*
  70 * For extension of INFO ioctls, VFIO makes use of a capability chain
  71 * designed after PCI/e capabilities.  A flag bit indicates whether
  72 * this capability chain is supported and a field defined in the fixed
  73 * structure defines the offset of the first capability in the chain.
  74 * This field is only valid when the corresponding bit in the flags
  75 * bitmap is set.  This offset field is relative to the start of the
  76 * INFO buffer, as is the next field within each capability header.
  77 * The id within the header is a shared address space per INFO ioctl,
  78 * while the version field is specific to the capability id.  The
  79 * contents following the header are specific to the capability id.
  80 */
  81struct vfio_info_cap_header {
  82        __u16   id;             /* Identifies capability */
  83        __u16   version;        /* Version specific to the capability ID */
  84        __u32   next;           /* Offset of next capability */
  85};
  86
  87/*
  88 * Callers of INFO ioctls passing insufficiently sized buffers will see
  89 * the capability chain flag bit set, a zero value for the first capability
  90 * offset (if available within the provided argsz), and argsz will be
  91 * updated to report the necessary buffer size.  For compatibility, the
  92 * INFO ioctl will not report error in this case, but the capability chain
  93 * will not be available.
  94 */
  95
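/*
 * Illustrative sketch (not part of this header): walking a capability
 * chain in userspace once an INFO ioctl has filled the buffer.  The
 * find_cap() helper is hypothetical; offsets are relative to the start
 * of the INFO buffer as described above.
 *
 *	static struct vfio_info_cap_header *
 *	find_cap(void *info_buf, __u32 cap_offset, __u16 id)
 *	{
 *		struct vfio_info_cap_header *hdr;
 *		__u32 off;
 *
 *		for (off = cap_offset; off; off = hdr->next) {
 *			hdr = (struct vfio_info_cap_header *)
 *			      ((char *)info_buf + off);
 *			if (hdr->id == id)
 *				return hdr;
 *		}
 *		return NULL;
 *	}
 */
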
  96/* -------- IOCTLs for VFIO file descriptor (/dev/vfio/vfio) -------- */
  97
  98/**
  99 * VFIO_GET_API_VERSION - _IO(VFIO_TYPE, VFIO_BASE + 0)
 100 *
 101 * Report the version of the VFIO API.  This allows us to bump the entire
 102 * API version should we later need to add or change features in incompatible
 103 * ways.
 104 * Return: VFIO_API_VERSION
 105 * Availability: Always
 106 */
 107#define VFIO_GET_API_VERSION            _IO(VFIO_TYPE, VFIO_BASE + 0)
 108
 109/**
 110 * VFIO_CHECK_EXTENSION - _IOW(VFIO_TYPE, VFIO_BASE + 1, __u32)
 111 *
 112 * Check whether an extension is supported.
 113 * Return: 0 if not supported, 1 (or some other positive integer) if supported.
 114 * Availability: Always
 115 */
 116#define VFIO_CHECK_EXTENSION            _IO(VFIO_TYPE, VFIO_BASE + 1)
 117
 118/**
 119 * VFIO_SET_IOMMU - _IOW(VFIO_TYPE, VFIO_BASE + 2, __s32)
 120 *
 121 * Set the iommu to the given type.  The type must be supported by an
 122 * iommu driver as verified by calling CHECK_EXTENSION using the same
 123 * type.  A group must be set to this file descriptor before this
 124 * ioctl is available.  The IOMMU interfaces enabled by this call are
 125 * specific to the value set.
 126 * Return: 0 on success, -errno on failure
 127 * Availability: When VFIO group attached
 128 */
 129#define VFIO_SET_IOMMU                  _IO(VFIO_TYPE, VFIO_BASE + 2)
 130
 131/* -------- IOCTLs for GROUP file descriptors (/dev/vfio/$GROUP) -------- */
 132
 133/**
 134 * VFIO_GROUP_GET_STATUS - _IOR(VFIO_TYPE, VFIO_BASE + 3,
 135 *                                              struct vfio_group_status)
 136 *
 137 * Retrieve information about the group.  Fills in provided
 138 * struct vfio_group_status.  Caller sets argsz.
 139 * Return: 0 on success, -errno on failure.
 140 * Availability: Always
 141 */
 142struct vfio_group_status {
 143        __u32   argsz;
 144        __u32   flags;
 145#define VFIO_GROUP_FLAGS_VIABLE         (1 << 0)
 146#define VFIO_GROUP_FLAGS_CONTAINER_SET  (1 << 1)
 147};
 148#define VFIO_GROUP_GET_STATUS           _IO(VFIO_TYPE, VFIO_BASE + 3)
 149
 150/**
 151 * VFIO_GROUP_SET_CONTAINER - _IOW(VFIO_TYPE, VFIO_BASE + 4, __s32)
 152 *
 153 * Set the container for the VFIO group to the open VFIO file
 154 * descriptor provided.  Groups may only belong to a single
 155 * container.  Containers may, at their discretion, support multiple
 156 * groups.  Only when a container is set are all of the interfaces
 157 * of the VFIO file descriptor and the VFIO group file descriptor
 158 * available to the user.
 159 * Return: 0 on success, -errno on failure.
 160 * Availability: Always
 161 */
 162#define VFIO_GROUP_SET_CONTAINER        _IO(VFIO_TYPE, VFIO_BASE + 4)
 163
 164/**
 165 * VFIO_GROUP_UNSET_CONTAINER - _IO(VFIO_TYPE, VFIO_BASE + 5)
 166 *
 167 * Remove the group from the attached container.  This is the
 168 * opposite of the SET_CONTAINER call and returns the group to
 169 * an initial state.  All device file descriptors must be released
 170 * prior to calling this interface.  When removing the last group
 171 * from a container, the IOMMU will be disabled and all state lost,
 172 * effectively also returning the VFIO file descriptor to an initial
 173 * state.
 174 * Return: 0 on success, -errno on failure.
 175 * Availability: When attached to container
 176 */
 177#define VFIO_GROUP_UNSET_CONTAINER      _IO(VFIO_TYPE, VFIO_BASE + 5)
 178
 179/**
 180 * VFIO_GROUP_GET_DEVICE_FD - _IOW(VFIO_TYPE, VFIO_BASE + 6, char)
 181 *
 182 * Return a new file descriptor for the device object described by
 183 * the provided string.  The string should match a device listed in
 184 * the devices subdirectory of the IOMMU group sysfs entry.  The
 185 * group containing the device must already be added to this context.
 186 * Return: new file descriptor on success, -errno on failure.
 187 * Availability: When attached to container
 188 */
 189#define VFIO_GROUP_GET_DEVICE_FD        _IO(VFIO_TYPE, VFIO_BASE + 6)
 190
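/*
 * Illustrative sketch (not part of this header): the usual container,
 * group and device setup sequence using the ioctls above.  The group
 * number and device name are examples only.
 *
 *	int container, group, device;
 *	struct vfio_group_status status = { .argsz = sizeof(status) };
 *
 *	container = open("/dev/vfio/vfio", O_RDWR);
 *	if (ioctl(container, VFIO_GET_API_VERSION) != VFIO_API_VERSION)
 *		return -1;	// unknown API version
 *	if (!ioctl(container, VFIO_CHECK_EXTENSION, VFIO_TYPE1_IOMMU))
 *		return -1;	// IOMMU driver not supported
 *
 *	group = open("/dev/vfio/26", O_RDWR);
 *	ioctl(group, VFIO_GROUP_GET_STATUS, &status);
 *	if (!(status.flags & VFIO_GROUP_FLAGS_VIABLE))
 *		return -1;	// not all group devices are bound to vfio
 *
 *	ioctl(group, VFIO_GROUP_SET_CONTAINER, &container);
 *	ioctl(container, VFIO_SET_IOMMU, VFIO_TYPE1_IOMMU);
 *	device = ioctl(group, VFIO_GROUP_GET_DEVICE_FD, "0000:06:0d.0");
 */
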
 191/* --------------- IOCTLs for DEVICE file descriptors --------------- */
 192
 193/**
 194 * VFIO_DEVICE_GET_INFO - _IOR(VFIO_TYPE, VFIO_BASE + 7,
 195 *                                              struct vfio_device_info)
 196 *
 197 * Retrieve information about the device.  Fills in provided
 198 * struct vfio_device_info.  Caller sets argsz.
 199 * Return: 0 on success, -errno on failure.
 200 */
 201struct vfio_device_info {
 202        __u32   argsz;
 203        __u32   flags;
 204#define VFIO_DEVICE_FLAGS_RESET (1 << 0)        /* Device supports reset */
 205#define VFIO_DEVICE_FLAGS_PCI   (1 << 1)        /* vfio-pci device */
 206#define VFIO_DEVICE_FLAGS_PLATFORM (1 << 2)     /* vfio-platform device */
 207#define VFIO_DEVICE_FLAGS_AMBA  (1 << 3)        /* vfio-amba device */
 208#define VFIO_DEVICE_FLAGS_CCW   (1 << 4)        /* vfio-ccw device */
 209#define VFIO_DEVICE_FLAGS_AP    (1 << 5)        /* vfio-ap device */
 210#define VFIO_DEVICE_FLAGS_FSL_MC (1 << 6)       /* vfio-fsl-mc device */
 211#define VFIO_DEVICE_FLAGS_CAPS  (1 << 7)        /* Info supports caps */
 212        __u32   num_regions;    /* Max region index + 1 */
 213        __u32   num_irqs;       /* Max IRQ index + 1 */
 214        __u32   cap_offset;     /* Offset within info struct of first cap */
 215};
 216#define VFIO_DEVICE_GET_INFO            _IO(VFIO_TYPE, VFIO_BASE + 7)
 217
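/*
 * Illustrative sketch (not part of this header): querying basic device
 * information on a device fd obtained via VFIO_GROUP_GET_DEVICE_FD.
 *
 *	struct vfio_device_info info = { .argsz = sizeof(info) };
 *
 *	ioctl(device, VFIO_DEVICE_GET_INFO, &info);
 *	// info.num_regions and info.num_irqs bound the index space for
 *	// VFIO_DEVICE_GET_REGION_INFO and VFIO_DEVICE_GET_IRQ_INFO.
 */
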
 218/*
 219 * Vendor driver using Mediated device framework should provide device_api
 220 * attribute in supported type attribute groups. Device API string should be one
 221 * of the following corresponding to device flags in vfio_device_info structure.
 222 */
 223
 224#define VFIO_DEVICE_API_PCI_STRING              "vfio-pci"
 225#define VFIO_DEVICE_API_PLATFORM_STRING         "vfio-platform"
 226#define VFIO_DEVICE_API_AMBA_STRING             "vfio-amba"
 227#define VFIO_DEVICE_API_CCW_STRING              "vfio-ccw"
 228#define VFIO_DEVICE_API_AP_STRING               "vfio-ap"
 229
 230/*
 231 * The following capabilities are unique to s390 zPCI devices.  Their contents
 232 * are further-defined in vfio_zdev.h
 233 */
 234#define VFIO_DEVICE_INFO_CAP_ZPCI_BASE          1
 235#define VFIO_DEVICE_INFO_CAP_ZPCI_GROUP         2
 236#define VFIO_DEVICE_INFO_CAP_ZPCI_UTIL          3
 237#define VFIO_DEVICE_INFO_CAP_ZPCI_PFIP          4
 238
 239/**
 240 * VFIO_DEVICE_GET_REGION_INFO - _IOWR(VFIO_TYPE, VFIO_BASE + 8,
 241 *                                     struct vfio_region_info)
 242 *
 243 * Retrieve information about a device region.  Caller provides
 244 * struct vfio_region_info with index value set.  Caller sets argsz.
 245 * Implementation of region mapping is bus driver specific.  This is
 246 * intended to describe MMIO, I/O port, as well as bus specific
 247 * regions (ex. PCI config space).  Zero sized regions may be used
 248 * to describe unimplemented regions (ex. unimplemented PCI BARs).
 249 * Return: 0 on success, -errno on failure.
 250 */
 251struct vfio_region_info {
 252        __u32   argsz;
 253        __u32   flags;
 254#define VFIO_REGION_INFO_FLAG_READ      (1 << 0) /* Region supports read */
 255#define VFIO_REGION_INFO_FLAG_WRITE     (1 << 1) /* Region supports write */
 256#define VFIO_REGION_INFO_FLAG_MMAP      (1 << 2) /* Region supports mmap */
 257#define VFIO_REGION_INFO_FLAG_CAPS      (1 << 3) /* Info supports caps */
 258        __u32   index;          /* Region index */
 259        __u32   cap_offset;     /* Offset within info struct of first cap */
 260        __u64   size;           /* Region size (bytes) */
 261        __u64   offset;         /* Region offset from start of device fd */
 262};
 263#define VFIO_DEVICE_GET_REGION_INFO     _IO(VFIO_TYPE, VFIO_BASE + 8)
 264
 265/*
 266 * The sparse mmap capability allows finer granularity of specifying areas
 267 * within a region with mmap support.  When specified, the user should only
 268 * mmap the offset ranges specified by the areas array.  mmaps outside of the
 269 * areas specified may fail (such as the range covering a PCI MSI-X table) or
 270 * may result in improper device behavior.
 271 *
 272 * The structures below define version 1 of this capability.
 273 */
 274#define VFIO_REGION_INFO_CAP_SPARSE_MMAP        1
 275
 276struct vfio_region_sparse_mmap_area {
 277        __u64   offset; /* Offset of mmap'able area within region */
 278        __u64   size;   /* Size of mmap'able area */
 279};
 280
 281struct vfio_region_info_cap_sparse_mmap {
 282        struct vfio_info_cap_header header;
 283        __u32   nr_areas;
 284        __u32   reserved;
 285        struct vfio_region_sparse_mmap_area areas[];
 286};
 287
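/*
 * Illustrative sketch (not part of this header): fetching region info,
 * growing the buffer when a capability chain is present, and honoring
 * sparse mmap areas.  find_cap() is the hypothetical helper sketched
 * earlier; index is the region of interest.
 *
 *	struct vfio_region_info *info = malloc(sizeof(*info));
 *	struct vfio_region_info_cap_sparse_mmap *sparse;
 *	unsigned int i;
 *
 *	memset(info, 0, sizeof(*info));
 *	info->argsz = sizeof(*info);
 *	info->index = index;
 *	ioctl(device, VFIO_DEVICE_GET_REGION_INFO, info);
 *	if (info->argsz > sizeof(*info)) {
 *		info = realloc(info, info->argsz);	// room for caps
 *		ioctl(device, VFIO_DEVICE_GET_REGION_INFO, info);
 *	}
 *	if ((info->flags & VFIO_REGION_INFO_FLAG_CAPS) && info->cap_offset) {
 *		sparse = (void *)find_cap(info, info->cap_offset,
 *					  VFIO_REGION_INFO_CAP_SPARSE_MMAP);
 *		for (i = 0; sparse && i < sparse->nr_areas; i++)
 *			mmap(NULL, sparse->areas[i].size,
 *			     PROT_READ | PROT_WRITE, MAP_SHARED, device,
 *			     info->offset + sparse->areas[i].offset);
 *	}
 */
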
 288/*
 289 * The device specific type capability allows regions unique to a specific
 290 * device or class of devices to be exposed.  This helps solve the problem for
 291 * vfio bus drivers of defining which region indexes correspond to which region
 292 * on the device, without needing to resort to static indexes, as done by
 293 * vfio-pci.  For instance, if we were to go back in time, we might remove
 294 * VFIO_PCI_VGA_REGION_INDEX and let vfio-pci simply define that all indexes
 295 * greater than or equal to VFIO_PCI_NUM_REGIONS are device specific and we'd
 296 * make a "VGA" device specific type to describe the VGA access space.  This
 297 * means that non-VGA devices wouldn't need to waste this index, and thus the
 298 * address space associated with it due to implementation of device file
 299 * descriptor offsets in vfio-pci.
 300 *
 301 * The current implementation is now part of the user ABI, so we can't use this
 302 * for VGA, but there are other upcoming use cases, such as opregions for Intel
 303 * IGD devices and framebuffers for vGPU devices.  We missed VGA, but we'll
 304 * use this for future additions.
 305 *
 306 * The structure below defines version 1 of this capability.
 307 */
 308#define VFIO_REGION_INFO_CAP_TYPE       2
 309
 310struct vfio_region_info_cap_type {
 311        struct vfio_info_cap_header header;
 312        __u32 type;     /* global per bus driver */
 313        __u32 subtype;  /* type specific */
 314};
 315
 316/*
 317 * List of region types, global per bus driver.
 318 * If you introduce a new type, please add it here.
 319 */
 320
 321/* PCI region type containing a PCI vendor part */
 322#define VFIO_REGION_TYPE_PCI_VENDOR_TYPE        (1 << 31)
 323#define VFIO_REGION_TYPE_PCI_VENDOR_MASK        (0xffff)
 324#define VFIO_REGION_TYPE_GFX                    (1)
 325#define VFIO_REGION_TYPE_CCW                    (2)
 326#define VFIO_REGION_TYPE_MIGRATION_DEPRECATED   (3)
 327
 328/* sub-types for VFIO_REGION_TYPE_PCI_* */
 329
 330/* 8086 vendor PCI sub-types */
 331#define VFIO_REGION_SUBTYPE_INTEL_IGD_OPREGION  (1)
 332#define VFIO_REGION_SUBTYPE_INTEL_IGD_HOST_CFG  (2)
 333#define VFIO_REGION_SUBTYPE_INTEL_IGD_LPC_CFG   (3)
 334
 335/* 10de vendor PCI sub-types */
 336/*
 337 * NVIDIA GPU NVlink2 RAM is coherent RAM mapped onto the host address space.
 338 *
 339 * Deprecated, region no longer provided
 340 */
 341#define VFIO_REGION_SUBTYPE_NVIDIA_NVLINK2_RAM  (1)
 342
 343/* 1014 vendor PCI sub-types */
 344/*
 345 * IBM NPU NVlink2 ATSD (Address Translation Shootdown) register of NPU
 346 * to do TLB invalidation on a GPU.
 347 *
 348 * Deprecated, region no longer provided
 349 */
 350#define VFIO_REGION_SUBTYPE_IBM_NVLINK2_ATSD    (1)
 351
 352/* sub-types for VFIO_REGION_TYPE_GFX */
 353#define VFIO_REGION_SUBTYPE_GFX_EDID            (1)
 354
 355/**
 356 * struct vfio_region_gfx_edid - EDID region layout.
 357 *
 358 * Set display link state and EDID blob.
 359 *
 360 * The EDID blob has monitor information such as brand, name, serial
 361 * number, physical size, supported video modes and more.
 362 *
 363 * This special region allows userspace (typically qemu) to set a virtual
 364 * EDID for the virtual monitor, which allows a flexible display
 365 * configuration.
 366 *
 367 * For the edid blob spec look here:
 368 *    https://en.wikipedia.org/wiki/Extended_Display_Identification_Data
 369 *
 370 * On linux systems you can find the EDID blob in sysfs:
 371 *    /sys/class/drm/${card}/${connector}/edid
 372 *
 373 * You can use the edid-decode utility (comes with xorg-x11-utils) to
 374 * decode the EDID blob.
 375 *
 376 * @edid_offset: location of the edid blob, relative to the
 377 *               start of the region (readonly).
 378 * @edid_max_size: max size of the edid blob (readonly).
 379 * @edid_size: actual edid size (read/write).
 380 * @link_state: display link state (read/write).
 381 * VFIO_DEVICE_GFX_LINK_STATE_UP: Monitor is turned on.
 382 * VFIO_DEVICE_GFX_LINK_STATE_DOWN: Monitor is turned off.
 383 * @max_xres: max display width (0 == no limitation, readonly).
 384 * @max_yres: max display height (0 == no limitation, readonly).
 385 *
 386 * EDID update protocol:
 387 *   (1) set link-state to down.
 388 *   (2) update edid blob and size.
 389 *   (3) set link-state to up.
 390 */
 391struct vfio_region_gfx_edid {
 392        __u32 edid_offset;
 393        __u32 edid_max_size;
 394        __u32 edid_size;
 395        __u32 max_xres;
 396        __u32 max_yres;
 397        __u32 link_state;
 398#define VFIO_DEVICE_GFX_LINK_STATE_UP    1
 399#define VFIO_DEVICE_GFX_LINK_STATE_DOWN  2
 400};
 401
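/*
 * Illustrative sketch (not part of this header): driving the EDID update
 * protocol above with pread()/pwrite() on the device fd.  region_offset
 * is assumed to be the offset of the EDID region reported by
 * VFIO_DEVICE_GET_REGION_INFO; edid_blob/edid_len are user supplied.
 *
 *	struct vfio_region_gfx_edid hdr;
 *	__u32 state, size = edid_len;
 *
 *	pread(device, &hdr, sizeof(hdr), region_offset);
 *
 *	state = VFIO_DEVICE_GFX_LINK_STATE_DOWN;
 *	pwrite(device, &state, sizeof(state), region_offset +
 *	       offsetof(struct vfio_region_gfx_edid, link_state));
 *	pwrite(device, edid_blob, edid_len, region_offset + hdr.edid_offset);
 *	pwrite(device, &size, sizeof(size), region_offset +
 *	       offsetof(struct vfio_region_gfx_edid, edid_size));
 *	state = VFIO_DEVICE_GFX_LINK_STATE_UP;
 *	pwrite(device, &state, sizeof(state), region_offset +
 *	       offsetof(struct vfio_region_gfx_edid, link_state));
 */
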
 402/* sub-types for VFIO_REGION_TYPE_CCW */
 403#define VFIO_REGION_SUBTYPE_CCW_ASYNC_CMD       (1)
 404#define VFIO_REGION_SUBTYPE_CCW_SCHIB           (2)
 405#define VFIO_REGION_SUBTYPE_CCW_CRW             (3)
 406
 407/* sub-types for VFIO_REGION_TYPE_MIGRATION */
 408#define VFIO_REGION_SUBTYPE_MIGRATION_DEPRECATED (1)
 409
 410struct vfio_device_migration_info {
 411        __u32 device_state;         /* VFIO device state */
 412#define VFIO_DEVICE_STATE_V1_STOP      (0)
 413#define VFIO_DEVICE_STATE_V1_RUNNING   (1 << 0)
 414#define VFIO_DEVICE_STATE_V1_SAVING    (1 << 1)
 415#define VFIO_DEVICE_STATE_V1_RESUMING  (1 << 2)
 416#define VFIO_DEVICE_STATE_MASK      (VFIO_DEVICE_STATE_V1_RUNNING | \
 417                                     VFIO_DEVICE_STATE_V1_SAVING |  \
 418                                     VFIO_DEVICE_STATE_V1_RESUMING)
 419
 420#define VFIO_DEVICE_STATE_VALID(state) \
 421        (state & VFIO_DEVICE_STATE_V1_RESUMING ? \
 422        (state & VFIO_DEVICE_STATE_MASK) == VFIO_DEVICE_STATE_V1_RESUMING : 1)
 423
 424#define VFIO_DEVICE_STATE_IS_ERROR(state) \
 425        ((state & VFIO_DEVICE_STATE_MASK) == (VFIO_DEVICE_STATE_V1_SAVING | \
 426                                              VFIO_DEVICE_STATE_V1_RESUMING))
 427
 428#define VFIO_DEVICE_STATE_SET_ERROR(state) \
 429        ((state & ~VFIO_DEVICE_STATE_MASK) | VFIO_DEVICE_STATE_V1_SAVING | \
 430                                             VFIO_DEVICE_STATE_V1_RESUMING)
 431
 432        __u32 reserved;
 433        __u64 pending_bytes;
 434        __u64 data_offset;
 435        __u64 data_size;
 436};
 437
 438/*
 439 * The MSIX mappable capability informs that the MSIX data of a BAR can be mmapped,
 440 * which allows direct access to non-MSIX registers which happen to be within
 441 * the same system page.
 442 *
 443 * Even though the userspace gets direct access to the MSIX data, the existing
 444 * VFIO_DEVICE_SET_IRQS interface must still be used for MSIX configuration.
 445 */
 446#define VFIO_REGION_INFO_CAP_MSIX_MAPPABLE      3
 447
 448/*
 449 * Capability with compressed real address (aka SSA - small system address)
 450 * where GPU RAM is mapped on a system bus. Used by a GPU for DMA routing
 451 * and by the userspace to associate a NVLink bridge with a GPU.
 452 *
 453 * Deprecated, capability no longer provided
 454 */
 455#define VFIO_REGION_INFO_CAP_NVLINK2_SSATGT     4
 456
 457struct vfio_region_info_cap_nvlink2_ssatgt {
 458        struct vfio_info_cap_header header;
 459        __u64 tgt;
 460};
 461
 462/*
 463 * Capability with an NVLink link speed. The value is read by
 464 * the NVlink2 bridge driver from the bridge's "ibm,nvlink-speed"
 465 * property in the device tree. The value is fixed in the hardware
 466 * and failing to provide the correct value results in the link
 467 * not working with no indication from the driver why.
 468 *
 469 * Deprecated, capability no longer provided
 470 */
 471#define VFIO_REGION_INFO_CAP_NVLINK2_LNKSPD     5
 472
 473struct vfio_region_info_cap_nvlink2_lnkspd {
 474        struct vfio_info_cap_header header;
 475        __u32 link_speed;
 476        __u32 __pad;
 477};
 478
 479/**
 480 * VFIO_DEVICE_GET_IRQ_INFO - _IOWR(VFIO_TYPE, VFIO_BASE + 9,
 481 *                                  struct vfio_irq_info)
 482 *
 483 * Retrieve information about a device IRQ.  Caller provides
 484 * struct vfio_irq_info with index value set.  Caller sets argsz.
 485 * Implementation of IRQ mapping is bus driver specific.  Indexes
 486 * using multiple IRQs are primarily intended to support MSI-like
 487 * interrupt blocks.  Zero count irq blocks may be used to describe
 488 * unimplemented interrupt types.
 489 *
 490 * The EVENTFD flag indicates the interrupt index supports eventfd based
 491 * signaling.
 492 *
 493 * The MASKABLE flag indicates the index supports MASK and UNMASK
 494 * actions described below.
 495 *
 496 * AUTOMASKED indicates that after signaling, the interrupt line is
 497 * automatically masked by VFIO and the user needs to unmask the line
 498 * to receive new interrupts.  This is primarily intended to distinguish
 499 * level triggered interrupts.
 500 *
 501 * The NORESIZE flag indicates that the interrupt lines within the index
 502 * are setup as a set and new subindexes cannot be enabled without first
 503 * disabling the entire index.  This is used for interrupts like PCI MSI
 504 * and MSI-X where the driver may only use a subset of the available
 505 * indexes, but VFIO needs to enable a specific number of vectors
 506 * upfront.  In the case of MSI-X, where the user can enable MSI-X and
 507 * then add and unmask vectors, it's up to userspace to make the decision
 508 * whether to allocate the maximum supported number of vectors or tear
 509 * down setup and incrementally increase the vectors as each is enabled.
 510 */
 511struct vfio_irq_info {
 512        __u32   argsz;
 513        __u32   flags;
 514#define VFIO_IRQ_INFO_EVENTFD           (1 << 0)
 515#define VFIO_IRQ_INFO_MASKABLE          (1 << 1)
 516#define VFIO_IRQ_INFO_AUTOMASKED        (1 << 2)
 517#define VFIO_IRQ_INFO_NORESIZE          (1 << 3)
 518        __u32   index;          /* IRQ index */
 519        __u32   count;          /* Number of IRQs within this index */
 520};
 521#define VFIO_DEVICE_GET_IRQ_INFO        _IO(VFIO_TYPE, VFIO_BASE + 9)
 522
 523/**
 524 * VFIO_DEVICE_SET_IRQS - _IOW(VFIO_TYPE, VFIO_BASE + 10, struct vfio_irq_set)
 525 *
 526 * Set signaling, masking, and unmasking of interrupts.  Caller provides
 527 * struct vfio_irq_set with all fields set.  'start' and 'count' indicate
 528 * the range of subindexes being specified.
 529 *
 530 * The DATA flags specify the type of data provided.  If DATA_NONE, the
 531 * operation performs the specified action immediately on the specified
 532 * interrupt(s).  For example, to unmask AUTOMASKED interrupt [0,0]:
 533 * flags = (DATA_NONE|ACTION_UNMASK), index = 0, start = 0, count = 1.
 534 *
 535 * DATA_BOOL allows sparse support for the same on arrays of interrupts.
 536 * For example, to mask interrupts [0,1] and [0,3] (but not [0,2]):
 537 * flags = (DATA_BOOL|ACTION_MASK), index = 0, start = 1, count = 3,
 538 * data = {1,0,1}
 539 *
 540 * DATA_EVENTFD binds the specified ACTION to the provided __s32 eventfd.
 541 * A value of -1 can be used to either de-assign interrupts if already
 542 * assigned or skip un-assigned interrupts.  For example, to set an eventfd
 543 * to be triggered for interrupts [0,0] and [0,2]:
 544 * flags = (DATA_EVENTFD|ACTION_TRIGGER), index = 0, start = 0, count = 3,
 545 * data = {fd1, -1, fd2}
 546 * If index [0,1] is previously set, two count = 1 ioctl calls would be
 547 * required to set [0,0] and [0,2] without changing [0,1].
 548 *
 549 * Once a signaling mechanism is set, DATA_BOOL or DATA_NONE can be used
 550 * with ACTION_TRIGGER to perform kernel level interrupt loopback testing
 551 * from userspace (ie. simulate hardware triggering).
 552 *
 553 * Setting of an event triggering mechanism to userspace for ACTION_TRIGGER
 554 * enables the interrupt index for the device.  Individual subindex interrupts
 555 * can be disabled using the -1 value for DATA_EVENTFD or the index can be
 556 * disabled as a whole with: flags = (DATA_NONE|ACTION_TRIGGER), count = 0.
 557 *
 558 * Note that ACTION_[UN]MASK specify user->kernel signaling (irqfds) while
 559 * ACTION_TRIGGER specifies kernel->user signaling.
 560 */
 561struct vfio_irq_set {
 562        __u32   argsz;
 563        __u32   flags;
 564#define VFIO_IRQ_SET_DATA_NONE          (1 << 0) /* Data not present */
 565#define VFIO_IRQ_SET_DATA_BOOL          (1 << 1) /* Data is bool (u8) */
 566#define VFIO_IRQ_SET_DATA_EVENTFD       (1 << 2) /* Data is eventfd (s32) */
 567#define VFIO_IRQ_SET_ACTION_MASK        (1 << 3) /* Mask interrupt */
 568#define VFIO_IRQ_SET_ACTION_UNMASK      (1 << 4) /* Unmask interrupt */
 569#define VFIO_IRQ_SET_ACTION_TRIGGER     (1 << 5) /* Trigger interrupt */
 570        __u32   index;
 571        __u32   start;
 572        __u32   count;
 573        __u8    data[];
 574};
 575#define VFIO_DEVICE_SET_IRQS            _IO(VFIO_TYPE, VFIO_BASE + 10)
 576
 577#define VFIO_IRQ_SET_DATA_TYPE_MASK     (VFIO_IRQ_SET_DATA_NONE | \
 578                                         VFIO_IRQ_SET_DATA_BOOL | \
 579                                         VFIO_IRQ_SET_DATA_EVENTFD)
 580#define VFIO_IRQ_SET_ACTION_TYPE_MASK   (VFIO_IRQ_SET_ACTION_MASK | \
 581                                         VFIO_IRQ_SET_ACTION_UNMASK | \
 582                                         VFIO_IRQ_SET_ACTION_TRIGGER)
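
/*
 * Illustrative sketch (not part of this header): binding an eventfd to
 * MSI-X vector 0 of a vfio-pci device.  evtfd is assumed to come from
 * eventfd(0, 0).
 *
 *	struct vfio_irq_set *irq_set;
 *	size_t argsz = sizeof(*irq_set) + sizeof(__s32);
 *
 *	irq_set = calloc(1, argsz);
 *	irq_set->argsz = argsz;
 *	irq_set->flags = VFIO_IRQ_SET_DATA_EVENTFD |
 *			 VFIO_IRQ_SET_ACTION_TRIGGER;
 *	irq_set->index = VFIO_PCI_MSIX_IRQ_INDEX;
 *	irq_set->start = 0;
 *	irq_set->count = 1;
 *	memcpy(irq_set->data, &evtfd, sizeof(__s32));
 *	ioctl(device, VFIO_DEVICE_SET_IRQS, irq_set);
 *	free(irq_set);
 */
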
 583/**
 584 * VFIO_DEVICE_RESET - _IO(VFIO_TYPE, VFIO_BASE + 11)
 585 *
 586 * Reset a device.
 587 */
 588#define VFIO_DEVICE_RESET               _IO(VFIO_TYPE, VFIO_BASE + 11)
 589
 590/*
 591 * The VFIO-PCI bus driver makes use of the following fixed region and
 592 * IRQ index mapping.  Unimplemented regions return a size of zero.
 593 * Unimplemented IRQ types return a count of zero.
 594 */
 595
 596enum {
 597        VFIO_PCI_BAR0_REGION_INDEX,
 598        VFIO_PCI_BAR1_REGION_INDEX,
 599        VFIO_PCI_BAR2_REGION_INDEX,
 600        VFIO_PCI_BAR3_REGION_INDEX,
 601        VFIO_PCI_BAR4_REGION_INDEX,
 602        VFIO_PCI_BAR5_REGION_INDEX,
 603        VFIO_PCI_ROM_REGION_INDEX,
 604        VFIO_PCI_CONFIG_REGION_INDEX,
 605        /*
 606         * Expose VGA regions defined for PCI base class 03, subclass 00.
 607         * This includes I/O port ranges 0x3b0 to 0x3bb and 0x3c0 to 0x3df
 608         * as well as the MMIO range 0xa0000 to 0xbffff.  Each implemented
 609         * range is found at its identity mapped offset from the region
 610         * offset, for example 0x3b0 is region_info.offset + 0x3b0.  Areas
 611         * between described ranges are unimplemented.
 612         */
 613        VFIO_PCI_VGA_REGION_INDEX,
 614        VFIO_PCI_NUM_REGIONS = 9 /* Fixed user ABI, region indexes >=9 use */
 615                                 /* device specific cap to define content. */
 616};
 617
 618enum {
 619        VFIO_PCI_INTX_IRQ_INDEX,
 620        VFIO_PCI_MSI_IRQ_INDEX,
 621        VFIO_PCI_MSIX_IRQ_INDEX,
 622        VFIO_PCI_ERR_IRQ_INDEX,
 623        VFIO_PCI_REQ_IRQ_INDEX,
 624        VFIO_PCI_NUM_IRQS
 625};
 626
 627/*
 628 * The vfio-ccw bus driver makes use of the following fixed region and
 629 * IRQ index mapping. Unimplemented regions return a size of zero.
 630 * Unimplemented IRQ types return a count of zero.
 631 */
 632
 633enum {
 634        VFIO_CCW_CONFIG_REGION_INDEX,
 635        VFIO_CCW_NUM_REGIONS
 636};
 637
 638enum {
 639        VFIO_CCW_IO_IRQ_INDEX,
 640        VFIO_CCW_CRW_IRQ_INDEX,
 641        VFIO_CCW_REQ_IRQ_INDEX,
 642        VFIO_CCW_NUM_IRQS
 643};
 644
 645/**
 646 * VFIO_DEVICE_GET_PCI_HOT_RESET_INFO - _IOWR(VFIO_TYPE, VFIO_BASE + 12,
 647 *                                            struct vfio_pci_hot_reset_info)
 648 *
 649 * Return: 0 on success, -errno on failure:
 650 *      -ENOSPC = insufficient buffer, -ENODEV = unsupported for device.
 651 */
 652struct vfio_pci_dependent_device {
 653        __u32   group_id;
 654        __u16   segment;
 655        __u8    bus;
 656        __u8    devfn; /* Use PCI_SLOT/PCI_FUNC */
 657};
 658
 659struct vfio_pci_hot_reset_info {
 660        __u32   argsz;
 661        __u32   flags;
 662        __u32   count;
 663        struct vfio_pci_dependent_device        devices[];
 664};
 665
 666#define VFIO_DEVICE_GET_PCI_HOT_RESET_INFO      _IO(VFIO_TYPE, VFIO_BASE + 12)
 667
 668/**
 669 * VFIO_DEVICE_PCI_HOT_RESET - _IOW(VFIO_TYPE, VFIO_BASE + 13,
 670 *                                  struct vfio_pci_hot_reset)
 671 *
 672 * Return: 0 on success, -errno on failure.
 673 */
 674struct vfio_pci_hot_reset {
 675        __u32   argsz;
 676        __u32   flags;
 677        __u32   count;
 678        __s32   group_fds[];
 679};
 680
 681#define VFIO_DEVICE_PCI_HOT_RESET       _IO(VFIO_TYPE, VFIO_BASE + 13)
 682
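/*
 * Illustrative sketch (not part of this header): performing a hot reset
 * after collecting the affected groups via
 * VFIO_DEVICE_GET_PCI_HOT_RESET_INFO.  group_fds[]/ngroups are assumed
 * to hold already-opened group file descriptors covering every
 * dependent device.
 *
 *	struct vfio_pci_hot_reset *reset;
 *	size_t argsz = sizeof(*reset) + ngroups * sizeof(__s32);
 *
 *	reset = calloc(1, argsz);
 *	reset->argsz = argsz;
 *	reset->count = ngroups;
 *	memcpy(reset->group_fds, group_fds, ngroups * sizeof(__s32));
 *	ioctl(device, VFIO_DEVICE_PCI_HOT_RESET, reset);
 *	free(reset);
 */
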
 683/**
 684 * VFIO_DEVICE_QUERY_GFX_PLANE - _IOW(VFIO_TYPE, VFIO_BASE + 14,
 685 *                                    struct vfio_device_query_gfx_plane)
 686 *
 687 * Set the drm_plane_type and flags, then retrieve the gfx plane info.
 688 *
 689 * flags supported:
 690 * - VFIO_GFX_PLANE_TYPE_PROBE and VFIO_GFX_PLANE_TYPE_DMABUF are set
 691 *   to ask if the mdev supports dma-buf. 0 on support, -EINVAL on no
 692 *   support for dma-buf.
 693 * - VFIO_GFX_PLANE_TYPE_PROBE and VFIO_GFX_PLANE_TYPE_REGION are set
 694 *   to ask if the mdev supports region. 0 on support, -EINVAL on no
 695 *   support for region.
 696 * - VFIO_GFX_PLANE_TYPE_DMABUF or VFIO_GFX_PLANE_TYPE_REGION is set
 697 *   with each call to query the plane info.
 698 * - Others are invalid and return -EINVAL.
 699 *
 700 * Note:
 701 * 1. Plane could be disabled by guest. In that case, success will be
 702 *    returned with zero-initialized drm_format, size, width and height
 703 *    fields.
 704 * 2. x_hot/y_hot is set to 0xFFFFFFFF if no hotspot information available
 705 *
 706 * Return: 0 on success, -errno on other failure.
 707 */
 708struct vfio_device_gfx_plane_info {
 709        __u32 argsz;
 710        __u32 flags;
 711#define VFIO_GFX_PLANE_TYPE_PROBE (1 << 0)
 712#define VFIO_GFX_PLANE_TYPE_DMABUF (1 << 1)
 713#define VFIO_GFX_PLANE_TYPE_REGION (1 << 2)
 714        /* in */
 715        __u32 drm_plane_type;   /* type of plane: DRM_PLANE_TYPE_* */
 716        /* out */
 717        __u32 drm_format;       /* drm format of plane */
 718        __u64 drm_format_mod;   /* tiled mode */
 719        __u32 width;    /* width of plane */
 720        __u32 height;   /* height of plane */
 721        __u32 stride;   /* stride of plane */
 722        __u32 size;     /* size of plane in bytes, aligned on page */
 723        __u32 x_pos;    /* horizontal position of cursor plane */
 724        __u32 y_pos;    /* vertical position of cursor plane */
 725        __u32 x_hot;    /* horizontal position of cursor hotspot */
 726        __u32 y_hot;    /* vertical position of cursor hotspot */
 727        union {
 728                __u32 region_index;     /* region index */
 729                __u32 dmabuf_id;        /* dma-buf id */
 730        };
 731};
 732
 733#define VFIO_DEVICE_QUERY_GFX_PLANE _IO(VFIO_TYPE, VFIO_BASE + 14)
 734
 735/**
 736 * VFIO_DEVICE_GET_GFX_DMABUF - _IOW(VFIO_TYPE, VFIO_BASE + 15, __u32)
 737 *
 738 * Return a new dma-buf file descriptor for an exposed guest framebuffer
 739 * described by the provided dmabuf_id. The dmabuf_id is returned from
 740 * VFIO_DEVICE_QUERY_GFX_PLANE as a token of the exposed guest framebuffer.
 741 */
 742
 743#define VFIO_DEVICE_GET_GFX_DMABUF _IO(VFIO_TYPE, VFIO_BASE + 15)
 744
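/*
 * Illustrative sketch (not part of this header): querying the primary
 * plane of a dma-buf capable mdev and exporting it.
 * DRM_PLANE_TYPE_PRIMARY comes from the DRM uAPI headers.
 *
 *	struct vfio_device_gfx_plane_info plane = {
 *		.argsz = sizeof(plane),
 *		.flags = VFIO_GFX_PLANE_TYPE_DMABUF,
 *		.drm_plane_type = DRM_PLANE_TYPE_PRIMARY,
 *	};
 *	int dmabuf_fd;
 *
 *	if (!ioctl(device, VFIO_DEVICE_QUERY_GFX_PLANE, &plane) &&
 *	    plane.drm_format)
 *		dmabuf_fd = ioctl(device, VFIO_DEVICE_GET_GFX_DMABUF,
 *				  &plane.dmabuf_id);
 */
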
 745/**
 746 * VFIO_DEVICE_IOEVENTFD - _IOW(VFIO_TYPE, VFIO_BASE + 16,
 747 *                              struct vfio_device_ioeventfd)
 748 *
 749 * Perform a write to the device at the specified device fd offset, with
 750 * the specified data and width when the provided eventfd is triggered.
 751 * vfio bus drivers may not support this for all regions, for all widths,
 752 * or at all.  vfio-pci currently only enables support for BAR regions,
 753 * excluding the MSI-X vector table.
 754 *
 755 * Return: 0 on success, -errno on failure.
 756 */
 757struct vfio_device_ioeventfd {
 758        __u32   argsz;
 759        __u32   flags;
 760#define VFIO_DEVICE_IOEVENTFD_8         (1 << 0) /* 1-byte write */
 761#define VFIO_DEVICE_IOEVENTFD_16        (1 << 1) /* 2-byte write */
 762#define VFIO_DEVICE_IOEVENTFD_32        (1 << 2) /* 4-byte write */
 763#define VFIO_DEVICE_IOEVENTFD_64        (1 << 3) /* 8-byte write */
 764#define VFIO_DEVICE_IOEVENTFD_SIZE_MASK (0xf)
 765        __u64   offset;                 /* device fd offset of write */
 766        __u64   data;                   /* data to be written */
 767        __s32   fd;                     /* -1 for de-assignment */
 768};
 769
 770#define VFIO_DEVICE_IOEVENTFD           _IO(VFIO_TYPE, VFIO_BASE + 16)
 771
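/*
 * Illustrative sketch (not part of this header): arranging a 4-byte
 * doorbell write whenever evtfd is signaled.  bar0_offset and
 * DOORBELL_OFFSET are hypothetical values derived from region info and
 * the device programming model.
 *
 *	struct vfio_device_ioeventfd ioeventfd = {
 *		.argsz	= sizeof(ioeventfd),
 *		.flags	= VFIO_DEVICE_IOEVENTFD_32,
 *		.offset	= bar0_offset + DOORBELL_OFFSET,
 *		.data	= 1,
 *		.fd	= evtfd,	// -1 would tear the binding down
 *	};
 *
 *	ioctl(device, VFIO_DEVICE_IOEVENTFD, &ioeventfd);
 */
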
 772/**
 773 * VFIO_DEVICE_FEATURE - _IOWR(VFIO_TYPE, VFIO_BASE + 17,
 774 *                             struct vfio_device_feature)
 775 *
 776 * Get, set, or probe feature data of the device.  The feature is selected
 777 * using the FEATURE_MASK portion of the flags field.  Support for a feature
 778 * can be probed by setting both the FEATURE_MASK and PROBE bits.  A probe
 779 * may optionally include the GET and/or SET bits to determine read vs write
 780 * access of the feature respectively.  Probing a feature will return success
 781 * if the feature is supported and all of the optionally indicated GET/SET
 782 * methods are supported.  The format of the data portion of the structure is
 783 * specific to the given feature.  The data portion is not required for
 784 * probing.  GET and SET are mutually exclusive, except for use with PROBE.
 785 *
 786 * Return 0 on success, -errno on failure.
 787 */
 788struct vfio_device_feature {
 789        __u32   argsz;
 790        __u32   flags;
 791#define VFIO_DEVICE_FEATURE_MASK        (0xffff) /* 16-bit feature index */
 792#define VFIO_DEVICE_FEATURE_GET         (1 << 16) /* Get feature into data[] */
 793#define VFIO_DEVICE_FEATURE_SET         (1 << 17) /* Set feature from data[] */
 794#define VFIO_DEVICE_FEATURE_PROBE       (1 << 18) /* Probe feature support */
 795        __u8    data[];
 796};
 797
 798#define VFIO_DEVICE_FEATURE             _IO(VFIO_TYPE, VFIO_BASE + 17)
 799
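/*
 * Illustrative sketch (not part of this header): probing for the
 * migration feature and then reading back its flags.
 *
 *	struct vfio_device_feature *feature;
 *	struct vfio_device_feature_migration *mig;
 *	size_t argsz = sizeof(*feature) + sizeof(*mig);
 *
 *	feature = calloc(1, argsz);
 *	feature->argsz = argsz;
 *	feature->flags = VFIO_DEVICE_FEATURE_PROBE | VFIO_DEVICE_FEATURE_GET |
 *			 VFIO_DEVICE_FEATURE_MIGRATION;
 *	if (ioctl(device, VFIO_DEVICE_FEATURE, feature))
 *		return -1;	// feature or its GET method not supported
 *
 *	feature->flags = VFIO_DEVICE_FEATURE_GET | VFIO_DEVICE_FEATURE_MIGRATION;
 *	ioctl(device, VFIO_DEVICE_FEATURE, feature);
 *	mig = (struct vfio_device_feature_migration *)feature->data;
 *	// mig->flags now reports STOP_COPY/P2P/PRE_COPY support
 */
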
 800/*
 801 * Provide support for setting a PCI VF Token, which is used as a shared
 802 * secret between PF and VF drivers.  This feature may only be set on a
 803 * PCI SR-IOV PF when SR-IOV is enabled on the PF and there are no existing
 804 * open VFs.  Data provided when setting this feature is a 16-byte array
 805 * (__u8 b[16]), representing a UUID.
 806 */
 807#define VFIO_DEVICE_FEATURE_PCI_VF_TOKEN        (0)
 808
 809/*
 810 * Indicates the device can support the migration API through
 811 * VFIO_DEVICE_FEATURE_MIG_DEVICE_STATE. If this GET succeeds, the RUNNING and
 812 * ERROR states are always supported. Support for additional states is
 813 * indicated via the flags field; at least VFIO_MIGRATION_STOP_COPY must be
 814 * set.
 815 *
 816 * VFIO_MIGRATION_STOP_COPY means that STOP, STOP_COPY and
 817 * RESUMING are supported.
 818 *
 819 * VFIO_MIGRATION_STOP_COPY | VFIO_MIGRATION_P2P means that RUNNING_P2P
 820 * is supported in addition to the STOP_COPY states.
 821 *
 822 * VFIO_MIGRATION_STOP_COPY | VFIO_MIGRATION_PRE_COPY means that
 823 * PRE_COPY is supported in addition to the STOP_COPY states.
 824 *
 825 * VFIO_MIGRATION_STOP_COPY | VFIO_MIGRATION_P2P | VFIO_MIGRATION_PRE_COPY
 826 * means that RUNNING_P2P, PRE_COPY and PRE_COPY_P2P are supported
 827 * in addition to the STOP_COPY states.
 828 *
 829 * Other combinations of flags have behavior to be defined in the future.
 830 */
 831struct vfio_device_feature_migration {
 832        __aligned_u64 flags;
 833#define VFIO_MIGRATION_STOP_COPY        (1 << 0)
 834#define VFIO_MIGRATION_P2P              (1 << 1)
 835#define VFIO_MIGRATION_PRE_COPY         (1 << 2)
 836};
 837#define VFIO_DEVICE_FEATURE_MIGRATION 1
 838
 839/*
 840 * Upon VFIO_DEVICE_FEATURE_SET, execute a migration state change on the VFIO
 841 * device. The new state is supplied in device_state, see enum
 842 * vfio_device_mig_state for details
 843 *
 844 * The kernel migration driver must fully transition the device to the new state
 845 * value before the operation returns to the user.
 846 *
 847 * The kernel migration driver must not generate asynchronous device state
 848 * transitions outside of manipulation by the user or the VFIO_DEVICE_RESET
 849 * ioctl as described above.
 850 *
 851 * If this function fails then current device_state may be the original
 852 * operating state or some other state along the combination transition path.
 853 * The user can then decide if it should execute a VFIO_DEVICE_RESET, attempt
 854 * to return to the original state, or attempt to return to some other state
 855 * such as RUNNING or STOP.
 856 *
 857 * If the new_state starts a new data transfer session then the FD associated
 858 * with that session is returned in data_fd. The user is responsible to close
 859 * this FD when it is finished. The user must consider the migration data stream
 860 * carried over the FD to be opaque and must preserve the byte order of the
 861 * stream. The user is not required to preserve buffer segmentation when writing
 862 * the data stream during the RESUMING operation.
 863 *
 864 * Upon VFIO_DEVICE_FEATURE_GET, get the current migration state of the VFIO
 865 * device, data_fd will be -1.
 866 */
 867struct vfio_device_feature_mig_state {
 868        __u32 device_state; /* From enum vfio_device_mig_state */
 869        __s32 data_fd;
 870};
 871#define VFIO_DEVICE_FEATURE_MIG_DEVICE_STATE 2
 872
 873/*
 874 * The device migration Finite State Machine is described by the enum
 875 * vfio_device_mig_state. Some of the FSM arcs will create a migration data
 876 * transfer session by returning a FD, in this case the migration data will
 877 * flow over the FD using read() and write() as discussed below.
 878 *
 879 * There are 5 states to support VFIO_MIGRATION_STOP_COPY:
 880 *  RUNNING - The device is running normally
 881 *  STOP - The device does not change the internal or external state
 882 *  STOP_COPY - The device internal state can be read out
 883 *  RESUMING - The device is stopped and is loading a new internal state
 884 *  ERROR - The device has failed and must be reset
 885 *
 886 * And optional states to support VFIO_MIGRATION_P2P:
 887 *  RUNNING_P2P - RUNNING, except the device cannot do peer to peer DMA
 888 * And VFIO_MIGRATION_PRE_COPY:
 889 *  PRE_COPY - The device is running normally but tracking internal state
 890 *             changes
 891 * And VFIO_MIGRATION_P2P | VFIO_MIGRATION_PRE_COPY:
 892 *  PRE_COPY_P2P - PRE_COPY, except the device cannot do peer to peer DMA
 893 *
 894 * The FSM takes actions on the arcs between FSM states. The driver implements
 895 * the following behavior for the FSM arcs:
 896 *
 897 * RUNNING_P2P -> STOP
 898 * STOP_COPY -> STOP
 899 *   While in STOP the device must stop the operation of the device. The device
 900 *   must not generate interrupts, DMA, or any other change to external state.
 901 *   It must not change its internal state. When stopped the device and kernel
 902 *   migration driver must accept and respond to interaction to support external
 903 *   subsystems in the STOP state, for example PCI MSI-X and PCI config space.
 904 *   Failure by the user to restrict device access while in STOP must not result
 905 *   in error conditions outside the user context (ex. host system faults).
 906 *
 907 *   The STOP_COPY arc will terminate a data transfer session.
 908 *
 909 * RESUMING -> STOP
 910 *   Leaving RESUMING terminates a data transfer session and indicates the
 911 *   device should complete processing of the data delivered by write(). The
 912 *   kernel migration driver should complete the incorporation of data written
 913 *   to the data transfer FD into the device internal state and perform
 914 *   final validity and consistency checking of the new device state. If the
 915 *   user provided data is found to be incomplete, inconsistent, or otherwise
 916 *   invalid, the migration driver must fail the SET_STATE ioctl and
 917 *   optionally go to the ERROR state as described below.
 918 *
 919 *   While in STOP the device has the same behavior as other STOP states
 920 *   described above.
 921 *
 922 *   To abort a RESUMING session the device must be reset.
 923 *
 924 * PRE_COPY -> RUNNING
 925 * RUNNING_P2P -> RUNNING
 926 *   While in RUNNING the device is fully operational, the device may generate
 927 *   interrupts, DMA, respond to MMIO, all vfio device regions are functional,
 928 *   and the device may advance its internal state.
 929 *
 930 *   The PRE_COPY arc will terminate a data transfer session.
 931 *
 932 * PRE_COPY_P2P -> RUNNING_P2P
 933 * RUNNING -> RUNNING_P2P
 934 * STOP -> RUNNING_P2P
 935 *   While in RUNNING_P2P the device is partially running in the P2P quiescent
 936 *   state defined below.
 937 *
 938 *   The PRE_COPY_P2P arc will terminate a data transfer session.
 939 *
 940 * RUNNING -> PRE_COPY
 941 * RUNNING_P2P -> PRE_COPY_P2P
 942 * STOP -> STOP_COPY
 943 *   PRE_COPY, PRE_COPY_P2P and STOP_COPY form the "saving group" of states
 944 *   which share a data transfer session. Moving between these states alters
 945 *   what is streamed in session, but does not terminate or otherwise affect
 946 *   the associated fd.
 947 *
 948 *   These arcs begin the process of saving the device state and will return a
 949 *   new data_fd. The migration driver may perform actions such as enabling
 950 *   dirty logging of device state when entering PRE_COPY or PRE_COPY_P2P.
 951 *
 952 *   Each arc does not change the device operation, the device remains
 953 *   RUNNING, P2P quiesced or in STOP. The STOP_COPY state is described below
 954 *   in PRE_COPY_P2P -> STOP_COPY.
 955 *
 956 * PRE_COPY -> PRE_COPY_P2P
 957 *   Entering PRE_COPY_P2P continues all the behaviors of PRE_COPY above.
 958 *   However, while in the PRE_COPY_P2P state, the device is partially running
 959 *   in the P2P quiescent state defined below, like RUNNING_P2P.
 960 *
 961 * PRE_COPY_P2P -> PRE_COPY
 962 *   This arc allows returning the device to a full RUNNING behavior while
 963 *   continuing all the behaviors of PRE_COPY.
 964 *
 965 * PRE_COPY_P2P -> STOP_COPY
 966 *   While in the STOP_COPY state the device has the same behavior as STOP
 967 *   with the addition that the data transfers session continues to stream the
 968 *   migration state. End of stream on the FD indicates the entire device
 969 *   state has been transferred.
 970 *
 971 *   The user should take steps to restrict access to vfio device regions while
 972 *   the device is in STOP_COPY or risk corruption of the device migration data
 973 *   stream.
 974 *
 975 * STOP -> RESUMING
 976 *   Entering the RESUMING state starts a process of restoring the device state
 977 *   and will return a new data_fd. The data stream fed into the data_fd should
 978 *   be taken from the data transfer output of a single FD during saving from
 979 *   a compatible device. The migration driver may alter/reset the internal
 980 *   device state for this arc if required to prepare the device to receive the
 981 *   migration data.
 982 *
 983 * STOP_COPY -> PRE_COPY
 984 * STOP_COPY -> PRE_COPY_P2P
 985 *   These arcs are not permitted and return error if requested. Future
 986 *   revisions of this API may define behaviors for these arcs, in this case
 987 *   support will be discoverable by a new flag in
 988 *   VFIO_DEVICE_FEATURE_MIGRATION.
 989 *
 990 * any -> ERROR
 991 *   ERROR cannot be specified as a device state, however any transition request
 992 *   can be failed with an errno return and may then move the device_state into
 993 *   ERROR. In this case the device was unable to execute the requested arc and
 994 *   was also unable to restore the device to any valid device_state.
 995 *   To recover from ERROR VFIO_DEVICE_RESET must be used to return the
 996 *   device_state back to RUNNING.
 997 *
 998 * The optional peer to peer (P2P) quiescent state is intended to be a quiescent
 999 * state for the device for the purposes of managing multiple devices within a
1000 * user context where peer-to-peer DMA between devices may be active. The
1001 * RUNNING_P2P and PRE_COPY_P2P states must prevent the device from initiating
1002 * any new P2P DMA transactions. If the device can identify P2P transactions
1003 * then it can stop only P2P DMA, otherwise it must stop all DMA. The migration
1004 * driver must complete any such outstanding operations prior to completing the
1005 * FSM arc into a P2P state. For the purpose of specification the states
1006 * behave as though the device was fully running if not supported. As while in
1007 * STOP or STOP_COPY, the user must not touch the device, otherwise the state
1008 * can be exited.
1009 *
1010 * The remaining possible transitions are interpreted as combinations of the
1011 * above FSM arcs. As there are multiple paths through the FSM arcs the path
1012 * should be selected based on the following rules:
1013 *   - Select the shortest path.
1014 *   - The path cannot have saving group states as interior arcs, only
1015 *     starting/end states.
1016 * Refer to vfio_mig_get_next_state() for the result of the algorithm.
1017 *
1018 * The automatic transit through the FSM arcs that make up the combination
1019 * transition is invisible to the user. When working with combination arcs the
1020 * user may see any step along the path in the device_state if SET_STATE
1021 * fails. When handling these types of errors users should anticipate future
1022 * revisions of this protocol using new states and those states becoming
1023 * visible in this case.
1024 *
1025 * The optional states cannot be used with SET_STATE if the device does not
1026 * support them. The user can discover if these states are supported by using
1027 * VFIO_DEVICE_FEATURE_MIGRATION. By using combination transitions the user can
1028 * avoid knowing about these optional states if the kernel driver supports them.
1029 *
1030 * Arcs touching PRE_COPY and PRE_COPY_P2P are removed if support for PRE_COPY
1031 * is not present.
1032 */
1033enum vfio_device_mig_state {
1034        VFIO_DEVICE_STATE_ERROR = 0,
1035        VFIO_DEVICE_STATE_STOP = 1,
1036        VFIO_DEVICE_STATE_RUNNING = 2,
1037        VFIO_DEVICE_STATE_STOP_COPY = 3,
1038        VFIO_DEVICE_STATE_RESUMING = 4,
1039        VFIO_DEVICE_STATE_RUNNING_P2P = 5,
1040        VFIO_DEVICE_STATE_PRE_COPY = 6,
1041        VFIO_DEVICE_STATE_PRE_COPY_P2P = 7,
1042};
1043
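/*
 * Illustrative sketch (not part of this header): moving a device to
 * STOP_COPY and draining the saving stream.  save_fd is assumed to be a
 * file descriptor the user writes the migration data to.
 *
 *	struct vfio_device_feature *feature;
 *	struct vfio_device_feature_mig_state *mig;
 *	size_t argsz = sizeof(*feature) + sizeof(*mig);
 *	char buf[4096];
 *	ssize_t n;
 *
 *	feature = calloc(1, argsz);
 *	feature->argsz = argsz;
 *	feature->flags = VFIO_DEVICE_FEATURE_SET |
 *			 VFIO_DEVICE_FEATURE_MIG_DEVICE_STATE;
 *	mig = (struct vfio_device_feature_mig_state *)feature->data;
 *	mig->device_state = VFIO_DEVICE_STATE_STOP_COPY;
 *	if (ioctl(device, VFIO_DEVICE_FEATURE, feature))
 *		return -1;	// re-read device_state, possibly reset
 *
 *	while ((n = read(mig->data_fd, buf, sizeof(buf))) > 0)
 *		write(save_fd, buf, n);	// EOF: entire state transferred
 */
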
1044/**
1045 * VFIO_MIG_GET_PRECOPY_INFO - _IO(VFIO_TYPE, VFIO_BASE + 21)
1046 *
1047 * This ioctl is used on the migration data FD in the precopy phase of the
1048 * migration data transfer. It returns an estimate of the current data sizes
1049 * remaining to be transferred. It allows the user to judge when it is
1050 * appropriate to leave PRE_COPY for STOP_COPY.
1051 *
1052 * This ioctl is valid only in the PRE_COPY states and the kernel driver should
1053 * return -EINVAL from any other migration state.
1054 *
1055 * The vfio_precopy_info data structure returned by this ioctl provides
1056 * estimates of data available from the device during the PRE_COPY states.
1057 * This estimate is split into two categories, initial_bytes and
1058 * dirty_bytes.
1059 *
1060 * The initial_bytes field indicates the amount of initial precopy
1061 * data available from the device. This field should have a non-zero initial
1062 * value and decrease as migration data is read from the device.
1063 * It is recommended to leave PRE_COPY for STOP_COPY only after this field
1064 * reaches zero. Leaving PRE_COPY earlier might make things slower.
1065 *
1066 * The dirty_bytes field tracks device state changes relative to data
1067 * previously retrieved.  This field starts at zero and may increase as
1068 * the internal device state is modified or decrease as that modified
1069 * state is read from the device.
1070 *
1071 * Userspace may use the combination of these fields to estimate the
1072 * potential data size available during the PRE_COPY phases, as well as
1073 * trends relative to the rate the device is dirtying its internal
1074 * state, but these fields are not required to have any bearing relative
1075 * to the data size available during the STOP_COPY phase.
1076 *
1077 * Drivers have a lot of flexibility in when and what they transfer during the
1078 * PRE_COPY phase, and how they report this from VFIO_MIG_GET_PRECOPY_INFO.
1079 *
1080 * During pre-copy the migration data FD has a temporary "end of stream" that is
1081 * reached when both initial_bytes and dirty_bytes are zero. For instance, this
1082 * may indicate that the device is idle and not currently dirtying any internal
1083 * state. When read() is done on this temporary end of stream the kernel driver
1084 * should return ENOMSG from read(). Userspace can wait for more data (which may
1085 * never come) by using poll.
1086 *
1087 * Once in STOP_COPY the migration data FD has a permanent end of stream
1088 * signaled in the usual way by read() always returning 0 and poll always
1089 * returning readable. ENOMSG may not be returned in STOP_COPY.
1090 * Support for this ioctl is mandatory if a driver claims to support
1091 * VFIO_MIGRATION_PRE_COPY.
1092 *
1093 * Return: 0 on success, -1 and errno set on failure.
1094 */
1095struct vfio_precopy_info {
1096        __u32 argsz;
1097        __u32 flags;
1098        __aligned_u64 initial_bytes;
1099        __aligned_u64 dirty_bytes;
1100};
1101
1102#define VFIO_MIG_GET_PRECOPY_INFO _IO(VFIO_TYPE, VFIO_BASE + 21)
1103
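/*
 * Illustrative sketch (not part of this header): polling pre-copy
 * progress on the migration data fd returned when entering PRE_COPY.
 *
 *	struct vfio_precopy_info precopy = { .argsz = sizeof(precopy) };
 *
 *	do {
 *		if (ioctl(data_fd, VFIO_MIG_GET_PRECOPY_INFO, &precopy))
 *			break;
 *		// read() and forward available migration data here;
 *		// a read() failing with ENOMSG means "no data right now".
 *	} while (precopy.initial_bytes);	// then move to STOP_COPY
 */
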
1104/*
1105 * Upon VFIO_DEVICE_FEATURE_SET, allow the device to be moved into a low power
1106 * state with the platform-based power management.  Device use of lower power
1107 * states depends on factors managed by the runtime power management core,
1108 * including system level support and coordinating support among dependent
1109 * devices.  Enabling device low power entry does not guarantee lower power
1110 * usage by the device, nor is a mechanism provided through this feature to
1111 * know the current power state of the device.  If any device access happens
1112 * (either from the host or through the vfio uAPI) when the device is in the
1113 * low power state, then the host will move the device out of the low power
1114 * state as necessary prior to the access.  Once the access is completed, the
1115 * device may re-enter the low power state.  For single shot low power support
1116 * with wake-up notification, see
1117 * VFIO_DEVICE_FEATURE_LOW_POWER_ENTRY_WITH_WAKEUP below.  Access to mmap'd
1118 * device regions is disabled on LOW_POWER_ENTRY and may only be resumed after
1119 * calling LOW_POWER_EXIT.
1120 */
1121#define VFIO_DEVICE_FEATURE_LOW_POWER_ENTRY 3
1122
1123/*
1124 * This device feature has the same behavior as
1125 * VFIO_DEVICE_FEATURE_LOW_POWER_ENTRY with the exception that the user
1126 * provides an eventfd for wake-up notification.  When the device moves out of
1127 * the low power state for the wake-up, the host will not allow the device to
1128 * re-enter a low power state without a subsequent user call to one of the low
1129 * power entry device feature IOCTLs.  Access to mmap'd device regions is
1130 * disabled on LOW_POWER_ENTRY_WITH_WAKEUP and may only be resumed after the
1131 * low power exit.  The low power exit can happen either through LOW_POWER_EXIT
1132 * or through any other access (where the wake-up notification has been
1133 * generated).  The access to mmap'd device regions will not trigger low power
1134 * exit.
1135 *
1136 * The notification through the provided eventfd will be generated only when
1137 * the device has entered and is resumed from a low power state after
1138 * calling this device feature IOCTL.  A device that has not entered low power
1139 * state, as managed through the runtime power management core, will not
1140 * generate a notification through the provided eventfd on access.  Calling the
1141 * LOW_POWER_EXIT feature is optional in the case where notification has been
1142 * signaled on the provided eventfd that a resume from low power has occurred.
1143 */
1144struct vfio_device_low_power_entry_with_wakeup {
1145        __s32 wakeup_eventfd;
1146        __u32 reserved;
1147};
1148
1149#define VFIO_DEVICE_FEATURE_LOW_POWER_ENTRY_WITH_WAKEUP 4
1150
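/*
 * Illustrative sketch (not part of this header): arming low power entry
 * with a wake-up eventfd.  evtfd is assumed to come from eventfd(0, 0).
 *
 *	struct vfio_device_feature *feature;
 *	struct vfio_device_low_power_entry_with_wakeup *entry;
 *	size_t argsz = sizeof(*feature) + sizeof(*entry);
 *
 *	feature = calloc(1, argsz);
 *	feature->argsz = argsz;
 *	feature->flags = VFIO_DEVICE_FEATURE_SET |
 *			 VFIO_DEVICE_FEATURE_LOW_POWER_ENTRY_WITH_WAKEUP;
 *	entry = (struct vfio_device_low_power_entry_with_wakeup *)feature->data;
 *	entry->wakeup_eventfd = evtfd;
 *	ioctl(device, VFIO_DEVICE_FEATURE, feature);
 *	// a later read() on evtfd indicates the device resumed from low power
 */
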
1151/*
1152 * Upon VFIO_DEVICE_FEATURE_SET, disallow use of device low power states as
1153 * previously enabled via VFIO_DEVICE_FEATURE_LOW_POWER_ENTRY or
1154 * VFIO_DEVICE_FEATURE_LOW_POWER_ENTRY_WITH_WAKEUP device features.
1155 * This device feature IOCTL may itself generate a wakeup eventfd notification
1156 * in the latter case if the device had previously entered a low power state.
1157 */
1158#define VFIO_DEVICE_FEATURE_LOW_POWER_EXIT 5
1159
1160/*
1161 * Upon VFIO_DEVICE_FEATURE_SET start/stop device DMA logging.
1162 * VFIO_DEVICE_FEATURE_PROBE can be used to detect if the device supports
1163 * DMA logging.
1164 *
1165 * DMA logging allows a device to internally record what DMAs the device is
1166 * initiating and report them back to userspace. It is part of the VFIO
1167 * migration infrastructure that allows implementing dirty page tracking
1168 * during the pre copy phase of live migration. Only DMA WRITEs are logged,
1169 * and this API is not connected to VFIO_DEVICE_FEATURE_MIG_DEVICE_STATE.
1170 *
1171 * When DMA logging is started a range of IOVAs to monitor is provided and the
1172 * device can optimize its logging to cover only the IOVA range given. Each
1173 * DMA that the device initiates inside the range will be logged by the device
1174 * for later retrieval.
1175 *
1176 * page_size is an input that hints what tracking granularity the device
1177 * should try to achieve. If the device cannot do the hinted page size then
1178 * it is the driver's choice which page size to pick based on its support.
1179 * On output the device will return the page size it selected.
1180 *
1181 * ranges is a pointer to an array of
1182 * struct vfio_device_feature_dma_logging_range.
1183 *
1184 * The core kernel code guarantees to support a minimum num_ranges that fits
1185 * into a single kernel page. User space can try higher values but should give
1186 * up if that cannot be achieved due to driver limitations.
1187 *
1188 * A single call to start device DMA logging can be issued and a matching stop
1189 * should follow at the end. Another start is not allowed in the meantime.
1190 */
1191struct vfio_device_feature_dma_logging_control {
1192        __aligned_u64 page_size;
1193        __u32 num_ranges;
1194        __u32 __reserved;
1195        __aligned_u64 ranges;
1196};
1197
1198struct vfio_device_feature_dma_logging_range {
1199        __aligned_u64 iova;
1200        __aligned_u64 length;
1201};
1202
1203#define VFIO_DEVICE_FEATURE_DMA_LOGGING_START 6
1204
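/*
 * Illustrative sketch (not part of this header): starting dirty tracking
 * over a single 1 GiB IOVA range with 4 KiB granularity.  The range and
 * page size are examples only.
 *
 *	struct vfio_device_feature *feature;
 *	struct vfio_device_feature_dma_logging_control *ctrl;
 *	struct vfio_device_feature_dma_logging_range range = {
 *		.iova	= 0,
 *		.length	= 1ULL << 30,
 *	};
 *	size_t argsz = sizeof(*feature) + sizeof(*ctrl);
 *
 *	feature = calloc(1, argsz);
 *	feature->argsz = argsz;
 *	feature->flags = VFIO_DEVICE_FEATURE_SET |
 *			 VFIO_DEVICE_FEATURE_DMA_LOGGING_START;
 *	ctrl = (struct vfio_device_feature_dma_logging_control *)feature->data;
 *	ctrl->page_size = 4096;
 *	ctrl->num_ranges = 1;
 *	ctrl->ranges = (__u64)(uintptr_t)&range;
 *	ioctl(device, VFIO_DEVICE_FEATURE, feature);
 */
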
1205/*
1206 * Upon VFIO_DEVICE_FEATURE_SET stop device DMA logging that was started
1207 * by VFIO_DEVICE_FEATURE_DMA_LOGGING_START
1208 */
1209#define VFIO_DEVICE_FEATURE_DMA_LOGGING_STOP 7
1210
1211/*
1212 * Upon VFIO_DEVICE_FEATURE_GET read back and clear the device DMA log
1213 *
1214 * Query the device's DMA log for written pages within the given IOVA range.
1215 * During querying the log is cleared for the IOVA range.
1216 *
1217 * bitmap is a pointer to an array of u64s that will hold the output bitmap
1218 * with 1 bit reporting a page_size unit of IOVA. The mapping of IOVA to bits
1219 * is given by:
1220 *  bitmap[((addr - iova) / page_size) / 64] &
1221 *      (1ULL << (((addr - iova) / page_size) % 64))
1221 *
1222 * The input page_size can be any power of two value and does not have to
1223 * match the value given to VFIO_DEVICE_FEATURE_DMA_LOGGING_START. The driver
1224 * will format its internal logging to match the reporting page size, possibly
1225 * by replicating bits if the internal page size is lower than requested.
1226 *
1227 * The LOGGING_REPORT will only set bits in the bitmap and never clear or
1228 * perform any initialization of the user provided bitmap.
1229 *
1230 * If any error is returned userspace should assume that the dirty log is
1231 * corrupted. Error recovery is to consider all memory dirty and try to
1232 * restart the dirty tracking, or to abort/restart the whole migration.
1233 *
1234 * If DMA logging is not enabled, an error will be returned.
1235 *
1236 */
1237struct vfio_device_feature_dma_logging_report {
1238        __aligned_u64 iova;
1239        __aligned_u64 length;
1240        __aligned_u64 page_size;
1241        __aligned_u64 bitmap;
1242};
1243
1244#define VFIO_DEVICE_FEATURE_DMA_LOGGING_REPORT 8
1245
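/*
 * Illustrative usage sketch (not part of the UAPI): reading back the dirty
 * log via VFIO_DEVICE_FEATURE_GET while logging is running.  It assumes the
 * VFIO_DEVICE_FEATURE ioctl and struct vfio_device_feature defined earlier in
 * this header; device_fd and the range/page size values are hypothetical, and
 * on failure the caller would fall back as described above (treat everything
 * as dirty or restart tracking).
 *
 *  __u64 iova = 0, length = 1ULL << 30, page_size = 4096;
 *  __u64 *bits = calloc(length / page_size / 64, sizeof(__u64));
 *  size_t sz = sizeof(struct vfio_device_feature) +
 *              sizeof(struct vfio_device_feature_dma_logging_report);
 *  struct vfio_device_feature *feature = calloc(1, sz);
 *  struct vfio_device_feature_dma_logging_report *report =
 *          (void *)feature->data;
 *
 *  feature->argsz = sz;
 *  feature->flags = VFIO_DEVICE_FEATURE_GET |
 *                   VFIO_DEVICE_FEATURE_DMA_LOGGING_REPORT;
 *  report->iova = iova;
 *  report->length = length;
 *  report->page_size = page_size;
 *  report->bitmap = (__u64)(uintptr_t)bits;
 *  if (ioctl(device_fd, VFIO_DEVICE_FEATURE, feature))
 *          perror("DMA_LOGGING_REPORT");
 */
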
1246/*
1247 * Upon VFIO_DEVICE_FEATURE_GET read back the estimated data length that will
1248 * be required to complete the stop copy phase.
1249 *
1250 * Note: Can be called in any device state.
1251 */
1252
1253struct vfio_device_feature_mig_data_size {
1254        __aligned_u64 stop_copy_length;
1255};
1256
1257#define VFIO_DEVICE_FEATURE_MIG_DATA_SIZE 9
1258
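/*
 * Illustrative usage sketch (not part of the UAPI): querying the estimated
 * stop copy size.  It assumes the VFIO_DEVICE_FEATURE ioctl and struct
 * vfio_device_feature defined earlier in this header; device_fd is
 * hypothetical.
 *
 *  size_t sz = sizeof(struct vfio_device_feature) +
 *              sizeof(struct vfio_device_feature_mig_data_size);
 *  struct vfio_device_feature *feature = calloc(1, sz);
 *  struct vfio_device_feature_mig_data_size *size = (void *)feature->data;
 *
 *  feature->argsz = sz;
 *  feature->flags = VFIO_DEVICE_FEATURE_GET |
 *                   VFIO_DEVICE_FEATURE_MIG_DATA_SIZE;
 *  if (!ioctl(device_fd, VFIO_DEVICE_FEATURE, feature))
 *          printf("stop copy estimate: %llu bytes\n",
 *                 (unsigned long long)size->stop_copy_length);
 */
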
1259/* -------- API for Type1 VFIO IOMMU -------- */
1260
1261/**
1262 * VFIO_IOMMU_GET_INFO - _IOR(VFIO_TYPE, VFIO_BASE + 12, struct vfio_iommu_info)
1263 *
1264 * Retrieve information about the IOMMU object. Fills in provided
1265 * struct vfio_iommu_info. Caller sets argsz.
1266 *
1267 * XXX Should we do these by CHECK_EXTENSION too?
1268 */
1269struct vfio_iommu_type1_info {
1270        __u32   argsz;
1271        __u32   flags;
1272#define VFIO_IOMMU_INFO_PGSIZES (1 << 0)        /* supported page sizes info */
1273#define VFIO_IOMMU_INFO_CAPS    (1 << 1)        /* Info supports caps */
1274        __u64   iova_pgsizes;   /* Bitmap of supported page sizes */
1275        __u32   cap_offset;     /* Offset within info struct of first cap */
1276};
1277
1278/*
1279 * The IOVA capability allows reporting the valid IOVA range(s),
1280 * excluding any non-relaxable reserved regions exposed by
1281 * devices attached to the container. Any DMA map attempt
1282 * outside the valid iova range will return an error.
1283 *
1284 * The structures below define version 1 of this capability.
1285 */
1286#define VFIO_IOMMU_TYPE1_INFO_CAP_IOVA_RANGE  1
1287
1288struct vfio_iova_range {
1289        __u64   start;
1290        __u64   end;
1291};
1292
1293struct vfio_iommu_type1_info_cap_iova_range {
1294        struct  vfio_info_cap_header header;
1295        __u32   nr_iovas;
1296        __u32   reserved;
1297        struct  vfio_iova_range iova_ranges[];
1298};
1299
1300/*
1301 * The migration capability allows reporting of supported migration features.
1302 *
1303 * The structures below define version 1 of this capability.
1304 *
1305 * The existence of this capability indicates that the IOMMU kernel driver
1306 * supports dirty page logging.
1307 *
1308 * pgsize_bitmap: the kernel driver returns a bitmap of the page sizes
1309 * supported for dirty page logging.
1310 * max_dirty_bitmap_size: the kernel driver returns the maximum supported dirty
1311 * bitmap size in bytes that can be used by user applications when getting the
1312 * dirty bitmap.
1313 */
1314#define VFIO_IOMMU_TYPE1_INFO_CAP_MIGRATION  2
1315
1316struct vfio_iommu_type1_info_cap_migration {
1317        struct  vfio_info_cap_header header;
1318        __u32   flags;
1319        __u64   pgsize_bitmap;
1320        __u64   max_dirty_bitmap_size;          /* in bytes */
1321};
1322
1323/*
1324 * The DMA available capability allows reporting the current number of
1325 * simultaneously outstanding DMA mappings that are allowed.
1326 *
1327 * The structure below defines version 1 of this capability.
1328 *
1329 * avail: specifies the current number of outstanding DMA mappings allowed.
1330 */
1331#define VFIO_IOMMU_TYPE1_INFO_DMA_AVAIL 3
1332
1333struct vfio_iommu_type1_info_dma_avail {
1334        struct  vfio_info_cap_header header;
1335        __u32   avail;
1336};
1337
1338#define VFIO_IOMMU_GET_INFO _IO(VFIO_TYPE, VFIO_BASE + 12)
1339
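/*
 * Illustrative usage sketch (not part of the UAPI): querying the type1 IOMMU
 * info and walking the capability chain to find the IOVA range capability.
 * container_fd is hypothetical and the fixed 4 KiB buffer is a shortcut; a
 * careful caller would re-issue the ioctl with the argsz the kernel reports
 * back when the buffer is too small.
 *
 *  struct vfio_iommu_type1_info *info = calloc(1, 4096);
 *
 *  info->argsz = 4096;
 *  if (!ioctl(container_fd, VFIO_IOMMU_GET_INFO, info) &&
 *      (info->flags & VFIO_IOMMU_INFO_CAPS)) {
 *          __u32 off = info->cap_offset;
 *
 *          while (off) {
 *                  struct vfio_info_cap_header *hdr =
 *                          (void *)((char *)info + off);
 *
 *                  if (hdr->id == VFIO_IOMMU_TYPE1_INFO_CAP_IOVA_RANGE) {
 *                          struct vfio_iommu_type1_info_cap_iova_range *cap =
 *                                  (void *)hdr;
 *
 *                          printf("%u valid iova ranges\n", cap->nr_iovas);
 *                  }
 *                  off = hdr->next;
 *          }
 *  }
 */
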
1340/**
1341 * VFIO_IOMMU_MAP_DMA - _IOW(VFIO_TYPE, VFIO_BASE + 13, struct vfio_dma_map)
1342 *
1343 * Map process virtual addresses to IO virtual addresses using the
1344 * provided struct vfio_dma_map. Caller sets argsz. READ and/or WRITE required.
1345 *
1346 * If flags & VFIO_DMA_MAP_FLAG_VADDR, update the base vaddr for iova, and
1347 * unblock translation of host virtual addresses in the iova range.  The vaddr
1348 * must have previously been invalidated with VFIO_DMA_UNMAP_FLAG_VADDR.  To
1349 * maintain memory consistency within the user application, the updated vaddr
1350 * must address the same memory object as originally mapped.  Failure to do so
1351 * will result in user memory corruption and/or device misbehavior.  iova and
1352 * size must match those in the original MAP_DMA call.  Protection is not
1353 * changed, and the READ & WRITE flags must be 0.
1354 */
1355struct vfio_iommu_type1_dma_map {
1356        __u32   argsz;
1357        __u32   flags;
1358#define VFIO_DMA_MAP_FLAG_READ (1 << 0)         /* readable from device */
1359#define VFIO_DMA_MAP_FLAG_WRITE (1 << 1)        /* writable from device */
1360#define VFIO_DMA_MAP_FLAG_VADDR (1 << 2)
1361        __u64   vaddr;                          /* Process virtual address */
1362        __u64   iova;                           /* IO virtual address */
1363        __u64   size;                           /* Size of mapping (bytes) */
1364};
1365
1366#define VFIO_IOMMU_MAP_DMA _IO(VFIO_TYPE, VFIO_BASE + 13)
1367
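/*
 * Illustrative usage sketch (not part of the UAPI): mapping an anonymous
 * buffer for device DMA through the container.  container_fd and the buffer
 * size/IOVA are hypothetical.
 *
 *  struct vfio_iommu_type1_dma_map map = {
 *          .argsz = sizeof(map),
 *          .flags = VFIO_DMA_MAP_FLAG_READ | VFIO_DMA_MAP_FLAG_WRITE,
 *          .iova = 0x100000,
 *          .size = 1ULL << 20,
 *  };
 *
 *  map.vaddr = (__u64)(uintptr_t)mmap(NULL, map.size,
 *                                     PROT_READ | PROT_WRITE,
 *                                     MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
 *  if (ioctl(container_fd, VFIO_IOMMU_MAP_DMA, &map))
 *          perror("VFIO_IOMMU_MAP_DMA");
 */
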
1368struct vfio_bitmap {
1369        __u64        pgsize;    /* page size for bitmap in bytes */
1370        __u64        size;      /* in bytes */
1371        __u64 *data;    /* one bit per page */
1372};
1373
1374/**
1375 * VFIO_IOMMU_UNMAP_DMA - _IOWR(VFIO_TYPE, VFIO_BASE + 14,
1376 *                                                      struct vfio_dma_unmap)
1377 *
1378 * Unmap IO virtual addresses using the provided struct vfio_dma_unmap.
1379 * Caller sets argsz.  The actual unmapped size is returned in the size
1380 * field.  No guarantee is made to the user that arbitrary unmaps of iova
1381 * or size different from those used in the original mapping call will
1382 * succeed.
1383 *
1384 * VFIO_DMA_UNMAP_FLAG_GET_DIRTY_BITMAP should be set to get the dirty bitmap
1385 * before unmapping IO virtual addresses. When this flag is set, the user must
1386 * provide a struct vfio_bitmap in data[]. The user must provide zeroed memory
1387 * via vfio_bitmap.data and its size in the vfio_bitmap.size field.
1388 * A bit in the bitmap represents one page of the user-provided page size in
1389 * the vfio_bitmap.pgsize field, consecutively starting from the iova offset.
1390 * A set bit indicates that the page at that offset from iova is dirty. A bitmap
1391 * of the pages in the range of the unmapped size is returned in the
1392 * user-provided vfio_bitmap.data.
1393 *
1394 * If flags & VFIO_DMA_UNMAP_FLAG_ALL, unmap all addresses.  iova and size
1395 * must be 0.  This cannot be combined with the get-dirty-bitmap flag.
1396 *
1397 * If flags & VFIO_DMA_UNMAP_FLAG_VADDR, do not unmap, but invalidate host
1398 * virtual addresses in the iova range.  Tasks that attempt to translate an
1399 * iova's vaddr will block.  DMA to already-mapped pages continues.  This
1400 * cannot be combined with the get-dirty-bitmap flag.
1401 */
1402struct vfio_iommu_type1_dma_unmap {
1403        __u32   argsz;
1404        __u32   flags;
1405#define VFIO_DMA_UNMAP_FLAG_GET_DIRTY_BITMAP (1 << 0)
1406#define VFIO_DMA_UNMAP_FLAG_ALL              (1 << 1)
1407#define VFIO_DMA_UNMAP_FLAG_VADDR            (1 << 2)
1408        __u64   iova;                           /* IO virtual address */
1409        __u64   size;                           /* Size of mapping (bytes) */
1410        __u8    data[];
1411};
1412
1413#define VFIO_IOMMU_UNMAP_DMA _IO(VFIO_TYPE, VFIO_BASE + 14)
1414
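/*
 * Illustrative usage sketch (not part of the UAPI): unmapping an IOVA range
 * while collecting its dirty bitmap.  container_fd and the range/page size
 * are hypothetical; the bitmap memory is zero-allocated as required above.
 *
 *  __u64 iova = 0x100000, size = 1ULL << 20, pgsize = 4096;
 *  size_t sz = sizeof(struct vfio_iommu_type1_dma_unmap) +
 *              sizeof(struct vfio_bitmap);
 *  struct vfio_iommu_type1_dma_unmap *unmap = calloc(1, sz);
 *  struct vfio_bitmap *bitmap = (void *)unmap->data;
 *
 *  unmap->argsz = sz;
 *  unmap->flags = VFIO_DMA_UNMAP_FLAG_GET_DIRTY_BITMAP;
 *  unmap->iova = iova;
 *  unmap->size = size;
 *  bitmap->pgsize = pgsize;
 *  bitmap->size = size / pgsize / 8;
 *  bitmap->data = calloc(1, bitmap->size);
 *  if (ioctl(container_fd, VFIO_IOMMU_UNMAP_DMA, unmap))
 *          perror("VFIO_IOMMU_UNMAP_DMA");
 */
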
1415/*
1416 * IOCTLs to enable/disable IOMMU container usage.
1417 * No parameters are supported.
1418 */
1419#define VFIO_IOMMU_ENABLE       _IO(VFIO_TYPE, VFIO_BASE + 15)
1420#define VFIO_IOMMU_DISABLE      _IO(VFIO_TYPE, VFIO_BASE + 16)
1421
1422/**
1423 * VFIO_IOMMU_DIRTY_PAGES - _IOWR(VFIO_TYPE, VFIO_BASE + 17,
1424 *                                     struct vfio_iommu_type1_dirty_bitmap)
1425 * This IOCTL is used for dirty page logging.
1426 * The caller should set a flag depending on which operation to perform, as
1427 * detailed below:
1428 *
1429 * Calling the IOCTL with the VFIO_IOMMU_DIRTY_PAGES_FLAG_START flag set
1430 * instructs the IOMMU driver to log pages that are dirtied or potentially
1431 * dirtied by the device; it is designed to be used when a migration is in
1432 * progress. Dirty pages are logged until logging is disabled by the user
1433 * application calling the IOCTL with the VFIO_IOMMU_DIRTY_PAGES_FLAG_STOP flag.
1434 *
1435 * Calling the IOCTL with the VFIO_IOMMU_DIRTY_PAGES_FLAG_STOP flag set
1436 * instructs the IOMMU driver to stop logging dirtied pages.
1437 *
1438 * Calling the IOCTL with the VFIO_IOMMU_DIRTY_PAGES_FLAG_GET_BITMAP flag set
1439 * returns the dirty pages bitmap of the IOMMU container for a given IOVA
1440 * range. The user must specify the IOVA range and the pgsize through the
1441 * structure vfio_iommu_type1_dirty_bitmap_get in the data[] portion. This
1442 * interface supports getting a bitmap of the smallest supported pgsize only
1443 * and can be modified in the future to get a bitmap of any specified
1444 * supported pgsize. The user must provide a zeroed memory area for the bitmap
1445 * and specify its size in bitmap.size. One bit is used to represent one page,
1446 * consecutively starting from the iova offset. The user should provide the
1447 * page size in the bitmap.pgsize field. A bit set in the bitmap indicates
1448 * that the page at that offset from iova is dirty. The caller must set argsz
1449 * to a value including the size of structure vfio_iommu_type1_dirty_bitmap_get,
1450 * but excluding the size of the actual bitmap. If dirty pages logging is not
1451 * enabled, an error will be returned.
1452 *
1453 * Only one of the flags _START, _STOP and _GET may be specified at a time.
1454 *
1455 */
1456struct vfio_iommu_type1_dirty_bitmap {
1457        __u32        argsz;
1458        __u32        flags;
1459#define VFIO_IOMMU_DIRTY_PAGES_FLAG_START       (1 << 0)
1460#define VFIO_IOMMU_DIRTY_PAGES_FLAG_STOP        (1 << 1)
1461#define VFIO_IOMMU_DIRTY_PAGES_FLAG_GET_BITMAP  (1 << 2)
1462        __u8         data[];
1463};
1464
1465struct vfio_iommu_type1_dirty_bitmap_get {
1466        __u64              iova;        /* IO virtual address */
1467        __u64              size;        /* Size of iova range */
1468        struct vfio_bitmap bitmap;
1469};
1470
1471#define VFIO_IOMMU_DIRTY_PAGES             _IO(VFIO_TYPE, VFIO_BASE + 17)
1472
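/*
 * Illustrative usage sketch (not part of the UAPI): starting dirty page
 * tracking on the container and then fetching a bitmap for one IOVA range.
 * container_fd and the range/page size are hypothetical; argsz covers the
 * header and the get structure but not the bitmap memory, as noted above.
 *
 *  struct vfio_iommu_type1_dirty_bitmap start = {
 *          .argsz = sizeof(start),
 *          .flags = VFIO_IOMMU_DIRTY_PAGES_FLAG_START,
 *  };
 *  ioctl(container_fd, VFIO_IOMMU_DIRTY_PAGES, &start);
 *
 *  size_t sz = sizeof(struct vfio_iommu_type1_dirty_bitmap) +
 *              sizeof(struct vfio_iommu_type1_dirty_bitmap_get);
 *  struct vfio_iommu_type1_dirty_bitmap *db = calloc(1, sz);
 *  struct vfio_iommu_type1_dirty_bitmap_get *get = (void *)db->data;
 *
 *  db->argsz = sz;
 *  db->flags = VFIO_IOMMU_DIRTY_PAGES_FLAG_GET_BITMAP;
 *  get->iova = 0x100000;
 *  get->size = 1ULL << 20;
 *  get->bitmap.pgsize = 4096;
 *  get->bitmap.size = get->size / get->bitmap.pgsize / 8;
 *  get->bitmap.data = calloc(1, get->bitmap.size);
 *  if (ioctl(container_fd, VFIO_IOMMU_DIRTY_PAGES, db))
 *          perror("VFIO_IOMMU_DIRTY_PAGES_FLAG_GET_BITMAP");
 */
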
1473/* -------- Additional API for SPAPR TCE (Server POWERPC) IOMMU -------- */
1474
1475/*
1476 * The SPAPR TCE DDW info struct provides information about
1477 * the details of the Dynamic DMA window capability.
1478 *
1479 * @pgsizes contains a page size bitmask, 4K/64K/16M are supported.
1480 * @max_dynamic_windows_supported tells the maximum number of windows
1481 * which the platform can create.
1482 * @levels tells the maximum number of levels in multi-level IOMMU tables;
1483 * this allows splitting a table into smaller chunks which reduces
1484 * the amount of physically contiguous memory required for the table.
1485 */
1486struct vfio_iommu_spapr_tce_ddw_info {
1487        __u64 pgsizes;                  /* Bitmap of supported page sizes */
1488        __u32 max_dynamic_windows_supported;
1489        __u32 levels;
1490};
1491
1492/*
1493 * The SPAPR TCE info struct provides the information about the PCI bus
1494 * address ranges available for DMA; these values are programmed into
1495 * the hardware so the guest has to know that information.
1496 *
1497 * The DMA 32 bit window start is an absolute PCI bus address.
1498 * The IOVA addresses passed via map/unmap ioctls are absolute PCI bus
1499 * addresses too so the window works as a filter rather than an offset
1500 * for IOVA addresses.
1501 *
1502 * Flags supported:
1503 * - VFIO_IOMMU_SPAPR_INFO_DDW: informs the userspace that dynamic DMA windows
1504 *   (DDW) support is present. @ddw is only supported when DDW is present.
1505 */
1506struct vfio_iommu_spapr_tce_info {
1507        __u32 argsz;
1508        __u32 flags;
1509#define VFIO_IOMMU_SPAPR_INFO_DDW       (1 << 0)        /* DDW supported */
1510        __u32 dma32_window_start;       /* 32 bit window start (bytes) */
1511        __u32 dma32_window_size;        /* 32 bit window size (bytes) */
1512        struct vfio_iommu_spapr_tce_ddw_info ddw;
1513};
1514
1515#define VFIO_IOMMU_SPAPR_TCE_GET_INFO   _IO(VFIO_TYPE, VFIO_BASE + 12)
1516
1517/*
1518 * EEH PE operation struct provides ways to:
1519 * - enable/disable EEH functionality;
1520 * - unfreeze IO/DMA for frozen PE;
1521 * - read PE state;
1522 * - reset PE;
1523 * - configure PE;
1524 * - inject EEH error.
1525 */
1526struct vfio_eeh_pe_err {
1527        __u32 type;
1528        __u32 func;
1529        __u64 addr;
1530        __u64 mask;
1531};
1532
1533struct vfio_eeh_pe_op {
1534        __u32 argsz;
1535        __u32 flags;
1536        __u32 op;
1537        union {
1538                struct vfio_eeh_pe_err err;
1539        };
1540};
1541
1542#define VFIO_EEH_PE_DISABLE             0       /* Disable EEH functionality */
1543#define VFIO_EEH_PE_ENABLE              1       /* Enable EEH functionality  */
1544#define VFIO_EEH_PE_UNFREEZE_IO         2       /* Enable IO for frozen PE   */
1545#define VFIO_EEH_PE_UNFREEZE_DMA        3       /* Enable DMA for frozen PE  */
1546#define VFIO_EEH_PE_GET_STATE           4       /* PE state retrieval        */
1547#define  VFIO_EEH_PE_STATE_NORMAL       0       /* PE in functional state    */
1548#define  VFIO_EEH_PE_STATE_RESET        1       /* PE reset in progress      */
1549#define  VFIO_EEH_PE_STATE_STOPPED      2       /* Stopped DMA and IO        */
1550#define  VFIO_EEH_PE_STATE_STOPPED_DMA  4       /* Stopped DMA only          */
1551#define  VFIO_EEH_PE_STATE_UNAVAIL      5       /* State unavailable         */
1552#define VFIO_EEH_PE_RESET_DEACTIVATE    5       /* Deassert PE reset         */
1553#define VFIO_EEH_PE_RESET_HOT           6       /* Assert hot reset          */
1554#define VFIO_EEH_PE_RESET_FUNDAMENTAL   7       /* Assert fundamental reset  */
1555#define VFIO_EEH_PE_CONFIGURE           8       /* PE configuration          */
1556#define VFIO_EEH_PE_INJECT_ERR          9       /* Inject EEH error          */
1557
1558#define VFIO_EEH_PE_OP                  _IO(VFIO_TYPE, VFIO_BASE + 21)
1559
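/*
 * Illustrative usage sketch (not part of the UAPI), based on the VFIO EEH
 * documentation: enabling EEH on the container and unfreezing a PE after its
 * error has been handled.  container_fd is hypothetical.
 *
 *  struct vfio_eeh_pe_op op = { .argsz = sizeof(op), .flags = 0 };
 *
 *  op.op = VFIO_EEH_PE_ENABLE;
 *  if (ioctl(container_fd, VFIO_EEH_PE_OP, &op))
 *          perror("VFIO_EEH_PE_ENABLE");
 *
 *  op.op = VFIO_EEH_PE_UNFREEZE_IO;
 *  ioctl(container_fd, VFIO_EEH_PE_OP, &op);
 *  op.op = VFIO_EEH_PE_UNFREEZE_DMA;
 *  ioctl(container_fd, VFIO_EEH_PE_OP, &op);
 */
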
1560/**
1561 * VFIO_IOMMU_SPAPR_REGISTER_MEMORY - _IOW(VFIO_TYPE, VFIO_BASE + 17, struct vfio_iommu_spapr_register_memory)
1562 *
1563 * Registers user space memory where DMA is allowed. It pins
1564 * user pages and performs the locked memory accounting so
1565 * that subsequent VFIO_IOMMU_MAP_DMA/VFIO_IOMMU_UNMAP_DMA
1566 * calls are faster.
1567 */
1568struct vfio_iommu_spapr_register_memory {
1569        __u32   argsz;
1570        __u32   flags;
1571        __u64   vaddr;                          /* Process virtual address */
1572        __u64   size;                           /* Size of mapping (bytes) */
1573};
1574#define VFIO_IOMMU_SPAPR_REGISTER_MEMORY        _IO(VFIO_TYPE, VFIO_BASE + 17)
1575
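/*
 * Illustrative usage sketch (not part of the UAPI): pre-registering guest
 * memory on a sPAPR v2 container so later map/unmap calls are cheaper.
 * container_fd, guest_ram and guest_ram_size are hypothetical; the same
 * structure is later used with VFIO_IOMMU_SPAPR_UNREGISTER_MEMORY.
 *
 *  struct vfio_iommu_spapr_register_memory reg = {
 *          .argsz = sizeof(reg),
 *          .vaddr = (__u64)(uintptr_t)guest_ram,
 *          .size = guest_ram_size,
 *  };
 *
 *  if (ioctl(container_fd, VFIO_IOMMU_SPAPR_REGISTER_MEMORY, &reg))
 *          perror("VFIO_IOMMU_SPAPR_REGISTER_MEMORY");
 */
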
1576/**
1577 * VFIO_IOMMU_SPAPR_UNREGISTER_MEMORY - _IOW(VFIO_TYPE, VFIO_BASE + 18, struct vfio_iommu_spapr_register_memory)
1578 *
1579 * Unregisters user space memory registered with
1580 * VFIO_IOMMU_SPAPR_REGISTER_MEMORY.
1581 * Uses vfio_iommu_spapr_register_memory for parameters.
1582 */
1583#define VFIO_IOMMU_SPAPR_UNREGISTER_MEMORY      _IO(VFIO_TYPE, VFIO_BASE + 18)
1584
1585/**
1586 * VFIO_IOMMU_SPAPR_TCE_CREATE - _IOWR(VFIO_TYPE, VFIO_BASE + 19, struct vfio_iommu_spapr_tce_create)
1587 *
1588 * Creates an additional TCE table and programs it (sets a new DMA window)
1589 * into every IOMMU group in the container. It receives the page shift, window
1590 * size and number of levels of the TCE table being created.
1591 *
1592 * It allocates and returns the PCI bus offset of the new DMA window.
1593 */
1594struct vfio_iommu_spapr_tce_create {
1595        __u32 argsz;
1596        __u32 flags;
1597        /* in */
1598        __u32 page_shift;
1599        __u32 __resv1;
1600        __u64 window_size;
1601        __u32 levels;
1602        __u32 __resv2;
1603        /* out */
1604        __u64 start_addr;
1605};
1606#define VFIO_IOMMU_SPAPR_TCE_CREATE     _IO(VFIO_TYPE, VFIO_BASE + 19)
1607
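/*
 * Illustrative usage sketch (not part of the UAPI): creating an additional
 * DMA window on a sPAPR v2 container.  container_fd and the window geometry
 * (64 KiB pages, 4 GiB window, single-level table) are hypothetical; the
 * returned start_addr is the base IOVA for subsequent VFIO_IOMMU_MAP_DMA
 * calls and is later handed to VFIO_IOMMU_SPAPR_TCE_REMOVE.
 *
 *  struct vfio_iommu_spapr_tce_create create = {
 *          .argsz = sizeof(create),
 *          .page_shift = 16,
 *          .window_size = 1ULL << 32,
 *          .levels = 1,
 *  };
 *
 *  if (!ioctl(container_fd, VFIO_IOMMU_SPAPR_TCE_CREATE, &create))
 *          printf("new DMA window at bus offset 0x%llx\n",
 *                 (unsigned long long)create.start_addr);
 */
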
1608/**
1609 * VFIO_IOMMU_SPAPR_TCE_REMOVE - _IOW(VFIO_TYPE, VFIO_BASE + 20, struct vfio_iommu_spapr_tce_remove)
1610 *
1611 * Unprograms a TCE table from all groups in the container and destroys it.
1612 * It receives a PCI bus offset as a window id.
1613 */
1614struct vfio_iommu_spapr_tce_remove {
1615        __u32 argsz;
1616        __u32 flags;
1617        /* in */
1618        __u64 start_addr;
1619};
1620#define VFIO_IOMMU_SPAPR_TCE_REMOVE     _IO(VFIO_TYPE, VFIO_BASE + 20)
1621
1622/* ***************************************************************** */
1623
1624#endif /* VFIO_H */
1625