qemu/linux-headers/linux/vfio.h
/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
/*
 * VFIO API definition
 *
 * Copyright (C) 2012 Red Hat, Inc.  All rights reserved.
 *     Author: Alex Williamson <alex.williamson@redhat.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#ifndef VFIO_H
#define VFIO_H

#include <linux/types.h>
#include <linux/ioctl.h>

#define VFIO_API_VERSION        0


/* Kernel & User level defines for VFIO IOCTLs. */

/* Extensions */

#define VFIO_TYPE1_IOMMU                1
#define VFIO_SPAPR_TCE_IOMMU            2
#define VFIO_TYPE1v2_IOMMU              3
/*
 * IOMMU enforces DMA cache coherence (ex. PCIe NoSnoop stripping).  This
 * capability is subject to change as groups are added or removed.
 */
#define VFIO_DMA_CC_IOMMU               4

/* Check if EEH is supported */
#define VFIO_EEH                        5

/* Two-stage IOMMU */
#define VFIO_TYPE1_NESTING_IOMMU        6       /* Implies v2 */

#define VFIO_SPAPR_TCE_v2_IOMMU         7

/*
 * The No-IOMMU IOMMU offers no translation or isolation for devices and
 * supports no ioctls outside of VFIO_CHECK_EXTENSION.  Use of VFIO's No-IOMMU
 * code will taint the host kernel and should be used with extreme caution.
 */
#define VFIO_NOIOMMU_IOMMU              8

/* Supports VFIO_DMA_UNMAP_FLAG_ALL */
#define VFIO_UNMAP_ALL                  9

/*
 * Supports the vaddr flag for DMA map and unmap.  Not supported for mediated
 * devices, so this capability is subject to change as groups are added or
 * removed.
 */
#define VFIO_UPDATE_VADDR               10

/*
 * The IOCTL interface is designed for extensibility by embedding the
 * structure length (argsz) and flags into structures passed between
 * kernel and userspace.  We therefore use the _IO() macro for these
 * defines to avoid implicitly embedding a size into the ioctl request.
 * As structure fields are added, argsz will increase to match and flag
 * bits will be defined to indicate additional fields with valid data.
 * It's *always* the caller's responsibility to indicate the size of
 * the structure passed by setting argsz appropriately.
 */

#define VFIO_TYPE       (';')
#define VFIO_BASE       100

/*
 * For extension of INFO ioctls, VFIO makes use of a capability chain
 * designed after PCI/e capabilities.  A flag bit indicates whether
 * this capability chain is supported and a field defined in the fixed
 * structure defines the offset of the first capability in the chain.
 * This field is only valid when the corresponding bit in the flags
 * bitmap is set.  This offset field is relative to the start of the
 * INFO buffer, as is the next field within each capability header.
 * The id within the header is a shared address space per INFO ioctl,
 * while the version field is specific to the capability id.  The
 * contents following the header are specific to the capability id.
 */
struct vfio_info_cap_header {
        __u16   id;             /* Identifies capability */
        __u16   version;        /* Version specific to the capability ID */
        __u32   next;           /* Offset of next capability */
};

/*
 * Callers of INFO ioctls passing insufficiently sized buffers will see
 * the capability chain flag bit set, a zero value for the first capability
 * offset (if available within the provided argsz), and argsz will be
 * updated to report the necessary buffer size.  For compatibility, the
 * INFO ioctl will not report error in this case, but the capability chain
 * will not be available.
 */

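/*
 * Example (editor's sketch, not part of the UAPI): walking a capability
 * chain in an INFO buffer.  Assumes "buf" holds the full INFO structure
 * (argsz bytes), "cap_offset" came from the ioctl-specific field with the
 * corresponding CAPS flag set, and "wanted_id" is the capability sought.
 *
 *	struct vfio_info_cap_header *hdr;
 *	__u32 off = cap_offset;
 *
 *	while (off) {
 *		hdr = (struct vfio_info_cap_header *)((char *)buf + off);
 *		if (hdr->id == wanted_id)
 *			return hdr;	// caller interprets by id/version
 *		off = hdr->next;	// 0 terminates the chain
 *	}
 *	return NULL;
 */
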
/* -------- IOCTLs for VFIO file descriptor (/dev/vfio/vfio) -------- */

/**
 * VFIO_GET_API_VERSION - _IO(VFIO_TYPE, VFIO_BASE + 0)
 *
 * Report the version of the VFIO API.  This allows us to bump the entire
 * API version should we later need to add or change features in incompatible
 * ways.
 * Return: VFIO_API_VERSION
 * Availability: Always
 */
#define VFIO_GET_API_VERSION            _IO(VFIO_TYPE, VFIO_BASE + 0)

/**
 * VFIO_CHECK_EXTENSION - _IOW(VFIO_TYPE, VFIO_BASE + 1, __u32)
 *
 * Check whether an extension is supported.
 * Return: 0 if not supported, 1 (or some other positive integer) if supported.
 * Availability: Always
 */
#define VFIO_CHECK_EXTENSION            _IO(VFIO_TYPE, VFIO_BASE + 1)

/**
 * VFIO_SET_IOMMU - _IOW(VFIO_TYPE, VFIO_BASE + 2, __s32)
 *
 * Set the iommu to the given type.  The type must be supported by an
 * iommu driver as verified by calling CHECK_EXTENSION using the same
 * type.  A group must be set to this file descriptor before this
 * ioctl is available.  The IOMMU interfaces enabled by this call are
 * specific to the value set.
 * Return: 0 on success, -errno on failure
 * Availability: When VFIO group attached
 */
#define VFIO_SET_IOMMU                  _IO(VFIO_TYPE, VFIO_BASE + 2)

/* -------- IOCTLs for GROUP file descriptors (/dev/vfio/$GROUP) -------- */

/**
 * VFIO_GROUP_GET_STATUS - _IOR(VFIO_TYPE, VFIO_BASE + 3,
 *                                              struct vfio_group_status)
 *
 * Retrieve information about the group.  Fills in provided
 * struct vfio_group_status.  Caller sets argsz.
 * Return: 0 on success, -errno on failure.
 * Availability: Always
 */
struct vfio_group_status {
        __u32   argsz;
        __u32   flags;
#define VFIO_GROUP_FLAGS_VIABLE         (1 << 0)
#define VFIO_GROUP_FLAGS_CONTAINER_SET  (1 << 1)
};
#define VFIO_GROUP_GET_STATUS           _IO(VFIO_TYPE, VFIO_BASE + 3)

/**
 * VFIO_GROUP_SET_CONTAINER - _IOW(VFIO_TYPE, VFIO_BASE + 4, __s32)
 *
 * Set the container for the VFIO group to the open VFIO file
 * descriptor provided.  Groups may only belong to a single
 * container.  Containers may, at their discretion, support multiple
 * groups.  Only when a container is set are all of the interfaces
 * of the VFIO file descriptor and the VFIO group file descriptor
 * available to the user.
 * Return: 0 on success, -errno on failure.
 * Availability: Always
 */
#define VFIO_GROUP_SET_CONTAINER        _IO(VFIO_TYPE, VFIO_BASE + 4)

/**
 * VFIO_GROUP_UNSET_CONTAINER - _IO(VFIO_TYPE, VFIO_BASE + 5)
 *
 * Remove the group from the attached container.  This is the
 * opposite of the SET_CONTAINER call and returns the group to
 * an initial state.  All device file descriptors must be released
 * prior to calling this interface.  When removing the last group
 * from a container, the IOMMU will be disabled and all state lost,
 * effectively also returning the VFIO file descriptor to an initial
 * state.
 * Return: 0 on success, -errno on failure.
 * Availability: When attached to container
 */
#define VFIO_GROUP_UNSET_CONTAINER      _IO(VFIO_TYPE, VFIO_BASE + 5)

/**
 * VFIO_GROUP_GET_DEVICE_FD - _IOW(VFIO_TYPE, VFIO_BASE + 6, char)
 *
 * Return a new file descriptor for the device object described by
 * the provided string.  The string should match a device listed in
 * the devices subdirectory of the IOMMU group sysfs entry.  The
 * group containing the device must already be added to this context.
 * Return: new file descriptor on success, -errno on failure.
 * Availability: When attached to container
 */
#define VFIO_GROUP_GET_DEVICE_FD        _IO(VFIO_TYPE, VFIO_BASE + 6)
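
/*
 * Example (editor's sketch, not part of the UAPI): the canonical container,
 * group, and device setup sequence using the ioctls above.  The group
 * number "26" and the device name are illustrative placeholders.
 *
 *	int container, group, device;
 *	struct vfio_group_status status = { .argsz = sizeof(status) };
 *
 *	container = open("/dev/vfio/vfio", O_RDWR);
 *	if (ioctl(container, VFIO_GET_API_VERSION) != VFIO_API_VERSION)
 *		return -1;	// unknown API version
 *	if (!ioctl(container, VFIO_CHECK_EXTENSION, VFIO_TYPE1v2_IOMMU))
 *		return -1;	// IOMMU type not supported
 *
 *	group = open("/dev/vfio/26", O_RDWR);
 *	ioctl(group, VFIO_GROUP_GET_STATUS, &status);
 *	if (!(status.flags & VFIO_GROUP_FLAGS_VIABLE))
 *		return -1;	// not all group devices are bound to vfio
 *
 *	ioctl(group, VFIO_GROUP_SET_CONTAINER, &container);
 *	ioctl(container, VFIO_SET_IOMMU, VFIO_TYPE1v2_IOMMU);
 *	device = ioctl(group, VFIO_GROUP_GET_DEVICE_FD, "0000:06:0d.0");
 */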

/* --------------- IOCTLs for DEVICE file descriptors --------------- */

/**
 * VFIO_DEVICE_GET_INFO - _IOR(VFIO_TYPE, VFIO_BASE + 7,
 *                                              struct vfio_device_info)
 *
 * Retrieve information about the device.  Fills in provided
 * struct vfio_device_info.  Caller sets argsz.
 * Return: 0 on success, -errno on failure.
 */
struct vfio_device_info {
        __u32   argsz;
        __u32   flags;
#define VFIO_DEVICE_FLAGS_RESET (1 << 0)        /* Device supports reset */
#define VFIO_DEVICE_FLAGS_PCI   (1 << 1)        /* vfio-pci device */
#define VFIO_DEVICE_FLAGS_PLATFORM (1 << 2)     /* vfio-platform device */
#define VFIO_DEVICE_FLAGS_AMBA  (1 << 3)        /* vfio-amba device */
#define VFIO_DEVICE_FLAGS_CCW   (1 << 4)        /* vfio-ccw device */
#define VFIO_DEVICE_FLAGS_AP    (1 << 5)        /* vfio-ap device */
#define VFIO_DEVICE_FLAGS_FSL_MC (1 << 6)       /* vfio-fsl-mc device */
#define VFIO_DEVICE_FLAGS_CAPS  (1 << 7)        /* Info supports caps */
#define VFIO_DEVICE_FLAGS_CDX   (1 << 8)        /* vfio-cdx device */
        __u32   num_regions;    /* Max region index + 1 */
        __u32   num_irqs;       /* Max IRQ index + 1 */
        __u32   cap_offset;     /* Offset within info struct of first cap */
};
#define VFIO_DEVICE_GET_INFO            _IO(VFIO_TYPE, VFIO_BASE + 7)
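
/*
 * Example (editor's sketch, not part of the UAPI): the argsz convention
 * described near the top of this header, applied to VFIO_DEVICE_GET_INFO.
 * When capabilities exceed the fixed structure, the kernel raises argsz
 * and the caller re-issues the ioctl with a larger buffer.
 *
 *	struct vfio_device_info info = { .argsz = sizeof(info) };
 *	struct vfio_device_info *infop = &info;
 *
 *	ioctl(device, VFIO_DEVICE_GET_INFO, infop);
 *	if ((info.flags & VFIO_DEVICE_FLAGS_CAPS) &&
 *	    info.argsz > sizeof(info)) {
 *		infop = malloc(info.argsz);
 *		infop->argsz = info.argsz;
 *		ioctl(device, VFIO_DEVICE_GET_INFO, infop);
 *		// the capability chain is now reachable via infop->cap_offset
 *	}
 */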

/*
 * Vendor driver using Mediated device framework should provide device_api
 * attribute in supported type attribute groups. Device API string should be one
 * of the following corresponding to device flags in vfio_device_info structure.
 */

#define VFIO_DEVICE_API_PCI_STRING              "vfio-pci"
#define VFIO_DEVICE_API_PLATFORM_STRING         "vfio-platform"
#define VFIO_DEVICE_API_AMBA_STRING             "vfio-amba"
#define VFIO_DEVICE_API_CCW_STRING              "vfio-ccw"
#define VFIO_DEVICE_API_AP_STRING               "vfio-ap"

/*
 * The following capabilities are unique to s390 zPCI devices.  Their contents
 * are further-defined in vfio_zdev.h
 */
#define VFIO_DEVICE_INFO_CAP_ZPCI_BASE          1
#define VFIO_DEVICE_INFO_CAP_ZPCI_GROUP         2
#define VFIO_DEVICE_INFO_CAP_ZPCI_UTIL          3
#define VFIO_DEVICE_INFO_CAP_ZPCI_PFIP          4

/*
 * The following VFIO_DEVICE_INFO capability reports support for PCIe AtomicOp
 * completion to the root bus with supported widths provided via flags.
 */
#define VFIO_DEVICE_INFO_CAP_PCI_ATOMIC_COMP    5
struct vfio_device_info_cap_pci_atomic_comp {
        struct vfio_info_cap_header header;
        __u32 flags;
#define VFIO_PCI_ATOMIC_COMP32  (1 << 0)
#define VFIO_PCI_ATOMIC_COMP64  (1 << 1)
#define VFIO_PCI_ATOMIC_COMP128 (1 << 2)
        __u32 reserved;
};

/**
 * VFIO_DEVICE_GET_REGION_INFO - _IOWR(VFIO_TYPE, VFIO_BASE + 8,
 *                                     struct vfio_region_info)
 *
 * Retrieve information about a device region.  Caller provides
 * struct vfio_region_info with index value set.  Caller sets argsz.
 * Implementation of region mapping is bus driver specific.  This is
 * intended to describe MMIO, I/O port, as well as bus specific
 * regions (ex. PCI config space).  Zero sized regions may be used
 * to describe unimplemented regions (ex. unimplemented PCI BARs).
 * Return: 0 on success, -errno on failure.
 */
struct vfio_region_info {
        __u32   argsz;
        __u32   flags;
#define VFIO_REGION_INFO_FLAG_READ      (1 << 0) /* Region supports read */
#define VFIO_REGION_INFO_FLAG_WRITE     (1 << 1) /* Region supports write */
#define VFIO_REGION_INFO_FLAG_MMAP      (1 << 2) /* Region supports mmap */
#define VFIO_REGION_INFO_FLAG_CAPS      (1 << 3) /* Info supports caps */
        __u32   index;          /* Region index */
        __u32   cap_offset;     /* Offset within info struct of first cap */
        __u64   size;           /* Region size (bytes) */
        __u64   offset;         /* Region offset from start of device fd */
};
#define VFIO_DEVICE_GET_REGION_INFO     _IO(VFIO_TYPE, VFIO_BASE + 8)

/*
 * The sparse mmap capability allows finer granularity of specifying areas
 * within a region with mmap support.  When specified, the user should only
 * mmap the offset ranges specified by the areas array.  mmaps outside of the
 * areas specified may fail (such as the range covering a PCI MSI-X table) or
 * may result in improper device behavior.
 *
 * The structures below define version 1 of this capability.
 */
#define VFIO_REGION_INFO_CAP_SPARSE_MMAP        1

struct vfio_region_sparse_mmap_area {
        __u64   offset; /* Offset of mmap'able area within region */
        __u64   size;   /* Size of mmap'able area */
};

struct vfio_region_info_cap_sparse_mmap {
        struct vfio_info_cap_header header;
        __u32   nr_areas;
        __u32   reserved;
        struct vfio_region_sparse_mmap_area areas[];
};

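/*
 * Example (editor's sketch, not part of the UAPI): mapping only the sparse
 * areas of a region.  Assumes "sparse" was located in the region info
 * capability chain (id VFIO_REGION_INFO_CAP_SPARSE_MMAP) and "info" is the
 * corresponding struct vfio_region_info.
 *
 *	for (i = 0; i < sparse->nr_areas; i++) {
 *		void *map = mmap(NULL, sparse->areas[i].size,
 *				 PROT_READ | PROT_WRITE, MAP_SHARED, device,
 *				 info.offset + sparse->areas[i].offset);
 *		// area offsets are relative to the region; the region itself
 *		// sits at info.offset within the device fd
 *	}
 */
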
/*
 * The device specific type capability allows regions unique to a specific
 * device or class of devices to be exposed.  This helps solve the problem for
 * vfio bus drivers of defining which region indexes correspond to which region
 * on the device, without needing to resort to static indexes, as done by
 * vfio-pci.  For instance, if we were to go back in time, we might remove
 * VFIO_PCI_VGA_REGION_INDEX and let vfio-pci simply define that all indexes
 * greater than or equal to VFIO_PCI_NUM_REGIONS are device specific and we'd
 * make a "VGA" device specific type to describe the VGA access space.  This
 * means that non-VGA devices wouldn't need to waste this index, and thus the
 * address space associated with it due to implementation of device file
 * descriptor offsets in vfio-pci.
 *
 * The current implementation is now part of the user ABI, so we can't use this
 * for VGA, but there are other upcoming use cases, such as opregions for Intel
 * IGD devices and framebuffers for vGPU devices.  We missed VGA, but we'll
 * use this for future additions.
 *
 * The structure below defines version 1 of this capability.
 */
#define VFIO_REGION_INFO_CAP_TYPE       2

struct vfio_region_info_cap_type {
        struct vfio_info_cap_header header;
        __u32 type;     /* global per bus driver */
        __u32 subtype;  /* type specific */
};

/*
 * List of region types, global per bus driver.
 * If you introduce a new type, please add it here.
 */

/* PCI region type containing a PCI vendor part */
#define VFIO_REGION_TYPE_PCI_VENDOR_TYPE        (1 << 31)
#define VFIO_REGION_TYPE_PCI_VENDOR_MASK        (0xffff)
#define VFIO_REGION_TYPE_GFX                    (1)
#define VFIO_REGION_TYPE_CCW                    (2)
#define VFIO_REGION_TYPE_MIGRATION_DEPRECATED   (3)

/* sub-types for VFIO_REGION_TYPE_PCI_* */

/* 8086 vendor PCI sub-types */
#define VFIO_REGION_SUBTYPE_INTEL_IGD_OPREGION  (1)
#define VFIO_REGION_SUBTYPE_INTEL_IGD_HOST_CFG  (2)
#define VFIO_REGION_SUBTYPE_INTEL_IGD_LPC_CFG   (3)

/* 10de vendor PCI sub-types */
/*
 * NVIDIA GPU NVlink2 RAM is coherent RAM mapped onto the host address space.
 *
 * Deprecated, region no longer provided
 */
#define VFIO_REGION_SUBTYPE_NVIDIA_NVLINK2_RAM  (1)

/* 1014 vendor PCI sub-types */
/*
 * IBM NPU NVlink2 ATSD (Address Translation Shootdown) register of NPU
 * to do TLB invalidation on a GPU.
 *
 * Deprecated, region no longer provided
 */
#define VFIO_REGION_SUBTYPE_IBM_NVLINK2_ATSD    (1)

/* sub-types for VFIO_REGION_TYPE_GFX */
#define VFIO_REGION_SUBTYPE_GFX_EDID            (1)

/**
 * struct vfio_region_gfx_edid - EDID region layout.
 *
 * Set display link state and EDID blob.
 *
 * The EDID blob has monitor information such as brand, name, serial
 * number, physical size, supported video modes and more.
 *
 * This special region allows userspace (typically qemu) to set a virtual
 * EDID for the virtual monitor, which allows a flexible display
 * configuration.
 *
 * For the edid blob spec look here:
 *    https://en.wikipedia.org/wiki/Extended_Display_Identification_Data
 *
 * On linux systems you can find the EDID blob in sysfs:
 *    /sys/class/drm/${card}/${connector}/edid
 *
 * You can use the edid-decode utility (comes with xorg-x11-utils) to
 * decode the EDID blob.
 *
 * @edid_offset: location of the edid blob, relative to the
 *               start of the region (readonly).
 * @edid_max_size: max size of the edid blob (readonly).
 * @edid_size: actual edid size (read/write).
 * @link_state: display link state (read/write).
 * VFIO_DEVICE_GFX_LINK_STATE_UP: Monitor is turned on.
 * VFIO_DEVICE_GFX_LINK_STATE_DOWN: Monitor is turned off.
 * @max_xres: max display width (0 == no limitation, readonly).
 * @max_yres: max display height (0 == no limitation, readonly).
 *
 * EDID update protocol:
 *   (1) set link-state to down.
 *   (2) update edid blob and size.
 *   (3) set link-state to up.
 */
struct vfio_region_gfx_edid {
        __u32 edid_offset;
        __u32 edid_max_size;
        __u32 edid_size;
        __u32 max_xres;
        __u32 max_yres;
        __u32 link_state;
#define VFIO_DEVICE_GFX_LINK_STATE_UP    1
#define VFIO_DEVICE_GFX_LINK_STATE_DOWN  2
};

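/*
 * Example (editor's sketch, not part of the UAPI): the EDID update protocol
 * above, performed with pread()/pwrite() on the device fd.  Assumes
 * "region_offset" is the device fd offset of the EDID region and
 * "edid"/"edid_len" hold the new blob.
 *
 *	struct vfio_region_gfx_edid hdr;
 *	__u32 state = VFIO_DEVICE_GFX_LINK_STATE_DOWN;
 *
 *	pread(device, &hdr, sizeof(hdr), region_offset);
 *	pwrite(device, &state, sizeof(state), region_offset +
 *	       offsetof(struct vfio_region_gfx_edid, link_state));
 *	pwrite(device, edid, edid_len, region_offset + hdr.edid_offset);
 *	pwrite(device, &edid_len, sizeof(__u32), region_offset +
 *	       offsetof(struct vfio_region_gfx_edid, edid_size));
 *	state = VFIO_DEVICE_GFX_LINK_STATE_UP;
 *	pwrite(device, &state, sizeof(state), region_offset +
 *	       offsetof(struct vfio_region_gfx_edid, link_state));
 */
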
/* sub-types for VFIO_REGION_TYPE_CCW */
#define VFIO_REGION_SUBTYPE_CCW_ASYNC_CMD       (1)
#define VFIO_REGION_SUBTYPE_CCW_SCHIB           (2)
#define VFIO_REGION_SUBTYPE_CCW_CRW             (3)

/* sub-types for VFIO_REGION_TYPE_MIGRATION */
#define VFIO_REGION_SUBTYPE_MIGRATION_DEPRECATED (1)

struct vfio_device_migration_info {
        __u32 device_state;         /* VFIO device state */
#define VFIO_DEVICE_STATE_V1_STOP      (0)
#define VFIO_DEVICE_STATE_V1_RUNNING   (1 << 0)
#define VFIO_DEVICE_STATE_V1_SAVING    (1 << 1)
#define VFIO_DEVICE_STATE_V1_RESUMING  (1 << 2)
#define VFIO_DEVICE_STATE_MASK      (VFIO_DEVICE_STATE_V1_RUNNING | \
                                     VFIO_DEVICE_STATE_V1_SAVING |  \
                                     VFIO_DEVICE_STATE_V1_RESUMING)

#define VFIO_DEVICE_STATE_VALID(state) \
        (state & VFIO_DEVICE_STATE_V1_RESUMING ? \
        (state & VFIO_DEVICE_STATE_MASK) == VFIO_DEVICE_STATE_V1_RESUMING : 1)

#define VFIO_DEVICE_STATE_IS_ERROR(state) \
        ((state & VFIO_DEVICE_STATE_MASK) == (VFIO_DEVICE_STATE_V1_SAVING | \
                                              VFIO_DEVICE_STATE_V1_RESUMING))

#define VFIO_DEVICE_STATE_SET_ERROR(state) \
        ((state & ~VFIO_DEVICE_STATE_MASK) | VFIO_DEVICE_STATE_V1_SAVING | \
                                             VFIO_DEVICE_STATE_V1_RESUMING)

        __u32 reserved;
        __u64 pending_bytes;
        __u64 data_offset;
        __u64 data_size;
};

/*
 * The MSIX mappable capability informs that MSIX data of a BAR can be mmapped,
 * which allows direct access to non-MSIX registers that happen to be within
 * the same system page.
 *
 * Even though the userspace gets direct access to the MSIX data, the existing
 * VFIO_DEVICE_SET_IRQS interface must still be used for MSIX configuration.
 */
#define VFIO_REGION_INFO_CAP_MSIX_MAPPABLE      3

/*
 * Capability with compressed real address (aka SSA - small system address)
 * where GPU RAM is mapped on a system bus. Used by a GPU for DMA routing
 * and by the userspace to associate a NVLink bridge with a GPU.
 *
 * Deprecated, capability no longer provided
 */
#define VFIO_REGION_INFO_CAP_NVLINK2_SSATGT     4

struct vfio_region_info_cap_nvlink2_ssatgt {
        struct vfio_info_cap_header header;
        __u64 tgt;
};

/*
 * Capability with an NVLink link speed. The value is read by
 * the NVlink2 bridge driver from the bridge's "ibm,nvlink-speed"
 * property in the device tree. The value is fixed in the hardware
 * and failing to provide the correct value results in the link
 * not working with no indication from the driver why.
 *
 * Deprecated, capability no longer provided
 */
#define VFIO_REGION_INFO_CAP_NVLINK2_LNKSPD     5

struct vfio_region_info_cap_nvlink2_lnkspd {
        struct vfio_info_cap_header header;
        __u32 link_speed;
        __u32 __pad;
};

/**
 * VFIO_DEVICE_GET_IRQ_INFO - _IOWR(VFIO_TYPE, VFIO_BASE + 9,
 *                                  struct vfio_irq_info)
 *
 * Retrieve information about a device IRQ.  Caller provides
 * struct vfio_irq_info with index value set.  Caller sets argsz.
 * Implementation of IRQ mapping is bus driver specific.  Indexes
 * using multiple IRQs are primarily intended to support MSI-like
 * interrupt blocks.  Zero count irq blocks may be used to describe
 * unimplemented interrupt types.
 *
 * The EVENTFD flag indicates the interrupt index supports eventfd based
 * signaling.
 *
 * The MASKABLE flag indicates the index supports MASK and UNMASK
 * actions described below.
 *
 * AUTOMASKED indicates that after signaling, the interrupt line is
 * automatically masked by VFIO and the user needs to unmask the line
 * to receive new interrupts.  This is primarily intended to distinguish
 * level triggered interrupts.
 *
 * The NORESIZE flag indicates that the interrupt lines within the index
 * are setup as a set and new subindexes cannot be enabled without first
 * disabling the entire index.  This is used for interrupts like PCI MSI
 * and MSI-X where the driver may only use a subset of the available
 * indexes, but VFIO needs to enable a specific number of vectors
 * upfront.  In the case of MSI-X, where the user can enable MSI-X and
 * then add and unmask vectors, it's up to userspace to make the decision
 * whether to allocate the maximum supported number of vectors or tear
 * down setup and incrementally increase the vectors as each is enabled.
 * Absence of the NORESIZE flag indicates that vectors can be enabled
 * and disabled dynamically without impacting other vectors within the
 * index.
 */
struct vfio_irq_info {
        __u32   argsz;
        __u32   flags;
#define VFIO_IRQ_INFO_EVENTFD           (1 << 0)
#define VFIO_IRQ_INFO_MASKABLE          (1 << 1)
#define VFIO_IRQ_INFO_AUTOMASKED        (1 << 2)
#define VFIO_IRQ_INFO_NORESIZE          (1 << 3)
        __u32   index;          /* IRQ index */
        __u32   count;          /* Number of IRQs within this index */
};
#define VFIO_DEVICE_GET_IRQ_INFO        _IO(VFIO_TYPE, VFIO_BASE + 9)

/**
 * VFIO_DEVICE_SET_IRQS - _IOW(VFIO_TYPE, VFIO_BASE + 10, struct vfio_irq_set)
 *
 * Set signaling, masking, and unmasking of interrupts.  Caller provides
 * struct vfio_irq_set with all fields set.  'start' and 'count' indicate
 * the range of subindexes being specified.
 *
 * The DATA flags specify the type of data provided.  If DATA_NONE, the
 * operation performs the specified action immediately on the specified
 * interrupt(s).  For example, to unmask AUTOMASKED interrupt [0,0]:
 * flags = (DATA_NONE|ACTION_UNMASK), index = 0, start = 0, count = 1.
 *
 * DATA_BOOL allows sparse support for the same on arrays of interrupts.
 * For example, to mask interrupts [0,1] and [0,3] (but not [0,2]):
 * flags = (DATA_BOOL|ACTION_MASK), index = 0, start = 1, count = 3,
 * data = {1,0,1}
 *
 * DATA_EVENTFD binds the specified ACTION to the provided __s32 eventfd.
 * A value of -1 can be used to either de-assign interrupts if already
 * assigned or skip un-assigned interrupts.  For example, to set an eventfd
 * to be triggered for interrupts [0,0] and [0,2]:
 * flags = (DATA_EVENTFD|ACTION_TRIGGER), index = 0, start = 0, count = 3,
 * data = {fd1, -1, fd2}
 * If index [0,1] is previously set, two count = 1 ioctl calls would be
 * required to set [0,0] and [0,2] without changing [0,1].
 *
 * Once a signaling mechanism is set, DATA_BOOL or DATA_NONE can be used
 * with ACTION_TRIGGER to perform kernel level interrupt loopback testing
 * from userspace (ie. simulate hardware triggering).
 *
 * Setting of an event triggering mechanism to userspace for ACTION_TRIGGER
 * enables the interrupt index for the device.  Individual subindex interrupts
 * can be disabled using the -1 value for DATA_EVENTFD or the index can be
 * disabled as a whole with: flags = (DATA_NONE|ACTION_TRIGGER), count = 0.
 *
 * Note that ACTION_[UN]MASK specify user->kernel signaling (irqfds) while
 * ACTION_TRIGGER specifies kernel->user signaling.
 */
struct vfio_irq_set {
        __u32   argsz;
        __u32   flags;
#define VFIO_IRQ_SET_DATA_NONE          (1 << 0) /* Data not present */
#define VFIO_IRQ_SET_DATA_BOOL          (1 << 1) /* Data is bool (u8) */
#define VFIO_IRQ_SET_DATA_EVENTFD       (1 << 2) /* Data is eventfd (s32) */
#define VFIO_IRQ_SET_ACTION_MASK        (1 << 3) /* Mask interrupt */
#define VFIO_IRQ_SET_ACTION_UNMASK      (1 << 4) /* Unmask interrupt */
#define VFIO_IRQ_SET_ACTION_TRIGGER     (1 << 5) /* Trigger interrupt */
        __u32   index;
        __u32   start;
        __u32   count;
        __u8    data[];
};
#define VFIO_DEVICE_SET_IRQS            _IO(VFIO_TYPE, VFIO_BASE + 10)

#define VFIO_IRQ_SET_DATA_TYPE_MASK     (VFIO_IRQ_SET_DATA_NONE | \
                                         VFIO_IRQ_SET_DATA_BOOL | \
                                         VFIO_IRQ_SET_DATA_EVENTFD)
#define VFIO_IRQ_SET_ACTION_TYPE_MASK   (VFIO_IRQ_SET_ACTION_MASK | \
                                         VFIO_IRQ_SET_ACTION_UNMASK | \
                                         VFIO_IRQ_SET_ACTION_TRIGGER)
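
/*
 * Example (editor's sketch, not part of the UAPI): binding an eventfd to a
 * single MSI vector.  The variable-length data[] array is carried in a
 * buffer allocated to hold the header plus one __s32 eventfd; the PCI IRQ
 * index constants are defined further below in this header.
 *
 *	struct vfio_irq_info irq = {
 *		.argsz = sizeof(irq), .index = VFIO_PCI_MSI_IRQ_INDEX };
 *	char buf[sizeof(struct vfio_irq_set) + sizeof(__s32)];
 *	struct vfio_irq_set *set = (struct vfio_irq_set *)buf;
 *	int efd = eventfd(0, EFD_CLOEXEC);
 *
 *	ioctl(device, VFIO_DEVICE_GET_IRQ_INFO, &irq);	// irq.count vectors
 *	set->argsz = sizeof(buf);
 *	set->flags = VFIO_IRQ_SET_DATA_EVENTFD | VFIO_IRQ_SET_ACTION_TRIGGER;
 *	set->index = VFIO_PCI_MSI_IRQ_INDEX;
 *	set->start = 0;
 *	set->count = 1;
 *	memcpy(set->data, &efd, sizeof(__s32));
 *	ioctl(device, VFIO_DEVICE_SET_IRQS, set);
 *	// interrupts are now delivered by reading/polling efd
 */
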
/**
 * VFIO_DEVICE_RESET - _IO(VFIO_TYPE, VFIO_BASE + 11)
 *
 * Reset a device.
 */
#define VFIO_DEVICE_RESET               _IO(VFIO_TYPE, VFIO_BASE + 11)

/*
 * The VFIO-PCI bus driver makes use of the following fixed region and
 * IRQ index mapping.  Unimplemented regions return a size of zero.
 * Unimplemented IRQ types return a count of zero.
 */

enum {
        VFIO_PCI_BAR0_REGION_INDEX,
        VFIO_PCI_BAR1_REGION_INDEX,
        VFIO_PCI_BAR2_REGION_INDEX,
        VFIO_PCI_BAR3_REGION_INDEX,
        VFIO_PCI_BAR4_REGION_INDEX,
        VFIO_PCI_BAR5_REGION_INDEX,
        VFIO_PCI_ROM_REGION_INDEX,
        VFIO_PCI_CONFIG_REGION_INDEX,
        /*
         * Expose VGA regions defined for PCI base class 03, subclass 00.
         * This includes I/O port ranges 0x3b0 to 0x3bb and 0x3c0 to 0x3df
         * as well as the MMIO range 0xa0000 to 0xbffff.  Each implemented
         * range is found at its identity mapped offset from the region
         * offset, for example 0x3b0 is region_info.offset + 0x3b0.  Areas
         * between described ranges are unimplemented.
         */
        VFIO_PCI_VGA_REGION_INDEX,
        VFIO_PCI_NUM_REGIONS = 9 /* Fixed user ABI, region indexes >=9 use */
                                 /* device specific cap to define content. */
};

enum {
        VFIO_PCI_INTX_IRQ_INDEX,
        VFIO_PCI_MSI_IRQ_INDEX,
        VFIO_PCI_MSIX_IRQ_INDEX,
        VFIO_PCI_ERR_IRQ_INDEX,
        VFIO_PCI_REQ_IRQ_INDEX,
        VFIO_PCI_NUM_IRQS
};

/*
 * The vfio-ccw bus driver makes use of the following fixed region and
 * IRQ index mapping. Unimplemented regions return a size of zero.
 * Unimplemented IRQ types return a count of zero.
 */

enum {
        VFIO_CCW_CONFIG_REGION_INDEX,
        VFIO_CCW_NUM_REGIONS
};

enum {
        VFIO_CCW_IO_IRQ_INDEX,
        VFIO_CCW_CRW_IRQ_INDEX,
        VFIO_CCW_REQ_IRQ_INDEX,
        VFIO_CCW_NUM_IRQS
};

/*
 * The vfio-ap bus driver makes use of the following IRQ index mapping.
 * Unimplemented IRQ types return a count of zero.
 */
enum {
        VFIO_AP_REQ_IRQ_INDEX,
        VFIO_AP_NUM_IRQS
};

/**
 * VFIO_DEVICE_GET_PCI_HOT_RESET_INFO - _IOWR(VFIO_TYPE, VFIO_BASE + 12,
 *                                            struct vfio_pci_hot_reset_info)
 *
 * Return: 0 on success, -errno on failure:
 *      -enospc = insufficient buffer, -enodev = unsupported for device.
 */
struct vfio_pci_dependent_device {
        __u32   group_id;
        __u16   segment;
        __u8    bus;
        __u8    devfn; /* Use PCI_SLOT/PCI_FUNC */
};

struct vfio_pci_hot_reset_info {
        __u32   argsz;
        __u32   flags;
        __u32   count;
        struct vfio_pci_dependent_device        devices[];
};

#define VFIO_DEVICE_GET_PCI_HOT_RESET_INFO      _IO(VFIO_TYPE, VFIO_BASE + 12)

/**
 * VFIO_DEVICE_PCI_HOT_RESET - _IOW(VFIO_TYPE, VFIO_BASE + 13,
 *                                  struct vfio_pci_hot_reset)
 *
 * Return: 0 on success, -errno on failure.
 */
struct vfio_pci_hot_reset {
        __u32   argsz;
        __u32   flags;
        __u32   count;
        __s32   group_fds[];
};

#define VFIO_DEVICE_PCI_HOT_RESET       _IO(VFIO_TYPE, VFIO_BASE + 13)

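/*
 * Example (editor's sketch, not part of the UAPI): querying the devices
 * affected by a PCI hot reset and then executing it.  Assumes "ngroups"
 * open group fds cover every group listed by the info ioctl.
 *
 *	struct vfio_pci_hot_reset_info *info;
 *	struct vfio_pci_hot_reset *reset;
 *	__u32 sz = sizeof(*info);
 *
 *	info = malloc(sz);
 *	info->argsz = sz;
 *	if (ioctl(device, VFIO_DEVICE_GET_PCI_HOT_RESET_INFO, info) &&
 *	    errno == ENOSPC) {
 *		sz = info->argsz;		// kernel reported needed size
 *		info = realloc(info, sz);
 *		info->argsz = sz;
 *		ioctl(device, VFIO_DEVICE_GET_PCI_HOT_RESET_INFO, info);
 *	}
 *	// info->devices[0..count-1] lists {group_id, segment, bus, devfn}
 *
 *	reset = malloc(sizeof(*reset) + ngroups * sizeof(__s32));
 *	reset->argsz = sizeof(*reset) + ngroups * sizeof(__s32);
 *	reset->flags = 0;
 *	reset->count = ngroups;
 *	// fill reset->group_fds[] with the open fds for the listed groups
 *	ioctl(device, VFIO_DEVICE_PCI_HOT_RESET, reset);
 */
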
/**
 * VFIO_DEVICE_QUERY_GFX_PLANE - _IOW(VFIO_TYPE, VFIO_BASE + 14,
 *                                    struct vfio_device_query_gfx_plane)
 *
 * Set the drm_plane_type and flags, then retrieve the gfx plane info.
 *
 * flags supported:
 * - VFIO_GFX_PLANE_TYPE_PROBE and VFIO_GFX_PLANE_TYPE_DMABUF are set
 *   to ask if the mdev supports dma-buf. 0 on support, -EINVAL on no
 *   support for dma-buf.
 * - VFIO_GFX_PLANE_TYPE_PROBE and VFIO_GFX_PLANE_TYPE_REGION are set
 *   to ask if the mdev supports region. 0 on support, -EINVAL on no
 *   support for region.
 * - VFIO_GFX_PLANE_TYPE_DMABUF or VFIO_GFX_PLANE_TYPE_REGION is set
 *   with each call to query the plane info.
 * - Others are invalid and return -EINVAL.
 *
 * Note:
 * 1. Plane could be disabled by guest. In that case, success will be
 *    returned with zero-initialized drm_format, size, width and height
 *    fields.
 * 2. x_hot/y_hot is set to 0xFFFFFFFF if no hotspot information available
 *
 * Return: 0 on success, -errno on other failure.
 */
struct vfio_device_gfx_plane_info {
        __u32 argsz;
        __u32 flags;
#define VFIO_GFX_PLANE_TYPE_PROBE (1 << 0)
#define VFIO_GFX_PLANE_TYPE_DMABUF (1 << 1)
#define VFIO_GFX_PLANE_TYPE_REGION (1 << 2)
        /* in */
        __u32 drm_plane_type;   /* type of plane: DRM_PLANE_TYPE_* */
        /* out */
        __u32 drm_format;       /* drm format of plane */
        __u64 drm_format_mod;   /* tiled mode */
        __u32 width;    /* width of plane */
        __u32 height;   /* height of plane */
        __u32 stride;   /* stride of plane */
        __u32 size;     /* size of plane in bytes, align on page */
        __u32 x_pos;    /* horizontal position of cursor plane */
        __u32 y_pos;    /* vertical position of cursor plane */
        __u32 x_hot;    /* horizontal position of cursor hotspot */
        __u32 y_hot;    /* vertical position of cursor hotspot */
        union {
                __u32 region_index;     /* region index */
                __u32 dmabuf_id;        /* dma-buf id */
        };
};

#define VFIO_DEVICE_QUERY_GFX_PLANE _IO(VFIO_TYPE, VFIO_BASE + 14)

/**
 * VFIO_DEVICE_GET_GFX_DMABUF - _IOW(VFIO_TYPE, VFIO_BASE + 15, __u32)
 *
 * Return a new dma-buf file descriptor for an exposed guest framebuffer
 * described by the provided dmabuf_id. The dmabuf_id is returned from VFIO_
 * DEVICE_QUERY_GFX_PLANE as a token of the exposed guest framebuffer.
 */

#define VFIO_DEVICE_GET_GFX_DMABUF _IO(VFIO_TYPE, VFIO_BASE + 15)

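/*
 * Example (editor's sketch, not part of the UAPI): probing for dma-buf
 * support, querying the primary plane and acquiring its dma-buf fd.  The
 * drm_plane_type value 1 corresponds to DRM_PLANE_TYPE_PRIMARY in the DRM
 * uapi headers.
 *
 *	struct vfio_device_gfx_plane_info plane = {
 *		.argsz = sizeof(plane),
 *		.flags = VFIO_GFX_PLANE_TYPE_PROBE | VFIO_GFX_PLANE_TYPE_DMABUF,
 *	};
 *	int dmabuf_fd;
 *
 *	if (ioctl(device, VFIO_DEVICE_QUERY_GFX_PLANE, &plane))
 *		return -1;	// -EINVAL: dma-buf planes not supported
 *
 *	plane.flags = VFIO_GFX_PLANE_TYPE_DMABUF;
 *	plane.drm_plane_type = 1;	// DRM_PLANE_TYPE_PRIMARY
 *	ioctl(device, VFIO_DEVICE_QUERY_GFX_PLANE, &plane);
 *	if (plane.width && plane.height)
 *		dmabuf_fd = ioctl(device, VFIO_DEVICE_GET_GFX_DMABUF,
 *				  &plane.dmabuf_id);
 */
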
/**
 * VFIO_DEVICE_IOEVENTFD - _IOW(VFIO_TYPE, VFIO_BASE + 16,
 *                              struct vfio_device_ioeventfd)
 *
 * Perform a write to the device at the specified device fd offset, with
 * the specified data and width when the provided eventfd is triggered.
 * vfio bus drivers may not support this for all regions, for all widths,
 * or at all.  vfio-pci currently only enables support for BAR regions,
 * excluding the MSI-X vector table.
 *
 * Return: 0 on success, -errno on failure.
 */
struct vfio_device_ioeventfd {
        __u32   argsz;
        __u32   flags;
#define VFIO_DEVICE_IOEVENTFD_8         (1 << 0) /* 1-byte write */
#define VFIO_DEVICE_IOEVENTFD_16        (1 << 1) /* 2-byte write */
#define VFIO_DEVICE_IOEVENTFD_32        (1 << 2) /* 4-byte write */
#define VFIO_DEVICE_IOEVENTFD_64        (1 << 3) /* 8-byte write */
#define VFIO_DEVICE_IOEVENTFD_SIZE_MASK (0xf)
        __u64   offset;                 /* device fd offset of write */
        __u64   data;                   /* data to be written */
        __s32   fd;                     /* -1 for de-assignment */
};

#define VFIO_DEVICE_IOEVENTFD           _IO(VFIO_TYPE, VFIO_BASE + 16)

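/*
 * Example (editor's sketch, not part of the UAPI): routing a 4-byte
 * doorbell write through an eventfd.  Assumes "bar_offset" is the device
 * fd offset of the register within a BAR region.
 *
 *	struct vfio_device_ioeventfd ioefd = {
 *		.argsz  = sizeof(ioefd),
 *		.flags  = VFIO_DEVICE_IOEVENTFD_32,
 *		.offset = bar_offset,
 *		.data   = 0x1,			// value written on trigger
 *		.fd     = eventfd(0, EFD_CLOEXEC),
 *	};
 *
 *	ioctl(device, VFIO_DEVICE_IOEVENTFD, &ioefd);
 *	// each signal on ioefd.fd now performs the 4-byte device write;
 *	// pass .fd = -1 with the same offset/flags to de-assign
 */
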
/**
 * VFIO_DEVICE_FEATURE - _IOWR(VFIO_TYPE, VFIO_BASE + 17,
 *                             struct vfio_device_feature)
 *
 * Get, set, or probe feature data of the device.  The feature is selected
 * using the FEATURE_MASK portion of the flags field.  Support for a feature
 * can be probed by setting both the FEATURE_MASK and PROBE bits.  A probe
 * may optionally include the GET and/or SET bits to determine read vs write
 * access of the feature respectively.  Probing a feature will return success
 * if the feature is supported and all of the optionally indicated GET/SET
 * methods are supported.  The format of the data portion of the structure is
 * specific to the given feature.  The data portion is not required for
 * probing.  GET and SET are mutually exclusive, except for use with PROBE.
 *
 * Return 0 on success, -errno on failure.
 */
struct vfio_device_feature {
        __u32   argsz;
        __u32   flags;
#define VFIO_DEVICE_FEATURE_MASK        (0xffff) /* 16-bit feature index */
#define VFIO_DEVICE_FEATURE_GET         (1 << 16) /* Get feature into data[] */
#define VFIO_DEVICE_FEATURE_SET         (1 << 17) /* Set feature from data[] */
#define VFIO_DEVICE_FEATURE_PROBE       (1 << 18) /* Probe feature support */
        __u8    data[];
};

#define VFIO_DEVICE_FEATURE             _IO(VFIO_TYPE, VFIO_BASE + 17)

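/*
 * Example (editor's sketch, not part of the UAPI): probing and then setting
 * the PCI VF token feature defined just below.  "uuid" is an illustrative
 * 16-byte array holding the token.
 *
 *	char buf[sizeof(struct vfio_device_feature) + 16];
 *	struct vfio_device_feature *feat = (struct vfio_device_feature *)buf;
 *
 *	feat->argsz = sizeof(*feat);	// no data[] needed for probing
 *	feat->flags = VFIO_DEVICE_FEATURE_PCI_VF_TOKEN |
 *		      VFIO_DEVICE_FEATURE_PROBE | VFIO_DEVICE_FEATURE_SET;
 *	if (ioctl(device, VFIO_DEVICE_FEATURE, feat))
 *		return -1;		// feature or SET access not supported
 *
 *	feat->argsz = sizeof(buf);
 *	feat->flags = VFIO_DEVICE_FEATURE_PCI_VF_TOKEN |
 *		      VFIO_DEVICE_FEATURE_SET;
 *	memcpy(feat->data, uuid, 16);
 *	ioctl(device, VFIO_DEVICE_FEATURE, feat);
 */
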
/*
 * Provide support for setting a PCI VF Token, which is used as a shared
 * secret between PF and VF drivers.  This feature may only be set on a
 * PCI SR-IOV PF when SR-IOV is enabled on the PF and there are no existing
 * open VFs.  Data provided when setting this feature is a 16-byte array
 * (__u8 b[16]), representing a UUID.
 */
#define VFIO_DEVICE_FEATURE_PCI_VF_TOKEN        (0)

/*
 * Indicates the device can support the migration API through
 * VFIO_DEVICE_FEATURE_MIG_DEVICE_STATE. If this GET succeeds, the RUNNING and
 * ERROR states are always supported. Support for additional states is
 * indicated via the flags field; at least VFIO_MIGRATION_STOP_COPY must be
 * set.
 *
 * VFIO_MIGRATION_STOP_COPY means that STOP, STOP_COPY and
 * RESUMING are supported.
 *
 * VFIO_MIGRATION_STOP_COPY | VFIO_MIGRATION_P2P means that RUNNING_P2P
 * is supported in addition to the STOP_COPY states.
 *
 * VFIO_MIGRATION_STOP_COPY | VFIO_MIGRATION_PRE_COPY means that
 * PRE_COPY is supported in addition to the STOP_COPY states.
 *
 * VFIO_MIGRATION_STOP_COPY | VFIO_MIGRATION_P2P | VFIO_MIGRATION_PRE_COPY
 * means that RUNNING_P2P, PRE_COPY and PRE_COPY_P2P are supported
 * in addition to the STOP_COPY states.
 *
 * Other combinations of flags have behavior to be defined in the future.
 */
struct vfio_device_feature_migration {
        __aligned_u64 flags;
#define VFIO_MIGRATION_STOP_COPY        (1 << 0)
#define VFIO_MIGRATION_P2P              (1 << 1)
#define VFIO_MIGRATION_PRE_COPY         (1 << 2)
};
#define VFIO_DEVICE_FEATURE_MIGRATION 1

/*
 * Upon VFIO_DEVICE_FEATURE_SET, execute a migration state change on the VFIO
 * device. The new state is supplied in device_state, see enum
 * vfio_device_mig_state for details
 *
 * The kernel migration driver must fully transition the device to the new state
 * value before the operation returns to the user.
 *
 * The kernel migration driver must not generate asynchronous device state
 * transitions outside of manipulation by the user or the VFIO_DEVICE_RESET
 * ioctl as described above.
 *
 * If this function fails then current device_state may be the original
 * operating state or some other state along the combination transition path.
 * The user can then decide if it should execute a VFIO_DEVICE_RESET, attempt
 * to return to the original state, or attempt to return to some other state
 * such as RUNNING or STOP.
 *
 * If the new_state starts a new data transfer session then the FD associated
 * with that session is returned in data_fd. The user is responsible to close
 * this FD when it is finished. The user must consider the migration data stream
 * carried over the FD to be opaque and must preserve the byte order of the
 * stream. The user is not required to preserve buffer segmentation when writing
 * the data stream during the RESUMING operation.
 *
 * Upon VFIO_DEVICE_FEATURE_GET, get the current migration state of the VFIO
 * device, data_fd will be -1.
 */
struct vfio_device_feature_mig_state {
        __u32 device_state; /* From enum vfio_device_mig_state */
        __s32 data_fd;
};
#define VFIO_DEVICE_FEATURE_MIG_DEVICE_STATE 2

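/*
 * Example (editor's sketch, not part of the UAPI): moving the device to
 * STOP_COPY and draining the migration stream.  Error handling elided.
 *
 *	char buf[sizeof(struct vfio_device_feature) +
 *		 sizeof(struct vfio_device_feature_mig_state)];
 *	struct vfio_device_feature *feat = (void *)buf;
 *	struct vfio_device_feature_mig_state *mig = (void *)feat->data;
 *	char chunk[65536];
 *	ssize_t n;
 *
 *	feat->argsz = sizeof(buf);
 *	feat->flags = VFIO_DEVICE_FEATURE_MIG_DEVICE_STATE |
 *		      VFIO_DEVICE_FEATURE_SET;
 *	mig->device_state = VFIO_DEVICE_STATE_STOP_COPY;
 *	ioctl(device, VFIO_DEVICE_FEATURE, feat);
 *	while ((n = read(mig->data_fd, chunk, sizeof(chunk))) > 0)
 *		;	// forward n bytes of opaque migration data
 *	close(mig->data_fd);	// EOF: entire device state transferred
 */
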
/*
 * The device migration Finite State Machine is described by the enum
 * vfio_device_mig_state. Some of the FSM arcs will create a migration data
 * transfer session by returning a FD, in this case the migration data will
 * flow over the FD using read() and write() as discussed below.
 *
 * There are 5 states to support VFIO_MIGRATION_STOP_COPY:
 *  RUNNING - The device is running normally
 *  STOP - The device does not change the internal or external state
 *  STOP_COPY - The device internal state can be read out
 *  RESUMING - The device is stopped and is loading a new internal state
 *  ERROR - The device has failed and must be reset
 *
 * And optional states to support VFIO_MIGRATION_P2P:
 *  RUNNING_P2P - RUNNING, except the device cannot do peer to peer DMA
 * And VFIO_MIGRATION_PRE_COPY:
 *  PRE_COPY - The device is running normally but tracking internal state
 *             changes
 * And VFIO_MIGRATION_P2P | VFIO_MIGRATION_PRE_COPY:
 *  PRE_COPY_P2P - PRE_COPY, except the device cannot do peer to peer DMA
 *
 * The FSM takes actions on the arcs between FSM states. The driver implements
 * the following behavior for the FSM arcs:
 *
 * RUNNING_P2P -> STOP
 * STOP_COPY -> STOP
 *   While in STOP the device must stop the operation of the device. The device
 *   must not generate interrupts, DMA, or any other change to external state.
 *   It must not change its internal state. When stopped the device and kernel
 *   migration driver must accept and respond to interaction to support external
 *   subsystems in the STOP state, for example PCI MSI-X and PCI config space.
 *   Failure by the user to restrict device access while in STOP must not result
 *   in error conditions outside the user context (ex. host system faults).
 *
 *   The STOP_COPY arc will terminate a data transfer session.
 *
 * RESUMING -> STOP
 *   Leaving RESUMING terminates a data transfer session and indicates the
 *   device should complete processing of the data delivered by write(). The
 *   kernel migration driver should complete the incorporation of data written
 *   to the data transfer FD into the device internal state and perform
 *   final validity and consistency checking of the new device state. If the
 *   user provided data is found to be incomplete, inconsistent, or otherwise
 *   invalid, the migration driver must fail the SET_STATE ioctl and
 *   optionally go to the ERROR state as described below.
 *
 *   While in STOP the device has the same behavior as other STOP states
 *   described above.
 *
 *   To abort a RESUMING session the device must be reset.
 *
 * PRE_COPY -> RUNNING
 * RUNNING_P2P -> RUNNING
 *   While in RUNNING the device is fully operational, the device may generate
 *   interrupts, DMA, respond to MMIO, all vfio device regions are functional,
 *   and the device may advance its internal state.
 *
 *   The PRE_COPY arc will terminate a data transfer session.
 *
 * PRE_COPY_P2P -> RUNNING_P2P
 * RUNNING -> RUNNING_P2P
 * STOP -> RUNNING_P2P
 *   While in RUNNING_P2P the device is partially running in the P2P quiescent
 *   state defined below.
 *
 *   The PRE_COPY_P2P arc will terminate a data transfer session.
 *
 * RUNNING -> PRE_COPY
 * RUNNING_P2P -> PRE_COPY_P2P
 * STOP -> STOP_COPY
 *   PRE_COPY, PRE_COPY_P2P and STOP_COPY form the "saving group" of states
 *   which share a data transfer session. Moving between these states alters
 *   what is streamed in session, but does not terminate or otherwise affect
 *   the associated fd.
 *
 *   These arcs begin the process of saving the device state and will return a
 *   new data_fd. The migration driver may perform actions such as enabling
 *   dirty logging of device state when entering PRE_COPY or PRE_COPY_P2P.
 *
 *   Each arc does not change the device operation, the device remains
 *   RUNNING, P2P quiesced or in STOP. The STOP_COPY state is described below
 *   in PRE_COPY_P2P -> STOP_COPY.
 *
 * PRE_COPY -> PRE_COPY_P2P
 *   Entering PRE_COPY_P2P continues all the behaviors of PRE_COPY above.
 *   However, while in the PRE_COPY_P2P state, the device is partially running
 *   in the P2P quiescent state defined below, like RUNNING_P2P.
 *
 * PRE_COPY_P2P -> PRE_COPY
 *   This arc allows returning the device to a full RUNNING behavior while
 *   continuing all the behaviors of PRE_COPY.
 *
 * PRE_COPY_P2P -> STOP_COPY
 *   While in the STOP_COPY state the device has the same behavior as STOP
 *   with the addition that the data transfer session continues to stream the
 *   migration state. End of stream on the FD indicates the entire device
 *   state has been transferred.
 *
 *   The user should take steps to restrict access to vfio device regions while
 *   the device is in STOP_COPY or risk corruption of the device migration data
 *   stream.
 *
 * STOP -> RESUMING
 *   Entering the RESUMING state starts a process of restoring the device state
 *   and will return a new data_fd. The data stream fed into the data_fd should
 *   be taken from the data transfer output of a single FD during saving from
 *   a compatible device. The migration driver may alter/reset the internal
 *   device state for this arc if required to prepare the device to receive the
 *   migration data.
 *
 * STOP_COPY -> PRE_COPY
 * STOP_COPY -> PRE_COPY_P2P
 *   These arcs are not permitted and return error if requested. Future
 *   revisions of this API may define behaviors for these arcs, in this case
 *   support will be discoverable by a new flag in
 *   VFIO_DEVICE_FEATURE_MIGRATION.
 *
 * any -> ERROR
 *   ERROR cannot be specified as a device state, however any transition request
 *   can be failed with an errno return and may then move the device_state into
 *   ERROR. In this case the device was unable to execute the requested arc and
 *   was also unable to restore the device to any valid device_state.
 *   To recover from ERROR VFIO_DEVICE_RESET must be used to return the
 *   device_state back to RUNNING.
 *
 * The optional peer to peer (P2P) quiescent state is intended to be a quiescent
 * state for the device for the purposes of managing multiple devices within a
 * user context where peer-to-peer DMA between devices may be active. The
 * RUNNING_P2P and PRE_COPY_P2P states must prevent the device from initiating
 * any new P2P DMA transactions. If the device can identify P2P transactions
 * then it can stop only P2P DMA, otherwise it must stop all DMA. The migration
 * driver must complete any such outstanding operations prior to completing the
 * FSM arc into a P2P state. For the purpose of specification the states
 * behave as though the device was fully running if not supported. As in
 * STOP or STOP_COPY, the user must not touch the device, otherwise the state
 * can be exited.
 *
 * The remaining possible transitions are interpreted as combinations of the
 * above FSM arcs. As there are multiple paths through the FSM arcs the path
 * should be selected based on the following rules:
 *   - Select the shortest path.
 *   - The path cannot have saving group states as interior arcs, only
 *     starting/end states.
 * Refer to vfio_mig_get_next_state() for the result of the algorithm.
 *
 * The automatic transit through the FSM arcs that make up the combination
 * transition is invisible to the user. When working with combination arcs the
 * user may see any step along the path in the device_state if SET_STATE
 * fails. When handling these types of errors users should anticipate future
 * revisions of this protocol using new states and those states becoming
 * visible in this case.
 *
 * The optional states cannot be used with SET_STATE if the device does not
 * support them. The user can discover if these states are supported by using
 * VFIO_DEVICE_FEATURE_MIGRATION. By using combination transitions the user can
 * avoid knowing about these optional states if the kernel driver supports them.
 *
 * Arcs touching PRE_COPY and PRE_COPY_P2P are removed if support for PRE_COPY
 * is not present.
 */
enum vfio_device_mig_state {
        VFIO_DEVICE_STATE_ERROR = 0,
        VFIO_DEVICE_STATE_STOP = 1,
        VFIO_DEVICE_STATE_RUNNING = 2,
        VFIO_DEVICE_STATE_STOP_COPY = 3,
        VFIO_DEVICE_STATE_RESUMING = 4,
        VFIO_DEVICE_STATE_RUNNING_P2P = 5,
        VFIO_DEVICE_STATE_PRE_COPY = 6,
        VFIO_DEVICE_STATE_PRE_COPY_P2P = 7,
};

/**
 * VFIO_MIG_GET_PRECOPY_INFO - _IO(VFIO_TYPE, VFIO_BASE + 21)
 *
 * This ioctl is used on the migration data FD in the precopy phase of the
 * migration data transfer. It returns an estimate of the current data sizes
 * remaining to be transferred. It allows the user to judge when it is
 * appropriate to leave PRE_COPY for STOP_COPY.
 *
 * This ioctl is valid only in PRE_COPY states and the kernel driver should
 * return -EINVAL from any other migration state.
 *
 * The vfio_precopy_info data structure returned by this ioctl provides
 * estimates of data available from the device during the PRE_COPY states.
 * This estimate is split into two categories, initial_bytes and
 * dirty_bytes.
 *
 * The initial_bytes field indicates the amount of initial precopy
 * data available from the device. This field should have a non-zero initial
 * value and decrease as migration data is read from the device.
 * It is recommended to leave PRE_COPY for STOP_COPY only after this field
 * reaches zero. Leaving PRE_COPY earlier might make things slower.
 *
 * The dirty_bytes field tracks device state changes relative to data
 * previously retrieved.  This field starts at zero and may increase as
 * the internal device state is modified or decrease as that modified
 * state is read from the device.
 *
 * Userspace may use the combination of these fields to estimate the
 * potential data size available during the PRE_COPY phases, as well as
 * trends relative to the rate the device is dirtying its internal
 * state, but these fields are not required to have any bearing relative
 * to the data size available during the STOP_COPY phase.
 *
 * Drivers have a lot of flexibility in when and what they transfer during the
 * PRE_COPY phase, and how they report this from VFIO_MIG_GET_PRECOPY_INFO.
 *
 * During pre-copy the migration data FD has a temporary "end of stream" that is
 * reached when both initial_bytes and dirty_bytes are zero. For instance, this
 * may indicate that the device is idle and not currently dirtying any internal
 * state. When read() is done on this temporary end of stream the kernel driver
 * should return ENOMSG from read(). Userspace can wait for more data (which may
 * never come) by using poll.
 *
 * Once in STOP_COPY the migration data FD has a permanent end of stream
 * signaled in the usual way by read() always returning 0 and poll always
 * returning readable. ENOMSG may not be returned in STOP_COPY.
 * Support for this ioctl is mandatory if a driver claims to support
 * VFIO_MIGRATION_PRE_COPY.
 *
 * Return: 0 on success, -1 and errno set on failure.
 */
struct vfio_precopy_info {
        __u32 argsz;
        __u32 flags;
        __aligned_u64 initial_bytes;
        __aligned_u64 dirty_bytes;
};

#define VFIO_MIG_GET_PRECOPY_INFO _IO(VFIO_TYPE, VFIO_BASE + 21)

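/*
 * Example (editor's sketch, not part of the UAPI): deciding when to leave
 * PRE_COPY.  Assumes "data_fd" came from the RUNNING -> PRE_COPY arc and
 * data read from it is being forwarded to the migration destination.
 *
 *	struct vfio_precopy_info info = { .argsz = sizeof(info) };
 *
 *	for (;;) {
 *		if (ioctl(data_fd, VFIO_MIG_GET_PRECOPY_INFO, &info))
 *			return -1;
 *		if (info.initial_bytes == 0)
 *			break;	// a good point to move to STOP_COPY
 *		// read() and forward more precopy data; ENOMSG here means
 *		// a temporary end of stream, poll() for more
 *	}
 */
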
/*
 * Upon VFIO_DEVICE_FEATURE_SET, allow the device to be moved into a low power
 * state with the platform-based power management.  Device use of lower power
 * states depends on factors managed by the runtime power management core,
 * including system level support and coordinating support among dependent
 * devices.  Enabling device low power entry does not guarantee lower power
 * usage by the device, nor is a mechanism provided through this feature to
 * know the current power state of the device.  If any device access happens
 * (either from the host or through the vfio uAPI) when the device is in the
 * low power state, then the host will move the device out of the low power
 * state as necessary prior to the access.  Once the access is completed, the
 * device may re-enter the low power state.  For single shot low power support
 * with wake-up notification, see
 * VFIO_DEVICE_FEATURE_LOW_POWER_ENTRY_WITH_WAKEUP below.  Access to mmap'd
 * device regions is disabled on LOW_POWER_ENTRY and may only be resumed after
 * calling LOW_POWER_EXIT.
 */
#define VFIO_DEVICE_FEATURE_LOW_POWER_ENTRY 3

/*
 * This device feature has the same behavior as
 * VFIO_DEVICE_FEATURE_LOW_POWER_ENTRY with the exception that the user
 * provides an eventfd for wake-up notification.  When the device moves out of
 * the low power state for the wake-up, the host will not allow the device to
 * re-enter a low power state without a subsequent user call to one of the low
 * power entry device feature IOCTLs.  Access to mmap'd device regions is
 * disabled on LOW_POWER_ENTRY_WITH_WAKEUP and may only be resumed after the
 * low power exit.  The low power exit can happen either through LOW_POWER_EXIT
 * or through any other access (where the wake-up notification has been
 * generated).  The access to mmap'd device regions will not trigger low power
 * exit.
 *
 * The notification through the provided eventfd will be generated only when
 * the device has entered and is resumed from a low power state after
 * calling this device feature IOCTL.  A device that has not entered low power
 * state, as managed through the runtime power management core, will not
 * generate a notification through the provided eventfd on access.  Calling the
 * LOW_POWER_EXIT feature is optional in the case where notification has been
 * signaled on the provided eventfd that a resume from low power has occurred.
 */
struct vfio_device_low_power_entry_with_wakeup {
        __s32 wakeup_eventfd;
        __u32 reserved;
};

#define VFIO_DEVICE_FEATURE_LOW_POWER_ENTRY_WITH_WAKEUP 4

/*
 * Upon VFIO_DEVICE_FEATURE_SET, disallow use of device low power states as
 * previously enabled via VFIO_DEVICE_FEATURE_LOW_POWER_ENTRY or
 * VFIO_DEVICE_FEATURE_LOW_POWER_ENTRY_WITH_WAKEUP device features.
 * This device feature IOCTL may itself generate a wakeup eventfd notification
 * in the latter case if the device had previously entered a low power state.
 */
#define VFIO_DEVICE_FEATURE_LOW_POWER_EXIT 5

/*
 * Upon VFIO_DEVICE_FEATURE_SET start/stop device DMA logging.
 * VFIO_DEVICE_FEATURE_PROBE can be used to detect if the device supports
 * DMA logging.
 *
 * DMA logging allows a device to internally record what DMAs the device is
 * initiating and report them back to userspace. It is part of the VFIO
 * migration infrastructure that allows implementing dirty page tracking
 * during the pre copy phase of live migration. Only DMA WRITEs are logged,
 * and this API is not connected to VFIO_DEVICE_FEATURE_MIG_DEVICE_STATE.
 *
 * When DMA logging is started a range of IOVAs to monitor is provided and the
 * device can optimize its logging to cover only the IOVA range given. Each
 * DMA that the device initiates inside the range will be logged by the device
 * for later retrieval.
 *
 * page_size is an input that hints what tracking granularity the device
 * should try to achieve. If the device cannot do the hinted page size then
 * it's the driver choice which page size to pick based on its support.
 * On output the device will return the page size it selected.
 *
 * ranges is a pointer to an array of
 * struct vfio_device_feature_dma_logging_range.
 *
 * The core kernel code guarantees to support a minimum of num_ranges that fit
 * into a single kernel page. User space can try higher values but should give
 * up if the above can't be achieved due to driver limitations.
 *
 * A single call to start device DMA logging can be issued and a matching stop
 * should follow at the end. Another start is not allowed in the meantime.
 */
struct vfio_device_feature_dma_logging_control {
        __aligned_u64 page_size;
        __u32 num_ranges;
        __u32 __reserved;
        __aligned_u64 ranges;
};

struct vfio_device_feature_dma_logging_range {
        __aligned_u64 iova;
        __aligned_u64 length;
};

#define VFIO_DEVICE_FEATURE_DMA_LOGGING_START 6

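/*
 * Example (editor's sketch, not part of the UAPI): starting DMA logging
 * over a single, illustrative 1GiB IOVA range with a 4KiB tracking hint.
 *
 *	char buf[sizeof(struct vfio_device_feature) +
 *		 sizeof(struct vfio_device_feature_dma_logging_control)];
 *	struct vfio_device_feature *feat = (void *)buf;
 *	struct vfio_device_feature_dma_logging_control *ctrl =
 *		(void *)feat->data;
 *	struct vfio_device_feature_dma_logging_range range = {
 *		.iova = 0, .length = 1ULL << 30 };
 *
 *	feat->argsz = sizeof(buf);
 *	feat->flags = VFIO_DEVICE_FEATURE_DMA_LOGGING_START |
 *		      VFIO_DEVICE_FEATURE_SET;
 *	ctrl->page_size = 4096;		// hint; driver may pick another size
 *	ctrl->num_ranges = 1;
 *	ctrl->ranges = (__u64)(uintptr_t)&range;
 *	ioctl(device, VFIO_DEVICE_FEATURE, feat);
 *	// ctrl->page_size now holds the granularity the driver selected
 */
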
1236/*
1237 * Upon VFIO_DEVICE_FEATURE_SET stop device DMA logging that was started
1238 * by VFIO_DEVICE_FEATURE_DMA_LOGGING_START
1239 */
1240#define VFIO_DEVICE_FEATURE_DMA_LOGGING_STOP 7
1241
1242/*
1243 * Upon VFIO_DEVICE_FEATURE_GET read back and clear the device DMA log
1244 *
1245 * Query the device's DMA log for written pages within the given IOVA range.
1246 * During querying the log is cleared for the IOVA range.
1247 *
1248 * bitmap is a pointer to an array of u64s that will hold the output bitmap
1249 * with 1 bit reporting a page_size unit of IOVA. The mapping of IOVA to bits
1250 * is given by (with page index p = (addr - iova) / page_size):
1251 *  bitmap[p / 64] & (1ULL << (p % 64))
1252 *
1253 * The input page_size can be any power of two value and does not have to
1254 * match the value given to VFIO_DEVICE_FEATURE_DMA_LOGGING_START. The driver
1255 * will format its internal logging to match the reporting page size, possibly
1256 * by replicating bits if the internal page size is lower than requested.
1257 *
1258 * The LOGGING_REPORT will only set bits in the bitmap and never clear or
1259 * perform any initialization of the user provided bitmap.
1260 *
1261 * If any error is returned userspace should assume that the dirty log is
1262 * corrupted. Error recovery is to consider all memory dirty and try to
1263 * restart the dirty tracking, or to abort/restart the whole migration.
1264 *
1265 * If DMA logging is not enabled, an error will be returned.
1266 *
1267 */
1268struct vfio_device_feature_dma_logging_report {
1269        __aligned_u64 iova;
1270        __aligned_u64 length;
1271        __aligned_u64 page_size;
1272        __aligned_u64 bitmap;
1273};
1274
1275#define VFIO_DEVICE_FEATURE_DMA_LOGGING_REPORT 8
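
/*
 * Illustrative sketch (not part of the UAPI): reading back and clearing the
 * dirty log for the range being tracked.  One bit per page_size unit, so the
 * bitmap buffer must hold at least length / page_size bits.  device_fd is a
 * placeholder; error handling and the usual userspace includes are omitted.
 *
 *      __u64 page_size = 4096, length = 1ULL << 30;
 *      __u64 *bitmap = calloc((length / page_size + 63) / 64, sizeof(__u64));
 *      struct vfio_device_feature_dma_logging_report report = {
 *              .iova = 0,
 *              .length = length,
 *              .page_size = page_size,
 *              .bitmap = (__u64)(uintptr_t)bitmap,
 *      };
 *      char buf[sizeof(struct vfio_device_feature) + sizeof(report)];
 *      struct vfio_device_feature *feature = (void *)buf;
 *
 *      feature->argsz = sizeof(buf);
 *      feature->flags = VFIO_DEVICE_FEATURE_GET |
 *                       VFIO_DEVICE_FEATURE_DMA_LOGGING_REPORT;
 *      memcpy(feature->data, &report, sizeof(report));
 *      ioctl(device_fd, VFIO_DEVICE_FEATURE, feature);
 *      // page p of the range is dirty when
 *      // bitmap[p / 64] & (1ULL << (p % 64)) is set
 */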
1276
1277/*
1278 * Upon VFIO_DEVICE_FEATURE_GET read back the estimated data length that will
1279 * be required to complete the stop copy phase.
1280 *
1281 * Note: Can be called in any device state.
1282 */
1283
1284struct vfio_device_feature_mig_data_size {
1285        __aligned_u64 stop_copy_length;
1286};
1287
1288#define VFIO_DEVICE_FEATURE_MIG_DATA_SIZE 9
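
/*
 * Illustrative sketch (not part of the UAPI): querying the estimated stop
 * copy size through the VFIO_DEVICE_FEATURE ioctl defined earlier in this
 * header.  device_fd is a placeholder; error handling and the usual
 * userspace includes are omitted.
 *
 *      struct vfio_device_feature_mig_data_size *size_info;
 *      char buf[sizeof(struct vfio_device_feature) + sizeof(*size_info)];
 *      struct vfio_device_feature *feature = (void *)buf;
 *
 *      feature->argsz = sizeof(buf);
 *      feature->flags = VFIO_DEVICE_FEATURE_GET |
 *                       VFIO_DEVICE_FEATURE_MIG_DATA_SIZE;
 *      ioctl(device_fd, VFIO_DEVICE_FEATURE, feature);
 *      size_info = (struct vfio_device_feature_mig_data_size *)feature->data;
 *      // size_info->stop_copy_length now holds the estimate in bytes
 */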
1289
1290/* -------- API for Type1 VFIO IOMMU -------- */
1291
1292/**
1293 * VFIO_IOMMU_GET_INFO - _IOR(VFIO_TYPE, VFIO_BASE + 12, struct vfio_iommu_info)
1294 *
1295 * Retrieve information about the IOMMU object. Fills in provided
1296 * struct vfio_iommu_info. Caller sets argsz.
1297 *
1298 * XXX Should we do these by CHECK_EXTENSION too?
1299 */
1300struct vfio_iommu_type1_info {
1301        __u32   argsz;
1302        __u32   flags;
1303#define VFIO_IOMMU_INFO_PGSIZES (1 << 0)        /* supported page sizes info */
1304#define VFIO_IOMMU_INFO_CAPS    (1 << 1)        /* Info supports caps */
1305        __u64   iova_pgsizes;   /* Bitmap of supported page sizes */
1306        __u32   cap_offset;     /* Offset within info struct of first cap */
1307};
1308
1309/*
1310 * The IOVA capability allows reporting the valid IOVA range(s),
1311 * excluding any non-relaxable reserved regions exposed by
1312 * devices attached to the container. Any DMA map attempt
1313 * outside the valid iova ranges will return an error.
1314 *
1315 * The structures below define version 1 of this capability.
1316 */
1317#define VFIO_IOMMU_TYPE1_INFO_CAP_IOVA_RANGE  1
1318
1319struct vfio_iova_range {
1320        __u64   start;
1321        __u64   end;
1322};
1323
1324struct vfio_iommu_type1_info_cap_iova_range {
1325        struct  vfio_info_cap_header header;
1326        __u32   nr_iovas;
1327        __u32   reserved;
1328        struct  vfio_iova_range iova_ranges[];
1329};
1330
1331/*
1332 * The migration capability allows reporting the supported features for migration.
1333 *
1334 * The structures below define version 1 of this capability.
1335 *
1336 * The existence of this capability indicates that the IOMMU kernel driver
1337 * supports dirty page logging.
1338 *
1339 * pgsize_bitmap: Kernel driver returns a bitmap of supported page sizes for
1340 * dirty page logging.
1341 * max_dirty_bitmap_size: Kernel driver returns the maximum supported dirty
1342 * bitmap size in bytes that can be used by user applications when getting the
1343 * dirty bitmap.
1344 */
1345#define VFIO_IOMMU_TYPE1_INFO_CAP_MIGRATION  2
1346
1347struct vfio_iommu_type1_info_cap_migration {
1348        struct  vfio_info_cap_header header;
1349        __u32   flags;
1350        __u64   pgsize_bitmap;
1351        __u64   max_dirty_bitmap_size;          /* in bytes */
1352};
1353
1354/*
1355 * The DMA available capability allows reporting the current number of
1356 * simultaneously outstanding DMA mappings that are allowed.
1357 *
1358 * The structure below defines version 1 of this capability.
1359 *
1360 * avail: specifies the current number of outstanding DMA mappings allowed.
1361 */
1362#define VFIO_IOMMU_TYPE1_INFO_DMA_AVAIL 3
1363
1364struct vfio_iommu_type1_info_dma_avail {
1365        struct  vfio_info_cap_header header;
1366        __u32   avail;
1367};
1368
1369#define VFIO_IOMMU_GET_INFO _IO(VFIO_TYPE, VFIO_BASE + 12)
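
/*
 * Illustrative sketch (not part of the UAPI): retrieving the IOMMU info and
 * walking the capability chain for the IOVA range capability.  container_fd
 * is a placeholder for an open VFIO container file descriptor; allocation
 * failures and ioctl errors are not handled.
 *
 *      struct vfio_iommu_type1_info *info;
 *      __u32 argsz = sizeof(*info);
 *
 *      info = calloc(1, argsz);
 *      info->argsz = argsz;
 *      ioctl(container_fd, VFIO_IOMMU_GET_INFO, info);
 *      if (info->argsz > argsz) {              // capabilities need more room
 *              argsz = info->argsz;
 *              info = realloc(info, argsz);
 *              info->argsz = argsz;
 *              ioctl(container_fd, VFIO_IOMMU_GET_INFO, info);
 *      }
 *      if ((info->flags & VFIO_IOMMU_INFO_CAPS) && info->cap_offset) {
 *              struct vfio_info_cap_header *hdr =
 *                      (void *)((char *)info + info->cap_offset);
 *
 *              for (;;) {
 *                      if (hdr->id == VFIO_IOMMU_TYPE1_INFO_CAP_IOVA_RANGE) {
 *                              // found struct vfio_iommu_type1_info_cap_iova_range
 *                      }
 *                      if (!hdr->next)
 *                              break;
 *                      hdr = (void *)((char *)info + hdr->next);
 *              }
 *      }
 */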
1370
1371/**
1372 * VFIO_IOMMU_MAP_DMA - _IOW(VFIO_TYPE, VFIO_BASE + 13, struct vfio_dma_map)
1373 *
1374 * Map process virtual addresses to IO virtual addresses using the
1375 * provided struct vfio_dma_map. Caller sets argsz. READ &/ WRITE required.
1376 *
1377 * If flags & VFIO_DMA_MAP_FLAG_VADDR, update the base vaddr for iova. The vaddr
1378 * must have previously been invalidated with VFIO_DMA_UNMAP_FLAG_VADDR.  To
1379 * maintain memory consistency within the user application, the updated vaddr
1380 * must address the same memory object as originally mapped.  Failure to do so
1381 * will result in user memory corruption and/or device misbehavior.  iova and
1382 * size must match those in the original MAP_DMA call.  Protection is not
1383 * changed, and the READ & WRITE flags must be 0.
1384 */
1385struct vfio_iommu_type1_dma_map {
1386        __u32   argsz;
1387        __u32   flags;
1388#define VFIO_DMA_MAP_FLAG_READ (1 << 0)         /* readable from device */
1389#define VFIO_DMA_MAP_FLAG_WRITE (1 << 1)        /* writable from device */
1390#define VFIO_DMA_MAP_FLAG_VADDR (1 << 2)
1391        __u64   vaddr;                          /* Process virtual address */
1392        __u64   iova;                           /* IO virtual address */
1393        __u64   size;                           /* Size of mapping (bytes) */
1394};
1395
1396#define VFIO_IOMMU_MAP_DMA _IO(VFIO_TYPE, VFIO_BASE + 13)
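
/*
 * Illustrative sketch (not part of the UAPI): mapping 2 MiB of anonymous
 * memory at IOVA 0 for device read and write.  container_fd is a
 * placeholder; error handling and the usual userspace includes are omitted.
 *
 *      size_t size = 2 * 1024 * 1024;
 *      void *vaddr = mmap(NULL, size, PROT_READ | PROT_WRITE,
 *                         MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
 *      struct vfio_iommu_type1_dma_map map = {
 *              .argsz = sizeof(map),
 *              .flags = VFIO_DMA_MAP_FLAG_READ | VFIO_DMA_MAP_FLAG_WRITE,
 *              .vaddr = (__u64)(uintptr_t)vaddr,
 *              .iova = 0,
 *              .size = size,
 *      };
 *
 *      ioctl(container_fd, VFIO_IOMMU_MAP_DMA, &map);
 */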
1397
1398struct vfio_bitmap {
1399        __u64        pgsize;    /* page size for bitmap in bytes */
1400        __u64        size;      /* in bytes */
1401        __u64 *data;    /* one bit per page */
1402};
1403
1404/**
1405 * VFIO_IOMMU_UNMAP_DMA - _IOWR(VFIO_TYPE, VFIO_BASE + 14,
1406 *                                                      struct vfio_dma_unmap)
1407 *
1408 * Unmap IO virtual addresses using the provided struct vfio_dma_unmap.
1409 * Caller sets argsz.  The actual unmapped size is returned in the size
1410 * field.  No guarantee is made to the user that arbitrary unmaps of iova
1411 * or size different from those used in the original mapping call will
1412 * succeed.
1413 *
1414 * VFIO_DMA_UNMAP_FLAG_GET_DIRTY_BITMAP should be set to get the dirty bitmap
1415 * before unmapping IO virtual addresses. When this flag is set, the user must
1416 * provide a struct vfio_bitmap in data[]. The user must provide zeroed
1417 * memory via vfio_bitmap.data and its size in the vfio_bitmap.size field.
1418 * Each bit in the bitmap represents one page of the user-provided page size
1419 * (vfio_bitmap.pgsize field), consecutively starting from the iova offset.
1420 * A set bit indicates that the page at that offset from iova is dirty. A
1421 * bitmap of the pages in the range of the unmapped size is returned in the
1422 * user-provided vfio_bitmap.data.
1423 *
1424 * If flags & VFIO_DMA_UNMAP_FLAG_ALL, unmap all addresses.  iova and size
1425 * must be 0.  This cannot be combined with the get-dirty-bitmap flag.
1426 *
1427 * If flags & VFIO_DMA_UNMAP_FLAG_VADDR, do not unmap, but invalidate host
1428 * virtual addresses in the iova range.  DMA to already-mapped pages continues.
1429 * Groups may not be added to the container while any addresses are invalid.
1430 * This cannot be combined with the get-dirty-bitmap flag.
1431 */
1432struct vfio_iommu_type1_dma_unmap {
1433        __u32   argsz;
1434        __u32   flags;
1435#define VFIO_DMA_UNMAP_FLAG_GET_DIRTY_BITMAP (1 << 0)
1436#define VFIO_DMA_UNMAP_FLAG_ALL              (1 << 1)
1437#define VFIO_DMA_UNMAP_FLAG_VADDR            (1 << 2)
1438        __u64   iova;                           /* IO virtual address */
1439        __u64   size;                           /* Size of mapping (bytes) */
1440        __u8    data[];
1441};
1442
1443#define VFIO_IOMMU_UNMAP_DMA _IO(VFIO_TYPE, VFIO_BASE + 14)
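
/*
 * Illustrative sketch (not part of the UAPI): unmapping a previously mapped
 * range while collecting its dirty bitmap.  iova, size and container_fd are
 * placeholders for the values used in the original MAP_DMA call; error
 * handling and the usual userspace includes are omitted.
 *
 *      __u64 pgsize = 4096;
 *      struct vfio_iommu_type1_dma_unmap *unmap;
 *      struct vfio_bitmap *bitmap;
 *
 *      unmap = calloc(1, sizeof(*unmap) + sizeof(*bitmap));
 *      bitmap = (struct vfio_bitmap *)unmap->data;
 *      unmap->argsz = sizeof(*unmap) + sizeof(*bitmap);
 *      unmap->flags = VFIO_DMA_UNMAP_FLAG_GET_DIRTY_BITMAP;
 *      unmap->iova = iova;
 *      unmap->size = size;
 *      bitmap->pgsize = pgsize;
 *      bitmap->size = ((size / pgsize + 63) / 64) * sizeof(__u64);
 *      bitmap->data = calloc(1, bitmap->size);
 *      ioctl(container_fd, VFIO_IOMMU_UNMAP_DMA, unmap);
 *      // unmap->size now reports the actual unmapped size
 */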
1444
1445/*
1446 * IOCTLs to enable/disable IOMMU container usage.
1447 * No parameters are supported.
1448 */
1449#define VFIO_IOMMU_ENABLE       _IO(VFIO_TYPE, VFIO_BASE + 15)
1450#define VFIO_IOMMU_DISABLE      _IO(VFIO_TYPE, VFIO_BASE + 16)
1451
1452/**
1453 * VFIO_IOMMU_DIRTY_PAGES - _IOWR(VFIO_TYPE, VFIO_BASE + 17,
1454 *                                     struct vfio_iommu_type1_dirty_bitmap)
1455 * This IOCTL is used for dirty pages logging.
1456 * The caller should set the flag depending on which operation to perform;
1457 * details are below:
1458 *
1459 * Calling the IOCTL with the VFIO_IOMMU_DIRTY_PAGES_FLAG_START flag set instructs
1460 * the IOMMU driver to log pages that are dirtied or potentially dirtied by
1461 * the device; designed to be used when a migration is in progress. Dirty pages
1462 * are logged until logging is disabled by the user application by calling the
1463 * IOCTL with the VFIO_IOMMU_DIRTY_PAGES_FLAG_STOP flag.
1464 *
1465 * Calling the IOCTL with the VFIO_IOMMU_DIRTY_PAGES_FLAG_STOP flag set instructs
1466 * the IOMMU driver to stop logging dirtied pages.
1467 *
1468 * Calling the IOCTL with the VFIO_IOMMU_DIRTY_PAGES_FLAG_GET_BITMAP flag set
1469 * returns the dirty pages bitmap of the IOMMU container for a given IOVA range.
1470 * The user must specify the IOVA range and the pgsize through the structure
1471 * vfio_iommu_type1_dirty_bitmap_get in the data[] portion. This interface
1472 * supports getting a bitmap of the smallest supported pgsize only and can be
1473 * modified in the future to get a bitmap of any specified supported pgsize. The
1474 * user must provide a zeroed memory area for the bitmap memory and specify its
1475 * size in bitmap.size. One bit is used to represent one page consecutively
1476 * starting from iova offset. The user should provide page size in bitmap.pgsize
1477 * field. A bit set in the bitmap indicates that the page at that offset from
1478 * iova is dirty. The caller must set argsz to a value including the size of
1479 * structure vfio_iommu_type1_dirty_bitmap_get, but excluding the size of the
1480 * actual bitmap. If dirty pages logging is not enabled, an error will be
1481 * returned.
1482 *
1483 * Only one of the flags _START, _STOP and _GET may be specified at a time.
1484 *
1485 */
1486struct vfio_iommu_type1_dirty_bitmap {
1487        __u32        argsz;
1488        __u32        flags;
1489#define VFIO_IOMMU_DIRTY_PAGES_FLAG_START       (1 << 0)
1490#define VFIO_IOMMU_DIRTY_PAGES_FLAG_STOP        (1 << 1)
1491#define VFIO_IOMMU_DIRTY_PAGES_FLAG_GET_BITMAP  (1 << 2)
1492        __u8         data[];
1493};
1494
1495struct vfio_iommu_type1_dirty_bitmap_get {
1496        __u64              iova;        /* IO virtual address */
1497        __u64              size;        /* Size of iova range */
1498        struct vfio_bitmap bitmap;
1499};
1500
1501#define VFIO_IOMMU_DIRTY_PAGES             _IO(VFIO_TYPE, VFIO_BASE + 17)
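
/*
 * Illustrative sketch (not part of the UAPI): enabling type1 dirty page
 * logging and fetching the bitmap for one IOVA range.  iova, size and
 * container_fd are placeholders; error handling and the usual userspace
 * includes are omitted.
 *
 *      struct vfio_iommu_type1_dirty_bitmap start = {
 *              .argsz = sizeof(start),
 *              .flags = VFIO_IOMMU_DIRTY_PAGES_FLAG_START,
 *      };
 *      char buf[sizeof(struct vfio_iommu_type1_dirty_bitmap) +
 *               sizeof(struct vfio_iommu_type1_dirty_bitmap_get)];
 *      struct vfio_iommu_type1_dirty_bitmap *dbitmap = (void *)buf;
 *      struct vfio_iommu_type1_dirty_bitmap_get *range = (void *)dbitmap->data;
 *
 *      ioctl(container_fd, VFIO_IOMMU_DIRTY_PAGES, &start);
 *      // ... let the workload run, then collect the bitmap ...
 *      dbitmap->argsz = sizeof(buf);           // excludes the bitmap data itself
 *      dbitmap->flags = VFIO_IOMMU_DIRTY_PAGES_FLAG_GET_BITMAP;
 *      range->iova = iova;
 *      range->size = size;
 *      range->bitmap.pgsize = 4096;
 *      range->bitmap.size = ((size / 4096 + 63) / 64) * sizeof(__u64);
 *      range->bitmap.data = calloc(1, range->bitmap.size);
 *      ioctl(container_fd, VFIO_IOMMU_DIRTY_PAGES, dbitmap);
 */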
1502
1503/* -------- Additional API for SPAPR TCE (Server POWERPC) IOMMU -------- */
1504
1505/*
1506 * The SPAPR TCE DDW info struct provides the information about
1507 * the details of Dynamic DMA window capability.
1508 *
1509 * @pgsizes contains a page size bitmask; 4K/64K/16M are supported.
1510 * @max_dynamic_windows_supported tells the maximum number of windows
1511 * which the platform can create.
1512 * @levels tells the maximum number of levels in multi-level IOMMU tables;
1513 * this allows splitting a table into smaller chunks which reduces
1514 * the amount of physically contiguous memory required for the table.
1515 */
1516struct vfio_iommu_spapr_tce_ddw_info {
1517        __u64 pgsizes;                  /* Bitmap of supported page sizes */
1518        __u32 max_dynamic_windows_supported;
1519        __u32 levels;
1520};
1521
1522/*
1523 * The SPAPR TCE info struct provides information about the PCI bus
1524 * address ranges available for DMA; these values are programmed into
1525 * the hardware, so the guest has to know them.
1526 *
1527 * The DMA 32 bit window start is an absolute PCI bus address.
1528 * The IOVA addresses passed via map/unmap ioctls are absolute PCI bus
1529 * addresses too, so the window works as a filter rather than an offset
1530 * for IOVA addresses.
1531 *
1532 * Flags supported:
1533 * - VFIO_IOMMU_SPAPR_INFO_DDW: informs the userspace that dynamic DMA windows
1534 *   (DDW) support is present. @ddw is only supported when DDW is present.
1535 */
1536struct vfio_iommu_spapr_tce_info {
1537        __u32 argsz;
1538        __u32 flags;
1539#define VFIO_IOMMU_SPAPR_INFO_DDW       (1 << 0)        /* DDW supported */
1540        __u32 dma32_window_start;       /* 32 bit window start (bytes) */
1541        __u32 dma32_window_size;        /* 32 bit window size (bytes) */
1542        struct vfio_iommu_spapr_tce_ddw_info ddw;
1543};
1544
1545#define VFIO_IOMMU_SPAPR_TCE_GET_INFO   _IO(VFIO_TYPE, VFIO_BASE + 12)
1546
1547/*
1548 * EEH PE operation struct provides ways to:
1549 * - enable/disable EEH functionality;
1550 * - unfreeze IO/DMA for frozen PE;
1551 * - read PE state;
1552 * - reset PE;
1553 * - configure PE;
1554 * - inject EEH error.
1555 */
1556struct vfio_eeh_pe_err {
1557        __u32 type;
1558        __u32 func;
1559        __u64 addr;
1560        __u64 mask;
1561};
1562
1563struct vfio_eeh_pe_op {
1564        __u32 argsz;
1565        __u32 flags;
1566        __u32 op;
1567        union {
1568                struct vfio_eeh_pe_err err;
1569        };
1570};
1571
1572#define VFIO_EEH_PE_DISABLE             0       /* Disable EEH functionality */
1573#define VFIO_EEH_PE_ENABLE              1       /* Enable EEH functionality  */
1574#define VFIO_EEH_PE_UNFREEZE_IO         2       /* Enable IO for frozen PE   */
1575#define VFIO_EEH_PE_UNFREEZE_DMA        3       /* Enable DMA for frozen PE  */
1576#define VFIO_EEH_PE_GET_STATE           4       /* PE state retrieval        */
1577#define  VFIO_EEH_PE_STATE_NORMAL       0       /* PE in functional state    */
1578#define  VFIO_EEH_PE_STATE_RESET        1       /* PE reset in progress      */
1579#define  VFIO_EEH_PE_STATE_STOPPED      2       /* Stopped DMA and IO        */
1580#define  VFIO_EEH_PE_STATE_STOPPED_DMA  4       /* Stopped DMA only          */
1581#define  VFIO_EEH_PE_STATE_UNAVAIL      5       /* State unavailable         */
1582#define VFIO_EEH_PE_RESET_DEACTIVATE    5       /* Deassert PE reset         */
1583#define VFIO_EEH_PE_RESET_HOT           6       /* Assert hot reset          */
1584#define VFIO_EEH_PE_RESET_FUNDAMENTAL   7       /* Assert fundamental reset  */
1585#define VFIO_EEH_PE_CONFIGURE           8       /* PE configuration          */
1586#define VFIO_EEH_PE_INJECT_ERR          9       /* Inject EEH error          */
1587
1588#define VFIO_EEH_PE_OP                  _IO(VFIO_TYPE, VFIO_BASE + 21)
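
/*
 * Illustrative sketch (not part of the UAPI): enabling EEH for the PE backing
 * a container and later asserting a hot reset.  container_fd is a
 * placeholder; error handling and the usual userspace includes are omitted.
 *
 *      struct vfio_eeh_pe_op op = {
 *              .argsz = sizeof(op),
 *              .flags = 0,
 *              .op = VFIO_EEH_PE_ENABLE,
 *      };
 *
 *      ioctl(container_fd, VFIO_EEH_PE_OP, &op);
 *      op.op = VFIO_EEH_PE_RESET_HOT;
 *      ioctl(container_fd, VFIO_EEH_PE_OP, &op);
 */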
1589
1590/**
1591 * VFIO_IOMMU_SPAPR_REGISTER_MEMORY - _IOW(VFIO_TYPE, VFIO_BASE + 17, struct vfio_iommu_spapr_register_memory)
1592 *
1593 * Registers user space memory where DMA is allowed. It pins
1594 * user pages and does the locked memory accounting, so
1595 * subsequent VFIO_IOMMU_MAP_DMA/VFIO_IOMMU_UNMAP_DMA calls
1596 * are faster.
1597 */
1598struct vfio_iommu_spapr_register_memory {
1599        __u32   argsz;
1600        __u32   flags;
1601        __u64   vaddr;                          /* Process virtual address */
1602        __u64   size;                           /* Size of mapping (bytes) */
1603};
1604#define VFIO_IOMMU_SPAPR_REGISTER_MEMORY        _IO(VFIO_TYPE, VFIO_BASE + 17)
1605
1606/**
1607 * VFIO_IOMMU_SPAPR_UNREGISTER_MEMORY - _IOW(VFIO_TYPE, VFIO_BASE + 18, struct vfio_iommu_spapr_register_memory)
1608 *
1609 * Unregisters user space memory registered with
1610 * VFIO_IOMMU_SPAPR_REGISTER_MEMORY.
1611 * Uses vfio_iommu_spapr_register_memory for parameters.
1612 */
1613#define VFIO_IOMMU_SPAPR_UNREGISTER_MEMORY      _IO(VFIO_TYPE, VFIO_BASE + 18)
1614
1615/**
1616 * VFIO_IOMMU_SPAPR_TCE_CREATE - _IOWR(VFIO_TYPE, VFIO_BASE + 19, struct vfio_iommu_spapr_tce_create)
1617 *
1618 * Creates an additional TCE table and programs it (sets a new DMA window)
1619 * into every IOMMU group in the container. It receives the page shift, window
1620 * size and number of levels of the TCE table being created.
1621 *
1622 * It allocates the new DMA window and returns its offset on the PCI bus.
1623 */
1624struct vfio_iommu_spapr_tce_create {
1625        __u32 argsz;
1626        __u32 flags;
1627        /* in */
1628        __u32 page_shift;
1629        __u32 __resv1;
1630        __u64 window_size;
1631        __u32 levels;
1632        __u32 __resv2;
1633        /* out */
1634        __u64 start_addr;
1635};
1636#define VFIO_IOMMU_SPAPR_TCE_CREATE     _IO(VFIO_TYPE, VFIO_BASE + 19)
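
/*
 * Illustrative sketch (not part of the UAPI): creating an additional DMA
 * window backed by 64K IOMMU pages.  container_fd is a placeholder; error
 * handling and the usual userspace includes are omitted.
 *
 *      struct vfio_iommu_spapr_tce_create create = {
 *              .argsz = sizeof(create),
 *              .page_shift = 16,               // 64K IOMMU page size
 *              .window_size = 1ULL << 32,      // 4 GiB of IOVA space
 *              .levels = 1,
 *      };
 *
 *      ioctl(container_fd, VFIO_IOMMU_SPAPR_TCE_CREATE, &create);
 *      // create.start_addr now holds the PCI bus offset of the new window
 */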
1637
1638/**
1639 * VFIO_IOMMU_SPAPR_TCE_REMOVE - _IOW(VFIO_TYPE, VFIO_BASE + 20, struct vfio_iommu_spapr_tce_remove)
1640 *
1641 * Unprograms a TCE table from all groups in the container and destroys it.
1642 * It receives a PCI bus offset as a window id.
1643 */
1644struct vfio_iommu_spapr_tce_remove {
1645        __u32 argsz;
1646        __u32 flags;
1647        /* in */
1648        __u64 start_addr;
1649};
1650#define VFIO_IOMMU_SPAPR_TCE_REMOVE     _IO(VFIO_TYPE, VFIO_BASE + 20)
1651
1652/* ***************************************************************** */
1653
1654#endif /* VFIO_H */
1655