qemu/linux-headers/linux/vfio.h
/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
/*
 * VFIO API definition
 *
 * Copyright (C) 2012 Red Hat, Inc.  All rights reserved.
 *     Author: Alex Williamson <alex.williamson@redhat.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#ifndef VFIO_H
#define VFIO_H

#include <linux/types.h>
#include <linux/ioctl.h>

#define VFIO_API_VERSION        0


/* Kernel & User level defines for VFIO IOCTLs. */

/* Extensions */

#define VFIO_TYPE1_IOMMU                1
#define VFIO_SPAPR_TCE_IOMMU            2
#define VFIO_TYPE1v2_IOMMU              3
/*
 * IOMMU enforces DMA cache coherence (ex. PCIe NoSnoop stripping).  This
 * capability is subject to change as groups are added or removed.
 */
#define VFIO_DMA_CC_IOMMU               4

/* Check if EEH is supported */
#define VFIO_EEH                        5

/* Two-stage IOMMU */
#define VFIO_TYPE1_NESTING_IOMMU        6       /* Implies v2 */

#define VFIO_SPAPR_TCE_v2_IOMMU         7

/*
 * The No-IOMMU IOMMU offers no translation or isolation for devices and
 * supports no ioctls outside of VFIO_CHECK_EXTENSION.  Use of VFIO's No-IOMMU
 * code will taint the host kernel and should be used with extreme caution.
 */
#define VFIO_NOIOMMU_IOMMU              8

/* Supports VFIO_DMA_UNMAP_FLAG_ALL */
#define VFIO_UNMAP_ALL                  9

/* Supports the vaddr flag for DMA map and unmap */
#define VFIO_UPDATE_VADDR               10

/*
 * The IOCTL interface is designed for extensibility by embedding the
 * structure length (argsz) and flags into structures passed between
 * kernel and userspace.  We therefore use the _IO() macro for these
 * defines to avoid implicitly embedding a size into the ioctl request.
 * As structure fields are added, argsz will increase to match and flag
 * bits will be defined to indicate additional fields with valid data.
 * It's *always* the caller's responsibility to indicate the size of
 * the structure passed by setting argsz appropriately.
 */

#define VFIO_TYPE       (';')
#define VFIO_BASE       100

/*
 * For extension of INFO ioctls, VFIO makes use of a capability chain
 * designed after PCI/e capabilities.  A flag bit indicates whether
 * this capability chain is supported and a field defined in the fixed
 * structure defines the offset of the first capability in the chain.
 * This field is only valid when the corresponding bit in the flags
 * bitmap is set.  This offset field is relative to the start of the
 * INFO buffer, as is the next field within each capability header.
 * The id within the header is a shared address space per INFO ioctl,
 * while the version field is specific to the capability id.  The
 * contents following the header are specific to the capability id.
 */
struct vfio_info_cap_header {
        __u16   id;             /* Identifies capability */
        __u16   version;        /* Version specific to the capability ID */
        __u32   next;           /* Offset of next capability */
};

/*
 * Callers of INFO ioctls passing insufficiently sized buffers will see
 * the capability chain flag bit set, a zero value for the first capability
 * offset (if available within the provided argsz), and argsz will be
 * updated to report the necessary buffer size.  For compatibility, the
 * INFO ioctl will not report error in this case, but the capability chain
 * will not be available.
 */
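
/*
 * Example (editor's illustrative sketch, not part of this header): the
 * usual two-call argsz pattern for an INFO ioctl followed by a walk of the
 * capability chain.  `device_fd` is a hypothetical VFIO device fd,
 * VFIO_DEVICE_GET_INFO and VFIO_DEVICE_FLAGS_CAPS are defined below, and
 * error handling is elided.
 *
 *      struct vfio_device_info hdr = { .argsz = sizeof(hdr) };
 *      struct vfio_device_info *info;
 *
 *      ioctl(device_fd, VFIO_DEVICE_GET_INFO, &hdr);
 *      info = calloc(1, hdr.argsz);    // argsz was updated to the full size
 *      info->argsz = hdr.argsz;
 *      ioctl(device_fd, VFIO_DEVICE_GET_INFO, info);
 *
 *      if ((info->flags & VFIO_DEVICE_FLAGS_CAPS) && info->cap_offset) {
 *              __u32 off = info->cap_offset;   // relative to buffer start
 *
 *              while (off) {
 *                      struct vfio_info_cap_header *cap =
 *                              (void *)((char *)info + off);
 *                      // cap->id selects the capability, cap->version its
 *                      // layout; a zero cap->next terminates the chain.
 *                      off = cap->next;
 *              }
 *      }
 */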

/* -------- IOCTLs for VFIO file descriptor (/dev/vfio/vfio) -------- */

/**
 * VFIO_GET_API_VERSION - _IO(VFIO_TYPE, VFIO_BASE + 0)
 *
 * Report the version of the VFIO API.  This allows us to bump the entire
 * API version should we later need to add or change features in incompatible
 * ways.
 * Return: VFIO_API_VERSION
 * Availability: Always
 */
#define VFIO_GET_API_VERSION            _IO(VFIO_TYPE, VFIO_BASE + 0)

/**
 * VFIO_CHECK_EXTENSION - _IOW(VFIO_TYPE, VFIO_BASE + 1, __u32)
 *
 * Check whether an extension is supported.
 * Return: 0 if not supported, 1 (or some other positive integer) if supported.
 * Availability: Always
 */
#define VFIO_CHECK_EXTENSION            _IO(VFIO_TYPE, VFIO_BASE + 1)

/**
 * VFIO_SET_IOMMU - _IOW(VFIO_TYPE, VFIO_BASE + 2, __s32)
 *
 * Set the iommu to the given type.  The type must be supported by an
 * iommu driver as verified by calling CHECK_EXTENSION using the same
 * type.  A group must be set to this file descriptor before this
 * ioctl is available.  The IOMMU interfaces enabled by this call are
 * specific to the value set.
 * Return: 0 on success, -errno on failure
 * Availability: When VFIO group attached
 */
#define VFIO_SET_IOMMU                  _IO(VFIO_TYPE, VFIO_BASE + 2)

/* -------- IOCTLs for GROUP file descriptors (/dev/vfio/$GROUP) -------- */

/**
 * VFIO_GROUP_GET_STATUS - _IOR(VFIO_TYPE, VFIO_BASE + 3,
 *                                              struct vfio_group_status)
 *
 * Retrieve information about the group.  Fills in provided
 * struct vfio_group_status.  Caller sets argsz.
 * Return: 0 on success, -errno on failure.
 * Availability: Always
 */
struct vfio_group_status {
        __u32   argsz;
        __u32   flags;
#define VFIO_GROUP_FLAGS_VIABLE         (1 << 0)
#define VFIO_GROUP_FLAGS_CONTAINER_SET  (1 << 1)
};
#define VFIO_GROUP_GET_STATUS           _IO(VFIO_TYPE, VFIO_BASE + 3)

/**
 * VFIO_GROUP_SET_CONTAINER - _IOW(VFIO_TYPE, VFIO_BASE + 4, __s32)
 *
 * Set the container for the VFIO group to the open VFIO file
 * descriptor provided.  Groups may only belong to a single
 * container.  Containers may, at their discretion, support multiple
 * groups.  Only when a container is set are all of the interfaces
 * of the VFIO file descriptor and the VFIO group file descriptor
 * available to the user.
 * Return: 0 on success, -errno on failure.
 * Availability: Always
 */
#define VFIO_GROUP_SET_CONTAINER        _IO(VFIO_TYPE, VFIO_BASE + 4)

/**
 * VFIO_GROUP_UNSET_CONTAINER - _IO(VFIO_TYPE, VFIO_BASE + 5)
 *
 * Remove the group from the attached container.  This is the
 * opposite of the SET_CONTAINER call and returns the group to
 * an initial state.  All device file descriptors must be released
 * prior to calling this interface.  When removing the last group
 * from a container, the IOMMU will be disabled and all state lost,
 * effectively also returning the VFIO file descriptor to an initial
 * state.
 * Return: 0 on success, -errno on failure.
 * Availability: When attached to container
 */
#define VFIO_GROUP_UNSET_CONTAINER      _IO(VFIO_TYPE, VFIO_BASE + 5)

/**
 * VFIO_GROUP_GET_DEVICE_FD - _IOW(VFIO_TYPE, VFIO_BASE + 6, char)
 *
 * Return a new file descriptor for the device object described by
 * the provided string.  The string should match a device listed in
 * the devices subdirectory of the IOMMU group sysfs entry.  The
 * group containing the device must already be added to this context.
 * Return: new file descriptor on success, -errno on failure.
 * Availability: When attached to container
 */
#define VFIO_GROUP_GET_DEVICE_FD        _IO(VFIO_TYPE, VFIO_BASE + 6)
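
/*
 * Example (editor's illustrative sketch, not part of this header): the
 * canonical container/group/device setup using the ioctls above, for a
 * hypothetical group "26" and device "0000:06:0d.0"; error handling is
 * elided.
 *
 *      int container = open("/dev/vfio/vfio", O_RDWR);
 *
 *      if (ioctl(container, VFIO_GET_API_VERSION) != VFIO_API_VERSION)
 *              return;         // unknown API version
 *      if (!ioctl(container, VFIO_CHECK_EXTENSION, VFIO_TYPE1_IOMMU))
 *              return;         // IOMMU type not supported
 *
 *      int group = open("/dev/vfio/26", O_RDWR);
 *      struct vfio_group_status status = { .argsz = sizeof(status) };
 *
 *      ioctl(group, VFIO_GROUP_GET_STATUS, &status);
 *      if (!(status.flags & VFIO_GROUP_FLAGS_VIABLE))
 *              return;         // not all group devices bound to vfio
 *
 *      ioctl(group, VFIO_GROUP_SET_CONTAINER, &container);
 *      ioctl(container, VFIO_SET_IOMMU, VFIO_TYPE1_IOMMU);
 *
 *      int device = ioctl(group, VFIO_GROUP_GET_DEVICE_FD, "0000:06:0d.0");
 */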

/* --------------- IOCTLs for DEVICE file descriptors --------------- */

/**
 * VFIO_DEVICE_GET_INFO - _IOR(VFIO_TYPE, VFIO_BASE + 7,
 *                                              struct vfio_device_info)
 *
 * Retrieve information about the device.  Fills in provided
 * struct vfio_device_info.  Caller sets argsz.
 * Return: 0 on success, -errno on failure.
 */
struct vfio_device_info {
        __u32   argsz;
        __u32   flags;
#define VFIO_DEVICE_FLAGS_RESET (1 << 0)        /* Device supports reset */
#define VFIO_DEVICE_FLAGS_PCI   (1 << 1)        /* vfio-pci device */
#define VFIO_DEVICE_FLAGS_PLATFORM (1 << 2)     /* vfio-platform device */
#define VFIO_DEVICE_FLAGS_AMBA  (1 << 3)        /* vfio-amba device */
#define VFIO_DEVICE_FLAGS_CCW   (1 << 4)        /* vfio-ccw device */
#define VFIO_DEVICE_FLAGS_AP    (1 << 5)        /* vfio-ap device */
#define VFIO_DEVICE_FLAGS_FSL_MC (1 << 6)       /* vfio-fsl-mc device */
#define VFIO_DEVICE_FLAGS_CAPS  (1 << 7)        /* Info supports caps */
        __u32   num_regions;    /* Max region index + 1 */
        __u32   num_irqs;       /* Max IRQ index + 1 */
        __u32   cap_offset;     /* Offset within info struct of first cap */
};
#define VFIO_DEVICE_GET_INFO            _IO(VFIO_TYPE, VFIO_BASE + 7)

/*
 * A vendor driver using the mediated device framework should provide a
 * device_api attribute in its supported type attribute groups.  The device
 * API string should be one of the following, corresponding to the device
 * flags in the vfio_device_info structure.
 */

#define VFIO_DEVICE_API_PCI_STRING              "vfio-pci"
#define VFIO_DEVICE_API_PLATFORM_STRING         "vfio-platform"
#define VFIO_DEVICE_API_AMBA_STRING             "vfio-amba"
#define VFIO_DEVICE_API_CCW_STRING              "vfio-ccw"
#define VFIO_DEVICE_API_AP_STRING               "vfio-ap"

/*
 * The following capabilities are unique to s390 zPCI devices.  Their contents
 * are further defined in vfio_zdev.h.
 */
#define VFIO_DEVICE_INFO_CAP_ZPCI_BASE          1
#define VFIO_DEVICE_INFO_CAP_ZPCI_GROUP         2
#define VFIO_DEVICE_INFO_CAP_ZPCI_UTIL          3
#define VFIO_DEVICE_INFO_CAP_ZPCI_PFIP          4

/**
 * VFIO_DEVICE_GET_REGION_INFO - _IOWR(VFIO_TYPE, VFIO_BASE + 8,
 *                                     struct vfio_region_info)
 *
 * Retrieve information about a device region.  Caller provides
 * struct vfio_region_info with index value set.  Caller sets argsz.
 * Implementation of region mapping is bus driver specific.  This is
 * intended to describe MMIO, I/O port, as well as bus specific
 * regions (ex. PCI config space).  Zero sized regions may be used
 * to describe unimplemented regions (ex. unimplemented PCI BARs).
 * Return: 0 on success, -errno on failure.
 */
struct vfio_region_info {
        __u32   argsz;
        __u32   flags;
#define VFIO_REGION_INFO_FLAG_READ      (1 << 0) /* Region supports read */
#define VFIO_REGION_INFO_FLAG_WRITE     (1 << 1) /* Region supports write */
#define VFIO_REGION_INFO_FLAG_MMAP      (1 << 2) /* Region supports mmap */
#define VFIO_REGION_INFO_FLAG_CAPS      (1 << 3) /* Info supports caps */
        __u32   index;          /* Region index */
        __u32   cap_offset;     /* Offset within info struct of first cap */
        __u64   size;           /* Region size (bytes) */
        __u64   offset;         /* Region offset from start of device fd */
};
#define VFIO_DEVICE_GET_REGION_INFO     _IO(VFIO_TYPE, VFIO_BASE + 8)
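
/*
 * Example (editor's illustrative sketch, not part of this header): querying
 * BAR0 of a vfio-pci device (index VFIO_PCI_BAR0_REGION_INDEX, defined
 * below) and reading `len` bytes from it through the device fd `device`;
 * error handling is elided.
 *
 *      struct vfio_region_info reg = { .argsz = sizeof(reg) };
 *
 *      reg.index = VFIO_PCI_BAR0_REGION_INDEX;
 *      ioctl(device, VFIO_DEVICE_GET_REGION_INFO, &reg);
 *      if ((reg.flags & VFIO_REGION_INFO_FLAG_READ) && len <= reg.size)
 *              pread(device, buf, len, reg.offset);
 */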

/*
 * The sparse mmap capability allows finer granularity of specifying areas
 * within a region with mmap support.  When specified, the user should only
 * mmap the offset ranges specified by the areas array.  mmaps outside of the
 * areas specified may fail (such as the range covering a PCI MSI-X table) or
 * may result in improper device behavior.
 *
 * The structures below define version 1 of this capability.
 */
#define VFIO_REGION_INFO_CAP_SPARSE_MMAP        1

struct vfio_region_sparse_mmap_area {
        __u64   offset; /* Offset of mmap'able area within region */
        __u64   size;   /* Size of mmap'able area */
};

struct vfio_region_info_cap_sparse_mmap {
        struct vfio_info_cap_header header;
        __u32   nr_areas;
        __u32   reserved;
        struct vfio_region_sparse_mmap_area areas[];
};
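
/*
 * Example (editor's illustrative sketch, not part of this header): mapping
 * only the advertised sparse areas of a region.  `info` points to a filled
 * struct vfio_region_info and `hdr` to this capability as found by walking
 * the chain; `device` is the device fd and error handling is elided.
 *
 *      struct vfio_region_info_cap_sparse_mmap *sparse = (void *)hdr;
 *
 *      for (__u32 i = 0; i < sparse->nr_areas; i++)
 *              mmap(NULL, sparse->areas[i].size, PROT_READ | PROT_WRITE,
 *                   MAP_SHARED, device,
 *                   info->offset + sparse->areas[i].offset);
 */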

/*
 * The device specific type capability allows regions unique to a specific
 * device or class of devices to be exposed.  This helps solve the problem for
 * vfio bus drivers of defining which region indexes correspond to which region
 * on the device, without needing to resort to static indexes, as done by
 * vfio-pci.  For instance, if we were to go back in time, we might remove
 * VFIO_PCI_VGA_REGION_INDEX and let vfio-pci simply define that all indexes
 * greater than or equal to VFIO_PCI_NUM_REGIONS are device specific and we'd
 * make a "VGA" device specific type to describe the VGA access space.  This
 * means that non-VGA devices wouldn't need to waste this index, and thus the
 * address space associated with it due to implementation of device file
 * descriptor offsets in vfio-pci.
 *
 * The current implementation is now part of the user ABI, so we can't use this
 * for VGA, but there are other upcoming use cases, such as opregions for Intel
 * IGD devices and framebuffers for vGPU devices.  We missed VGA, but we'll
 * use this for future additions.
 *
 * The structure below defines version 1 of this capability.
 */
#define VFIO_REGION_INFO_CAP_TYPE       2

struct vfio_region_info_cap_type {
        struct vfio_info_cap_header header;
        __u32 type;     /* global per bus driver */
        __u32 subtype;  /* type specific */
};

/*
 * List of region types, global per bus driver.
 * If you introduce a new type, please add it here.
 */

/* PCI region type containing a PCI vendor part */
#define VFIO_REGION_TYPE_PCI_VENDOR_TYPE        (1 << 31)
#define VFIO_REGION_TYPE_PCI_VENDOR_MASK        (0xffff)
#define VFIO_REGION_TYPE_GFX                    (1)
#define VFIO_REGION_TYPE_CCW                    (2)
#define VFIO_REGION_TYPE_MIGRATION              (3)

/* sub-types for VFIO_REGION_TYPE_PCI_* */

/* 8086 vendor PCI sub-types */
#define VFIO_REGION_SUBTYPE_INTEL_IGD_OPREGION  (1)
#define VFIO_REGION_SUBTYPE_INTEL_IGD_HOST_CFG  (2)
#define VFIO_REGION_SUBTYPE_INTEL_IGD_LPC_CFG   (3)

/* 10de vendor PCI sub-types */
/*
 * NVIDIA GPU NVlink2 RAM is coherent RAM mapped onto the host address space.
 *
 * Deprecated, region no longer provided
 */
#define VFIO_REGION_SUBTYPE_NVIDIA_NVLINK2_RAM  (1)

/* 1014 vendor PCI sub-types */
/*
 * IBM NPU NVlink2 ATSD (Address Translation Shootdown) register of NPU
 * to do TLB invalidation on a GPU.
 *
 * Deprecated, region no longer provided
 */
#define VFIO_REGION_SUBTYPE_IBM_NVLINK2_ATSD    (1)

/* sub-types for VFIO_REGION_TYPE_GFX */
#define VFIO_REGION_SUBTYPE_GFX_EDID            (1)

/**
 * struct vfio_region_gfx_edid - EDID region layout.
 *
 * Set display link state and EDID blob.
 *
 * The EDID blob has monitor information such as brand, name, serial
 * number, physical size, supported video modes and more.
 *
 * This special region allows userspace (typically qemu) to set a virtual
 * EDID for the virtual monitor, which allows a flexible display
 * configuration.
 *
 * For the EDID blob spec look here:
 *    https://en.wikipedia.org/wiki/Extended_Display_Identification_Data
 *
 * On linux systems you can find the EDID blob in sysfs:
 *    /sys/class/drm/${card}/${connector}/edid
 *
 * You can use the edid-decode utility (comes with xorg-x11-utils) to
 * decode the EDID blob.
 *
 * @edid_offset: location of the edid blob, relative to the
 *               start of the region (readonly).
 * @edid_max_size: max size of the edid blob (readonly).
 * @edid_size: actual edid size (read/write).
 * @link_state: display link state (read/write).
 * VFIO_DEVICE_GFX_LINK_STATE_UP: Monitor is turned on.
 * VFIO_DEVICE_GFX_LINK_STATE_DOWN: Monitor is turned off.
 * @max_xres: max display width (0 == no limitation, readonly).
 * @max_yres: max display height (0 == no limitation, readonly).
 *
 * EDID update protocol:
 *   (1) set link-state to down.
 *   (2) update edid blob and size.
 *   (3) set link-state to up.
 */
struct vfio_region_gfx_edid {
        __u32 edid_offset;
        __u32 edid_max_size;
        __u32 edid_size;
        __u32 max_xres;
        __u32 max_yres;
        __u32 link_state;
#define VFIO_DEVICE_GFX_LINK_STATE_UP    1
#define VFIO_DEVICE_GFX_LINK_STATE_DOWN  2
};
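
/*
 * Example (editor's illustrative sketch, not part of this header): the
 * three-step EDID update protocol above, performed with trapped access
 * through the device fd.  `base` is the region's offset within the device
 * fd, `edid`/`edid_len` hold the new blob (edid_len is a __u32), and error
 * handling is elided.
 *
 *      struct vfio_region_gfx_edid hdr;
 *      __u32 state = VFIO_DEVICE_GFX_LINK_STATE_DOWN;
 *
 *      pread(device, &hdr, sizeof(hdr), base); // fetch edid_offset
 *      pwrite(device, &state, 4,
 *             base + offsetof(struct vfio_region_gfx_edid, link_state));
 *      pwrite(device, edid, edid_len, base + hdr.edid_offset);
 *      pwrite(device, &edid_len, 4,
 *             base + offsetof(struct vfio_region_gfx_edid, edid_size));
 *      state = VFIO_DEVICE_GFX_LINK_STATE_UP;
 *      pwrite(device, &state, 4,
 *             base + offsetof(struct vfio_region_gfx_edid, link_state));
 */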

/* sub-types for VFIO_REGION_TYPE_CCW */
#define VFIO_REGION_SUBTYPE_CCW_ASYNC_CMD       (1)
#define VFIO_REGION_SUBTYPE_CCW_SCHIB           (2)
#define VFIO_REGION_SUBTYPE_CCW_CRW             (3)

/* sub-types for VFIO_REGION_TYPE_MIGRATION */
#define VFIO_REGION_SUBTYPE_MIGRATION           (1)

/*
 * The structure vfio_device_migration_info is placed at the 0th offset of
 * the VFIO_REGION_SUBTYPE_MIGRATION region to get and set VFIO device related
 * migration information. Field accesses from this structure are only supported
 * at their native width and alignment. Otherwise, the result is undefined and
 * vendor drivers should return an error.
 *
 * device_state: (read/write)
 *      - The user application writes to this field to inform the vendor driver
 *        about the device state to be transitioned to.
 *      - The vendor driver should take the necessary actions to change the
 *        device state. After successful transition to a given state, the
 *        vendor driver should return success on write(device_state, state)
 *        system call. If the device state transition fails, the vendor driver
 *        should return an appropriate -errno for the fault condition.
 *      - On the user application side, if the device state transition fails,
 *        that is, if write(device_state, state) returns an error, read
 *        device_state again to determine the current state of the device from
 *        the vendor driver.
 *      - The vendor driver should return the previous state of the device
 *        unless the vendor driver has encountered an internal error, in which
 *        case the vendor driver may report the device_state
 *        VFIO_DEVICE_STATE_ERROR.
 *      - The user application must use the device reset ioctl to recover the
 *        device from VFIO_DEVICE_STATE_ERROR state. If the device is
 *        indicated to be in a valid device state by reading device_state, the
 *        user application may attempt to transition the device to any valid
 *        state reachable from the current state or terminate itself.
 *
 *      device_state consists of 3 bits:
 *      - If bit 0 is set, it indicates the _RUNNING state. If bit 0 is clear,
 *        it indicates the _STOP state. When the device state is changed to
 *        _STOP, the driver should stop the device before write() returns.
 *      - If bit 1 is set, it indicates the _SAVING state, which means that the
 *        driver should start gathering device state information that will be
 *        provided to the VFIO user application to save the device's state.
 *      - If bit 2 is set, it indicates the _RESUMING state, which means that
 *        the driver should prepare to resume the device. Data provided through
 *        the migration region should be used to resume the device.
 *      Bits 3 - 31 are reserved for future use. To preserve them, the user
 *      application should perform a read-modify-write operation on this
 *      field when modifying the specified bits.
 *
 *  +------- _RESUMING
 *  |+------ _SAVING
 *  ||+----- _RUNNING
 *  |||
 *  000b => Device Stopped, not saving or resuming
 *  001b => Device running, which is the default state
 *  010b => Stop the device & save the device state, stop-and-copy state
 *  011b => Device running and save the device state, pre-copy state
 *  100b => Device stopped and the device state is resuming
 *  101b => Invalid state
 *  110b => Error state
 *  111b => Invalid state
 *
 * State transitions:
 *
 *              _RESUMING  _RUNNING    Pre-copy    Stop-and-copy   _STOP
 *                (100b)     (001b)     (011b)        (010b)       (000b)
 * 0. Running or default state
 *                             |
 *
 * 1. Normal Shutdown (optional)
 *                             |------------------------------------->|
 *
 * 2. Save the state or suspend
 *                             |------------------------->|---------->|
 *
 * 3. Save the state during live migration
 *                             |----------->|------------>|---------->|
 *
 * 4. Resuming
 *                  |<---------|
 *
 * 5. Resumed
 *                  |--------->|
 *
 * 0. Default state of VFIO device is _RUNNING when the user application starts.
 * 1. During normal shutdown of the user application, the user application may
 *    optionally change the VFIO device state from _RUNNING to _STOP. This
 *    transition is optional. The vendor driver must support this transition but
 *    must not require it.
 * 2. When the user application saves state or suspends the application, the
 *    device state transitions from _RUNNING to stop-and-copy and then to _STOP.
 *    On the state transition from _RUNNING to stop-and-copy, the driver must
 *    stop the device, save the device state and send it to the application
 *    through the migration region. The sequence to be followed for such a
 *    transition is given below.
 * 3. In live migration of the user application, the state transitions from
 *    _RUNNING to pre-copy, to stop-and-copy, and to _STOP.
 *    On the state transition from _RUNNING to pre-copy, the driver should start
 *    gathering the device state while the application is still running and send
 *    the device state data to the application through the migration region.
 *    On the state transition from pre-copy to stop-and-copy, the driver must
 *    stop the device, save the device state and send it to the user application
 *    through the migration region.
 *    Vendor drivers must support the pre-copy state even for implementations
 *    where no data is provided to the user before the stop-and-copy state. The
 *    user must not be required to consume all migration data before the device
 *    transitions to a new state, including the stop-and-copy state.
 *    The sequence to be followed for the above two transitions is given below.
 * 4. To start the resuming phase, the device state should be transitioned from
 *    the _RUNNING to the _RESUMING state.
 *    In the _RESUMING state, the driver should use the device state data
 *    received through the migration region to resume the device.
 * 5. After providing saved device data to the driver, the application should
 *    change the state from _RESUMING to _RUNNING.
 *
 * reserved:
 *      Reads on this field return zero and writes are ignored.
 *
 * pending_bytes: (read only)
 *      The number of pending bytes still to be migrated from the vendor driver.
 *
 * data_offset: (read only)
 *      The user application should read the data_offset field from the
 *      migration region. The user application should read the device data
 *      from this offset within the migration region during the _SAVING state
 *      or write the device data during the _RESUMING state. See below for
 *      details of the sequence to be followed.
 *
 * data_size: (read/write)
 *      The user application should read data_size to get the size in bytes of
 *      the data copied in the migration region during the _SAVING state and
 *      write the size in bytes of the data copied in the migration region
 *      during the _RESUMING state.
 *
 * The format of the migration region is as follows:
 *  ------------------------------------------------------------------
 * |vfio_device_migration_info|    data section                      |
 * |                          |     ///////////////////////////////  |
 * ------------------------------------------------------------------
 *   ^                              ^
 *  offset 0-trapped part        data_offset
 *
 * The structure vfio_device_migration_info is always followed by the data
 * section in the region, so data_offset will always be nonzero. The offset
 * from where the data is copied is decided by the kernel driver. The data
 * section can be trapped, mmapped, or partitioned, depending on how the kernel
 * driver defines the data section. The data section partition can be defined
 * as mapped by the sparse mmap capability. If mmapped, data_offset must be
 * page aligned, whereas the initial section, which contains the
 * vfio_device_migration_info structure, might not end at a page-aligned
 * offset. The user is not required to access the data section through mmap
 * regardless of the region's mmap capabilities.
 * The vendor driver should determine whether and how to partition the data
 * section. The vendor driver should return data_offset accordingly.
 *
 * The sequence to be followed while in the pre-copy state and the
 * stop-and-copy state is as follows:
 * a. Read pending_bytes, indicating the start of a new iteration to get device
 *    data. Repeated reads on pending_bytes at this stage should have no side
 *    effects.
 *    If pending_bytes == 0, the user application should not iterate to get data
 *    for that device.
 *    If pending_bytes > 0, perform the following steps.
 * b. Read data_offset, indicating that the vendor driver should make data
 *    available through the data section. The vendor driver should return this
 *    read operation only after data is available from (region + data_offset)
 *    to (region + data_offset + data_size).
 * c. Read data_size, which is the amount of data in bytes available through
 *    the migration region.
 *    Reads of data_offset and data_size should return the offset and size of
 *    the current buffer if the user application reads them more than once
 *    here.
 * d. Read data_size bytes of data from (region + data_offset) from the
 *    migration region.
 * e. Process the data.
 * f. Read pending_bytes, which indicates that the data from the previous
 *    iteration has been read. If pending_bytes > 0, go to step b.
 *
 * The user application can transition from the _SAVING|_RUNNING
 * (pre-copy state) to the _SAVING (stop-and-copy) state regardless of the
 * number of pending bytes. The user application should iterate in _SAVING
 * (stop-and-copy) until pending_bytes is 0.
 *
 * The sequence to be followed while in the _RESUMING device state is as
 * follows:
 * While data for this device is available, repeat the following steps:
 * a. Read data_offset, where the user application should write data.
 * b. Write migration data starting at the migration region + data_offset for
 *    the length determined by data_size from the migration source.
 * c. Write data_size, which indicates to the vendor driver that data is
 *    written in the migration region. The vendor driver must return this
 *    write operation only after consuming the data. The vendor driver should
 *    apply the user-provided migration region data to the device resume
 *    state.
 *
 * If an error occurs during the above sequences, the vendor driver can return
 * an error code for the next read() or write() operation, which will terminate
 * the loop. The user application should then take the next necessary action,
 * for example, failing migration or terminating the user application.
 *
 * For the user application, data is opaque. The user application should write
 * data in the same order as the data is received and the data should be of
 * the same transaction size as at the source.
 */

struct vfio_device_migration_info {
        __u32 device_state;         /* VFIO device state */
#define VFIO_DEVICE_STATE_STOP      (0)
#define VFIO_DEVICE_STATE_RUNNING   (1 << 0)
#define VFIO_DEVICE_STATE_SAVING    (1 << 1)
#define VFIO_DEVICE_STATE_RESUMING  (1 << 2)
#define VFIO_DEVICE_STATE_MASK      (VFIO_DEVICE_STATE_RUNNING | \
                                     VFIO_DEVICE_STATE_SAVING |  \
                                     VFIO_DEVICE_STATE_RESUMING)

#define VFIO_DEVICE_STATE_VALID(state) \
        (state & VFIO_DEVICE_STATE_RESUMING ? \
        (state & VFIO_DEVICE_STATE_MASK) == VFIO_DEVICE_STATE_RESUMING : 1)

#define VFIO_DEVICE_STATE_IS_ERROR(state) \
        ((state & VFIO_DEVICE_STATE_MASK) == (VFIO_DEVICE_STATE_SAVING | \
                                              VFIO_DEVICE_STATE_RESUMING))

#define VFIO_DEVICE_STATE_SET_ERROR(state) \
        ((state & ~VFIO_DEVICE_STATE_MASK) | VFIO_DEVICE_STATE_SAVING | \
                                             VFIO_DEVICE_STATE_RESUMING)

        __u32 reserved;
        __u64 pending_bytes;
        __u64 data_offset;
        __u64 data_size;
};
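
/*
 * Example (editor's illustrative sketch, not part of this header): the
 * _SAVING read iteration (steps a-f above) using trapped access through
 * the device fd.  `base` is the migration region's device fd offset, `buf`
 * is a caller-provided buffer and send_to_destination() is a hypothetical
 * helper; error handling is elided.
 *
 *      __u64 pending, off, sz;
 *
 *      for (;;) {
 *              pread(device, &pending, sizeof(pending), base +
 *                    offsetof(struct vfio_device_migration_info, pending_bytes));
 *              if (!pending)
 *                      break;          // step a: no more device data
 *              pread(device, &off, sizeof(off), base +
 *                    offsetof(struct vfio_device_migration_info, data_offset));
 *              pread(device, &sz, sizeof(sz), base +
 *                    offsetof(struct vfio_device_migration_info, data_size));
 *              pread(device, buf, sz, base + off);     // step d
 *              send_to_destination(buf, sz);           // step e
 *      }
 */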

/*
 * The MSIX mappable capability indicates that the MSIX data of a BAR can be
 * mmapped, which allows direct access to non-MSIX registers that happen to
 * be within the same system page.
 *
 * Even though userspace gets direct access to the MSIX data, the existing
 * VFIO_DEVICE_SET_IRQS interface must still be used for MSIX configuration.
 */
#define VFIO_REGION_INFO_CAP_MSIX_MAPPABLE      3

/*
 * Capability with a compressed real address (aka SSA - small system address)
 * where GPU RAM is mapped on a system bus. Used by a GPU for DMA routing
 * and by userspace to associate an NVLink bridge with a GPU.
 *
 * Deprecated, capability no longer provided
 */
#define VFIO_REGION_INFO_CAP_NVLINK2_SSATGT     4

struct vfio_region_info_cap_nvlink2_ssatgt {
        struct vfio_info_cap_header header;
        __u64 tgt;
};

/*
 * Capability with an NVLink link speed. The value is read by
 * the NVlink2 bridge driver from the bridge's "ibm,nvlink-speed"
 * property in the device tree. The value is fixed in the hardware
 * and failing to provide the correct value results in the link
 * not working, with no indication from the driver as to why.
 *
 * Deprecated, capability no longer provided
 */
#define VFIO_REGION_INFO_CAP_NVLINK2_LNKSPD     5

struct vfio_region_info_cap_nvlink2_lnkspd {
        struct vfio_info_cap_header header;
        __u32 link_speed;
        __u32 __pad;
};

/**
 * VFIO_DEVICE_GET_IRQ_INFO - _IOWR(VFIO_TYPE, VFIO_BASE + 9,
 *                                  struct vfio_irq_info)
 *
 * Retrieve information about a device IRQ.  Caller provides
 * struct vfio_irq_info with index value set.  Caller sets argsz.
 * Implementation of IRQ mapping is bus driver specific.  Indexes
 * using multiple IRQs are primarily intended to support MSI-like
 * interrupt blocks.  Zero count IRQ blocks may be used to describe
 * unimplemented interrupt types.
 *
 * The EVENTFD flag indicates the interrupt index supports eventfd based
 * signaling.
 *
 * The MASKABLE flag indicates the index supports MASK and UNMASK
 * actions described below.
 *
 * AUTOMASKED indicates that after signaling, the interrupt line is
 * automatically masked by VFIO and the user needs to unmask the line
 * to receive new interrupts.  This is primarily intended to distinguish
 * level triggered interrupts.
 *
 * The NORESIZE flag indicates that the interrupt lines within the index
 * are set up as a set and new subindexes cannot be enabled without first
 * disabling the entire index.  This is used for interrupts like PCI MSI
 * and MSI-X where the driver may only use a subset of the available
 * indexes, but VFIO needs to enable a specific number of vectors
 * upfront.  In the case of MSI-X, where the user can enable MSI-X and
 * then add and unmask vectors, it's up to userspace to make the decision
 * whether to allocate the maximum supported number of vectors or tear
 * down the setup and incrementally increase the vectors as each is
 * enabled.
 */
struct vfio_irq_info {
        __u32   argsz;
        __u32   flags;
#define VFIO_IRQ_INFO_EVENTFD           (1 << 0)
#define VFIO_IRQ_INFO_MASKABLE          (1 << 1)
#define VFIO_IRQ_INFO_AUTOMASKED        (1 << 2)
#define VFIO_IRQ_INFO_NORESIZE          (1 << 3)
        __u32   index;          /* IRQ index */
        __u32   count;          /* Number of IRQs within this index */
};
#define VFIO_DEVICE_GET_IRQ_INFO        _IO(VFIO_TYPE, VFIO_BASE + 9)

/**
 * VFIO_DEVICE_SET_IRQS - _IOW(VFIO_TYPE, VFIO_BASE + 10, struct vfio_irq_set)
 *
 * Set signaling, masking, and unmasking of interrupts.  Caller provides
 * struct vfio_irq_set with all fields set.  'start' and 'count' indicate
 * the range of subindexes being specified.
 *
 * The DATA flags specify the type of data provided.  If DATA_NONE, the
 * operation performs the specified action immediately on the specified
 * interrupt(s).  For example, to unmask AUTOMASKED interrupt [0,0]:
 * flags = (DATA_NONE|ACTION_UNMASK), index = 0, start = 0, count = 1.
 *
 * DATA_BOOL allows sparse support for the same on arrays of interrupts.
 * For example, to mask interrupts [0,1] and [0,3] (but not [0,2]):
 * flags = (DATA_BOOL|ACTION_MASK), index = 0, start = 1, count = 3,
 * data = {1,0,1}
 *
 * DATA_EVENTFD binds the specified ACTION to the provided __s32 eventfd.
 * A value of -1 can be used to either de-assign interrupts if already
 * assigned or skip un-assigned interrupts.  For example, to set an eventfd
 * to trigger for interrupts [0,0] and [0,2]:
 * flags = (DATA_EVENTFD|ACTION_TRIGGER), index = 0, start = 0, count = 3,
 * data = {fd1, -1, fd2}
 * If index [0,1] is previously set, two count = 1 ioctl calls would be
 * required to set [0,0] and [0,2] without changing [0,1].
 *
 * Once a signaling mechanism is set, DATA_BOOL or DATA_NONE can be used
 * with ACTION_TRIGGER to perform kernel level interrupt loopback testing
 * from userspace (i.e. simulate hardware triggering).
 *
 * Setting of an event triggering mechanism to userspace for ACTION_TRIGGER
 * enables the interrupt index for the device.  Individual subindex interrupts
 * can be disabled using the -1 value for DATA_EVENTFD or the index can be
 * disabled as a whole with: flags = (DATA_NONE|ACTION_TRIGGER), count = 0.
 *
 * Note that ACTION_[UN]MASK specify user->kernel signaling (irqfds) while
 * ACTION_TRIGGER specifies kernel->user signaling.
 */
struct vfio_irq_set {
        __u32   argsz;
        __u32   flags;
#define VFIO_IRQ_SET_DATA_NONE          (1 << 0) /* Data not present */
#define VFIO_IRQ_SET_DATA_BOOL          (1 << 1) /* Data is bool (u8) */
#define VFIO_IRQ_SET_DATA_EVENTFD       (1 << 2) /* Data is eventfd (s32) */
#define VFIO_IRQ_SET_ACTION_MASK        (1 << 3) /* Mask interrupt */
#define VFIO_IRQ_SET_ACTION_UNMASK      (1 << 4) /* Unmask interrupt */
#define VFIO_IRQ_SET_ACTION_TRIGGER     (1 << 5) /* Trigger interrupt */
        __u32   index;
        __u32   start;
        __u32   count;
        __u8    data[];
};
#define VFIO_DEVICE_SET_IRQS            _IO(VFIO_TYPE, VFIO_BASE + 10)

#define VFIO_IRQ_SET_DATA_TYPE_MASK     (VFIO_IRQ_SET_DATA_NONE | \
                                         VFIO_IRQ_SET_DATA_BOOL | \
                                         VFIO_IRQ_SET_DATA_EVENTFD)
#define VFIO_IRQ_SET_ACTION_TYPE_MASK   (VFIO_IRQ_SET_ACTION_MASK | \
                                         VFIO_IRQ_SET_ACTION_UNMASK | \
                                         VFIO_IRQ_SET_ACTION_TRIGGER)
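
/*
 * Example (editor's illustrative sketch, not part of this header): enabling
 * a single MSI vector backed by an eventfd using
 * DATA_EVENTFD | ACTION_TRIGGER as described above.  `efd` comes from
 * eventfd(2) and error handling is elided.
 *
 *      struct vfio_irq_set *set;
 *      size_t sz = sizeof(*set) + sizeof(__s32);
 *
 *      set = calloc(1, sz);
 *      set->argsz = sz;
 *      set->flags = VFIO_IRQ_SET_DATA_EVENTFD | VFIO_IRQ_SET_ACTION_TRIGGER;
 *      set->index = VFIO_PCI_MSI_IRQ_INDEX;    // defined below for vfio-pci
 *      set->start = 0;
 *      set->count = 1;
 *      memcpy(set->data, &efd, sizeof(__s32));
 *      ioctl(device, VFIO_DEVICE_SET_IRQS, set);
 */
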
/**
 * VFIO_DEVICE_RESET - _IO(VFIO_TYPE, VFIO_BASE + 11)
 *
 * Reset a device.
 */
#define VFIO_DEVICE_RESET               _IO(VFIO_TYPE, VFIO_BASE + 11)

/*
 * The VFIO-PCI bus driver makes use of the following fixed region and
 * IRQ index mapping.  Unimplemented regions return a size of zero.
 * Unimplemented IRQ types return a count of zero.
 */

enum {
        VFIO_PCI_BAR0_REGION_INDEX,
        VFIO_PCI_BAR1_REGION_INDEX,
        VFIO_PCI_BAR2_REGION_INDEX,
        VFIO_PCI_BAR3_REGION_INDEX,
        VFIO_PCI_BAR4_REGION_INDEX,
        VFIO_PCI_BAR5_REGION_INDEX,
        VFIO_PCI_ROM_REGION_INDEX,
        VFIO_PCI_CONFIG_REGION_INDEX,
        /*
         * Expose VGA regions defined for PCI base class 03, subclass 00.
         * This includes I/O port ranges 0x3b0 to 0x3bb and 0x3c0 to 0x3df
         * as well as the MMIO range 0xa0000 to 0xbffff.  Each implemented
         * range is found at its identity mapped offset from the region
         * offset, for example 0x3b0 is region_info.offset + 0x3b0.  Areas
         * between described ranges are unimplemented.
         */
        VFIO_PCI_VGA_REGION_INDEX,
        VFIO_PCI_NUM_REGIONS = 9 /* Fixed user ABI, region indexes >=9 use */
                                 /* device specific cap to define content. */
};

enum {
        VFIO_PCI_INTX_IRQ_INDEX,
        VFIO_PCI_MSI_IRQ_INDEX,
        VFIO_PCI_MSIX_IRQ_INDEX,
        VFIO_PCI_ERR_IRQ_INDEX,
        VFIO_PCI_REQ_IRQ_INDEX,
        VFIO_PCI_NUM_IRQS
};

/*
 * The vfio-ccw bus driver makes use of the following fixed region and
 * IRQ index mapping. Unimplemented regions return a size of zero.
 * Unimplemented IRQ types return a count of zero.
 */

enum {
        VFIO_CCW_CONFIG_REGION_INDEX,
        VFIO_CCW_NUM_REGIONS
};

enum {
        VFIO_CCW_IO_IRQ_INDEX,
        VFIO_CCW_CRW_IRQ_INDEX,
        VFIO_CCW_REQ_IRQ_INDEX,
        VFIO_CCW_NUM_IRQS
};

/**
 * VFIO_DEVICE_GET_PCI_HOT_RESET_INFO - _IOWR(VFIO_TYPE, VFIO_BASE + 12,
 *                                            struct vfio_pci_hot_reset_info)
 *
 * Return: 0 on success, -errno on failure:
 *      -ENOSPC = insufficient buffer, -ENODEV = unsupported for device.
 */
struct vfio_pci_dependent_device {
        __u32   group_id;
        __u16   segment;
        __u8    bus;
        __u8    devfn; /* Use PCI_SLOT/PCI_FUNC */
};

struct vfio_pci_hot_reset_info {
        __u32   argsz;
        __u32   flags;
        __u32   count;
        struct vfio_pci_dependent_device        devices[];
};

#define VFIO_DEVICE_GET_PCI_HOT_RESET_INFO      _IO(VFIO_TYPE, VFIO_BASE + 12)

/**
 * VFIO_DEVICE_PCI_HOT_RESET - _IOW(VFIO_TYPE, VFIO_BASE + 13,
 *                                  struct vfio_pci_hot_reset)
 *
 * Return: 0 on success, -errno on failure.
 */
struct vfio_pci_hot_reset {
        __u32   argsz;
        __u32   flags;
        __u32   count;
        __s32   group_fds[];
};

#define VFIO_DEVICE_PCI_HOT_RESET       _IO(VFIO_TYPE, VFIO_BASE + 13)
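
/*
 * Example (editor's illustrative sketch, not part of this header):
 * performing a hot reset after discovering the dependent devices with
 * VFIO_DEVICE_GET_PCI_HOT_RESET_INFO.  Here a single group fd `group` is
 * assumed to cover all affected devices; error handling is elided.
 *
 *      struct vfio_pci_hot_reset *reset;
 *      size_t sz = sizeof(*reset) + sizeof(__s32);
 *
 *      reset = calloc(1, sz);
 *      reset->argsz = sz;
 *      reset->count = 1;
 *      reset->group_fds[0] = group;
 *      ioctl(device, VFIO_DEVICE_PCI_HOT_RESET, reset);
 */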

/**
 * VFIO_DEVICE_QUERY_GFX_PLANE - _IOW(VFIO_TYPE, VFIO_BASE + 14,
 *                                    struct vfio_device_query_gfx_plane)
 *
 * Set the drm_plane_type and flags, then retrieve the gfx plane info.
 *
 * flags supported:
 * - VFIO_GFX_PLANE_TYPE_PROBE and VFIO_GFX_PLANE_TYPE_DMABUF are set
 *   to ask if the mdev supports dma-buf. 0 on support, -EINVAL on no
 *   support for dma-buf.
 * - VFIO_GFX_PLANE_TYPE_PROBE and VFIO_GFX_PLANE_TYPE_REGION are set
 *   to ask if the mdev supports region. 0 on support, -EINVAL on no
 *   support for region.
 * - VFIO_GFX_PLANE_TYPE_DMABUF or VFIO_GFX_PLANE_TYPE_REGION is set
 *   with each call to query the plane info.
 * - Others are invalid and return -EINVAL.
 *
 * Note:
 * 1. The plane could be disabled by the guest. In that case, success will be
 *    returned with zero-initialized drm_format, size, width and height
 *    fields.
 * 2. x_hot/y_hot is set to 0xFFFFFFFF if no hotspot information is available.
 *
 * Return: 0 on success, -errno on other failure.
 */
struct vfio_device_gfx_plane_info {
        __u32 argsz;
        __u32 flags;
#define VFIO_GFX_PLANE_TYPE_PROBE (1 << 0)
#define VFIO_GFX_PLANE_TYPE_DMABUF (1 << 1)
#define VFIO_GFX_PLANE_TYPE_REGION (1 << 2)
        /* in */
        __u32 drm_plane_type;   /* type of plane: DRM_PLANE_TYPE_* */
        /* out */
        __u32 drm_format;       /* drm format of plane */
        __u64 drm_format_mod;   /* tiled mode */
        __u32 width;    /* width of plane */
        __u32 height;   /* height of plane */
        __u32 stride;   /* stride of plane */
        __u32 size;     /* size of plane in bytes, aligned to a page */
        __u32 x_pos;    /* horizontal position of cursor plane */
        __u32 y_pos;    /* vertical position of cursor plane */
        __u32 x_hot;    /* horizontal position of cursor hotspot */
        __u32 y_hot;    /* vertical position of cursor hotspot */
        union {
                __u32 region_index;     /* region index */
                __u32 dmabuf_id;        /* dma-buf id */
        };
};

#define VFIO_DEVICE_QUERY_GFX_PLANE _IO(VFIO_TYPE, VFIO_BASE + 14)

/**
 * VFIO_DEVICE_GET_GFX_DMABUF - _IOW(VFIO_TYPE, VFIO_BASE + 15, __u32)
 *
 * Return a new dma-buf file descriptor for an exposed guest framebuffer
 * described by the provided dmabuf_id. The dmabuf_id is returned from
 * VFIO_DEVICE_QUERY_GFX_PLANE as a token of the exposed guest framebuffer.
 */

#define VFIO_DEVICE_GET_GFX_DMABUF _IO(VFIO_TYPE, VFIO_BASE + 15)

/**
 * VFIO_DEVICE_IOEVENTFD - _IOW(VFIO_TYPE, VFIO_BASE + 16,
 *                              struct vfio_device_ioeventfd)
 *
 * Perform a write to the device at the specified device fd offset, with
 * the specified data and width when the provided eventfd is triggered.
 * vfio bus drivers may not support this for all regions, for all widths,
 * or at all.  vfio-pci currently only enables support for BAR regions,
 * excluding the MSI-X vector table.
 *
 * Return: 0 on success, -errno on failure.
 */
struct vfio_device_ioeventfd {
        __u32   argsz;
        __u32   flags;
#define VFIO_DEVICE_IOEVENTFD_8         (1 << 0) /* 1-byte write */
#define VFIO_DEVICE_IOEVENTFD_16        (1 << 1) /* 2-byte write */
#define VFIO_DEVICE_IOEVENTFD_32        (1 << 2) /* 4-byte write */
#define VFIO_DEVICE_IOEVENTFD_64        (1 << 3) /* 8-byte write */
#define VFIO_DEVICE_IOEVENTFD_SIZE_MASK (0xf)
        __u64   offset;                 /* device fd offset of write */
        __u64   data;                   /* data to be written */
        __s32   fd;                     /* -1 for de-assignment */
};

#define VFIO_DEVICE_IOEVENTFD           _IO(VFIO_TYPE, VFIO_BASE + 16)
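
/*
 * Example (editor's illustrative sketch, not part of this header): arming
 * an ioeventfd so that signaling `efd` posts a 4-byte write of 1 at offset
 * `db` within a BAR whose region offset is `bar_offset` (from
 * VFIO_DEVICE_GET_REGION_INFO); error handling is elided.
 *
 *      struct vfio_device_ioeventfd ioeventfd = {
 *              .argsz  = sizeof(ioeventfd),
 *              .flags  = VFIO_DEVICE_IOEVENTFD_32,
 *              .offset = bar_offset + db,
 *              .data   = 1,
 *              .fd     = efd,          // -1 would tear the binding down
 *      };
 *
 *      ioctl(device, VFIO_DEVICE_IOEVENTFD, &ioeventfd);
 */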

/**
 * VFIO_DEVICE_FEATURE - _IOWR(VFIO_TYPE, VFIO_BASE + 17,
 *                             struct vfio_device_feature)
 *
 * Get, set, or probe feature data of the device.  The feature is selected
 * using the FEATURE_MASK portion of the flags field.  Support for a feature
 * can be probed by setting both the FEATURE_MASK and PROBE bits.  A probe
 * may optionally include the GET and/or SET bits to determine read vs write
 * access of the feature respectively.  Probing a feature will return success
 * if the feature is supported and all of the optionally indicated GET/SET
 * methods are supported.  The format of the data portion of the structure is
 * specific to the given feature.  The data portion is not required for
 * probing.  GET and SET are mutually exclusive, except for use with PROBE.
 *
 * Return: 0 on success, -errno on failure.
 */
struct vfio_device_feature {
        __u32   argsz;
        __u32   flags;
#define VFIO_DEVICE_FEATURE_MASK        (0xffff) /* 16-bit feature index */
#define VFIO_DEVICE_FEATURE_GET         (1 << 16) /* Get feature into data[] */
#define VFIO_DEVICE_FEATURE_SET         (1 << 17) /* Set feature from data[] */
#define VFIO_DEVICE_FEATURE_PROBE       (1 << 18) /* Probe feature support */
        __u8    data[];
};

#define VFIO_DEVICE_FEATURE             _IO(VFIO_TYPE, VFIO_BASE + 17)

/*
 * Provide support for setting a PCI VF Token, which is used as a shared
 * secret between PF and VF drivers.  This feature may only be set on a
 * PCI SR-IOV PF when SR-IOV is enabled on the PF and there are no existing
 * open VFs.  Data provided when setting this feature is a 16-byte array
 * (__u8 b[16]), representing a UUID.
 */
#define VFIO_DEVICE_FEATURE_PCI_VF_TOKEN        (0)
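
/*
 * Example (editor's illustrative sketch, not part of this header): probing
 * for, then setting, the VF token feature with a caller-chosen `uuid[16]`;
 * error handling is elided.
 *
 *      struct vfio_device_feature *feat;
 *      size_t sz = sizeof(*feat) + 16;
 *
 *      feat = calloc(1, sz);
 *      feat->argsz = sz;
 *      feat->flags = VFIO_DEVICE_FEATURE_PCI_VF_TOKEN |
 *                    VFIO_DEVICE_FEATURE_SET | VFIO_DEVICE_FEATURE_PROBE;
 *      if (!ioctl(device, VFIO_DEVICE_FEATURE, feat)) {
 *              feat->flags = VFIO_DEVICE_FEATURE_PCI_VF_TOKEN |
 *                            VFIO_DEVICE_FEATURE_SET;
 *              memcpy(feat->data, uuid, 16);
 *              ioctl(device, VFIO_DEVICE_FEATURE, feat);
 *      }
 */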

/* -------- API for Type1 VFIO IOMMU -------- */

/**
 * VFIO_IOMMU_GET_INFO - _IOR(VFIO_TYPE, VFIO_BASE + 12,
 *                            struct vfio_iommu_type1_info)
 *
 * Retrieve information about the IOMMU object. Fills in provided
 * struct vfio_iommu_type1_info. Caller sets argsz.
 *
 * XXX Should we do these by CHECK_EXTENSION too?
 */
struct vfio_iommu_type1_info {
        __u32   argsz;
        __u32   flags;
#define VFIO_IOMMU_INFO_PGSIZES (1 << 0)        /* supported page sizes info */
#define VFIO_IOMMU_INFO_CAPS    (1 << 1)        /* Info supports caps */
        __u64   iova_pgsizes;   /* Bitmap of supported page sizes */
        __u32   cap_offset;     /* Offset within info struct of first cap */
};

/*
 * The IOVA capability allows reporting the valid IOVA range(s)
 * excluding any non-relaxable reserved regions exposed by
 * devices attached to the container. Any DMA map attempt
 * outside the valid iova range will return an error.
 *
 * The structures below define version 1 of this capability.
 */
#define VFIO_IOMMU_TYPE1_INFO_CAP_IOVA_RANGE  1

struct vfio_iova_range {
        __u64   start;
        __u64   end;
};

struct vfio_iommu_type1_info_cap_iova_range {
        struct  vfio_info_cap_header header;
        __u32   nr_iovas;
        __u32   reserved;
        struct  vfio_iova_range iova_ranges[];
};

/*
 * The migration capability allows reporting supported features for migration.
 *
 * The structures below define version 1 of this capability.
 *
 * The existence of this capability indicates that the IOMMU kernel driver
 * supports dirty page logging.
 *
 * pgsize_bitmap: The kernel driver returns a bitmap of supported page sizes
 * for dirty page logging.
 * max_dirty_bitmap_size: The kernel driver returns the maximum supported
 * dirty bitmap size in bytes that can be used by user applications when
 * getting the dirty bitmap.
 */
#define VFIO_IOMMU_TYPE1_INFO_CAP_MIGRATION  2

struct vfio_iommu_type1_info_cap_migration {
        struct  vfio_info_cap_header header;
        __u32   flags;
        __u64   pgsize_bitmap;
        __u64   max_dirty_bitmap_size;          /* in bytes */
};

/*
 * The DMA available capability allows reporting the current number of
 * simultaneously outstanding DMA mappings that are allowed.
 *
 * The structure below defines version 1 of this capability.
 *
 * avail: specifies the current number of outstanding DMA mappings allowed.
 */
#define VFIO_IOMMU_TYPE1_INFO_DMA_AVAIL 3

struct vfio_iommu_type1_info_dma_avail {
        struct  vfio_info_cap_header header;
        __u32   avail;
};

#define VFIO_IOMMU_GET_INFO _IO(VFIO_TYPE, VFIO_BASE + 12)
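
/*
 * Example (editor's illustrative sketch, not part of this header): listing
 * the valid IOVA ranges from the capability above.  `info` is a
 * struct vfio_iommu_type1_info buffer filled via the two-call argsz pattern
 * and `hdr` a capability header found by walking cap_offset; error handling
 * is elided.
 *
 *      if (hdr->id == VFIO_IOMMU_TYPE1_INFO_CAP_IOVA_RANGE) {
 *              struct vfio_iommu_type1_info_cap_iova_range *cap =
 *                      (void *)hdr;
 *
 *              for (__u32 i = 0; i < cap->nr_iovas; i++)
 *                      printf("iova [0x%llx - 0x%llx]\n",
 *                             (unsigned long long)cap->iova_ranges[i].start,
 *                             (unsigned long long)cap->iova_ranges[i].end);
 *      }
 */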

/**
 * VFIO_IOMMU_MAP_DMA - _IOW(VFIO_TYPE, VFIO_BASE + 13,
 *                           struct vfio_iommu_type1_dma_map)
 *
 * Map process virtual addresses to IO virtual addresses using the
 * provided struct vfio_iommu_type1_dma_map. Caller sets argsz.
 * READ &/ WRITE required.
 *
 * If flags & VFIO_DMA_MAP_FLAG_VADDR, update the base vaddr for iova, and
 * unblock translation of host virtual addresses in the iova range.  The vaddr
 * must have previously been invalidated with VFIO_DMA_UNMAP_FLAG_VADDR.  To
 * maintain memory consistency within the user application, the updated vaddr
 * must address the same memory object as originally mapped.  Failure to do so
 * will result in user memory corruption and/or device misbehavior.  iova and
 * size must match those in the original MAP_DMA call.  Protection is not
 * changed, and the READ & WRITE flags must be 0.
 */
struct vfio_iommu_type1_dma_map {
        __u32   argsz;
        __u32   flags;
#define VFIO_DMA_MAP_FLAG_READ (1 << 0)         /* readable from device */
#define VFIO_DMA_MAP_FLAG_WRITE (1 << 1)        /* writable from device */
#define VFIO_DMA_MAP_FLAG_VADDR (1 << 2)
        __u64   vaddr;                          /* Process virtual address */
        __u64   iova;                           /* IO virtual address */
        __u64   size;                           /* Size of mapping (bytes) */
};

#define VFIO_IOMMU_MAP_DMA _IO(VFIO_TYPE, VFIO_BASE + 13)
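
/*
 * Example (editor's illustrative sketch, not part of this header): mapping
 * 1MB of anonymous memory at IOVA 0 for device DMA on a Type1 container;
 * error handling is elided.
 *
 *      struct vfio_iommu_type1_dma_map map = {
 *              .argsz = sizeof(map),
 *              .flags = VFIO_DMA_MAP_FLAG_READ | VFIO_DMA_MAP_FLAG_WRITE,
 *              .vaddr = (__u64)(uintptr_t)mmap(NULL, 1024 * 1024,
 *                              PROT_READ | PROT_WRITE,
 *                              MAP_PRIVATE | MAP_ANONYMOUS, -1, 0),
 *              .iova  = 0,
 *              .size  = 1024 * 1024,
 *      };
 *
 *      ioctl(container, VFIO_IOMMU_MAP_DMA, &map);
 */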

struct vfio_bitmap {
        __u64   pgsize; /* page size for bitmap in bytes */
        __u64   size;   /* in bytes */
        __u64   *data;  /* one bit per page */
};

/**
 * VFIO_IOMMU_UNMAP_DMA - _IOWR(VFIO_TYPE, VFIO_BASE + 14,
 *                              struct vfio_iommu_type1_dma_unmap)
 *
 * Unmap IO virtual addresses using the provided struct
 * vfio_iommu_type1_dma_unmap.  Caller sets argsz.  The actual unmapped size
 * is returned in the size field.  No guarantee is made to the user that
 * arbitrary unmaps of iova or size different from those used in the original
 * mapping call will succeed.
 *
 * VFIO_DMA_UNMAP_FLAG_GET_DIRTY_BITMAP should be set to get the dirty bitmap
 * before unmapping IO virtual addresses. When this flag is set, the user must
 * provide a struct vfio_bitmap in data[]. The user must provide zeroed memory
 * via vfio_bitmap.data and its size in the vfio_bitmap.size field. A bit in
 * the bitmap represents one page, of the user-provided page size in the
 * vfio_bitmap.pgsize field, consecutively starting from the iova offset. A
 * set bit indicates that the page at that offset from iova is dirty. A bitmap
 * of the pages in the range of the unmapped size is returned in the
 * user-provided vfio_bitmap.data.
 *
 * If flags & VFIO_DMA_UNMAP_FLAG_ALL, unmap all addresses.  iova and size
 * must be 0.  This cannot be combined with the get-dirty-bitmap flag.
 *
 * If flags & VFIO_DMA_UNMAP_FLAG_VADDR, do not unmap, but invalidate host
 * virtual addresses in the iova range.  Tasks that attempt to translate an
 * iova's vaddr will block.  DMA to already-mapped pages continues.  This
 * cannot be combined with the get-dirty-bitmap flag.
 */
struct vfio_iommu_type1_dma_unmap {
        __u32   argsz;
        __u32   flags;
#define VFIO_DMA_UNMAP_FLAG_GET_DIRTY_BITMAP (1 << 0)
#define VFIO_DMA_UNMAP_FLAG_ALL              (1 << 1)
#define VFIO_DMA_UNMAP_FLAG_VADDR            (1 << 2)
        __u64   iova;                           /* IO virtual address */
        __u64   size;                           /* Size of mapping (bytes) */
        __u8    data[];
};

#define VFIO_IOMMU_UNMAP_DMA _IO(VFIO_TYPE, VFIO_BASE + 14)
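
/*
 * Example (editor's illustrative sketch, not part of this header):
 * unmapping the 1MB range mapped above while collecting its dirty bitmap,
 * assuming a 4K bitmap page size; error handling is elided.
 *
 *      struct vfio_iommu_type1_dma_unmap *unmap;
 *      struct vfio_bitmap *bitmap;
 *
 *      unmap = calloc(1, sizeof(*unmap) + sizeof(*bitmap));
 *      unmap->argsz = sizeof(*unmap) + sizeof(*bitmap);
 *      unmap->flags = VFIO_DMA_UNMAP_FLAG_GET_DIRTY_BITMAP;
 *      unmap->iova  = 0;
 *      unmap->size  = 1024 * 1024;
 *      bitmap = (struct vfio_bitmap *)unmap->data;
 *      bitmap->pgsize = 4096;                          // one bit per 4K page
 *      bitmap->size   = (1024 * 1024 / 4096) / 8;      // bitmap bytes
 *      bitmap->data   = calloc(1, bitmap->size);       // zeroed, as required
 *      ioctl(container, VFIO_IOMMU_UNMAP_DMA, unmap);
 */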

/*
 * IOCTLs to enable/disable IOMMU container usage.
 * No parameters are supported.
 */
#define VFIO_IOMMU_ENABLE       _IO(VFIO_TYPE, VFIO_BASE + 15)
#define VFIO_IOMMU_DISABLE      _IO(VFIO_TYPE, VFIO_BASE + 16)

/**
 * VFIO_IOMMU_DIRTY_PAGES - _IOWR(VFIO_TYPE, VFIO_BASE + 17,
 *                                     struct vfio_iommu_type1_dirty_bitmap)
 * This IOCTL is used for dirty page logging.
 * The caller should set a flag depending on which operation to perform,
 * detailed as below:
 *
 * Calling the IOCTL with the VFIO_IOMMU_DIRTY_PAGES_FLAG_START flag set
 * instructs the IOMMU driver to log pages that are dirtied or potentially
 * dirtied by the device; designed to be used when a migration is in progress.
 * Dirty pages are logged until logging is disabled by the user application by
 * calling the IOCTL with the VFIO_IOMMU_DIRTY_PAGES_FLAG_STOP flag.
 *
 * Calling the IOCTL with the VFIO_IOMMU_DIRTY_PAGES_FLAG_STOP flag set
 * instructs the IOMMU driver to stop logging dirtied pages.
 *
 * Calling the IOCTL with the VFIO_IOMMU_DIRTY_PAGES_FLAG_GET_BITMAP flag set
 * returns the dirty pages bitmap for the IOMMU container for a given IOVA
 * range. The user must specify the IOVA range and the pgsize through the
 * structure vfio_iommu_type1_dirty_bitmap_get in the data[] portion. This
 * interface supports getting a bitmap of the smallest supported pgsize only
 * and can be modified in the future to get a bitmap of any specified
 * supported pgsize. The user must provide a zeroed memory area for the bitmap
 * memory and specify its size in bitmap.size. One bit is used to represent
 * one page consecutively starting from the iova offset. The user should
 * provide the page size in the bitmap.pgsize field. A bit set in the bitmap
 * indicates that the page at that offset from iova is dirty. The caller must
 * set argsz to a value including the size of structure
 * vfio_iommu_type1_dirty_bitmap_get, but excluding the size of the actual
 * bitmap. If dirty pages logging is not enabled, an error will be returned.
 *
 * Only one of the flags _START, _STOP and _GET may be specified at a time.
 */
struct vfio_iommu_type1_dirty_bitmap {
        __u32   argsz;
        __u32   flags;
#define VFIO_IOMMU_DIRTY_PAGES_FLAG_START       (1 << 0)
#define VFIO_IOMMU_DIRTY_PAGES_FLAG_STOP        (1 << 1)
#define VFIO_IOMMU_DIRTY_PAGES_FLAG_GET_BITMAP  (1 << 2)
        __u8    data[];
};

struct vfio_iommu_type1_dirty_bitmap_get {
        __u64              iova;        /* IO virtual address */
        __u64              size;        /* Size of iova range */
        struct vfio_bitmap bitmap;
};

#define VFIO_IOMMU_DIRTY_PAGES          _IO(VFIO_TYPE, VFIO_BASE + 17)
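
/*
 * Example (editor's illustrative sketch, not part of this header): starting
 * dirty page logging, then fetching one bitmap for the 1MB range mapped
 * earlier; error handling is elided.
 *
 *      struct vfio_iommu_type1_dirty_bitmap start = {
 *              .argsz = sizeof(start),
 *              .flags = VFIO_IOMMU_DIRTY_PAGES_FLAG_START,
 *      };
 *      struct vfio_iommu_type1_dirty_bitmap *get;
 *      struct vfio_iommu_type1_dirty_bitmap_get *range;
 *
 *      ioctl(container, VFIO_IOMMU_DIRTY_PAGES, &start);
 *
 *      get = calloc(1, sizeof(*get) + sizeof(*range));
 *      get->argsz = sizeof(*get) + sizeof(*range);
 *      get->flags = VFIO_IOMMU_DIRTY_PAGES_FLAG_GET_BITMAP;
 *      range = (struct vfio_iommu_type1_dirty_bitmap_get *)get->data;
 *      range->iova = 0;
 *      range->size = 1024 * 1024;
 *      range->bitmap.pgsize = 4096;
 *      range->bitmap.size   = (1024 * 1024 / 4096) / 8;
 *      range->bitmap.data   = calloc(1, range->bitmap.size);
 *      ioctl(container, VFIO_IOMMU_DIRTY_PAGES, get);
 */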

/* -------- Additional API for SPAPR TCE (Server POWERPC) IOMMU -------- */

/*
 * The SPAPR TCE DDW info struct provides the information about
 * the details of the Dynamic DMA window capability.
 *
 * @pgsizes contains a page size bitmask, 4K/64K/16M are supported.
 * @max_dynamic_windows_supported tells the maximum number of windows
 * which the platform can create.
 * @levels tells the maximum number of levels in multi-level IOMMU tables;
 * this allows splitting a table into smaller chunks which reduces
 * the amount of physically contiguous memory required for the table.
 */
struct vfio_iommu_spapr_tce_ddw_info {
        __u64 pgsizes;                  /* Bitmap of supported page sizes */
        __u32 max_dynamic_windows_supported;
        __u32 levels;
};

/*
 * The SPAPR TCE info struct provides the information about the PCI bus
 * address ranges available for DMA.  These values are programmed into
 * the hardware, so the guest has to know that information.
 *
 * The DMA 32 bit window start is an absolute PCI bus address.
 * The IOVA addresses passed via map/unmap ioctls are absolute PCI bus
 * addresses too, so the window works as a filter rather than an offset
 * for IOVA addresses.
 *
 * Flags supported:
 * - VFIO_IOMMU_SPAPR_INFO_DDW: informs the userspace that dynamic DMA windows
 *   (DDW) support is present. @ddw is only supported when DDW is present.
 */
struct vfio_iommu_spapr_tce_info {
        __u32 argsz;
        __u32 flags;
#define VFIO_IOMMU_SPAPR_INFO_DDW       (1 << 0)        /* DDW supported */
        __u32 dma32_window_start;       /* 32 bit window start (bytes) */
        __u32 dma32_window_size;        /* 32 bit window size (bytes) */
        struct vfio_iommu_spapr_tce_ddw_info ddw;
};

#define VFIO_IOMMU_SPAPR_TCE_GET_INFO   _IO(VFIO_TYPE, VFIO_BASE + 12)

/*
 * EEH PE operation struct provides ways to:
 * - enable/disable EEH functionality;
 * - unfreeze IO/DMA for frozen PE;
 * - read PE state;
 * - reset PE;
 * - configure PE;
 * - inject EEH error.
 */
struct vfio_eeh_pe_err {
        __u32 type;
        __u32 func;
        __u64 addr;
        __u64 mask;
};

struct vfio_eeh_pe_op {
        __u32 argsz;
        __u32 flags;
        __u32 op;
        union {
                struct vfio_eeh_pe_err err;
        };
};

#define VFIO_EEH_PE_DISABLE             0       /* Disable EEH functionality */
#define VFIO_EEH_PE_ENABLE              1       /* Enable EEH functionality  */
#define VFIO_EEH_PE_UNFREEZE_IO         2       /* Enable IO for frozen PE   */
#define VFIO_EEH_PE_UNFREEZE_DMA        3       /* Enable DMA for frozen PE  */
#define VFIO_EEH_PE_GET_STATE           4       /* PE state retrieval        */
#define  VFIO_EEH_PE_STATE_NORMAL       0       /* PE in functional state    */
#define  VFIO_EEH_PE_STATE_RESET        1       /* PE reset in progress      */
#define  VFIO_EEH_PE_STATE_STOPPED      2       /* Stopped DMA and IO        */
#define  VFIO_EEH_PE_STATE_STOPPED_DMA  4       /* Stopped DMA only          */
#define  VFIO_EEH_PE_STATE_UNAVAIL      5       /* State unavailable         */
#define VFIO_EEH_PE_RESET_DEACTIVATE    5       /* Deassert PE reset         */
#define VFIO_EEH_PE_RESET_HOT           6       /* Assert hot reset          */
#define VFIO_EEH_PE_RESET_FUNDAMENTAL   7       /* Assert fundamental reset  */
#define VFIO_EEH_PE_CONFIGURE           8       /* PE configuration          */
#define VFIO_EEH_PE_INJECT_ERR          9       /* Inject EEH error          */

#define VFIO_EEH_PE_OP                  _IO(VFIO_TYPE, VFIO_BASE + 21)

/**
 * VFIO_IOMMU_SPAPR_REGISTER_MEMORY - _IOW(VFIO_TYPE, VFIO_BASE + 17,
 *                                  struct vfio_iommu_spapr_register_memory)
 *
 * Registers user space memory where DMA is allowed. It pins
 * user pages and does the locked memory accounting, so
 * subsequent VFIO_IOMMU_MAP_DMA/VFIO_IOMMU_UNMAP_DMA calls
 * are faster.
 */
struct vfio_iommu_spapr_register_memory {
        __u32   argsz;
        __u32   flags;
        __u64   vaddr;                          /* Process virtual address */
        __u64   size;                           /* Size of mapping (bytes) */
};
#define VFIO_IOMMU_SPAPR_REGISTER_MEMORY        _IO(VFIO_TYPE, VFIO_BASE + 17)

/**
 * VFIO_IOMMU_SPAPR_UNREGISTER_MEMORY - _IOW(VFIO_TYPE, VFIO_BASE + 18,
 *                                  struct vfio_iommu_spapr_register_memory)
 *
 * Unregisters user space memory registered with
 * VFIO_IOMMU_SPAPR_REGISTER_MEMORY.
 * Uses vfio_iommu_spapr_register_memory for parameters.
 */
#define VFIO_IOMMU_SPAPR_UNREGISTER_MEMORY      _IO(VFIO_TYPE, VFIO_BASE + 18)

/**
 * VFIO_IOMMU_SPAPR_TCE_CREATE - _IOWR(VFIO_TYPE, VFIO_BASE + 19,
 *                                     struct vfio_iommu_spapr_tce_create)
 *
 * Creates an additional TCE table and programs it (sets a new DMA window)
 * to every IOMMU group in the container. It receives the page shift, window
 * size and number of levels of the TCE table being created.
 *
 * It allocates and returns the PCI bus offset of the new DMA window.
 */
struct vfio_iommu_spapr_tce_create {
        __u32 argsz;
        __u32 flags;
        /* in */
        __u32 page_shift;
        __u32 __resv1;
        __u64 window_size;
        __u32 levels;
        __u32 __resv2;
        /* out */
        __u64 start_addr;
};
#define VFIO_IOMMU_SPAPR_TCE_CREATE     _IO(VFIO_TYPE, VFIO_BASE + 19)
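
/*
 * Example (editor's illustrative sketch, not part of this header): creating
 * a DMA window with 64K IOMMU pages and a single-level TCE table on a
 * spapr-tce container; error handling is elided.
 *
 *      struct vfio_iommu_spapr_tce_create create = {
 *              .argsz       = sizeof(create),
 *              .page_shift  = 16,              // 64K IOMMU pages
 *              .window_size = 1ULL << 32,      // 4G window
 *              .levels      = 1,
 *      };
 *
 *      ioctl(container, VFIO_IOMMU_SPAPR_TCE_CREATE, &create);
 *      // create.start_addr now holds the bus offset of the new window
 */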

/**
 * VFIO_IOMMU_SPAPR_TCE_REMOVE - _IOW(VFIO_TYPE, VFIO_BASE + 20,
 *                                    struct vfio_iommu_spapr_tce_remove)
 *
 * Unprograms a TCE table from all groups in the container and destroys it.
 * It receives a PCI bus offset as a window id.
 */
struct vfio_iommu_spapr_tce_remove {
        __u32 argsz;
        __u32 flags;
        /* in */
        __u64 start_addr;
};
#define VFIO_IOMMU_SPAPR_TCE_REMOVE     _IO(VFIO_TYPE, VFIO_BASE + 20)

/* ***************************************************************** */

#endif /* VFIO_H */