linux/include/uapi/drm/i915_drm.h
/*
 * Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
 * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 */

#ifndef _UAPI_I915_DRM_H_
#define _UAPI_I915_DRM_H_

#include "drm.h"

#if defined(__cplusplus)
extern "C" {
#endif

/* Please note that modifications to all structs defined here are
 * subject to backwards-compatibility constraints.
 */

/**
 * DOC: uevents generated by i915 on its device node
 *
 * I915_L3_PARITY_UEVENT - Generated when the driver receives a parity mismatch
 *      event from the gpu l3 cache. Additional information supplied is ROW,
 *      BANK, SUBBANK, SLICE of the affected cacheline. Userspace should keep
 *      track of these events, and if a specific cache-line seems to have a
 *      persistent error, remap it with the l3 remapping tool supplied in
 *      intel-gpu-tools.  The value supplied with the event is always 1.
 *
 * I915_ERROR_UEVENT - Generated upon error detection, currently only via
 *      hangcheck. The error detection event is a good indicator of when things
 *      began to go badly. The value supplied with the event is a 1 upon error
 *      detection, and a 0 upon reset completion, signifying no more error
 *      exists. NOTE: Disabling hangcheck or reset via module parameter will
 *      cause the related events not to be seen.
 *
 * I915_RESET_UEVENT - Event is generated just before an attempt to reset the
 *      GPU. The value supplied with the event is always 1. NOTE: Disabling
 *      reset via module parameter will cause this event not to be seen.
 */
#define I915_L3_PARITY_UEVENT           "L3_PARITY_ERROR"
#define I915_ERROR_UEVENT               "ERROR"
#define I915_RESET_UEVENT               "RESET"

/*
 * i915_user_extension: Base class for defining a chain of extensions
 *
 * Many interfaces need to grow over time. In most cases we can simply
 * extend the struct and have userspace pass in more data. Another option,
 * as demonstrated by Vulkan's approach to providing extensions for forward
 * and backward compatibility, is to use a list of optional structs to
 * provide those extra details.
 *
 * The key advantage to using an extension chain is that it allows us to
 * redefine the interface more easily than an ever growing struct of
 * increasing complexity, and for large parts of that interface to be
 * entirely optional. The downside is more pointer chasing; chasing across
 * the __user boundary with pointers encapsulated inside u64.
 */
struct i915_user_extension {
        __u64 next_extension;
        __u32 name;
        __u32 flags; /* All undefined bits must be zero. */
        __u32 rsvd[4]; /* Reserved for future use; must be zero. */
};
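
/*
 * Usage sketch (illustrative only, not part of the ABI; the extension
 * names below are hypothetical placeholders). Each link stores the
 * userspace address of the next link in next_extension, with 0
 * terminating the chain:
 *
 *      struct i915_user_extension ext2 = {
 *              .next_extension = 0,                    // end of the chain
 *              .name = SOME_EXTENSION_NAME2,           // hypothetical name
 *      };
 *      struct i915_user_extension ext1 = {
 *              .next_extension = (__u64)(uintptr_t)&ext2,
 *              .name = SOME_EXTENSION_NAME1,           // hypothetical name
 *      };
 *
 * The extensible ioctl argument then carries (__u64)(uintptr_t)&ext1, and
 * the kernel walks the chain across the __user boundary.
 */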

/*
 * MOCS indexes used for GPU surfaces, defining the cacheability of the
 * surface data and the coherency for this data wrt. CPU vs. GPU accesses.
 */
enum i915_mocs_table_index {
        /*
         * Not cached anywhere, coherency between CPU and GPU accesses is
         * guaranteed.
         */
        I915_MOCS_UNCACHED,
        /*
         * Cacheability and coherency controlled by the kernel automatically
         * based on the DRM_I915_GEM_SET_CACHING IOCTL setting and the current
         * usage of the surface (used for display scanout or not).
         */
        I915_MOCS_PTE,
        /*
         * Cached in all GPU caches available on the platform.
         * Coherency between CPU and GPU accesses to the surface is not
         * guaranteed without extra synchronization.
         */
        I915_MOCS_CACHED,
};

/*
 * Different engines serve different roles, and there may be more than one
 * engine serving each role. enum drm_i915_gem_engine_class provides a
 * classification of the role of the engine, which may be used when requesting
 * operations to be performed on a certain subset of engines, or for providing
 * information about that group.
 */
enum drm_i915_gem_engine_class {
        I915_ENGINE_CLASS_RENDER        = 0,
        I915_ENGINE_CLASS_COPY          = 1,
        I915_ENGINE_CLASS_VIDEO         = 2,
        I915_ENGINE_CLASS_VIDEO_ENHANCE = 3,

        /* should be kept compact */

        I915_ENGINE_CLASS_INVALID       = -1
};

/*
 * There may be more than one engine fulfilling any role within the system.
 * Each engine of a class is given a unique instance number and therefore
 * any engine can be specified by its class:instance tuple. APIs that allow
 * access to any engine in the system will use struct i915_engine_class_instance
 * for this identification.
 */
struct i915_engine_class_instance {
        __u16 engine_class; /* see enum drm_i915_gem_engine_class */
        __u16 engine_instance;
#define I915_ENGINE_CLASS_INVALID_NONE -1
#define I915_ENGINE_CLASS_INVALID_VIRTUAL -2
};

/**
 * DOC: perf_events exposed by i915 through /sys/bus/event_source/devices/i915
 */

enum drm_i915_pmu_engine_sample {
        I915_SAMPLE_BUSY = 0,
        I915_SAMPLE_WAIT = 1,
        I915_SAMPLE_SEMA = 2
};

#define I915_PMU_SAMPLE_BITS (4)
#define I915_PMU_SAMPLE_MASK (0xf)
#define I915_PMU_SAMPLE_INSTANCE_BITS (8)
#define I915_PMU_CLASS_SHIFT \
        (I915_PMU_SAMPLE_BITS + I915_PMU_SAMPLE_INSTANCE_BITS)

#define __I915_PMU_ENGINE(class, instance, sample) \
        ((class) << I915_PMU_CLASS_SHIFT | \
        (instance) << I915_PMU_SAMPLE_BITS | \
        (sample))

#define I915_PMU_ENGINE_BUSY(class, instance) \
        __I915_PMU_ENGINE(class, instance, I915_SAMPLE_BUSY)

#define I915_PMU_ENGINE_WAIT(class, instance) \
        __I915_PMU_ENGINE(class, instance, I915_SAMPLE_WAIT)

#define I915_PMU_ENGINE_SEMA(class, instance) \
        __I915_PMU_ENGINE(class, instance, I915_SAMPLE_SEMA)

#define __I915_PMU_OTHER(x) (__I915_PMU_ENGINE(0xff, 0xff, 0xf) + 1 + (x))

#define I915_PMU_ACTUAL_FREQUENCY       __I915_PMU_OTHER(0)
#define I915_PMU_REQUESTED_FREQUENCY    __I915_PMU_OTHER(1)
#define I915_PMU_INTERRUPTS             __I915_PMU_OTHER(2)
#define I915_PMU_RC6_RESIDENCY          __I915_PMU_OTHER(3)

#define I915_PMU_LAST I915_PMU_RC6_RESIDENCY
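
/*
 * Usage sketch (illustrative only; assumes the i915 PMU publishes its
 * dynamic type id in /sys/bus/event_source/devices/i915/type, read into
 * pmu_type below, and that the process has sufficient privileges for a
 * system-wide perf event):
 *
 *      struct perf_event_attr attr = {
 *              .type = pmu_type,       // from the sysfs "type" file
 *              .size = sizeof(attr),
 *              .config = I915_PMU_ENGINE_BUSY(I915_ENGINE_CLASS_RENDER, 0),
 *      };
 *      // i915 events are system-wide: pid = -1, cpu = 0
 *      int fd = syscall(__NR_perf_event_open, &attr, -1, 0, -1, 0);
 *      __u64 busy_ns;
 *      read(fd, &busy_ns, sizeof(busy_ns));    // render engine busy time
 */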

/* Each region is a minimum of 16k, and there are at most 255 of them.
 */
#define I915_NR_TEX_REGIONS 255 /* table size 2k - maximum due to use
                                 * of chars for next/prev indices */
#define I915_LOG_MIN_TEX_REGION_SIZE 14

typedef struct _drm_i915_init {
        enum {
                I915_INIT_DMA = 0x01,
                I915_CLEANUP_DMA = 0x02,
                I915_RESUME_DMA = 0x03
        } func;
        unsigned int mmio_offset;
        int sarea_priv_offset;
        unsigned int ring_start;
        unsigned int ring_end;
        unsigned int ring_size;
        unsigned int front_offset;
        unsigned int back_offset;
        unsigned int depth_offset;
        unsigned int w;
        unsigned int h;
        unsigned int pitch;
        unsigned int pitch_bits;
        unsigned int back_pitch;
        unsigned int depth_pitch;
        unsigned int cpp;
        unsigned int chipset;
} drm_i915_init_t;

typedef struct _drm_i915_sarea {
        struct drm_tex_region texList[I915_NR_TEX_REGIONS + 1];
        int last_upload;        /* last time texture was uploaded */
        int last_enqueue;       /* last time a buffer was enqueued */
        int last_dispatch;      /* age of the most recently dispatched buffer */
        int ctxOwner;           /* last context to upload state */
        int texAge;
        int pf_enabled;         /* is pageflipping allowed? */
        int pf_active;
        int pf_current_page;    /* which buffer is being displayed? */
        int perf_boxes;         /* performance boxes to be displayed */
        int width, height;      /* screen size in pixels */

        drm_handle_t front_handle;
        int front_offset;
        int front_size;

        drm_handle_t back_handle;
        int back_offset;
        int back_size;

        drm_handle_t depth_handle;
        int depth_offset;
        int depth_size;

        drm_handle_t tex_handle;
        int tex_offset;
        int tex_size;
        int log_tex_granularity;
        int pitch;
        int rotation;           /* 0, 90, 180 or 270 */
        int rotated_offset;
        int rotated_size;
        int rotated_pitch;
        int virtualX, virtualY;

        unsigned int front_tiled;
        unsigned int back_tiled;
        unsigned int depth_tiled;
        unsigned int rotated_tiled;
        unsigned int rotated2_tiled;

        int pipeA_x;
        int pipeA_y;
        int pipeA_w;
        int pipeA_h;
        int pipeB_x;
        int pipeB_y;
        int pipeB_w;
        int pipeB_h;

        /* fill out some space for old userspace triple buffer */
        drm_handle_t unused_handle;
        __u32 unused1, unused2, unused3;

        /* buffer object handles for static buffers. May change
         * over the lifetime of the client.
         */
        __u32 front_bo_handle;
        __u32 back_bo_handle;
        __u32 unused_bo_handle;
        __u32 depth_bo_handle;

} drm_i915_sarea_t;

/* due to userspace building against these headers we need some compat here */
#define planeA_x pipeA_x
#define planeA_y pipeA_y
#define planeA_w pipeA_w
#define planeA_h pipeA_h
#define planeB_x pipeB_x
#define planeB_y pipeB_y
#define planeB_w pipeB_w
#define planeB_h pipeB_h

/* Flags for perf_boxes
 */
#define I915_BOX_RING_EMPTY    0x1
#define I915_BOX_FLIP          0x2
#define I915_BOX_WAIT          0x4
#define I915_BOX_TEXTURE_LOAD  0x8
#define I915_BOX_LOST_CONTEXT  0x10

/*
 * i915 specific ioctls.
 *
 * The device specific ioctl range is [DRM_COMMAND_BASE, DRM_COMMAND_END), i.e.
 * [0x40, 0xa0) (0xa0 is excluded). The numbers below are defined as offsets
 * against DRM_COMMAND_BASE and should be in the range [0x0, 0x60).
 */
#define DRM_I915_INIT           0x00
#define DRM_I915_FLUSH          0x01
#define DRM_I915_FLIP           0x02
#define DRM_I915_BATCHBUFFER    0x03
#define DRM_I915_IRQ_EMIT       0x04
#define DRM_I915_IRQ_WAIT       0x05
#define DRM_I915_GETPARAM       0x06
#define DRM_I915_SETPARAM       0x07
#define DRM_I915_ALLOC          0x08
#define DRM_I915_FREE           0x09
#define DRM_I915_INIT_HEAP      0x0a
#define DRM_I915_CMDBUFFER      0x0b
#define DRM_I915_DESTROY_HEAP   0x0c
#define DRM_I915_SET_VBLANK_PIPE        0x0d
#define DRM_I915_GET_VBLANK_PIPE        0x0e
#define DRM_I915_VBLANK_SWAP    0x0f
#define DRM_I915_HWS_ADDR       0x11
#define DRM_I915_GEM_INIT       0x13
#define DRM_I915_GEM_EXECBUFFER 0x14
#define DRM_I915_GEM_PIN        0x15
#define DRM_I915_GEM_UNPIN      0x16
#define DRM_I915_GEM_BUSY       0x17
#define DRM_I915_GEM_THROTTLE   0x18
#define DRM_I915_GEM_ENTERVT    0x19
#define DRM_I915_GEM_LEAVEVT    0x1a
#define DRM_I915_GEM_CREATE     0x1b
#define DRM_I915_GEM_PREAD      0x1c
#define DRM_I915_GEM_PWRITE     0x1d
#define DRM_I915_GEM_MMAP       0x1e
#define DRM_I915_GEM_SET_DOMAIN 0x1f
#define DRM_I915_GEM_SW_FINISH  0x20
#define DRM_I915_GEM_SET_TILING 0x21
#define DRM_I915_GEM_GET_TILING 0x22
#define DRM_I915_GEM_GET_APERTURE 0x23
#define DRM_I915_GEM_MMAP_GTT   0x24
#define DRM_I915_GET_PIPE_FROM_CRTC_ID  0x25
#define DRM_I915_GEM_MADVISE    0x26
#define DRM_I915_OVERLAY_PUT_IMAGE      0x27
#define DRM_I915_OVERLAY_ATTRS  0x28
#define DRM_I915_GEM_EXECBUFFER2        0x29
#define DRM_I915_GEM_EXECBUFFER2_WR     DRM_I915_GEM_EXECBUFFER2
#define DRM_I915_GET_SPRITE_COLORKEY    0x2a
#define DRM_I915_SET_SPRITE_COLORKEY    0x2b
#define DRM_I915_GEM_WAIT       0x2c
#define DRM_I915_GEM_CONTEXT_CREATE     0x2d
#define DRM_I915_GEM_CONTEXT_DESTROY    0x2e
#define DRM_I915_GEM_SET_CACHING        0x2f
#define DRM_I915_GEM_GET_CACHING        0x30
#define DRM_I915_REG_READ               0x31
#define DRM_I915_GET_RESET_STATS        0x32
#define DRM_I915_GEM_USERPTR            0x33
#define DRM_I915_GEM_CONTEXT_GETPARAM   0x34
#define DRM_I915_GEM_CONTEXT_SETPARAM   0x35
#define DRM_I915_PERF_OPEN              0x36
#define DRM_I915_PERF_ADD_CONFIG        0x37
#define DRM_I915_PERF_REMOVE_CONFIG     0x38
#define DRM_I915_QUERY                  0x39
#define DRM_I915_GEM_VM_CREATE          0x3a
#define DRM_I915_GEM_VM_DESTROY         0x3b
/* Must be kept compact -- no holes */

#define DRM_IOCTL_I915_INIT             DRM_IOW( DRM_COMMAND_BASE + DRM_I915_INIT, drm_i915_init_t)
#define DRM_IOCTL_I915_FLUSH            DRM_IO ( DRM_COMMAND_BASE + DRM_I915_FLUSH)
#define DRM_IOCTL_I915_FLIP             DRM_IO ( DRM_COMMAND_BASE + DRM_I915_FLIP)
#define DRM_IOCTL_I915_BATCHBUFFER      DRM_IOW( DRM_COMMAND_BASE + DRM_I915_BATCHBUFFER, drm_i915_batchbuffer_t)
#define DRM_IOCTL_I915_IRQ_EMIT         DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_IRQ_EMIT, drm_i915_irq_emit_t)
#define DRM_IOCTL_I915_IRQ_WAIT         DRM_IOW( DRM_COMMAND_BASE + DRM_I915_IRQ_WAIT, drm_i915_irq_wait_t)
#define DRM_IOCTL_I915_GETPARAM         DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GETPARAM, drm_i915_getparam_t)
#define DRM_IOCTL_I915_SETPARAM         DRM_IOW( DRM_COMMAND_BASE + DRM_I915_SETPARAM, drm_i915_setparam_t)
#define DRM_IOCTL_I915_ALLOC            DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_ALLOC, drm_i915_mem_alloc_t)
#define DRM_IOCTL_I915_FREE             DRM_IOW( DRM_COMMAND_BASE + DRM_I915_FREE, drm_i915_mem_free_t)
#define DRM_IOCTL_I915_INIT_HEAP        DRM_IOW( DRM_COMMAND_BASE + DRM_I915_INIT_HEAP, drm_i915_mem_init_heap_t)
#define DRM_IOCTL_I915_CMDBUFFER        DRM_IOW( DRM_COMMAND_BASE + DRM_I915_CMDBUFFER, drm_i915_cmdbuffer_t)
#define DRM_IOCTL_I915_DESTROY_HEAP     DRM_IOW( DRM_COMMAND_BASE + DRM_I915_DESTROY_HEAP, drm_i915_mem_destroy_heap_t)
#define DRM_IOCTL_I915_SET_VBLANK_PIPE  DRM_IOW( DRM_COMMAND_BASE + DRM_I915_SET_VBLANK_PIPE, drm_i915_vblank_pipe_t)
#define DRM_IOCTL_I915_GET_VBLANK_PIPE  DRM_IOR( DRM_COMMAND_BASE + DRM_I915_GET_VBLANK_PIPE, drm_i915_vblank_pipe_t)
#define DRM_IOCTL_I915_VBLANK_SWAP      DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_VBLANK_SWAP, drm_i915_vblank_swap_t)
#define DRM_IOCTL_I915_HWS_ADDR         DRM_IOW(DRM_COMMAND_BASE + DRM_I915_HWS_ADDR, struct drm_i915_gem_init)
#define DRM_IOCTL_I915_GEM_INIT         DRM_IOW(DRM_COMMAND_BASE + DRM_I915_GEM_INIT, struct drm_i915_gem_init)
#define DRM_IOCTL_I915_GEM_EXECBUFFER   DRM_IOW(DRM_COMMAND_BASE + DRM_I915_GEM_EXECBUFFER, struct drm_i915_gem_execbuffer)
#define DRM_IOCTL_I915_GEM_EXECBUFFER2  DRM_IOW(DRM_COMMAND_BASE + DRM_I915_GEM_EXECBUFFER2, struct drm_i915_gem_execbuffer2)
#define DRM_IOCTL_I915_GEM_EXECBUFFER2_WR       DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GEM_EXECBUFFER2_WR, struct drm_i915_gem_execbuffer2)
#define DRM_IOCTL_I915_GEM_PIN          DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GEM_PIN, struct drm_i915_gem_pin)
#define DRM_IOCTL_I915_GEM_UNPIN        DRM_IOW(DRM_COMMAND_BASE + DRM_I915_GEM_UNPIN, struct drm_i915_gem_unpin)
#define DRM_IOCTL_I915_GEM_BUSY         DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GEM_BUSY, struct drm_i915_gem_busy)
#define DRM_IOCTL_I915_GEM_SET_CACHING          DRM_IOW(DRM_COMMAND_BASE + DRM_I915_GEM_SET_CACHING, struct drm_i915_gem_caching)
#define DRM_IOCTL_I915_GEM_GET_CACHING          DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GEM_GET_CACHING, struct drm_i915_gem_caching)
#define DRM_IOCTL_I915_GEM_THROTTLE     DRM_IO ( DRM_COMMAND_BASE + DRM_I915_GEM_THROTTLE)
#define DRM_IOCTL_I915_GEM_ENTERVT      DRM_IO(DRM_COMMAND_BASE + DRM_I915_GEM_ENTERVT)
#define DRM_IOCTL_I915_GEM_LEAVEVT      DRM_IO(DRM_COMMAND_BASE + DRM_I915_GEM_LEAVEVT)
#define DRM_IOCTL_I915_GEM_CREATE       DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GEM_CREATE, struct drm_i915_gem_create)
#define DRM_IOCTL_I915_GEM_PREAD        DRM_IOW (DRM_COMMAND_BASE + DRM_I915_GEM_PREAD, struct drm_i915_gem_pread)
#define DRM_IOCTL_I915_GEM_PWRITE       DRM_IOW (DRM_COMMAND_BASE + DRM_I915_GEM_PWRITE, struct drm_i915_gem_pwrite)
#define DRM_IOCTL_I915_GEM_MMAP         DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GEM_MMAP, struct drm_i915_gem_mmap)
#define DRM_IOCTL_I915_GEM_MMAP_GTT     DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GEM_MMAP_GTT, struct drm_i915_gem_mmap_gtt)
#define DRM_IOCTL_I915_GEM_SET_DOMAIN   DRM_IOW (DRM_COMMAND_BASE + DRM_I915_GEM_SET_DOMAIN, struct drm_i915_gem_set_domain)
#define DRM_IOCTL_I915_GEM_SW_FINISH    DRM_IOW (DRM_COMMAND_BASE + DRM_I915_GEM_SW_FINISH, struct drm_i915_gem_sw_finish)
#define DRM_IOCTL_I915_GEM_SET_TILING   DRM_IOWR (DRM_COMMAND_BASE + DRM_I915_GEM_SET_TILING, struct drm_i915_gem_set_tiling)
#define DRM_IOCTL_I915_GEM_GET_TILING   DRM_IOWR (DRM_COMMAND_BASE + DRM_I915_GEM_GET_TILING, struct drm_i915_gem_get_tiling)
#define DRM_IOCTL_I915_GEM_GET_APERTURE DRM_IOR  (DRM_COMMAND_BASE + DRM_I915_GEM_GET_APERTURE, struct drm_i915_gem_get_aperture)
#define DRM_IOCTL_I915_GET_PIPE_FROM_CRTC_ID DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GET_PIPE_FROM_CRTC_ID, struct drm_i915_get_pipe_from_crtc_id)
#define DRM_IOCTL_I915_GEM_MADVISE      DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GEM_MADVISE, struct drm_i915_gem_madvise)
#define DRM_IOCTL_I915_OVERLAY_PUT_IMAGE        DRM_IOW(DRM_COMMAND_BASE + DRM_I915_OVERLAY_PUT_IMAGE, struct drm_intel_overlay_put_image)
#define DRM_IOCTL_I915_OVERLAY_ATTRS    DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_OVERLAY_ATTRS, struct drm_intel_overlay_attrs)
#define DRM_IOCTL_I915_SET_SPRITE_COLORKEY DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_SET_SPRITE_COLORKEY, struct drm_intel_sprite_colorkey)
#define DRM_IOCTL_I915_GET_SPRITE_COLORKEY DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GET_SPRITE_COLORKEY, struct drm_intel_sprite_colorkey)
#define DRM_IOCTL_I915_GEM_WAIT         DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GEM_WAIT, struct drm_i915_gem_wait)
#define DRM_IOCTL_I915_GEM_CONTEXT_CREATE       DRM_IOWR (DRM_COMMAND_BASE + DRM_I915_GEM_CONTEXT_CREATE, struct drm_i915_gem_context_create)
#define DRM_IOCTL_I915_GEM_CONTEXT_CREATE_EXT   DRM_IOWR (DRM_COMMAND_BASE + DRM_I915_GEM_CONTEXT_CREATE, struct drm_i915_gem_context_create_ext)
#define DRM_IOCTL_I915_GEM_CONTEXT_DESTROY      DRM_IOW (DRM_COMMAND_BASE + DRM_I915_GEM_CONTEXT_DESTROY, struct drm_i915_gem_context_destroy)
#define DRM_IOCTL_I915_REG_READ                 DRM_IOWR (DRM_COMMAND_BASE + DRM_I915_REG_READ, struct drm_i915_reg_read)
#define DRM_IOCTL_I915_GET_RESET_STATS          DRM_IOWR (DRM_COMMAND_BASE + DRM_I915_GET_RESET_STATS, struct drm_i915_reset_stats)
#define DRM_IOCTL_I915_GEM_USERPTR                      DRM_IOWR (DRM_COMMAND_BASE + DRM_I915_GEM_USERPTR, struct drm_i915_gem_userptr)
#define DRM_IOCTL_I915_GEM_CONTEXT_GETPARAM     DRM_IOWR (DRM_COMMAND_BASE + DRM_I915_GEM_CONTEXT_GETPARAM, struct drm_i915_gem_context_param)
#define DRM_IOCTL_I915_GEM_CONTEXT_SETPARAM     DRM_IOWR (DRM_COMMAND_BASE + DRM_I915_GEM_CONTEXT_SETPARAM, struct drm_i915_gem_context_param)
#define DRM_IOCTL_I915_PERF_OPEN        DRM_IOW(DRM_COMMAND_BASE + DRM_I915_PERF_OPEN, struct drm_i915_perf_open_param)
#define DRM_IOCTL_I915_PERF_ADD_CONFIG  DRM_IOW(DRM_COMMAND_BASE + DRM_I915_PERF_ADD_CONFIG, struct drm_i915_perf_oa_config)
#define DRM_IOCTL_I915_PERF_REMOVE_CONFIG       DRM_IOW(DRM_COMMAND_BASE + DRM_I915_PERF_REMOVE_CONFIG, __u64)
#define DRM_IOCTL_I915_QUERY                    DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_QUERY, struct drm_i915_query)
#define DRM_IOCTL_I915_GEM_VM_CREATE    DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GEM_VM_CREATE, struct drm_i915_gem_vm_control)
#define DRM_IOCTL_I915_GEM_VM_DESTROY   DRM_IOW (DRM_COMMAND_BASE + DRM_I915_GEM_VM_DESTROY, struct drm_i915_gem_vm_control)

/* Allow drivers to submit batchbuffers directly to hardware, relying
 * on the security mechanisms provided by hardware.
 */
typedef struct drm_i915_batchbuffer {
        int start;              /* agp offset */
        int used;               /* nr bytes in use */
        int DR1;                /* hw flags for GFX_OP_DRAWRECT_INFO */
        int DR4;                /* window origin for GFX_OP_DRAWRECT_INFO */
        int num_cliprects;      /* multipass with multiple cliprects? */
        struct drm_clip_rect __user *cliprects; /* pointer to userspace cliprects */
} drm_i915_batchbuffer_t;

/* As above, but pass a pointer to userspace buffer which can be
 * validated by the kernel prior to sending to hardware.
 */
typedef struct _drm_i915_cmdbuffer {
        char __user *buf;       /* pointer to userspace command buffer */
        int sz;                 /* nr bytes in buf */
        int DR1;                /* hw flags for GFX_OP_DRAWRECT_INFO */
        int DR4;                /* window origin for GFX_OP_DRAWRECT_INFO */
        int num_cliprects;      /* multipass with multiple cliprects? */
        struct drm_clip_rect __user *cliprects; /* pointer to userspace cliprects */
} drm_i915_cmdbuffer_t;

/* Userspace can request & wait on IRQs:
 */
typedef struct drm_i915_irq_emit {
        int __user *irq_seq;
} drm_i915_irq_emit_t;

typedef struct drm_i915_irq_wait {
        int irq_seq;
} drm_i915_irq_wait_t;

/*
 * Different modes of per-process Graphics Translation Table,
 * see I915_PARAM_HAS_ALIASING_PPGTT
 */
#define I915_GEM_PPGTT_NONE     0
#define I915_GEM_PPGTT_ALIASING 1
#define I915_GEM_PPGTT_FULL     2

/* Ioctl to query kernel params:
 */
#define I915_PARAM_IRQ_ACTIVE            1
#define I915_PARAM_ALLOW_BATCHBUFFER     2
#define I915_PARAM_LAST_DISPATCH         3
#define I915_PARAM_CHIPSET_ID            4
#define I915_PARAM_HAS_GEM               5
#define I915_PARAM_NUM_FENCES_AVAIL      6
#define I915_PARAM_HAS_OVERLAY           7
#define I915_PARAM_HAS_PAGEFLIPPING      8
#define I915_PARAM_HAS_EXECBUF2          9
#define I915_PARAM_HAS_BSD               10
#define I915_PARAM_HAS_BLT               11
#define I915_PARAM_HAS_RELAXED_FENCING   12
#define I915_PARAM_HAS_COHERENT_RINGS    13
#define I915_PARAM_HAS_EXEC_CONSTANTS    14
#define I915_PARAM_HAS_RELAXED_DELTA     15
#define I915_PARAM_HAS_GEN7_SOL_RESET    16
#define I915_PARAM_HAS_LLC               17
#define I915_PARAM_HAS_ALIASING_PPGTT    18
#define I915_PARAM_HAS_WAIT_TIMEOUT      19
#define I915_PARAM_HAS_SEMAPHORES        20
#define I915_PARAM_HAS_PRIME_VMAP_FLUSH  21
#define I915_PARAM_HAS_VEBOX             22
#define I915_PARAM_HAS_SECURE_BATCHES    23
#define I915_PARAM_HAS_PINNED_BATCHES    24
#define I915_PARAM_HAS_EXEC_NO_RELOC     25
#define I915_PARAM_HAS_EXEC_HANDLE_LUT   26
#define I915_PARAM_HAS_WT                27
#define I915_PARAM_CMD_PARSER_VERSION    28
#define I915_PARAM_HAS_COHERENT_PHYS_GTT 29
#define I915_PARAM_MMAP_VERSION          30
#define I915_PARAM_HAS_BSD2              31
#define I915_PARAM_REVISION              32
#define I915_PARAM_SUBSLICE_TOTAL        33
#define I915_PARAM_EU_TOTAL              34
#define I915_PARAM_HAS_GPU_RESET         35
#define I915_PARAM_HAS_RESOURCE_STREAMER 36
#define I915_PARAM_HAS_EXEC_SOFTPIN      37
#define I915_PARAM_HAS_POOLED_EU         38
#define I915_PARAM_MIN_EU_IN_POOL        39
#define I915_PARAM_MMAP_GTT_VERSION      40

/*
 * Query whether DRM_I915_GEM_EXECBUFFER2 supports user defined execution
 * priorities, in which case the driver will attempt to execute batches in
 * priority order. The param returns a capability bitmask; a nonzero value
 * implies that the scheduler is enabled, with different features present
 * according to the mask.
 *
 * The initial priority for each batch is supplied by the context and is
 * controlled via I915_CONTEXT_PARAM_PRIORITY.
 */
#define I915_PARAM_HAS_SCHEDULER         41
#define   I915_SCHEDULER_CAP_ENABLED    (1ul << 0)
#define   I915_SCHEDULER_CAP_PRIORITY   (1ul << 1)
#define   I915_SCHEDULER_CAP_PREEMPTION (1ul << 2)
#define   I915_SCHEDULER_CAP_SEMAPHORES (1ul << 3)

#define I915_PARAM_HUC_STATUS            42

/* Query whether DRM_I915_GEM_EXECBUFFER2 supports the ability to opt-out of
 * synchronisation with implicit fencing on individual objects.
 * See EXEC_OBJECT_ASYNC.
 */
#define I915_PARAM_HAS_EXEC_ASYNC        43

/* Query whether DRM_I915_GEM_EXECBUFFER2 supports explicit fence support -
 * both being able to pass in a sync_file fd to wait upon before executing,
 * and being able to return a new sync_file fd that is signaled when the
 * current request is complete. See I915_EXEC_FENCE_IN and I915_EXEC_FENCE_OUT.
 */
#define I915_PARAM_HAS_EXEC_FENCE        44

/* Query whether DRM_I915_GEM_EXECBUFFER2 supports the ability to capture
 * user specified buffers for post-mortem debugging of GPU hangs. See
 * EXEC_OBJECT_CAPTURE.
 */
#define I915_PARAM_HAS_EXEC_CAPTURE      45

#define I915_PARAM_SLICE_MASK            46

/* Assuming it's uniform for each slice, this queries the mask of subslices
 * per-slice for this system.
 */
#define I915_PARAM_SUBSLICE_MASK         47

/*
 * Query whether DRM_I915_GEM_EXECBUFFER2 supports supplying the batch buffer
 * as the first execobject as opposed to the last. See I915_EXEC_BATCH_FIRST.
 */
#define I915_PARAM_HAS_EXEC_BATCH_FIRST  48

/* Query whether DRM_I915_GEM_EXECBUFFER2 supports supplying an array of
 * drm_i915_gem_exec_fence structures.  See I915_EXEC_FENCE_ARRAY.
 */
#define I915_PARAM_HAS_EXEC_FENCE_ARRAY  49

/*
 * Query whether every context (both per-file default and user created) is
 * isolated (insofar as HW supports). If this parameter is not true, then
 * freshly created contexts may inherit values from an existing context,
 * rather than default HW values. If true, it also ensures (insofar as HW
 * supports) that all state set by this context will not leak to any other
 * context.
 *
 * As not every engine across every gen supports contexts, the returned
 * value reports the support of context isolation for individual engines by
 * returning a bitmask of each engine class set to true if that class supports
 * isolation.
 */
#define I915_PARAM_HAS_CONTEXT_ISOLATION 50

/* Frequency of the command streamer timestamps given by the *_TIMESTAMP
 * registers. This used to be fixed per platform but from CNL onwards, this
 * might vary depending on the parts.
 */
#define I915_PARAM_CS_TIMESTAMP_FREQUENCY 51

/*
 * Once upon a time we supposed that writes through the GGTT would be
 * immediately in physical memory (once flushed out of the CPU path). However,
 * on a few different processors and chipsets, this is not necessarily the case
 * as the writes appear to be buffered internally. Thus a read of the backing
 * storage (physical memory) via a different path (with different physical tags
 * to the indirect write via the GGTT) will see stale values from before
 * the GGTT write. Inside the kernel, we can for the most part keep track of
 * the different read/write domains in use (e.g. set-domain), but the assumption
 * of coherency is baked into the ABI, hence reporting its true state in this
 * parameter.
 *
 * Reports true when writes via mmap_gtt are immediately visible following an
 * lfence to flush the WCB.
 *
 * Reports false when writes via mmap_gtt are indeterminately delayed in an
 * internal buffer and are _not_ immediately visible to third parties accessing
 * directly via mmap_cpu/mmap_wc. Use of mmap_gtt as part of an IPC
 * communications channel when reporting false is strongly discouraged.
 */
#define I915_PARAM_MMAP_GTT_COHERENT    52

/*
 * Query whether DRM_I915_GEM_EXECBUFFER2 supports coordination of parallel
 * execution through use of explicit fence support.
 * See I915_EXEC_FENCE_OUT and I915_EXEC_FENCE_SUBMIT.
 */
#define I915_PARAM_HAS_EXEC_SUBMIT_FENCE 53
/* Must be kept compact -- no holes and well documented */

typedef struct drm_i915_getparam {
        __s32 param;
        /*
         * WARNING: Using pointers instead of fixed-size u64 means we need to write
         * compat32 code. Don't repeat this mistake.
         */
        int __user *value;
} drm_i915_getparam_t;
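
/*
 * Usage sketch (illustrative only; fd is assumed to be an open DRM card
 * or render node):
 *
 *      int value = 0;
 *      drm_i915_getparam_t gp = {
 *              .param = I915_PARAM_CHIPSET_ID,
 *              .value = &value,
 *      };
 *      if (ioctl(fd, DRM_IOCTL_I915_GETPARAM, &gp) == 0)
 *              printf("PCI device id: 0x%04x\n", value);
 */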

/* Ioctl to set kernel params:
 */
#define I915_SETPARAM_USE_MI_BATCHBUFFER_START            1
#define I915_SETPARAM_TEX_LRU_LOG_GRANULARITY             2
#define I915_SETPARAM_ALLOW_BATCHBUFFER                   3
#define I915_SETPARAM_NUM_USED_FENCES                     4
/* Must be kept compact -- no holes */

typedef struct drm_i915_setparam {
        int param;
        int value;
} drm_i915_setparam_t;

/* A memory manager for regions of shared memory:
 */
#define I915_MEM_REGION_AGP 1

typedef struct drm_i915_mem_alloc {
        int region;
        int alignment;
        int size;
        int __user *region_offset;      /* offset from start of fb or agp */
} drm_i915_mem_alloc_t;

typedef struct drm_i915_mem_free {
        int region;
        int region_offset;
} drm_i915_mem_free_t;

typedef struct drm_i915_mem_init_heap {
        int region;
        int size;
        int start;
} drm_i915_mem_init_heap_t;

/* Allow memory manager to be torn down and re-initialized (e.g. on
 * rotate):
 */
typedef struct drm_i915_mem_destroy_heap {
        int region;
} drm_i915_mem_destroy_heap_t;

/* Allow X server to configure which pipes to monitor for vblank signals
 */
#define DRM_I915_VBLANK_PIPE_A  1
#define DRM_I915_VBLANK_PIPE_B  2

typedef struct drm_i915_vblank_pipe {
        int pipe;
} drm_i915_vblank_pipe_t;

/* Schedule buffer swap at given vertical blank:
 */
typedef struct drm_i915_vblank_swap {
        drm_drawable_t drawable;
        enum drm_vblank_seq_type seqtype;
        unsigned int sequence;
} drm_i915_vblank_swap_t;

typedef struct drm_i915_hws_addr {
        __u64 addr;
} drm_i915_hws_addr_t;

struct drm_i915_gem_init {
        /**
         * Beginning offset in the GTT to be managed by the DRM memory
         * manager.
         */
        __u64 gtt_start;
        /**
         * Ending offset in the GTT to be managed by the DRM memory
         * manager.
         */
        __u64 gtt_end;
};

struct drm_i915_gem_create {
        /**
         * Requested size for the object.
         *
         * The (page-aligned) allocated size for the object will be returned.
         */
        __u64 size;
        /**
         * Returned handle for the object.
         *
         * Object handles are nonzero.
         */
        __u32 handle;
        __u32 pad;
};
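
/*
 * Usage sketch (illustrative only; fd is an open DRM fd). The kernel
 * rounds the size up to page granularity and returns a nonzero handle:
 *
 *      struct drm_i915_gem_create create = { .size = 4096 };
 *      int ret = ioctl(fd, DRM_IOCTL_I915_GEM_CREATE, &create);
 *      // on success (ret == 0), create.handle names the new object
 */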

struct drm_i915_gem_pread {
        /** Handle for the object being read. */
        __u32 handle;
        __u32 pad;
        /** Offset into the object to read from */
        __u64 offset;
        /** Length of data to read */
        __u64 size;
        /**
         * Pointer to write the data into.
         *
         * This is a fixed-size type for 32/64 compatibility.
         */
        __u64 data_ptr;
};

struct drm_i915_gem_pwrite {
        /** Handle for the object being written to. */
        __u32 handle;
        __u32 pad;
        /** Offset into the object to write to */
        __u64 offset;
        /** Length of data to write */
        __u64 size;
        /**
         * Pointer to read the data from.
         *
         * This is a fixed-size type for 32/64 compatibility.
         */
        __u64 data_ptr;
};

struct drm_i915_gem_mmap {
        /** Handle for the object being mapped. */
        __u32 handle;
        __u32 pad;
        /** Offset in the object to map. */
        __u64 offset;
        /**
         * Length of data to map.
         *
         * The value will be page-aligned.
         */
        __u64 size;
        /**
         * Returned pointer the data was mapped at.
         *
         * This is a fixed-size type for 32/64 compatibility.
         */
        __u64 addr_ptr;

        /**
         * Flags for extended behaviour.
         *
         * Added in version 2.
         */
        __u64 flags;
#define I915_MMAP_WC 0x1
};

struct drm_i915_gem_mmap_gtt {
        /** Handle for the object being mapped. */
        __u32 handle;
        __u32 pad;
        /**
         * Fake offset to use for subsequent mmap call
         *
         * This is a fixed-size type for 32/64 compatibility.
         */
        __u64 offset;
};
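
/*
 * Usage sketch (illustrative only). The ioctl returns only a fake offset;
 * the actual mapping is made with a regular mmap(2) of the DRM fd at that
 * offset:
 *
 *      struct drm_i915_gem_mmap_gtt arg = { .handle = handle };
 *      ioctl(fd, DRM_IOCTL_I915_GEM_MMAP_GTT, &arg);
 *      void *ptr = mmap(NULL, obj_size, PROT_READ | PROT_WRITE,
 *                       MAP_SHARED, fd, arg.offset);
 */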

struct drm_i915_gem_set_domain {
        /** Handle for the object */
        __u32 handle;

        /** New read domains */
        __u32 read_domains;

        /** New write domain */
        __u32 write_domain;
};

struct drm_i915_gem_sw_finish {
        /** Handle for the object */
        __u32 handle;
};

struct drm_i915_gem_relocation_entry {
        /**
         * Handle of the buffer being pointed to by this relocation entry.
         *
         * It's appealing to make this be an index into the mm_validate_entry
         * list to refer to the buffer, but this allows the driver to create
         * a relocation list for state buffers and not re-write it per
         * exec using the buffer.
         */
        __u32 target_handle;

        /**
         * Value to be added to the offset of the target buffer to make up
         * the relocation entry.
         */
        __u32 delta;

        /** Offset in the buffer the relocation entry will be written into */
        __u64 offset;

        /**
         * Offset value of the target buffer that the relocation entry was last
         * written as.
         *
         * If the buffer has the same offset as last time, we can skip syncing
         * and writing the relocation.  This value is written back out by
         * the execbuffer ioctl when the relocation is written.
         */
        __u64 presumed_offset;

        /**
         * Target memory domains read by this operation.
         */
        __u32 read_domains;

        /**
         * Target memory domains written by this operation.
         *
         * Note that only one domain may be written by the whole
         * execbuffer operation, so that where there are conflicts,
         * the application will get -EINVAL back.
         */
        __u32 write_domain;
};

/** @{
 * Intel memory domains
 *
 * Most of these just align with the various caches in
 * the system and are used to flush and invalidate as
 * objects end up cached in different domains.
 */
/** CPU cache */
#define I915_GEM_DOMAIN_CPU             0x00000001
/** Render cache, used by 2D and 3D drawing */
#define I915_GEM_DOMAIN_RENDER          0x00000002
/** Sampler cache, used by texture engine */
#define I915_GEM_DOMAIN_SAMPLER         0x00000004
/** Command queue, used to load batch buffers */
#define I915_GEM_DOMAIN_COMMAND         0x00000008
/** Instruction cache, used by shader programs */
#define I915_GEM_DOMAIN_INSTRUCTION     0x00000010
/** Vertex address cache */
#define I915_GEM_DOMAIN_VERTEX          0x00000020
/** GTT domain - aperture and scanout */
#define I915_GEM_DOMAIN_GTT             0x00000040
/** WC domain - uncached access */
#define I915_GEM_DOMAIN_WC              0x00000080
/** @} */
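
/*
 * Usage sketch (illustrative only): moving an object into the CPU domain
 * before a CPU write; the kernel performs whatever cache flushing or
 * invalidation the transition requires:
 *
 *      struct drm_i915_gem_set_domain sd = {
 *              .handle = handle,
 *              .read_domains = I915_GEM_DOMAIN_CPU,
 *              .write_domain = I915_GEM_DOMAIN_CPU,
 *      };
 *      ioctl(fd, DRM_IOCTL_I915_GEM_SET_DOMAIN, &sd);
 */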

struct drm_i915_gem_exec_object {
        /**
         * User's handle for a buffer to be bound into the GTT for this
         * operation.
         */
        __u32 handle;

        /** Number of relocations to be performed on this buffer */
        __u32 relocation_count;
        /**
         * Pointer to array of struct drm_i915_gem_relocation_entry containing
         * the relocations to be performed in this buffer.
         */
        __u64 relocs_ptr;

        /** Required alignment in graphics aperture */
        __u64 alignment;

        /**
         * Returned value of the updated offset of the object, for future
         * presumed_offset writes.
         */
        __u64 offset;
};

struct drm_i915_gem_execbuffer {
        /**
         * List of buffers to be validated with their relocations to be
         * performed on them.
         *
         * This is a pointer to an array of struct drm_i915_gem_validate_entry.
         *
         * These buffers must be listed in an order such that all relocations
         * a buffer is performing refer to buffers that have already appeared
         * in the validate list.
         */
        __u64 buffers_ptr;
        __u32 buffer_count;

        /** Offset in the batchbuffer to start execution from. */
        __u32 batch_start_offset;
        /** Bytes used in batchbuffer from batch_start_offset */
        __u32 batch_len;
        __u32 DR1;
        __u32 DR4;
        __u32 num_cliprects;
        /** This is a struct drm_clip_rect *cliprects */
        __u64 cliprects_ptr;
};

struct drm_i915_gem_exec_object2 {
        /**
         * User's handle for a buffer to be bound into the GTT for this
         * operation.
         */
        __u32 handle;

        /** Number of relocations to be performed on this buffer */
        __u32 relocation_count;
        /**
         * Pointer to array of struct drm_i915_gem_relocation_entry containing
         * the relocations to be performed in this buffer.
         */
        __u64 relocs_ptr;

        /** Required alignment in graphics aperture */
        __u64 alignment;

        /**
         * When the EXEC_OBJECT_PINNED flag is specified this is populated by
         * the user with the GTT offset at which this object will be pinned.
         * When the I915_EXEC_NO_RELOC flag is specified this must contain the
         * presumed_offset of the object.
         * During execbuffer2 the kernel populates it with the value of the
         * current GTT offset of the object, for future presumed_offset writes.
         */
        __u64 offset;

#define EXEC_OBJECT_NEEDS_FENCE          (1<<0)
#define EXEC_OBJECT_NEEDS_GTT            (1<<1)
#define EXEC_OBJECT_WRITE                (1<<2)
#define EXEC_OBJECT_SUPPORTS_48B_ADDRESS (1<<3)
#define EXEC_OBJECT_PINNED               (1<<4)
#define EXEC_OBJECT_PAD_TO_SIZE          (1<<5)
/* The kernel implicitly tracks GPU activity on all GEM objects, and
 * synchronises operations with outstanding rendering. This includes
 * rendering on other devices if exported via dma-buf. However, sometimes
 * this tracking is too coarse and the user knows better. For example,
 * if the object is split into non-overlapping ranges shared between different
 * clients or engines (i.e. suballocating objects), the implicit tracking
 * by the kernel assumes that each operation affects the whole object rather
 * than an individual range, causing needless synchronisation between clients.
 * The kernel will also forgo any CPU cache flushes prior to rendering from
 * the object as the client is expected to be also handling such domain
 * tracking.
 *
 * The kernel maintains the implicit tracking in order to manage resources
 * used by the GPU - this flag only disables the synchronisation prior to
 * rendering with this object in this execbuf.
 *
 * Opting out of implicit synchronisation requires the user to do its own
 * explicit tracking to avoid rendering corruption. See, for example,
 * I915_PARAM_HAS_EXEC_FENCE to order execbufs and execute them asynchronously.
 */
#define EXEC_OBJECT_ASYNC               (1<<6)
/* Request that the contents of this execobject be copied into the error
 * state upon a GPU hang involving this batch for post-mortem debugging.
 * These buffers are recorded in no particular order as "user" in
 * /sys/class/drm/cardN/error. Query I915_PARAM_HAS_EXEC_CAPTURE to see
 * if the kernel supports this flag.
 */
#define EXEC_OBJECT_CAPTURE             (1<<7)
/* All remaining bits are MBZ and RESERVED FOR FUTURE USE */
#define __EXEC_OBJECT_UNKNOWN_FLAGS -(EXEC_OBJECT_CAPTURE<<1)
        __u64 flags;

        union {
                __u64 rsvd1;
                __u64 pad_to_size;
        };
        __u64 rsvd2;
};

struct drm_i915_gem_exec_fence {
        /**
         * User's handle for a drm_syncobj to wait on or signal.
         */
        __u32 handle;

#define I915_EXEC_FENCE_WAIT            (1<<0)
#define I915_EXEC_FENCE_SIGNAL          (1<<1)
#define __I915_EXEC_FENCE_UNKNOWN_FLAGS (-(I915_EXEC_FENCE_SIGNAL << 1))
        __u32 flags;
};

struct drm_i915_gem_execbuffer2 {
        /**
         * List of gem_exec_object2 structs
         */
        __u64 buffers_ptr;
        __u32 buffer_count;

        /** Offset in the batchbuffer to start execution from. */
        __u32 batch_start_offset;
        /** Bytes used in batchbuffer from batch_start_offset */
        __u32 batch_len;
        __u32 DR1;
        __u32 DR4;
        __u32 num_cliprects;
        /**
         * This is a struct drm_clip_rect *cliprects if I915_EXEC_FENCE_ARRAY
         * is not set.  If I915_EXEC_FENCE_ARRAY is set, then this is a
         * struct drm_i915_gem_exec_fence *fences.
         */
        __u64 cliprects_ptr;
#define I915_EXEC_RING_MASK              (0x3f)
#define I915_EXEC_DEFAULT                (0<<0)
#define I915_EXEC_RENDER                 (1<<0)
#define I915_EXEC_BSD                    (2<<0)
#define I915_EXEC_BLT                    (3<<0)
#define I915_EXEC_VEBOX                  (4<<0)

/* Used for switching the constants addressing mode on gen4+ RENDER ring.
 * Gen6+ only supports relative addressing to dynamic state (default) and
 * absolute addressing.
 *
 * These flags are ignored for the BSD and BLT rings.
 */
#define I915_EXEC_CONSTANTS_MASK        (3<<6)
#define I915_EXEC_CONSTANTS_REL_GENERAL (0<<6) /* default */
#define I915_EXEC_CONSTANTS_ABSOLUTE    (1<<6)
#define I915_EXEC_CONSTANTS_REL_SURFACE (2<<6) /* gen4/5 only */
        __u64 flags;
        __u64 rsvd1; /* now used for context info */
        __u64 rsvd2;
};

/** Resets the SO write offset registers for transform feedback on gen7. */
#define I915_EXEC_GEN7_SOL_RESET        (1<<8)

/** Request a privileged ("secure") batch buffer. Note only available for
 * DRM_ROOT_ONLY | DRM_MASTER processes.
 */
#define I915_EXEC_SECURE                (1<<9)

/** Inform the kernel that the batch is and will always be pinned. This
 * negates the requirement for a workaround to be performed to avoid
 * an incoherent CS (such as can be found on 830/845). If this flag is
 * not passed, the kernel will endeavour to make sure the batch is
 * coherent with the CS before execution. If this flag is passed,
 * userspace assumes the responsibility for ensuring the same.
 */
#define I915_EXEC_IS_PINNED             (1<<10)

/** Provide a hint to the kernel that the command stream and auxiliary
 * state buffers already hold the correct presumed addresses, and so the
 * relocation process may be skipped if no buffers need to be moved in
 * preparation for the execbuffer.
 */
#define I915_EXEC_NO_RELOC              (1<<11)

/** Use the reloc.handle as an index into the exec object array rather
 * than as the per-file handle.
 */
#define I915_EXEC_HANDLE_LUT            (1<<12)

/** Used for switching BSD rings on the platforms with two BSD rings */
#define I915_EXEC_BSD_SHIFT      (13)
#define I915_EXEC_BSD_MASK       (3 << I915_EXEC_BSD_SHIFT)
/* default ping-pong mode */
#define I915_EXEC_BSD_DEFAULT    (0 << I915_EXEC_BSD_SHIFT)
#define I915_EXEC_BSD_RING1      (1 << I915_EXEC_BSD_SHIFT)
#define I915_EXEC_BSD_RING2      (2 << I915_EXEC_BSD_SHIFT)

/** Tell the kernel that the batchbuffer is processed by
 *  the resource streamer.
 */
#define I915_EXEC_RESOURCE_STREAMER     (1<<15)

/* Setting I915_EXEC_FENCE_IN implies that lower_32_bits(rsvd2) represent
 * a sync_file fd to wait upon (in a nonblocking manner) prior to executing
 * the batch.
 *
 * Returns -EINVAL if the sync_file fd cannot be found.
 */
#define I915_EXEC_FENCE_IN              (1<<16)

/* Setting I915_EXEC_FENCE_OUT causes the ioctl to return a sync_file fd
 * in the upper_32_bits(rsvd2) upon success. Ownership of the fd is given
 * to the caller, and it should be closed after use. (The fd is a regular
 * file descriptor and will be cleaned up on process termination. It holds
 * a reference to the request, but nothing else.)
 *
 * The sync_file fd can be combined with other sync_files and passed either
 * to execbuf using I915_EXEC_FENCE_IN, to atomic KMS ioctls (so that a flip
 * will only occur after this request completes), or to other devices.
 *
 * Using I915_EXEC_FENCE_OUT requires use of
 * DRM_IOCTL_I915_GEM_EXECBUFFER2_WR ioctl so that the result is written
 * back to userspace. Failure to do so will cause the out-fence to always
 * be reported as zero, and the real fence fd to be leaked.
 */
#define I915_EXEC_FENCE_OUT             (1<<17)

/*
 * Traditionally the execbuf ioctl has only considered the final element in
 * the execobject[] to be the executable batch. Often though, the client
 * will know the batch object prior to construction, and being able to place
 * it into the execobject[] array first can simplify the relocation tracking.
 * Setting I915_EXEC_BATCH_FIRST tells execbuf to use element 0 of the
 * execobject[] as the batch instead (the default is to use the last
 * element).
 */
#define I915_EXEC_BATCH_FIRST           (1<<18)

/* Setting I915_EXEC_FENCE_ARRAY implies that num_cliprects and cliprects_ptr
 * define an array of i915_gem_exec_fence structures which specify a set of
 * dma fences to wait upon or signal.
 */
#define I915_EXEC_FENCE_ARRAY   (1<<19)

/*
 * Setting I915_EXEC_FENCE_SUBMIT implies that lower_32_bits(rsvd2) represent
 * a sync_file fd to wait upon (in a nonblocking manner) prior to executing
 * the batch.
 *
 * Returns -EINVAL if the sync_file fd cannot be found.
 */
#define I915_EXEC_FENCE_SUBMIT          (1 << 20)

#define __I915_EXEC_UNKNOWN_FLAGS (-(I915_EXEC_FENCE_SUBMIT << 1))

#define I915_EXEC_CONTEXT_ID_MASK       (0xffffffff)
#define i915_execbuffer2_set_context_id(eb2, context) \
        (eb2).rsvd1 = context & I915_EXEC_CONTEXT_ID_MASK
#define i915_execbuffer2_get_context_id(eb2) \
        ((eb2).rsvd1 & I915_EXEC_CONTEXT_ID_MASK)
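
/*
 * Usage sketch (illustrative only; handles come from
 * DRM_IOCTL_I915_GEM_CREATE, the batch object ends with
 * MI_BATCH_BUFFER_END, and relocations/softpinning are elided). Without
 * I915_EXEC_BATCH_FIRST the batch must be the last execobject:
 *
 *      struct drm_i915_gem_exec_object2 obj[2] = {
 *              { .handle = data_handle },
 *              { .handle = batch_handle },     // batch last by default
 *      };
 *      struct drm_i915_gem_execbuffer2 eb = {
 *              .buffers_ptr = (__u64)(uintptr_t)obj,
 *              .buffer_count = 2,
 *              .batch_len = batch_bytes,       // bytes from batch_start_offset
 *              .flags = I915_EXEC_RENDER,
 *      };
 *      i915_execbuffer2_set_context_id(eb, 0); // default per-file context
 *      ioctl(fd, DRM_IOCTL_I915_GEM_EXECBUFFER2, &eb);
 */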

struct drm_i915_gem_pin {
        /** Handle of the buffer to be pinned. */
        __u32 handle;
        __u32 pad;

        /** alignment required within the aperture */
        __u64 alignment;

        /** Returned GTT offset of the buffer. */
        __u64 offset;
};

struct drm_i915_gem_unpin {
        /** Handle of the buffer to be unpinned. */
        __u32 handle;
        __u32 pad;
};

struct drm_i915_gem_busy {
        /** Handle of the buffer to check for busy */
        __u32 handle;

        /** Return busy status
         *
         * A return of 0 implies that the object is idle (after
         * having flushed any pending activity), and a non-zero return that
         * the object is still in-flight on the GPU. (The GPU has not yet
         * signaled completion for all pending requests that reference the
         * object.) An object is guaranteed to become idle eventually (so
         * long as no new GPU commands are executed upon it). Due to the
         * asynchronous nature of the hardware, an object reported
         * as busy may become idle before the ioctl is completed.
         *
         * Furthermore, if the object is busy, which engine is busy is only
         * provided as a guide and only indirectly by reporting its class
         * (there may be more than one engine in each class). There are race
         * conditions which prevent the report of which engines are busy from
         * being always accurate.  However, the converse is not true. If the
         * object is idle, the result of the ioctl, that all engines are idle,
         * is accurate.
         *
         * The returned dword is split into two fields to indicate both
         * the engine classes on which the object is being read, and the
         * engine class on which it is currently being written (if any).
         *
         * The low word (bits 0:15) indicates if the object is being written
         * to by any engine (there can only be one, as the GEM implicit
         * synchronisation rules force writes to be serialised). Only the
         * engine class (offset by 1, I915_ENGINE_CLASS_RENDER is reported as
         * 1 not 0 etc) for the last write is reported.
         *
         * The high word (bits 16:31) is a bitmask of which engine classes
         * are currently reading from the object. Multiple engines may be
         * reading from the object simultaneously.
         *
         * The value of each engine class is the same as specified in the
         * I915_CONTEXT_SET_ENGINES parameter and via perf, i.e.
         * I915_ENGINE_CLASS_RENDER, I915_ENGINE_CLASS_COPY, etc.
         * Some hardware may have parallel execution engines, e.g. multiple
         * media engines, which are mapped to the same class identifier and so
         * are not separately reported for busyness.
         *
         * Caveat emptor:
         * Only the boolean result of this query is reliable; that is whether
         * the object is idle or busy. The report of which engines are busy
         * should be only used as a heuristic.
         */
        __u32 busy;
};
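
/*
 * Decoding sketch (illustrative only; remember the per-engine report is
 * only a heuristic, the boolean busy/idle result is what is reliable):
 *
 *      struct drm_i915_gem_busy busy = { .handle = handle };
 *      ioctl(fd, DRM_IOCTL_I915_GEM_BUSY, &busy);
 *      __u16 write_class = busy.busy & 0xffff; // engine class + 1, 0 if none
 *      __u16 read_mask = busy.busy >> 16;      // bit N set: class N is reading
 */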

/**
 * I915_CACHING_NONE
 *
 * GPU access is not coherent with cpu caches. Default for machines without an
 * LLC.
 */
#define I915_CACHING_NONE               0
/**
 * I915_CACHING_CACHED
 *
 * GPU access is coherent with cpu caches and furthermore the data is cached in
 * last-level caches shared between cpu cores and the gpu GT. Default on
 * machines with HAS_LLC.
 */
#define I915_CACHING_CACHED             1
/**
 * I915_CACHING_DISPLAY
 *
 * Special GPU caching mode which is coherent with the scanout engines.
 * Transparently falls back to I915_CACHING_NONE on platforms where no special
 * cache mode (like write-through or gfdt flushing) is available. The kernel
 * automatically sets this mode when using a buffer as a scanout target.
 * Userspace can manually set this mode to avoid a costly stall and clflush in
 * the hotpath of drawing the first frame.
 */
#define I915_CACHING_DISPLAY            2

struct drm_i915_gem_caching {
        /**
         * Handle of the buffer to set/get the caching level of.
         */
        __u32 handle;

        /**
         * Caching level to apply or return value
         *
         * bits0-15 are for generic caching control (i.e. the above defined
         * values). bits16-31 are reserved for platform-specific variations
         * (e.g. l3$ caching on gen7).
         */
        __u32 caching;
};
1263
1264#define I915_TILING_NONE        0
1265#define I915_TILING_X           1
1266#define I915_TILING_Y           2
1267#define I915_TILING_LAST        I915_TILING_Y
1268
1269#define I915_BIT_6_SWIZZLE_NONE         0
1270#define I915_BIT_6_SWIZZLE_9            1
1271#define I915_BIT_6_SWIZZLE_9_10         2
1272#define I915_BIT_6_SWIZZLE_9_11         3
1273#define I915_BIT_6_SWIZZLE_9_10_11      4
1274/* Not seen by userland */
1275#define I915_BIT_6_SWIZZLE_UNKNOWN      5
1276/* Seen by userland. */
1277#define I915_BIT_6_SWIZZLE_9_17         6
1278#define I915_BIT_6_SWIZZLE_9_10_17      7
1279
1280struct drm_i915_gem_set_tiling {
1281        /** Handle of the buffer to have its tiling state updated */
1282        __u32 handle;
1283
1284        /**
1285         * Tiling mode for the object (I915_TILING_NONE, I915_TILING_X,
1286         * I915_TILING_Y).
1287         *
1288         * This value is to be set on request, and will be updated by the
1289         * kernel on successful return with the actual chosen tiling layout.
1290         *
1291         * The tiling mode may be demoted to I915_TILING_NONE when the system
1292         * has bit 6 swizzling that can't be managed correctly by GEM.
1293         *
1294         * Buffer contents become undefined when changing tiling_mode.
1295         */
1296        __u32 tiling_mode;
1297
1298        /**
1299         * Stride in bytes for the object when in I915_TILING_X or
1300         * I915_TILING_Y.
1301         */
1302        __u32 stride;
1303
1304        /**
1305         * Returned address bit 6 swizzling required for CPU access through
1306         * mmap mapping.
1307         */
1308        __u32 swizzle_mode;
1309};
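
/*
 * A hedged sketch of requesting X tiling, assuming libdrm's drmIoctl(),
 * an existing GEM handle and a stride appropriate for the surface;
 * fall_back_to_linear() is a hypothetical caller-side fallback. The
 * kernel may demote the request, so tiling_mode and swizzle_mode should
 * be read back from the struct on return:
 *
 *        struct drm_i915_gem_set_tiling arg = {
 *                .handle = handle,
 *                .tiling_mode = I915_TILING_X,
 *                .stride = stride,
 *        };
 *
 *        drmIoctl(fd, DRM_IOCTL_I915_GEM_SET_TILING, &arg);
 *        if (arg.tiling_mode != I915_TILING_X)
 *                fall_back_to_linear(handle);
 */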
1310
1311struct drm_i915_gem_get_tiling {
1312        /** Handle of the buffer to get tiling state for. */
1313        __u32 handle;
1314
1315        /**
1316         * Current tiling mode for the object (I915_TILING_NONE, I915_TILING_X,
1317         * I915_TILING_Y).
1318         */
1319        __u32 tiling_mode;
1320
1321        /**
1322         * Returned address bit 6 swizzling required for CPU access through
1323         * mmap mapping.
1324         */
1325        __u32 swizzle_mode;
1326
1327        /**
1328         * Returned address bit 6 swizzling required for CPU access through
1329         * mmap mapping whilst bound.
1330         */
1331        __u32 phys_swizzle_mode;
1332};
1333
1334struct drm_i915_gem_get_aperture {
1335        /** Total size of the aperture used by i915_gem_execbuffer, in bytes */
1336        __u64 aper_size;
1337
1338        /**
1339         * Available space in the aperture used by i915_gem_execbuffer, in
1340         * bytes
1341         */
1342        __u64 aper_available_size;
1343};
1344
1345struct drm_i915_get_pipe_from_crtc_id {
1346        /** ID of CRTC being requested */
1347        __u32 crtc_id;
1348
1349        /** pipe of requested CRTC */
1350        __u32 pipe;
1351};
1352
1353#define I915_MADV_WILLNEED 0
1354#define I915_MADV_DONTNEED 1
1355#define __I915_MADV_PURGED 2 /* internal state */
1356
1357struct drm_i915_gem_madvise {
1358        /** Handle of the buffer to change the backing store advice */
1359        __u32 handle;
1360
1361        /* Advice: either the buffer will be needed again in the near future,
1362         *         or won't be and could be discarded under memory pressure.
1363         */
1364        __u32 madv;
1365
1366        /** Whether the backing store still exists. */
1367        __u32 retained;
1368};
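
/*
 * A minimal sketch, assuming libdrm's drmIoctl() and an existing GEM
 * handle; reupload_contents() is a hypothetical caller-side helper. Mark
 * the buffer WILLNEED again before reuse and check whether the kernel
 * purged the backing store in the meantime:
 *
 *        struct drm_i915_gem_madvise arg = {
 *                .handle = handle,
 *                .madv = I915_MADV_WILLNEED,
 *        };
 *
 *        drmIoctl(fd, DRM_IOCTL_I915_GEM_MADVISE, &arg);
 *        if (!arg.retained)
 *                reupload_contents(handle);
 */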
1369
1370/* flags */
1371#define I915_OVERLAY_TYPE_MASK          0xff
1372#define I915_OVERLAY_YUV_PLANAR         0x01
1373#define I915_OVERLAY_YUV_PACKED         0x02
1374#define I915_OVERLAY_RGB                0x03
1375
1376#define I915_OVERLAY_DEPTH_MASK         0xff00
1377#define I915_OVERLAY_RGB24              0x1000
1378#define I915_OVERLAY_RGB16              0x2000
1379#define I915_OVERLAY_RGB15              0x3000
1380#define I915_OVERLAY_YUV422             0x0100
1381#define I915_OVERLAY_YUV411             0x0200
1382#define I915_OVERLAY_YUV420             0x0300
1383#define I915_OVERLAY_YUV410             0x0400
1384
1385#define I915_OVERLAY_SWAP_MASK          0xff0000
1386#define I915_OVERLAY_NO_SWAP            0x000000
1387#define I915_OVERLAY_UV_SWAP            0x010000
1388#define I915_OVERLAY_Y_SWAP             0x020000
1389#define I915_OVERLAY_Y_AND_UV_SWAP      0x030000
1390
1391#define I915_OVERLAY_FLAGS_MASK         0xff000000
1392#define I915_OVERLAY_ENABLE             0x01000000
1393
1394struct drm_intel_overlay_put_image {
1395        /* various flags and src format description */
1396        __u32 flags;
1397        /* source picture description */
1398        __u32 bo_handle;
1399        /* stride values and offsets are in bytes, buffer relative */
1400        __u16 stride_Y; /* stride for packed formats */
1401        __u16 stride_UV;
1402        __u32 offset_Y; /* offset for packed formats */
1403        __u32 offset_U;
1404        __u32 offset_V;
1405        /* in pixels */
1406        __u16 src_width;
1407        __u16 src_height;
1408        /* to compensate for the scaling factors of partially covered surfaces */
1409        __u16 src_scan_width;
1410        __u16 src_scan_height;
1411        /* output crtc description */
1412        __u32 crtc_id;
1413        __u16 dst_x;
1414        __u16 dst_y;
1415        __u16 dst_width;
1416        __u16 dst_height;
1417};
1418
1419/* flags */
1420#define I915_OVERLAY_UPDATE_ATTRS       (1<<0)
1421#define I915_OVERLAY_UPDATE_GAMMA       (1<<1)
1422#define I915_OVERLAY_DISABLE_DEST_COLORKEY      (1<<2)
1423struct drm_intel_overlay_attrs {
1424        __u32 flags;
1425        __u32 color_key;
1426        __s32 brightness;
1427        __u32 contrast;
1428        __u32 saturation;
1429        __u32 gamma0;
1430        __u32 gamma1;
1431        __u32 gamma2;
1432        __u32 gamma3;
1433        __u32 gamma4;
1434        __u32 gamma5;
1435};
1436
1437/*
1438 * Intel sprite handling
1439 *
1440 * Color keying works with a min/mask/max tuple.  Both source and destination
1441 * color keying is allowed.
1442 *
1443 * Source keying:
1444 * Sprite pixels within the min & max values, masked against the color channels
1445 * specified in the mask field, will be transparent.  All other pixels will
1446 * be displayed on top of the primary plane.  For RGB surfaces, only the min
1447 * and mask fields will be used; ranged compares are not allowed.
1448 *
1449 * Destination keying:
1450 * Primary plane pixels that match the min value, masked against the color
1451 * channels specified in the mask field, will be replaced by corresponding
1452 * pixels from the sprite plane.
1453 *
1454 * Note that source & destination keying are exclusive; only one can be
1455 * active on a given plane.
1456 */
1457
1458#define I915_SET_COLORKEY_NONE          (1<<0) /* Deprecated. Instead set
1459                                                * flags==0 to disable colorkeying.
1460                                                */
1461#define I915_SET_COLORKEY_DESTINATION   (1<<1)
1462#define I915_SET_COLORKEY_SOURCE        (1<<2)
1463struct drm_intel_sprite_colorkey {
1464        __u32 plane_id;
1465        __u32 min_value;
1466        __u32 channel_mask;
1467        __u32 max_value;
1468        __u32 flags;
1469};
1470
1471struct drm_i915_gem_wait {
1472        /** Handle of BO we shall wait on */
1473        __u32 bo_handle;
1474        __u32 flags;
1475        /** Number of nanoseconds to wait. Returns the time remaining. */
1476        __s64 timeout_ns;
1477};
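
/*
 * A hedged sketch of a bounded wait, assuming libdrm's drmIoctl() and an
 * existing GEM handle; handle_timeout() is a hypothetical caller-side
 * helper. A negative timeout_ns waits indefinitely, while -ETIME signals
 * that the object was still busy when the timeout expired:
 *
 *        struct drm_i915_gem_wait wait = {
 *                .bo_handle = handle,
 *                .timeout_ns = 100 * 1000 * 1000,
 *        };
 *
 *        if (drmIoctl(fd, DRM_IOCTL_I915_GEM_WAIT, &wait) == -1 &&
 *            errno == ETIME)
 *                handle_timeout();
 */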
1478
1479struct drm_i915_gem_context_create {
1480        __u32 ctx_id; /* output: id of new context */
1481        __u32 pad;
1482};
1483
1484struct drm_i915_gem_context_create_ext {
1485        __u32 ctx_id; /* output: id of new context */
1486        __u32 flags;
1487#define I915_CONTEXT_CREATE_FLAGS_USE_EXTENSIONS        (1u << 0)
1488#define I915_CONTEXT_CREATE_FLAGS_SINGLE_TIMELINE       (1u << 1)
1489#define I915_CONTEXT_CREATE_FLAGS_UNKNOWN \
1490        (-(I915_CONTEXT_CREATE_FLAGS_SINGLE_TIMELINE << 1))
1491        __u64 extensions;
1492};
1493
1494struct drm_i915_gem_context_param {
1495        __u32 ctx_id;
1496        __u32 size;
1497        __u64 param;
1498#define I915_CONTEXT_PARAM_BAN_PERIOD   0x1
1499#define I915_CONTEXT_PARAM_NO_ZEROMAP   0x2
1500#define I915_CONTEXT_PARAM_GTT_SIZE     0x3
1501#define I915_CONTEXT_PARAM_NO_ERROR_CAPTURE     0x4
1502#define I915_CONTEXT_PARAM_BANNABLE     0x5
1503#define I915_CONTEXT_PARAM_PRIORITY     0x6
1504#define   I915_CONTEXT_MAX_USER_PRIORITY        1023 /* inclusive */
1505#define   I915_CONTEXT_DEFAULT_PRIORITY         0
1506#define   I915_CONTEXT_MIN_USER_PRIORITY        -1023 /* inclusive */
1507        /*
1508         * When using the following param, value should be a pointer to
1509         * drm_i915_gem_context_param_sseu.
1510         */
1511#define I915_CONTEXT_PARAM_SSEU         0x7
1512
1513/*
1514 * Not all clients may want to attempt automatic recovery of a context after
1515 * a hang (for example, some clients may only submit very small incremental
1516 * batches relying on the known logical state of previous batches, which will
1517 * never recover correctly and where each attempt will hang), and so would
1518 * prefer that the context is forever banned instead.
1519 *
1520 * If set to false (0), after a reset, subsequent (and in flight) rendering
1521 * from this context is discarded, and the client will need to create a new
1522 * context to use instead.
1523 *
1524 * If set to true (1), the kernel will automatically attempt to recover the
1525 * context by skipping the hanging batch and executing the next batch starting
1526 * from the default context state (discarding the incomplete logical context
1527 * state lost due to the reset).
1528 *
1529 * On creation, all new contexts are marked as recoverable.
1530 */
1531#define I915_CONTEXT_PARAM_RECOVERABLE  0x8
1532
1533        /*
1534         * The id of the associated virtual memory address space (ppGTT) of
1535         * this context. Can be retrieved and passed to another context
1536         * (on the same fd) for both to use the same ppGTT and so share
1537         * address layouts, and avoid reloading the page tables on context
1538         * switches between themselves.
1539         *
1540         * See DRM_I915_GEM_VM_CREATE and DRM_I915_GEM_VM_DESTROY.
1541         */
1542#define I915_CONTEXT_PARAM_VM           0x9
1543
1544/*
1545 * I915_CONTEXT_PARAM_ENGINES:
1546 *
1547 * Bind this context to operate on this subset of available engines. Henceforth,
1548 * the I915_EXEC_RING selector for DRM_IOCTL_I915_GEM_EXECBUFFER2 operates as
1549 * an index into this array of engines; I915_EXEC_DEFAULT selecting engine[0]
1550 * and upwards. Slots 0...N are filled in using the specified (class, instance).
1551 * Use
1552 *      engine_class: I915_ENGINE_CLASS_INVALID,
1553 *      engine_instance: I915_ENGINE_CLASS_INVALID_NONE
1554 * to specify a gap in the array that can be filled in later, e.g. by a
1555 * virtual engine used for load balancing.
1556 *
1557 * Setting the number of engines bound to the context to 0, by passing a
1558 * zero-sized argument, will revert to the default settings.
1559 *
1560 * See struct i915_context_param_engines.
1561 *
1562 * Extensions:
1563 *   i915_context_engines_load_balance (I915_CONTEXT_ENGINES_EXT_LOAD_BALANCE)
1564 *   i915_context_engines_bond (I915_CONTEXT_ENGINES_EXT_BOND)
1565 */
1566#define I915_CONTEXT_PARAM_ENGINES      0xa
1567/* Must be kept compact -- no holes and well documented */
1568
1569        __u64 value;
1570};
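
/*
 * A minimal sketch of adjusting a context's priority, assuming libdrm's
 * drmIoctl(), an existing ctx_id and an illustrative value of 512;
 * raising priority above the default typically requires CAP_SYS_NICE:
 *
 *        struct drm_i915_gem_context_param p = {
 *                .ctx_id = ctx_id,
 *                .param = I915_CONTEXT_PARAM_PRIORITY,
 *                .value = 512,
 *        };
 *
 *        drmIoctl(fd, DRM_IOCTL_I915_GEM_CONTEXT_SETPARAM, &p);
 */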
1571
1572/**
1573 * Context SSEU programming
1574 *
1575 * It may be necessary for either functional or performance reasons to
1576 * configure a context to run with a reduced number of SSEU (where SSEU
1577 * stands for Slice/Sub-slice/EU).
1578 *
1579 * This is done by configuring SSEU configuration using the below
1580 * @struct drm_i915_gem_context_param_sseu for every supported engine which
1581 * userspace intends to use.
1582 *
1583 * Not all GPUs or engines support this functionality in which case an error
1584 * code -ENODEV will be returned.
1585 *
1586 * Also, the flexibility of possible SSEU configuration permutations varies
1587 * between GPU generations and software imposed limitations. Requesting an
1588 * unsupported combination will return an error code of -EINVAL.
1589 *
1590 * NOTE: When perf/OA is active the context's SSEU configuration is ignored in
1591 * favour of a single global setting.
1592 */
1593struct drm_i915_gem_context_param_sseu {
1594        /*
1595         * Engine class & instance to be configured or queried.
1596         */
1597        struct i915_engine_class_instance engine;
1598
1599        /*
1600         * Unknown flags must be cleared to zero.
1601         */
1602        __u32 flags;
1603#define I915_CONTEXT_SSEU_FLAG_ENGINE_INDEX (1u << 0)
1604
1605        /*
1606         * Mask of slices to enable for the context. Valid values are a subset
1607         * of the bitmask value returned for I915_PARAM_SLICE_MASK.
1608         */
1609        __u64 slice_mask;
1610
1611        /*
1612         * Mask of subslices to enable for the context. Valid values are a
1613         * subset of the bitmask value returned by I915_PARAM_SUBSLICE_MASK.
1614         */
1615        __u64 subslice_mask;
1616
1617        /*
1618         * Minimum/Maximum number of EUs to enable per subslice for the
1619         * context. min_eus_per_subslice must be less than or equal to
1620         * max_eus_per_subslice.
1621         */
1622        __u16 min_eus_per_subslice;
1623        __u16 max_eus_per_subslice;
1624
1625        /*
1626         * Unused for now. Must be cleared to zero.
1627         */
1628        __u32 rsvd;
1629};
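
/*
 * A hedged sketch of restricting the render engine to a single slice,
 * assuming libdrm's drmIoctl(), an existing ctx_id, and subslice/EU
 * values previously read back with DRM_IOCTL_I915_GEM_CONTEXT_GETPARAM;
 * whether this succeeds is platform dependent:
 *
 *        struct drm_i915_gem_context_param_sseu sseu = {
 *                .engine = {
 *                        .engine_class = I915_ENGINE_CLASS_RENDER,
 *                        .engine_instance = 0,
 *                },
 *                .slice_mask = 0x1,
 *                .subslice_mask = subslice_mask,
 *                .min_eus_per_subslice = min_eus,
 *                .max_eus_per_subslice = max_eus,
 *        };
 *        struct drm_i915_gem_context_param p = {
 *                .ctx_id = ctx_id,
 *                .param = I915_CONTEXT_PARAM_SSEU,
 *                .size = sizeof(sseu),
 *                .value = (uintptr_t)&sseu,
 *        };
 *
 *        drmIoctl(fd, DRM_IOCTL_I915_GEM_CONTEXT_SETPARAM, &p);
 */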
1630
1631/*
1632 * i915_context_engines_load_balance:
1633 *
1634 * Enable load balancing across this set of engines.
1635 *
1636 * Into the I915_EXEC_DEFAULT slot [0], a virtual engine is created that,
1637 * when used, will proxy the execbuffer request onto one of the set of
1638 * engines in such a way as to distribute the load evenly across the set.
1639 *
1640 * The set of engines must be compatible (e.g. the same HW class) as they
1641 * will share the same logical GPU context and ring.
1642 *
1643 * To intermix rendering with the virtual engine and direct rendering onto
1644 * the backing engines (bypassing the load balancing proxy), the context must
1645 * be defined to use a single timeline for all engines.
1646 */
1647struct i915_context_engines_load_balance {
1648        struct i915_user_extension base;
1649
1650        __u16 engine_index;
1651        __u16 num_siblings;
1652        __u32 flags; /* all undefined flags must be zero */
1653
1654        __u64 mbz64; /* reserved for future use; must be zero */
1655
1656        struct i915_engine_class_instance engines[0];
1657} __attribute__((packed));
1658
1659#define I915_DEFINE_CONTEXT_ENGINES_LOAD_BALANCE(name__, N__) struct { \
1660        struct i915_user_extension base; \
1661        __u16 engine_index; \
1662        __u16 num_siblings; \
1663        __u32 flags; \
1664        __u64 mbz64; \
1665        struct i915_engine_class_instance engines[N__]; \
1666} __attribute__((packed)) name__
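
/*
 * A hedged sketch of a context balancing between two video engines,
 * assuming libdrm's drmIoctl() and an existing ctx_id; slot 0 of the
 * engine map is left as the documented (INVALID, INVALID_NONE) gap, which
 * the load-balance extension then fills with the virtual engine (see
 * I915_DEFINE_CONTEXT_PARAM_ENGINES below):
 *
 *        I915_DEFINE_CONTEXT_ENGINES_LOAD_BALANCE(balance, 2) = {
 *                .base = { .name = I915_CONTEXT_ENGINES_EXT_LOAD_BALANCE },
 *                .engine_index = 0,
 *                .num_siblings = 2,
 *                .engines = {
 *                        { I915_ENGINE_CLASS_VIDEO, 0 },
 *                        { I915_ENGINE_CLASS_VIDEO, 1 },
 *                },
 *        };
 *        I915_DEFINE_CONTEXT_PARAM_ENGINES(engines, 1) = {
 *                .extensions = (uintptr_t)&balance,
 *                .engines = {
 *                        { I915_ENGINE_CLASS_INVALID,
 *                          I915_ENGINE_CLASS_INVALID_NONE },
 *                },
 *        };
 *        struct drm_i915_gem_context_param p = {
 *                .ctx_id = ctx_id,
 *                .param = I915_CONTEXT_PARAM_ENGINES,
 *                .size = sizeof(engines),
 *                .value = (uintptr_t)&engines,
 *        };
 *
 *        drmIoctl(fd, DRM_IOCTL_I915_GEM_CONTEXT_SETPARAM, &p);
 */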
1667
1668/*
1669 * i915_context_engines_bond:
1670 *
1671 * Construct bonded pairs for execution within a virtual engine.
1672 *
1673 * All engines are equal, but some are more equal than others. Given
1674 * the distribution of resources in the HW, it may be preferable to run
1675 * a request on a given subset of engines in parallel to a request on a
1676 * specific engine. We enable this selection of engines within a virtual
1677 * engine by specifying bonding pairs: for any given master engine we will
1678 * only execute on one of the corresponding siblings within the virtual engine.
1679 *
1680 * Executing a request in parallel on the master engine and a sibling requires
1681 * coordination with I915_EXEC_FENCE_SUBMIT.
1682 */
1683struct i915_context_engines_bond {
1684        struct i915_user_extension base;
1685
1686        struct i915_engine_class_instance master;
1687
1688        __u16 virtual_index; /* index of virtual engine in ctx->engines[] */
1689        __u16 num_bonds;
1690
1691        __u64 flags; /* all undefined flags must be zero */
1692        __u64 mbz64[4]; /* reserved for future use; must be zero */
1693
1694        struct i915_engine_class_instance engines[0];
1695} __attribute__((packed));
1696
1697#define I915_DEFINE_CONTEXT_ENGINES_BOND(name__, N__) struct { \
1698        struct i915_user_extension base; \
1699        struct i915_engine_class_instance master; \
1700        __u16 virtual_index; \
1701        __u16 num_bonds; \
1702        __u64 flags; \
1703        __u64 mbz64[4]; \
1704        struct i915_engine_class_instance engines[N__]; \
1705} __attribute__((packed)) name__
1706
1707struct i915_context_param_engines {
1708        __u64 extensions; /* linked chain of extension blocks, 0 terminates */
1709#define I915_CONTEXT_ENGINES_EXT_LOAD_BALANCE 0 /* see i915_context_engines_load_balance */
1710#define I915_CONTEXT_ENGINES_EXT_BOND 1 /* see i915_context_engines_bond */
1711        struct i915_engine_class_instance engines[0];
1712} __attribute__((packed));
1713
1714#define I915_DEFINE_CONTEXT_PARAM_ENGINES(name__, N__) struct { \
1715        __u64 extensions; \
1716        struct i915_engine_class_instance engines[N__]; \
1717} __attribute__((packed)) name__
1718
1719struct drm_i915_gem_context_create_ext_setparam {
1720#define I915_CONTEXT_CREATE_EXT_SETPARAM 0
1721        struct i915_user_extension base;
1722        struct drm_i915_gem_context_param param;
1723};
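
/*
 * A hedged sketch of creating a context and marking it non-recoverable in
 * a single ioctl via the extension chain, assuming libdrm's drmIoctl():
 *
 *        struct drm_i915_gem_context_create_ext_setparam p_norecover = {
 *                .base = { .name = I915_CONTEXT_CREATE_EXT_SETPARAM },
 *                .param = {
 *                        .param = I915_CONTEXT_PARAM_RECOVERABLE,
 *                        .value = 0,
 *                },
 *        };
 *        struct drm_i915_gem_context_create_ext create = {
 *                .flags = I915_CONTEXT_CREATE_FLAGS_USE_EXTENSIONS,
 *                .extensions = (uintptr_t)&p_norecover,
 *        };
 *
 *        drmIoctl(fd, DRM_IOCTL_I915_GEM_CONTEXT_CREATE_EXT, &create);
 *        ctx_id = create.ctx_id;
 */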
1724
1725struct drm_i915_gem_context_create_ext_clone {
1726#define I915_CONTEXT_CREATE_EXT_CLONE 1
1727        struct i915_user_extension base;
1728        __u32 clone_id;
1729        __u32 flags;
1730#define I915_CONTEXT_CLONE_ENGINES      (1u << 0)
1731#define I915_CONTEXT_CLONE_FLAGS        (1u << 1)
1732#define I915_CONTEXT_CLONE_SCHEDATTR    (1u << 2)
1733#define I915_CONTEXT_CLONE_SSEU         (1u << 3)
1734#define I915_CONTEXT_CLONE_TIMELINE     (1u << 4)
1735#define I915_CONTEXT_CLONE_VM           (1u << 5)
1736#define I915_CONTEXT_CLONE_UNKNOWN -(I915_CONTEXT_CLONE_VM << 1)
1737        __u64 rsvd;
1738};
1739
1740struct drm_i915_gem_context_destroy {
1741        __u32 ctx_id;
1742        __u32 pad;
1743};
1744
1745/*
1746 * DRM_I915_GEM_VM_CREATE -
1747 *
1748 * Create a new virtual memory address space (ppGTT) for use within a context
1749 * on the same file. Extensions can be provided to configure exactly how the
1750 * address space is setup upon creation.
1751 *
1752 * The id of the new VM (bound to the fd) for use with I915_CONTEXT_PARAM_VM
1753 * is returned in the outparam @vm_id.
1754 *
1755 * No flags are currently defined; all bits are reserved and must be zero.
1756 *
1757 * An extension chain may be provided, starting with @extensions, and terminated
1758 * by the @next_extension being 0. Currently, no extensions are defined.
1759 *
1760 * DRM_I915_GEM_VM_DESTROY -
1761 *
1762 * Destroys a previously created VM id, specified in @vm_id.
1763 *
1764 * No extensions or flags are allowed currently, and so must be zero.
1765 */
1766struct drm_i915_gem_vm_control {
1767        __u64 extensions;
1768        __u32 flags;
1769        __u32 vm_id;
1770};
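
/*
 * A minimal sketch of creating one ppGTT and sharing it between two
 * contexts so that they use the same address layout, assuming libdrm's
 * drmIoctl() and two existing context ids ctx_a and ctx_b:
 *
 *        struct drm_i915_gem_vm_control vm = {};
 *        struct drm_i915_gem_context_param p = {
 *                .param = I915_CONTEXT_PARAM_VM,
 *        };
 *
 *        drmIoctl(fd, DRM_IOCTL_I915_GEM_VM_CREATE, &vm);
 *        p.value = vm.vm_id;
 *        p.ctx_id = ctx_a;
 *        drmIoctl(fd, DRM_IOCTL_I915_GEM_CONTEXT_SETPARAM, &p);
 *        p.ctx_id = ctx_b;
 *        drmIoctl(fd, DRM_IOCTL_I915_GEM_CONTEXT_SETPARAM, &p);
 */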
1771
1772struct drm_i915_reg_read {
1773        /*
1774         * Register offset.
1775         * For 64-bit wide registers where the upper 32 bits don't immediately
1776         * follow the lower 32 bits, the offset of the lower 32 bits must
1777         * be specified.
1778         */
1779        __u64 offset;
1780#define I915_REG_READ_8B_WA (1ul << 0)
1781
1782        __u64 val; /* Return value */
1783};
1784
1785/* Known registers:
1786 *
1787 * Render engine timestamp - 0x2358 + 64bit - gen7+
1788 * - Note this register returns an invalid value if read using the default
1789 *   single-instruction 8-byte read; to work around that, pass the flag
1790 *   I915_REG_READ_8B_WA in the offset field.
1791 *
1792 */
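
/*
 * A minimal sketch of reading the render engine timestamp with the
 * workaround flag, assuming libdrm's drmIoctl():
 *
 *        struct drm_i915_reg_read reg = {
 *                .offset = 0x2358 | I915_REG_READ_8B_WA,
 *        };
 *
 *        if (drmIoctl(fd, DRM_IOCTL_I915_REG_READ, &reg) == 0)
 *                timestamp = reg.val;
 */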
1793
1794struct drm_i915_reset_stats {
1795        __u32 ctx_id;
1796        __u32 flags;
1797
1798        /* All resets since boot/module reload, for all contexts */
1799        __u32 reset_count;
1800
1801        /* Number of batches lost when active in GPU, for this context */
1802        __u32 batch_active;
1803
1804        /* Number of batches lost pending for execution, for this context */
1805        __u32 batch_pending;
1806
1807        __u32 pad;
1808};
1809
1810struct drm_i915_gem_userptr {
1811        __u64 user_ptr;
1812        __u64 user_size;
1813        __u32 flags;
1814#define I915_USERPTR_READ_ONLY 0x1
1815#define I915_USERPTR_UNSYNCHRONIZED 0x80000000
1816        /**
1817         * Returned handle for the object.
1818         *
1819         * Object handles are nonzero.
1820         */
1821        __u32 handle;
1822};
1823
1824enum drm_i915_oa_format {
1825        I915_OA_FORMAT_A13 = 1,     /* HSW only */
1826        I915_OA_FORMAT_A29,         /* HSW only */
1827        I915_OA_FORMAT_A13_B8_C8,   /* HSW only */
1828        I915_OA_FORMAT_B4_C8,       /* HSW only */
1829        I915_OA_FORMAT_A45_B8_C8,   /* HSW only */
1830        I915_OA_FORMAT_B4_C8_A16,   /* HSW only */
1831        I915_OA_FORMAT_C4_B8,       /* HSW+ */
1832
1833        /* Gen8+ */
1834        I915_OA_FORMAT_A12,
1835        I915_OA_FORMAT_A12_B8_C8,
1836        I915_OA_FORMAT_A32u40_A4u32_B8_C8,
1837
1838        I915_OA_FORMAT_MAX          /* non-ABI */
1839};
1840
1841enum drm_i915_perf_property_id {
1842        /**
1843         * Open the stream for a specific context handle (as used with
1844         * execbuffer2). A stream opened for a specific context this way
1845         * won't typically require root privileges.
1846         */
1847        DRM_I915_PERF_PROP_CTX_HANDLE = 1,
1848
1849        /**
1850         * A value of 1 requests the inclusion of raw OA unit reports as
1851         * part of stream samples.
1852         */
1853        DRM_I915_PERF_PROP_SAMPLE_OA,
1854
1855        /**
1856         * The value specifies which set of OA unit metrics should be
1857         * configured, defining the contents of any OA unit reports.
1858         */
1859        DRM_I915_PERF_PROP_OA_METRICS_SET,
1860
1861        /**
1862         * The value specifies the size and layout of OA unit reports.
1863         */
1864        DRM_I915_PERF_PROP_OA_FORMAT,
1865
1866        /**
1867         * Specifying this property implicitly requests periodic OA unit
1868         * sampling and (at least on Haswell) the sampling frequency is derived
1869         * from this exponent as follows:
1870         *
1871         *   80ns * 2^(period_exponent + 1)
1872         */
1873        DRM_I915_PERF_PROP_OA_EXPONENT,
1874
1875        DRM_I915_PERF_PROP_MAX /* non-ABI */
1876};
1877
1878struct drm_i915_perf_open_param {
1879        __u32 flags;
1880#define I915_PERF_FLAG_FD_CLOEXEC       (1<<0)
1881#define I915_PERF_FLAG_FD_NONBLOCK      (1<<1)
1882#define I915_PERF_FLAG_DISABLED         (1<<2)
1883
1884        /** The number of u64 (id, value) pairs */
1885        __u32 num_properties;
1886
1887        /**
1888         * Pointer to array of u64 (id, value) pairs configuring the stream
1889         * to open.
1890         */
1891        __u64 properties_ptr;
1892};
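
/*
 * A hedged sketch of opening an OA stream, assuming libdrm's drmIoctl(),
 * a metrics_set_id obtained elsewhere (e.g. read from sysfs), and an
 * illustrative sampling exponent of 16; on success the ioctl returns a
 * new stream fd:
 *
 *        __u64 props[] = {
 *                DRM_I915_PERF_PROP_SAMPLE_OA, 1,
 *                DRM_I915_PERF_PROP_OA_METRICS_SET, metrics_set_id,
 *                DRM_I915_PERF_PROP_OA_FORMAT, I915_OA_FORMAT_A32u40_A4u32_B8_C8,
 *                DRM_I915_PERF_PROP_OA_EXPONENT, 16,
 *        };
 *        struct drm_i915_perf_open_param param = {
 *                .flags = I915_PERF_FLAG_FD_CLOEXEC,
 *                .num_properties = sizeof(props) / (2 * sizeof(__u64)),
 *                .properties_ptr = (uintptr_t)props,
 *        };
 *
 *        int stream_fd = drmIoctl(fd, DRM_IOCTL_I915_PERF_OPEN, &param);
 */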
1893
1894/**
1895 * Enable data capture for a stream that was either opened in a disabled state
1896 * via I915_PERF_FLAG_DISABLED or was later disabled via
1897 * I915_PERF_IOCTL_DISABLE.
1898 *
1899 * Disabling and re-enabling a stream is intended to be cheaper than closing
1900 * and re-opening a stream with the same configuration.
1901 *
1902 * It's undefined whether any pending data for the stream will be lost.
1903 */
1904#define I915_PERF_IOCTL_ENABLE  _IO('i', 0x0)
1905
1906/**
1907 * Disable data capture for a stream.
1908 *
1909 * It is an error to try to read a stream that is disabled.
1910 */
1911#define I915_PERF_IOCTL_DISABLE _IO('i', 0x1)
1912
1913/**
1914 * Common to all i915 perf records
1915 */
1916struct drm_i915_perf_record_header {
1917        __u32 type;
1918        __u16 pad;
1919        __u16 size;
1920};
1921
1922enum drm_i915_perf_record_type {
1923
1924        /**
1925         * Samples are the workhorse record type whose contents are extensible
1926         * and defined when opening an i915 perf stream based on the given
1927         * properties.
1928         *
1929         * Boolean properties following the naming convention
1930         * DRM_I915_PERF_SAMPLE_xyz_PROP request the inclusion of 'xyz' data in
1931         * every sample.
1932         *
1933         * The order of these sample properties given by userspace has no
1934         * effect on the ordering of data within a sample. The order is
1935         * documented here.
1936         *
1937         * struct {
1938         *     struct drm_i915_perf_record_header header;
1939         *
1940         *     { u32 oa_report[]; } && DRM_I915_PERF_PROP_SAMPLE_OA
1941         * };
1942         */
1943        DRM_I915_PERF_RECORD_SAMPLE = 1,
1944
1945        /*
1946         * Indicates that one or more OA reports were not written by the
1947         * hardware. This can happen for example if an MI_REPORT_PERF_COUNT
1948         * command collides with periodic sampling - which would be more likely
1949         * at higher sampling frequencies.
1950         */
1951        DRM_I915_PERF_RECORD_OA_REPORT_LOST = 2,
1952
1953        /**
1954         * An error occurred that resulted in all pending OA reports being lost.
1955         */
1956        DRM_I915_PERF_RECORD_OA_BUFFER_LOST = 3,
1957
1958        DRM_I915_PERF_RECORD_MAX /* non-ABI */
1959};
1960
1961/**
1962 * Structure to upload perf dynamic configuration into the kernel.
1963 */
1964struct drm_i915_perf_oa_config {
1965        /** String formatted like "%08x-%04x-%04x-%04x-%012x" */
1966        char uuid[36];
1967
1968        __u32 n_mux_regs;
1969        __u32 n_boolean_regs;
1970        __u32 n_flex_regs;
1971
1972        /*
1973         * These fields are pointers to tuples of u32 values (register address,
1974         * value). For example the expected length of the buffer pointed by
1975         * mux_regs_ptr is (2 * sizeof(u32) * n_mux_regs).
1976         */
1977        __u64 mux_regs_ptr;
1978        __u64 boolean_regs_ptr;
1979        __u64 flex_regs_ptr;
1980};
1981
1982struct drm_i915_query_item {
1983        __u64 query_id;
1984#define DRM_I915_QUERY_TOPOLOGY_INFO    1
1985#define DRM_I915_QUERY_ENGINE_INFO      2
1986/* Must be kept compact -- no holes and well documented */
1987
1988        /*
1989         * When set to zero by userspace, this is filled with the size of the
1990         * data to be written at the data_ptr pointer. The kernel sets this
1991         * value to a negative value to signal an error on a particular query
1992         * item.
1993         */
1994        __s32 length;
1995
1996        /*
1997         * Unused for now. Must be cleared to zero.
1998         */
1999        __u32 flags;
2000
2001        /*
2002         * Data will be written at the location pointed by data_ptr when the
2003         * value of length matches the length of the data to be written by the
2004         * kernel.
2005         */
2006        __u64 data_ptr;
2007};
2008
2009struct drm_i915_query {
2010        __u32 num_items;
2011
2012        /*
2013         * Unused for now. Must be cleared to zero.
2014         */
2015        __u32 flags;
2016
2017        /*
2018         * This points to an array of num_items drm_i915_query_item structures.
2019         */
2020        __u64 items_ptr;
2021};
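
/*
 * A minimal sketch of the two-step size negotiation used by the query
 * ioctl, assuming libdrm's drmIoctl(); the first call, with length left
 * at zero, asks the kernel for the required size, and the second call
 * fills the allocated buffer:
 *
 *        struct drm_i915_query_item item = {
 *                .query_id = DRM_I915_QUERY_TOPOLOGY_INFO,
 *        };
 *        struct drm_i915_query query = {
 *                .num_items = 1,
 *                .items_ptr = (uintptr_t)&item,
 *        };
 *
 *        drmIoctl(fd, DRM_IOCTL_I915_QUERY, &query);
 *        struct drm_i915_query_topology_info *info = calloc(1, item.length);
 *        item.data_ptr = (uintptr_t)info;
 *        drmIoctl(fd, DRM_IOCTL_I915_QUERY, &query);
 */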
2022
2023/*
2024 * Data written by the kernel with query DRM_I915_QUERY_TOPOLOGY_INFO:
2025 *
2026 * data: contains the 3 pieces of information:
2027 *
2028 * - the slice mask with one bit per slice telling whether a slice is
2029 *   available. The availability of slice X can be queried with the following
2030 *   formula:
2031 *
2032 *           (data[X / 8] >> (X % 8)) & 1
2033 *
2034 * - the subslice mask for each slice with one bit per subslice telling
2035 *   whether a subslice is available. The availability of subslice Y in slice
2036 *   X can be queried with the following formula:
2037 *
2038 *           (data[subslice_offset +
2039 *                 X * subslice_stride +
2040 *                 Y / 8] >> (Y % 8)) & 1
2041 *
2042 * - the EU mask for each subslice in each slice with one bit per EU telling
2043 *   whether an EU is available. The availability of EU Z in subslice Y in
2044 *   slice X can be queried with the following formula:
2045 *
2046 *           (data[eu_offset +
2047 *                 (X * max_subslices + Y) * eu_stride +
2048 *                 Z / 8] >> (Z % 8)) & 1
2049 */
2050struct drm_i915_query_topology_info {
2051        /*
2052         * Unused for now. Must be cleared to zero.
2053         */
2054        __u16 flags;
2055
2056        __u16 max_slices;
2057        __u16 max_subslices;
2058        __u16 max_eus_per_subslice;
2059
2060        /*
2061         * Offset in data[] at which the subslice masks are stored.
2062         */
2063        __u16 subslice_offset;
2064
2065        /*
2066         * Stride at which each of the subslice masks for each slice are
2067         * stored.
2068         */
2069        __u16 subslice_stride;
2070
2071        /*
2072         * Offset in data[] at which the EU masks are stored.
2073         */
2074        __u16 eu_offset;
2075
2076        /*
2077         * Stride at which each of the EU masks for each subslice are stored.
2078         */
2079        __u16 eu_stride;
2080
2081        __u8 data[];
2082};
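
/*
 * A hedged sketch of decoding the masks with the formulas documented
 * above, assuming info points at a successfully filled struct
 * drm_i915_query_topology_info:
 *
 *        static bool
 *        slice_available(const struct drm_i915_query_topology_info *info, int s)
 *        {
 *                return (info->data[s / 8] >> (s % 8)) & 1;
 *        }
 *
 *        static bool
 *        subslice_available(const struct drm_i915_query_topology_info *info,
 *                           int s, int ss)
 *        {
 *                return (info->data[info->subslice_offset +
 *                                   s * info->subslice_stride +
 *                                   ss / 8] >> (ss % 8)) & 1;
 *        }
 */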
2083
2084/**
2085 * struct drm_i915_engine_info
2086 *
2087 * Describes one engine and its capabilities as known to the driver.
2088 */
2089struct drm_i915_engine_info {
2090        /** Engine class and instance. */
2091        struct i915_engine_class_instance engine;
2092
2093        /** Reserved field. */
2094        __u32 rsvd0;
2095
2096        /** Engine flags. */
2097        __u64 flags;
2098
2099        /** Capabilities of this engine. */
2100        __u64 capabilities;
2101#define I915_VIDEO_CLASS_CAPABILITY_HEVC                (1 << 0)
2102#define I915_VIDEO_AND_ENHANCE_CLASS_CAPABILITY_SFC     (1 << 1)
2103
2104        /** Reserved fields. */
2105        __u64 rsvd1[4];
2106};
2107
2108/**
2109 * struct drm_i915_query_engine_info
2110 *
2111 * Engine info query enumerates all engines known to the driver by filling in
2112 * an array of struct drm_i915_engine_info structures.
2113 */
2114struct drm_i915_query_engine_info {
2115        /** Number of struct drm_i915_engine_info structs following. */
2116        __u32 num_engines;
2117
2118        /** MBZ */
2119        __u32 rsvd[3];
2120
2121        /** Marker for drm_i915_engine_info structures. */
2122        struct drm_i915_engine_info engines[];
2123};
2124
2125#if defined(__cplusplus)
2126}
2127#endif
2128
2129#endif /* _UAPI_I915_DRM_H_ */
2130